From dd0fb2ff4df8df208ab071637ef5322368837466 Mon Sep 17 00:00:00 2001 From: olmai Date: Tue, 12 Mar 2024 09:18:13 +0100 Subject: [PATCH 01/39] new extra_functions: get_spike_features_of_chunk and get_spike_features_loss_of_chunk changed some imports to fix import bug --- src/CompNeuroPy/experiment.py | 2 +- src/CompNeuroPy/extra_functions.py | 180 ++++++++++++++++++++++++- src/CompNeuroPy/generate_simulation.py | 2 +- src/CompNeuroPy/monitors.py | 4 +- 4 files changed, 184 insertions(+), 4 deletions(-) diff --git a/src/CompNeuroPy/experiment.py b/src/CompNeuroPy/experiment.py index 234b826..30b22ce 100644 --- a/src/CompNeuroPy/experiment.py +++ b/src/CompNeuroPy/experiment.py @@ -1,6 +1,6 @@ from ANNarchy import reset from CompNeuroPy.monitors import RecordingTimes -from CompNeuroPy import CompNeuroMonitors +from CompNeuroPy.monitors import CompNeuroMonitors from CompNeuroPy import model_functions as mf from copy import deepcopy diff --git a/src/CompNeuroPy/extra_functions.py b/src/CompNeuroPy/extra_functions.py index 9547447..9db85da 100644 --- a/src/CompNeuroPy/extra_functions.py +++ b/src/CompNeuroPy/extra_functions.py @@ -6,6 +6,7 @@ from CompNeuroPy import system_functions as sf from CompNeuroPy import model_functions as mf from CompNeuroPy.generate_model import CompNeuroModel +from CompNeuroPy.experiment import CompNeuroExp import matplotlib as mpl import matplotlib.pyplot as plt from matplotlib import cm @@ -2189,7 +2190,7 @@ def efel_loss(trace1, trace2, feature_list): return loss ### calculate and return the mean of the differences of the features - features_1, features_2 = efel.getFeatureValues( + features_1, features_2 = efel.get_feature_values( [trace1, trace2], feature_list, raise_warnings=False, @@ -2221,3 +2222,180 @@ def efel_loss(trace1, trace2, feature_list): if verbose: print(f"loss: {loss}") return loss + + +def get_spike_features_of_chunk(chunk: int, results: CompNeuroExp._ResultsCl): + """ + Get the features of the spikes of a chunk of the 
results of a CompNeuroExp. + + !!! warning + The results data dict has to contain the population name as key "pop_name". + The spikes have to be recorded. + + Args: + chunk (int): + index of the chunk + results (CompNeuroExp._ResultsCl): + results of the experiment + + Returns: + dict: + dictionary with the features of the spikes + """ + ### get number of spikes + spike_dict = results.recordings[chunk][f"{results.data['pop_name']};spike"] + t, _ = af.my_raster_plot(spike_dict) + nbr_spikes = len(t) + ### get time of 1st, 2nd, 3rd spike + if nbr_spikes > 0: + time_1st_spike = t[0] + if nbr_spikes > 1: + time_2nd_spike = t[1] + if nbr_spikes > 2: + time_3rd_spike = t[2] + else: + time_3rd_spike = None + else: + time_2nd_spike = None + time_3rd_spike = None + else: + time_1st_spike = None + time_2nd_spike = None + time_3rd_spike = None + ### get time of last spike + if nbr_spikes > 0: + time_last_spike = t[-1] + else: + time_last_spike = None + ### get CV of ISI + if nbr_spikes > 1: + isi = np.diff(t) + cv_isi = np.std(isi) / np.mean(isi) + else: + cv_isi = None + + return { + "spike_count": nbr_spikes, + "time_to_first_spike": time_1st_spike, + "time_to_second_spike": time_2nd_spike, + "time_to_third_spike": time_3rd_spike, + "time_to_last_spike": time_last_spike, + "ISI_CV": cv_isi, + } + + +def get_spike_features_loss_of_chunk( + chunk: int, + results1: CompNeuroExp._ResultsCl, + results2: CompNeuroExp._ResultsCl, + chunk2: None | int = None, + feature_list: list[str] | None = None, +): + """ + Calculate the loss/difference between the spike features of two chunks of the + results of CompNeuroExp. + + !!! warning + The results data dict has to contain the population name as key "pop_name". + The spikes have to be recorded. 
+ + Args: + chunk (int): + index of the chunk + results1 (CompNeuroExp._ResultsCl): + results of the first experiment + results2 (CompNeuroExp._ResultsCl): + results of the second experiment + chunk2 (None|int): + index of the chunk of the second results, if None the same as chunk + feature_list (list[str]|None): + list of feature names which should be used to calculate the loss, if None + the default list is used + + Returns: + loss (float): + loss/difference between the spike features of the two chunks + """ + verbose = False + if chunk2 is None: + chunk2 = chunk + + ### get recording duration of chunk + nbr_periods = results1.recording_times.nbr_periods( + chunk=chunk, compartment=results1.data["pop_name"] + ) + chunk_duration_ms = 0 + chunk_duration_idx = 0 + for period in range(nbr_periods): + chunk_duration_ms += np.abs( + np.diff( + results1.recording_times.time_lims( + chunk=chunk, compartment=results1.data["pop_name"], period=period + ) + ) + ) + chunk_duration_idx += np.abs( + np.diff( + results1.recording_times.idx_lims( + chunk=chunk, compartment=results1.data["pop_name"], period=period + ) + ) + ) + + ### set a plausible "maximum" absolute difference for each feature + diff_max: dict[str, float] = { + "spike_count": chunk_duration_idx, + "time_to_first_spike": chunk_duration_ms, + "time_to_second_spike": chunk_duration_ms, + "time_to_third_spike": chunk_duration_ms, + "time_to_last_spike": chunk_duration_ms, + "ISI_CV": 1, + } + if verbose: + print(f"\ndiff_max: {diff_max}") + + ### set a plausible "close" absolute difference for each feature + diff_close: dict[str, float] = { + "spike_count": np.ceil(chunk_duration_ms / 200), + "time_to_first_spike": np.clip(chunk_duration_ms * 0.1, 5, 50), + "time_to_second_spike": np.clip(chunk_duration_ms * 0.1, 5, 50), + "time_to_third_spike": np.clip(chunk_duration_ms * 0.1, 5, 50), + "time_to_last_spike": np.clip(chunk_duration_ms * 0.1, 5, 50), + "ISI_CV": 0.1, + } + if verbose: + print(f"\ndiff_close: 
{diff_close}\n") + + ### catch if features from feature_list are not supported + if feature_list is None: + feature_list = list(diff_max.keys()) + features_not_supported = [ + feature for feature in feature_list if feature not in diff_max + ] + if features_not_supported: + raise ValueError(f"Features not supported: {features_not_supported}") + + ### calculate and return the mean of the differences of the features + features_1 = get_spike_features_of_chunk(chunk, results1) + features_2 = get_spike_features_of_chunk(chunk2, results2) + + if verbose: + print(f"\nfeatures_1: {features_1}\n") + print(f"features_2: {features_2}\n") + loss = 0.0 + for feature in feature_list: + ### if both features are None use 0 + if features_1[feature] is None and features_2[feature] is None: + diff = 0.0 + ### if single feature is None use diff_max + elif features_1[feature] is None or features_2[feature] is None: + diff = diff_max[feature] + else: + diff = float(np.absolute(features_1[feature] - features_2[feature])) + ### scale the difference by diff_close and add to loss + loss += diff / diff_close[feature] + loss /= len(feature_list) + + if verbose: + print(f"loss: {loss}") + return loss diff --git a/src/CompNeuroPy/generate_simulation.py b/src/CompNeuroPy/generate_simulation.py index e23de0c..3b845d5 100644 --- a/src/CompNeuroPy/generate_simulation.py +++ b/src/CompNeuroPy/generate_simulation.py @@ -1,6 +1,6 @@ from ANNarchy import get_time from CompNeuroPy import extra_functions as ef -from CompNeuroPy import CompNeuroMonitors +from CompNeuroPy.monitors import CompNeuroMonitors import numpy as np from typing import Callable diff --git a/src/CompNeuroPy/monitors.py b/src/CompNeuroPy/monitors.py index 48046f5..3bcefab 100644 --- a/src/CompNeuroPy/monitors.py +++ b/src/CompNeuroPy/monitors.py @@ -696,7 +696,7 @@ def all(self): """ return self.recording_times_list - def nr_periods(self, chunk=None, compartment=None): + def nbr_periods(self, chunk=None, compartment=None): """ Get the 
number of recording periods (start-pause) of a specified chunk/model compartment. @@ -717,6 +717,8 @@ def nr_periods(self, chunk=None, compartment=None): compartment = self.__check_compartment__(compartment, chunk) return self._get_nr_periods(chunk, compartment) + nr_periods = nbr_periods + def combine_periods( self, recordings: list, From 8649c09f1e92591231bfbc7bb68ae3ceefa0b7ab Mon Sep 17 00:00:00 2001 From: olmai Date: Tue, 12 Mar 2024 10:26:32 +0100 Subject: [PATCH 02/39] OptNeuron: adjusted recording period --- src/CompNeuroPy/opt_neuron.py | 44 +++++++++++++++++++++++------------ 1 file changed, 29 insertions(+), 15 deletions(-) diff --git a/src/CompNeuroPy/opt_neuron.py b/src/CompNeuroPy/opt_neuron.py index 88cece8..4b6101a 100644 --- a/src/CompNeuroPy/opt_neuron.py +++ b/src/CompNeuroPy/opt_neuron.py @@ -183,7 +183,9 @@ def __init__( self.bads_params_dict = bads_params_dict self.loss_history = [] self.start_time = time() - self.recording_period = recording_period + self.recording_period_str = self._get_recording_period_string( + recording_period + ) ### if using deap pop size is the number of individuals for the optimization if method == "deap": @@ -230,6 +232,28 @@ def __init__( self.monitors = monitors self.experiment = experiment(monitors=monitors) + def _get_recording_period_string(self, recording_period: float | None): + """ + Get the recording period string for the CompNeuroMonitors. If there is no + recording period or if there is only the variable "spike" recorded, the + recording period string is empty. + + Args: + recording_period (float, optional): + The recording period for the simulation in ms. Default: None. + + Returns: + recording_period_str (str): + The recording period string for the CompNeuroMonitors. 
+ """ + recording_period_str = ( + f";{recording_period}" + if recording_period is not None + and ("spike" not in self.record or len(self.record) > 1) + else "" + ) + return recording_period_str + def _get_lower_upper_p0(self): """ Returns the lower and upper bounds and the initial values for the cma @@ -401,15 +425,9 @@ def _generate_models(self, popsize=1): ### create monitors if len(self.record) > 0: - recording_period_str = ( - f";{self.recording_period}" - if self.recording_period is not None - and ("spike" not in self.record or len(self.record) > 1) - else "" - ) monitors = CompNeuroMonitors( { - f"{pop_name}{recording_period_str}": self.record + f"{pop_name}{self.recording_period_str}": self.record for pop_name in [ model.populations[0], target_model.populations[0], @@ -438,14 +456,10 @@ def _generate_models(self, popsize=1): ) ### create monitors if len(self.record) > 0: - recording_period_str = ( - f";{self.recording_period}" - if self.recording_period is not None - and ("spike" not in self.record or len(self.record) > 1) - else "" - ) monitors = CompNeuroMonitors( - {f"{model.populations[0]}{recording_period_str}": self.record} + { + f"{model.populations[0]}{self.recording_period_str}": self.record + } ) return model, target_model, monitors From 3ef7af63d79340561b03e310975faa892eaf9de0 Mon Sep 17 00:00:00 2001 From: olmai Date: Thu, 14 Mar 2024 18:04:52 +0100 Subject: [PATCH 03/39] new fitCorbit neuron VClampParamSearch now solves for external current --- src/CompNeuroPy/extra_functions.py | 267 ++++++++---------- .../experimental_models/fit_Corbit_nm.py | 51 ++++ 2 files changed, 174 insertions(+), 144 deletions(-) diff --git a/src/CompNeuroPy/extra_functions.py b/src/CompNeuroPy/extra_functions.py index 02bae73..e102bee 100644 --- a/src/CompNeuroPy/extra_functions.py +++ b/src/CompNeuroPy/extra_functions.py @@ -1060,9 +1060,10 @@ def __init__( self, neuron_model: Neuron, equations: str = """ - C*dv/dt = k*(v - v_r)*(v - v_t) - u + C*dv/dt = k*(v - 
v_r)*(v - v_t) - u + I du/dt = a*(b*(v - v_r) - u) """, + external_current_var: str = "I", bounds: dict[str, tuple[float, float]] = { "C": (0.1, 100), "v_r": (-90, -40), @@ -1090,9 +1091,14 @@ def __init__( equations (str, optional): The equations whose parameters should be obtained. Default: Izhikevich 2007 neuron model + external_current_var (str, optional): + The name of the variable in the neuron model which is used as the + external current. Has to be used in the neuron model and the given + equations Default: "I" bounds (dict, optional): - The bounds for the parameters. For each parameter a bound should be - given! Default: Izhikevich 2007 neuron model + The bounds for the parameters. For each parameter of the equation a + bound should be given (except for the external current variable)! + Default: Izhikevich 2007 neuron model p0 (dict, optional): The initial guess for the parameters. Dict keys should be the same as the keys of bounds. The values can be either a single number for each @@ -1131,6 +1137,7 @@ def __init__( self._verbose_extreme = False ### store the given neuron model and a voltage clamp version of it self.neuron_model = neuron_model + self.external_current_var = external_current_var self._neuron_model = deepcopy(neuron_model) self._neuron_model_clamp = self._get_neuron_model_clamp() @@ -1160,9 +1167,9 @@ def __init__( if self.do_plot: sf.create_dir("/".join(plot_file.split("/")[:-1])) - ### create the functions for v_clamp_inst and v_clamp_hold using the given + ### create the functions for I_clamp_inst and I_clamp_hold using the given ### izhikevich equations - self._f_inst, self._f_hold, self._f_variables = self._create_v_clamp_functions() + self._f_inst, self._f_hold, self._f_variables = self._create_I_clamp_functions() ### create the voltage step arrays self._v_0_arr, self._v_step_arr = self._create_voltage_step_arrays() @@ -1173,25 +1180,25 @@ def __init__( mf.cnp_clear() self._model_normal, self._model_clamp = self._create_model() - ### 
perform resting state and voltage step simulations to obtain v_clamp_inst, - ### v_clamp_hold and v_rest - self._v_clamp_inst_arr = None - self._v_clamp_hold_arr = None + ### perform resting state and voltage step simulations to obtain I_clamp_inst, + ### I_clamp_hold and v_rest + self._I_clamp_inst_arr = None + self._I_clamp_hold_arr = None if self.verbose: print("Performing simulations...") ( self._v_rest, - self._v_clamp_inst_arr, - self._v_clamp_hold_arr, + self._I_clamp_inst_arr, + self._I_clamp_hold_arr, self._v_step_unique, - self._v_clamp_hold_unique, + self._I_clamp_hold_unique, ) = self._simulations() - ### tune the free paramters of the functions for v_clamp_inst and v_clamp_hold + ### tune the free paramters of the functions for I_clamp_inst and I_clamp_hold ### to fit the data if self.verbose: print("Tuning parameters...") - self._p_opt = self._tune_v_clamp_functions() + self._p_opt = self._tune_I_clamp_functions() self.p_opt = { param_name: self._p_opt.get(param_name, None) for param_name in self.bounds.keys() @@ -1218,7 +1225,8 @@ def __init__( ) ### create a neuron model with the tuned parameters and the given equations - ### then run the simulations again with this neuron model + ### then run the simulations again with this neuron model to do the plots + ### with the tuned parameters if self.verbose: print("Running simulations with tuned parameters...") mf.cnp_clear() @@ -1262,24 +1270,28 @@ def _create_neuron_model_with_tuned_parameters(self): the neuron model with the tuned parameters and the given equations """ ### create the neuron with the tuned parameters, if a parameter is not tuned - ### use the mid of the bounds (these parameters should not affect v_clamp_inst - ### and v_clamp_hold) + ### use the mid of the bounds (these parameters should not affect I_clamp_inst + ### and I_clamp_hold) parameters = "\n".join( [ f"{key} = {self._p_opt.get(key,sum(self.bounds[key])/2)}" for key in self.bounds.keys() ] ) + ### also add the external current 
variable + parameters = parameters + "\n" + f"{self.external_current_var} = 0" neuron_mondel = Neuron( parameters=parameters, equations=self.equations + "\nr=0", ) + if self.verbose: + print(f"Neuron model with tuned parameters:\n{neuron_mondel}") return neuron_mondel - def _tune_v_clamp_functions(self): + def _tune_I_clamp_functions(self): """ - Tune the free paramters of the functions for v_clamp_inst and v_clamp_hold + Tune the free paramters of the functions for I_clamp_inst and I_clamp_hold to fit the data. """ ### get the names of the free parameters which will be tuned @@ -1290,7 +1302,7 @@ def _tune_v_clamp_functions(self): sub_var_names_list.append(str(var)) ### target array for the error function below - target_arr = np.concatenate([self._v_clamp_inst_arr, self._v_clamp_hold_unique]) + target_arr = np.concatenate([self._I_clamp_inst_arr, self._I_clamp_hold_unique]) ### create a function for the error def error_function(x): @@ -1390,16 +1402,16 @@ def error_function_deap(population): return result_dict - def _create_v_clamp_functions(self): + def _create_I_clamp_functions(self): """ - Create the functions for v_clamp_inst and v_clamp_hold using the given + Create the functions for I_clamp_inst and I_clamp_hold using the given izhikevich equations. 
Returns: f_inst (Callable): - Function for v_clamp_inst + Function for I_clamp_inst f_hold (Callable): - Function for v_clamp_hold + Function for I_clamp_hold variables (list): List of variables used for the functions """ @@ -1416,8 +1428,7 @@ def _create_v_clamp_functions(self): ### values variables_sympy_dict = {key: Symbol(key) for key in variables_name_list} - ### also create sympy symbols for v_clamp, v_0 and v_step - variables_sympy_dict["v_clamp"] = Symbol("v_clamp") + ### also create sympy symbols for v_0 and v_step variables_sympy_dict["v_0"] = Symbol("v_0") variables_sympy_dict["v_step"] = Symbol("v_step") @@ -1428,11 +1439,11 @@ def _create_v_clamp_functions(self): for line_idx, line in enumerate(eq_line_list): left_side = line.split("=")[0] right_side = line.split("=")[1] - ### check if line contains dv/dt, replace it with v_clamp and add v_clamp - ### to variables_to_solve_for_list, also set instant_update to True + ### check if line contains dv/dt, replace it with 0 and add external current + ### variable to variables_to_solve_for_list, also set instant_update to True if "dv/dt" in line: - variables_to_solve_for_list.append("v_clamp") - left_side = left_side.replace("dv/dt", "v_clamp") + variables_to_solve_for_list.append(self.external_current_var) + left_side = left_side.replace("dv/dt", "0") instant_update_list.append(True) ### check if line contains any other derivative with syntax "d/dt" ### using re, replace it with 0 and add the variable to @@ -1465,8 +1476,8 @@ def _create_v_clamp_functions(self): eq_sympy_list_hold_v_0, variables_to_solve_for_list, "holding v_0" ) - ### 2nd for v_clamp_inst set v to v_step only in equaitons which are - ### updated instantaneously (v_clamp and all non-derivatives), for all + ### 2nd for I_clamp_inst set v to v_step only in equations which are + ### updated instantaneously (I_clamp and all non-derivatives), for all ### derivatives use the solution for holding v_0 eq_sympy_list_inst = deepcopy(eq_sympy_list) for 
line_idx, line in enumerate(eq_sympy_list_inst): @@ -1489,7 +1500,7 @@ def _create_v_clamp_functions(self): eq_sympy_list_inst, variables_to_solve_for_list, "step from v_0 to v_step" ) - ### 3rd for v_clamp_hold (i.e. holding v_step) set v to v_step in all + ### 3rd for I_clamp_hold (i.e. holding v_step) set v to v_step in all ### equations eq_sympy_list_hold = deepcopy(eq_sympy_list) for line_idx, line in enumerate(eq_sympy_list_hold): @@ -1501,21 +1512,22 @@ def _create_v_clamp_functions(self): eq_sympy_list_hold, variables_to_solve_for_list, "holding v_step" ) - ### get the equations for v_clamp_inst and v_clamp_hold - eq_v_clamp_inst = solution_inst[variables_sympy_dict["v_clamp"]] - eq_v_clamp_hold = solution_hold[variables_sympy_dict["v_clamp"]] + ### get the equations for I_clamp_inst and I_clamp_hold (i.e. the external + ### current variable) + eq_I_clamp_inst = solution_inst[variables_sympy_dict[self.external_current_var]] + eq_I_clamp_hold = solution_hold[variables_sympy_dict[self.external_current_var]] if self.verbose: - print(f"Equation for v_clamp_inst: {factor(eq_v_clamp_inst)}") - print(f"Equation for v_clamp_hold: {factor(eq_v_clamp_hold)}") + print(f"Equation for I_clamp_inst: {factor(eq_I_clamp_inst)}") + print(f"Equation for I_clamp_hold: {factor(eq_I_clamp_hold)}") - ### create functions for v_clamp_inst and v_clamp_hold - ### 1st obtain all variables from the equations for v_clamp_inst and v_clamp_hold + ### create functions for I_clamp_inst and I_clamp_hold + ### 1st obtain all variables from the equations for I_clamp_inst and I_clamp_hold f_variables = list( - set(list(eq_v_clamp_inst.free_symbols) + list(eq_v_clamp_hold.free_symbols)) + set(list(eq_I_clamp_inst.free_symbols) + list(eq_I_clamp_hold.free_symbols)) ) ### 2nd create a function for each equation - f_inst = lambdify(f_variables, eq_v_clamp_inst) - f_hold = lambdify(f_variables, eq_v_clamp_hold) + f_inst = lambdify(f_variables, eq_I_clamp_inst) + f_hold = lambdify(f_variables, 
eq_I_clamp_hold) return f_inst, f_hold, f_variables @@ -1572,15 +1584,15 @@ def _get_variables_from_eq(self, eq: str): def _simulations(self): """ - Perform the resting state and voltage step simulations to obtain v_clamp_inst, - v_clamp_hold and v_rest. + Perform the resting state and voltage step simulations to obtain I_clamp_inst, + I_clamp_hold and v_rest. Returns: v_rest (float): resting state voltage - v_clamp_inst (np.array): + I_clamp_inst (np.array): array of the voltage clamp values directly after the voltage step - v_clamp_hold (np.array): + I_clamp_hold (np.array): array of the voltage clamp values after the holding period """ @@ -1592,71 +1604,72 @@ def _simulations(self): simulate(duration) get_population("pop_clamp").v = self._v_step_arr simulate(self._timestep) - v_clamp_inst_arr = get_population("pop_clamp").v_clamp + I_clamp_inst_arr = get_population("pop_clamp").I_clamp simulate(duration - self._timestep) - v_clamp_hold_arr = get_population("pop_clamp").v_clamp + I_clamp_hold_arr = get_population("pop_clamp").I_clamp v_rest = get_population("pop_normal").v[0] ### get unique values of v_step and their indices v_step_unique, v_step_unique_idx = np.unique( self._v_step_arr, return_index=True ) - ### get the corresponding values of v_clamp_hold (because it does only depend om + ### get the corresponding values of I_clamp_hold (because it does only depend on ### v_step) - v_clamp_hold_unique = v_clamp_hold_arr[v_step_unique_idx] + I_clamp_hold_unique = I_clamp_hold_arr[v_step_unique_idx] - if self.do_plot and not isinstance(self._v_clamp_inst_arr, type(None)): + if self.do_plot and not isinstance(self._I_clamp_inst_arr, type(None)): + plt.close("all") plt.figure(figsize=(6.4 * 3, 4.8 * 2)) - ### create a 2D color-coded plot of the data for v_clamp_inst and v_clamp_hold + ### create a 2D color-coded plot of the data for I_clamp_inst and I_clamp_hold x = self._v_0_arr y = self._v_step_arr - ### create 2 subplots for original v_clamp_inst and 
v_clamp_hold + ### create 2 subplots for original I_clamp_inst and I_clamp_hold plt.subplot(231) - self._plot_v_clamp_subplot( + self._plot_I_clamp_subplot( x, y, - self._v_clamp_inst_arr, - "v_clamp_inst original", + self._I_clamp_inst_arr, + "I_clamp_inst original", ) plt.subplot(234) - self._plot_v_clamp_subplot( + self._plot_I_clamp_subplot( x, y, - self._v_clamp_hold_arr, - "v_clamp_hold original", + self._I_clamp_hold_arr, + "I_clamp_hold original", ) - ### create 2 subplots for tuned v_clamp_inst and v_clamp_hold + ### create 2 subplots for tuned I_clamp_inst and I_clamp_hold plt.subplot(232) - self._plot_v_clamp_subplot( + self._plot_I_clamp_subplot( x, y, - v_clamp_inst_arr, - "v_clamp_inst tuned", + I_clamp_inst_arr, + "I_clamp_inst tuned", ) plt.subplot(235) - self._plot_v_clamp_subplot( + self._plot_I_clamp_subplot( x, y, - v_clamp_hold_arr, - "v_clamp_hold tuned", + I_clamp_hold_arr, + "I_clamp_hold tuned", ) ### create 2 subplots for differences plt.subplot(233) - self._plot_v_clamp_subplot( + self._plot_I_clamp_subplot( x, y, - self._v_clamp_inst_arr - v_clamp_inst_arr, - "v_clamp_inst diff", + self._I_clamp_inst_arr - I_clamp_inst_arr, + "I_clamp_inst diff", ) plt.subplot(236) - self._plot_v_clamp_subplot( + self._plot_I_clamp_subplot( x, y, - self._v_clamp_hold_arr - v_clamp_hold_arr, - "v_clamp_hold diff", + self._I_clamp_hold_arr - I_clamp_hold_arr, + "I_clamp_hold diff", ) plt.tight_layout() @@ -1665,17 +1678,17 @@ def _simulations(self): self.plot_file.split(".")[0] + "_data." 
+ self.plot_file.split(".")[1], dpi=300, ) - plt.close() + plt.close("all") return ( v_rest, - v_clamp_inst_arr, - v_clamp_hold_arr, + I_clamp_inst_arr, + I_clamp_hold_arr, v_step_unique, - v_clamp_hold_unique, + I_clamp_hold_unique, ) - def _plot_v_clamp_subplot(self, x, y, c, label): + def _plot_I_clamp_subplot(self, x, y, c, label): plt.title(label) ci = c @@ -1838,9 +1851,9 @@ def _get_neuron_model_clamp(self): def _adjust_equations_for_voltage_clamp(self, eq_line_list: list): """ - Replaces the 'dv/dt' or 'v+=' equation with a voltage clamp version in which the - new variable 'v_clamp' is calculated from the right side of the 'dv/dt' or 'v+=' - equation. + Replaces the 'dv/dt' equation with a voltage clamp version (dv/dt=0) in which the + new variable 'I_clamp' is obtained by solving the 'dv/dt' equation for its + external current variable. Args: eq_line_list (list): @@ -1866,81 +1879,46 @@ def _adjust_equations_for_voltage_clamp(self, eq_line_list: list): ### remove whitespaces eq_v = eq_v.replace(" ", "") - ### split eqatuion at ":" to separate flags + ### split eqatuion at ":" to ignore flags eq_v_split = eq_v.split(":") eq_v = eq_v_split[0] - ### check if flags are present - if len(eq_v_split) == 1: - flags = "" - else: - flags = ":" + eq_v_split[1] ### adjust the equation for voltage clamp - if "+=" in eq_v: - eq_v, eq_v_clamp = self._adjust_equation_for_voltage_clamp_plus(eq_v, flags) - else: - eq_v, eq_v_clamp = self._adjust_equation_for_voltage_clamp_dvdt(eq_v, flags) + eq_v, eq_I_clamp = self._adjust_equation_for_voltage_clamp_dvdt(eq_v) ### delete old equation from equation list using the index of the equation eq_line_list.pop(line_is_v_list.index(True)) ### insert new equation at the same position eq_line_list.insert(line_is_v_list.index(True), eq_v) - ### insert new equation for "v_clamp" at the same position - eq_line_list.insert(line_is_v_list.index(True), eq_v_clamp) + ### insert new equation for "I_clamp" at the same position + 
eq_line_list.insert(line_is_v_list.index(True), eq_I_clamp) return eq_line_list - def _adjust_equation_for_voltage_clamp_plus(self, eq_v: str, flags: str): - """ - Convert the v-update equation using "v+=" into a voltage clamp version. - - Args: - eq_v (str): - the equation string for updating v (without flags) - flags (str): - the flags of the equation string - - Returns: - eq_v (str): - the adjusted equation string for updating v (without flags) - eq_v_clamp (str): - the equation string for "v_clamp" (with flags) - """ - ### split equations at "=" to separate left and right side - eq_v_left, eq_v_right = eq_v.split("=") - ### set right side to zero and combine equation again with "=" - eq_v = eq_v_left + "=" + "0" - ### create new equation for "v_clamp" with right side of original equation - eq_v_clamp = "v_clamp=" + eq_v_right + flags - - return eq_v, eq_v_clamp - - def _adjust_equation_for_voltage_clamp_dvdt(self, eq_v: str, flags: str): + def _adjust_equation_for_voltage_clamp_dvdt(self, eq_v: str): """ Convert the v-update equation using "dv/dt" into a voltage clamp version. + !!! warning + Equation needs to contain dv/dt and the external current variable. + Args: eq_v (str): - the equation string for updating v (without flags) - flags (str): - the flags of the equation string + the equation string for updating v (without flags and whitespace) Returns: eq_v (str): the adjusted equation string for updating v (without flags) - eq_v_clamp (str): - the equation string for "v_clamp" (with flags) + eq_I_clamp (str): + the equation string for "I_clamp" """ - ### if equation starts with "dv/dt=" do the same as for "v+=" - if eq_v.startswith("dv/dt="): - return self._adjust_equation_for_voltage_clamp_plus(eq_v, flags) ### if equation doesn't start with "dv/dt=" --> need to rearrange equation - ### i.e. 
solve the equation for dv/dt - eq_v = eq_v.replace("dv/dt", "delta_v") + ### set dv/dt to zero and solve the equation for the external current variable + ### (will be I_clamp) + eq_v = eq_v.replace("dv/dt", "0") ### split the equation at "=" and move everything on one side (other side = 0) - ### replace the whole right side with "right_side" making solving easier left_side, right_side = eq_v.split("=") - eq_v_one_side = f"(right_side) - {left_side}" + eq_v_one_side = f"{right_side} - {left_side}" ### prepare the sympy equation generation attributes_name_list = self._get_neuron_model_attributes(self._neuron_model) @@ -1951,30 +1929,31 @@ def _adjust_equation_for_voltage_clamp_dvdt(self, eq_v: str, flags: str): key: attributes_tuple[attributes_name_list.index(key)] for key in attributes_name_list } - ### further create symbols for delta_v and right_side - attributes_sympy_dict["delta_v"] = Symbol("delta_v") - attributes_sympy_dict["right_side"] = Symbol("right_side") ### now creating the sympy equation eq_sympy = sympify(eq_v_one_side) - ### solve the equation for delta_v - result = solve(eq_sympy, attributes_sympy_dict["delta_v"], dict=True) + ### solve the equation for the external current variable + if self.verbose: + print(f"attributes_sympy_dict: {attributes_sympy_dict}") + result = solve( + eq_sympy, attributes_sympy_dict[self.external_current_var], dict=True + ) if len(result) != 1: - raise ValueError("Could not solve equation of neuronmodel for dv/dt!") + raise ValueError( + f"Could not solve equation of neuronmodel for external current variable {self.external_current_var}!" 
+ ) ### convert result to string - result = str(result[0][attributes_sympy_dict["delta_v"]]) - - ### replace "right_side" by the actual right side in brackets - result = result.replace("right_side", f"({right_side})") + result = str(result[0][attributes_sympy_dict[self.external_current_var]]) ### create new equation for dv/dt eq_v = "dv/dt = 0" - ### create new equation for "v_clamp" with the equation solved for dv/dt - eq_v_clamp = "v_clamp=" + result + flags + ### create new equation for "I_clamp" with the equation solved for the external + ### current variable + eq_I_clamp = "I_clamp=" + result - return eq_v, eq_v_clamp + return eq_v, eq_I_clamp def _get_line_is_v(self, line: str): """ diff --git a/src/CompNeuroPy/neuron_models/experimental_models/fit_Corbit_nm.py b/src/CompNeuroPy/neuron_models/experimental_models/fit_Corbit_nm.py index ec397b3..bc66f24 100644 --- a/src/CompNeuroPy/neuron_models/experimental_models/fit_Corbit_nm.py +++ b/src/CompNeuroPy/neuron_models/experimental_models/fit_Corbit_nm.py @@ -432,3 +432,54 @@ name="_Izhikevich2007_Corbit12", description="Simple neuron model equations from Izhikevich (2007) adjusted version to fit the striatal FSI neuron model from Corbit et al. 
(2016) should be able to produce late spiking.", ) + +_Izhikevich2007_Corbit13 = Neuron( + parameters=""" + ### base parameters + C = 0 + k = 0 + v_r = 0 + v_t = 0 + a = 0 + b = 0 + c = 0 + d = 0 + v_peak = 30 + ### after spike current parameters + a_uu = 0 + dd = 0 + ### slow currents parameters + a_s = 0 + a_n = 0 + b_n = 0 + ### input current + I_app = 0 + ### synaptic current parameters + tau_ampa = 1 + tau_gaba = 1 + E_ampa = 0 + E_gaba = -90 + ### input current scaling + a_I = 1 + """, + equations=""" + dg_ampa/dt = -g_ampa/tau_ampa + dg_gaba/dt = -g_gaba/tau_gaba + I = a_I*(I_app - neg(g_ampa*(v - E_ampa)) - pos(g_gaba*(v - E_gaba))) + + C * dv/dt = k*(v - v_r)*(v - v_t) - u - pos(uu*(v - E_gaba)) - n + I + du/dt = a*(b*(v - v_r) - u) + duu/dt = -a_uu*uu + + ds/dt = a_s*(I - s) + dn/dt = a_n*(b_n*(I - s) - n) + """, + spike="v >= v_peak", + reset=""" + v = c + u = u + d + uu = uu + dd + """, + name="_Izhikevich2007_Corbit13", + description="Simple neuron model equations from Izhikevich (2007) adjusted version to fit the striatal FSI neuron model from Corbit et al. 
(2016) should be able to produce late spiking.", +) From baf249b63ec08000823f99d2f6485126bb2e36a1 Mon Sep 17 00:00:00 2001 From: olimaol Date: Fri, 15 Mar 2024 08:05:07 +0100 Subject: [PATCH 04/39] OptNeuron: run returns now all parameters from variable bounds in separate dict --- src/CompNeuroPy/opt_neuron.py | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/src/CompNeuroPy/opt_neuron.py b/src/CompNeuroPy/opt_neuron.py index 2bfd001..d277a9d 100644 --- a/src/CompNeuroPy/opt_neuron.py +++ b/src/CompNeuroPy/opt_neuron.py @@ -1323,6 +1323,7 @@ def run( best["std"] = fit["std"] best["results"] = fit["results"] best["results_soll"] = self.results_soll + best["parameters"] = self._get_final_parameters(best) self.results = best ### create loss history array @@ -1351,6 +1352,36 @@ def run( return best + def _get_final_parameters(self, best): + """ + Returns the final parameters as dictionary. + + Args: + best (dict): + dictionary containing the optimized parameters (as keys) and other keys. + + Returns: + final_parameters (dict): + dictionary containing all parameters of the variable bounds (as keys) + and their optimized values. + """ + final_parameters = {} + ### first all optimized variables (bounds=list) + for param_name, param_bounds in self.variables_bounds.items(): + if isinstance(param_bounds, list): + final_parameters[param_name] = best[param_name] + else: + final_parameters[param_name] = param_bounds + + ### now all string variables (bounds=str) + for param_name, param_bounds in self.variables_bounds.items(): + if isinstance(param_bounds, str): + final_parameters[param_name] = ef.evaluate_expression_with_dict( + param_bounds, final_parameters + ) + + return final_parameters + def _run_with_deap(self, max_evals, deap_plot_file): """ Runs the optimization with deap. 
@@ -1361,6 +1392,10 @@ def _run_with_deap(self, max_evals, deap_plot_file): deap_plot_file (str): the name of the figure which will be saved and shows the logbook + + Returns: + Dictionary containing the best parameters, the logbook, the last population + of individuals and the best fitness. """ return self._deap_cma.run( From bcfa757cb6ad72782d809f9ffda787256ed0ef00 Mon Sep 17 00:00:00 2001 From: olmai Date: Wed, 20 Mar 2024 12:53:30 +0100 Subject: [PATCH 05/39] extra_functions: get_spike_features_of_chunk: fixed bug with times --- src/CompNeuroPy/extra_functions.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/CompNeuroPy/extra_functions.py b/src/CompNeuroPy/extra_functions.py index e102bee..44f529a 100644 --- a/src/CompNeuroPy/extra_functions.py +++ b/src/CompNeuroPy/extra_functions.py @@ -2250,11 +2250,11 @@ def get_spike_features_of_chunk(chunk: int, results: CompNeuroExp._ResultsCl): nbr_spikes = len(t) ### get time of 1st, 2nd, 3rd spike if nbr_spikes > 0: - time_1st_spike = t[0] + time_1st_spike = t[0] * results.recordings[chunk]["dt"] if nbr_spikes > 1: - time_2nd_spike = t[1] + time_2nd_spike = t[1] * results.recordings[chunk]["dt"] if nbr_spikes > 2: - time_3rd_spike = t[2] + time_3rd_spike = t[2] * results.recordings[chunk]["dt"] else: time_3rd_spike = None else: @@ -2266,12 +2266,12 @@ def get_spike_features_of_chunk(chunk: int, results: CompNeuroExp._ResultsCl): time_3rd_spike = None ### get time of last spike if nbr_spikes > 0: - time_last_spike = t[-1] + time_last_spike = t[-1] * results.recordings[chunk]["dt"] else: time_last_spike = None ### get CV of ISI if nbr_spikes > 1: - isi = np.diff(t) + isi = np.diff(t * results.recordings[chunk]["dt"]) cv_isi = np.std(isi) / np.mean(isi) else: cv_isi = None From 1505ada5b45cbe98cfdb9e1b5f4db49e8056a01b Mon Sep 17 00:00:00 2001 From: olmai Date: Wed, 20 Mar 2024 14:13:46 +0100 Subject: [PATCH 06/39] updated docs --- docs/examples/deap_cma.md | 156 
+++++++++++++++++++++++++++ docs/main/optimize_neuron.md | 2 +- mkdocs.yml | 1 + src/CompNeuroPy/examples/deap_cma.py | 141 ++++++++++++++++++++++++ src/CompNeuroPy/extra_functions.py | 55 +++++++++- src/CompNeuroPy/opt_neuron.py | 16 +-- 6 files changed, 359 insertions(+), 12 deletions(-) create mode 100644 docs/examples/deap_cma.md create mode 100644 src/CompNeuroPy/examples/deap_cma.py diff --git a/docs/examples/deap_cma.md b/docs/examples/deap_cma.md new file mode 100644 index 0000000..1b45ab3 --- /dev/null +++ b/docs/examples/deap_cma.md @@ -0,0 +1,156 @@ +## Introduction +This example demonstrates how to use the DeapCma class to optimize parameters. + +## Code +```python +from CompNeuroPy import DeapCma +import numpy as np + + +### for DeapCma we need to define the evaluate_function +def evaluate_function(population): + """ + Calculate the loss for a population of individuals. + + Args: + population (np.ndarray): + population of individuals (i.e., parameter sets) to evaluate + + Returns: + loss_values (list[tuple]): + list of tuples, where each tuple contains the loss for an individual of the + population + """ + loss_list = [] + ### the population is a list of individuals + for individual in population: + ### the individual is a list of parameters + p0, p1, p2 = individual + ### calculate the loss of the individual + loss_of_individual = float((p0 - 3) ** 2 + (p1 - 7) ** 2 + (p2 - (-2)) ** 2) + ### insert the loss of the individual into the list of tuples + loss_list.append((loss_of_individual,)) + + return loss_list + + +def get_source_solutions(): + """ + DeapCma can use source solutions to initialize the optimization process. This + function returns an example of source solutions. + + Source solutions are a list of tuples, where each tuple contains the parameters of + an individual (np.ndarray) and its loss (float). 
+ + Returns: + source_solutions (list[tuple]): + list of tuples, where each tuple contains the parameters of an individual + and its loss + """ + source_solutions_parameters = np.array( + [ + [1, 2, 3], + [3, 5, 3], + [5, 7, 3], + [7, 9, 3], + [9, 10, 3], + [-1, -2, -3], + [-3, -5, -3], + [-5, -7, -3], + [-7, -9, -3], + [-9, -10, -3], + ] + ) + source_solutions_losses = evaluate_function(source_solutions_parameters) + source_solutions = [ + (source_solutions_parameters[idx], source_solutions_losses[idx][0]) + for idx in range(len(source_solutions_parameters)) + ] + + return source_solutions + + +def main(): + ### define lower bounds of paramters to optimize + lb = np.array([-10, -10, -10]) + + ### define upper bounds of paramters to optimize + ub = np.array([10, 10, 10]) + + ### create an "minimal" instance of the DeapCma class + deap_cma = DeapCma( + lower=lb, + upper=ub, + evaluate_function=evaluate_function, + ) + + ### create an instance of the DeapCma class using all optional attributes + ### to initialize one could give a p0 array (same shape as lower and upper) or use + ### source solutions (as shown here) + deap_cma_optional = DeapCma( + lower=lb, + upper=ub, + evaluate_function=evaluate_function, + max_evals=1000, + p0=None, + param_names=["a", "b", "c"], + learn_rate_factor=0.5, + damping_factor=0.5, + verbose=False, + plot_file="logbook_optional.png", + cma_params_dict={}, + source_solutions=get_source_solutions(), + ) + + ### run the optimization, since max_evals was not defined during initialization of + ### the DeapCma instance, it has to be defined here + ### it automatically saves a plot file showing the loss over the generations + deap_cma_result = deap_cma.run(max_evals=1000) + + ### run the optimization with all optional attributes + deap_cma_optional_result = deap_cma_optional.run() + + ### print what deap_cma_result contains + print(f"Dict from run function contains: {list(deap_cma_result.keys())}") + + ### print the best parameters and its 
loss, since we did not define the names of the + ### parameters during initialization of the DeapCma instance, the names are param0, + ### param1, param2 + best_param_dict = { + param_name: deap_cma_result[param_name] + for param_name in ["param0", "param1", "param2"] + } + print(f"Best parameters from first optimization: {best_param_dict}") + print( + f"Loss of best parameters from first optimization: {deap_cma_result['best_fitness']}" + ) + + ### print the same for the second optimization + best_param_dict = { + param_name: deap_cma_optional_result[param_name] + for param_name in ["a", "b", "c"] + } + print(f"Best parameters from second optimization: {best_param_dict}") + print( + f"Loss of best parameters from second optimization: {deap_cma_optional_result['best_fitness']}" + ) + + return 1 + + +if __name__ == "__main__": + main() +``` + +## Conosole Output +```console +$ python deap_cma.py +ANNarchy 4.7 (4.7.3b) on linux (posix). + 27%|█████████████████████████████████▊ | 266/1000 [00:00<00:00, 1618.63gen/s, best loss: 0.00000] + 44%|███████████████████████████████████████████████████████▎ | 436/1000 [00:00<00:00, 1576.76gen/s, best loss: 0.00000] +Dict from run function contains: ['param0', 'param1', 'param2', 'logbook', 'deap_pop', 'best_fitness'] +Best parameters from first optimization: {'param0': 3.0, 'param1': 7.0, 'param2': -2.0} +Loss of best parameters from first optimization: 0.0 +Best parameters from second optimization: {'a': 3.0, 'b': 7.0, 'c': -2.0} +Loss of best parameters from second optimization: 0.0 +``` \ No newline at end of file diff --git a/docs/main/optimize_neuron.md b/docs/main/optimize_neuron.md index 6dca039..91ba519 100644 --- a/docs/main/optimize_neuron.md +++ b/docs/main/optimize_neuron.md @@ -41,7 +41,7 @@ opt = OptNeuron( A full example is available in the [Examples](../examples/opt_neuron.md). 
## Run the optimization -To run the optimization simply call the _run()_ function of the [`OptNeuron`](#CompNeuroPy.opt_neuron.OptNeuron) object. +To run the optimization simply call the [_run()_](optimize_neuron.md#CompNeuroPy.opt_neuron.OptNeuron.run) function of the [`OptNeuron`](#CompNeuroPy.opt_neuron.OptNeuron) object. This returns the optimized parameters and more. ## Define the experiment You have to define a [`CompNeuroExp`](define_experiment.md#CompNeuroPy.experiment.CompNeuroExp) object containing a _run()_ function. In the _run()_ function simulations and recordings are performed. diff --git a/mkdocs.yml b/mkdocs.yml index 814b765..2d05224 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -74,4 +74,5 @@ nav: - Define Experiments: 'examples/experiment.md' - DBS Simulator: 'examples/dbs.md' - Optimize a neuron model: 'examples/opt_neuron.md' + - Cma Optimization: 'examples/deap_cma.md' - License: 'license.md' \ No newline at end of file diff --git a/src/CompNeuroPy/examples/deap_cma.py b/src/CompNeuroPy/examples/deap_cma.py new file mode 100644 index 0000000..54b1649 --- /dev/null +++ b/src/CompNeuroPy/examples/deap_cma.py @@ -0,0 +1,141 @@ +""" +This example demonstrates how to use the DeapCma class to optimize parameters. +""" + +from CompNeuroPy import DeapCma +import numpy as np + + +### for DeapCma we need to define the evaluate_function +def evaluate_function(population): + """ + Calculate the loss for a population of individuals. 
+ + Args: + population (np.ndarray): + population of individuals (i.e., parameter sets) to evaluate + + Returns: + loss_values (list[tuple]): + list of tuples, where each tuple contains the loss for an individual of the + population + """ + loss_list = [] + ### the population is a list of individuals + for individual in population: + ### the individual is a list of parameters + p0, p1, p2 = individual + ### calculate the loss of the individual + loss_of_individual = float((p0 - 3) ** 2 + (p1 - 7) ** 2 + (p2 - (-2)) ** 2) + ### insert the loss of the individual into the list of tuples + loss_list.append((loss_of_individual,)) + + return loss_list + + +def get_source_solutions(): + """ + DeapCma can use source solutions to initialize the optimization process. This + function returns an example of source solutions. + + Source solutions are a list of tuples, where each tuple contains the parameters of + an individual (np.ndarray) and its loss (float). + + Returns: + source_solutions (list[tuple]): + list of tuples, where each tuple contains the parameters of an individual + and its loss + """ + source_solutions_parameters = np.array( + [ + [1, 2, 3], + [3, 5, 3], + [5, 7, 3], + [7, 9, 3], + [9, 10, 3], + [-1, -2, -3], + [-3, -5, -3], + [-5, -7, -3], + [-7, -9, -3], + [-9, -10, -3], + ] + ) + source_solutions_losses = evaluate_function(source_solutions_parameters) + source_solutions = [ + (source_solutions_parameters[idx], source_solutions_losses[idx][0]) + for idx in range(len(source_solutions_parameters)) + ] + + return source_solutions + + +def main(): + ### define lower bounds of paramters to optimize + lb = np.array([-10, -10, -10]) + + ### define upper bounds of paramters to optimize + ub = np.array([10, 10, 10]) + + ### create an "minimal" instance of the DeapCma class + deap_cma = DeapCma( + lower=lb, + upper=ub, + evaluate_function=evaluate_function, + ) + + ### create an instance of the DeapCma class using all optional attributes + ### to initialize one could 
give a p0 array (same shape as lower and upper) or use + ### source solutions (as shown here) + deap_cma_optional = DeapCma( + lower=lb, + upper=ub, + evaluate_function=evaluate_function, + max_evals=1000, + p0=None, + param_names=["a", "b", "c"], + learn_rate_factor=0.5, + damping_factor=0.5, + verbose=False, + plot_file="logbook_optional.png", + cma_params_dict={}, + source_solutions=get_source_solutions(), + ) + + ### run the optimization, since max_evals was not defined during initialization of + ### the DeapCma instance, it has to be defined here + ### it automatically saves a plot file showing the loss over the generations + deap_cma_result = deap_cma.run(max_evals=1000) + + ### run the optimization with all optional attributes + deap_cma_optional_result = deap_cma_optional.run() + + ### print what deap_cma_result contains + print(f"Dict from run function contains: {list(deap_cma_result.keys())}") + + ### print the best parameters and its loss, since we did not define the names of the + ### parameters during initialization of the DeapCma instance, the names are param0, + ### param1, param2 + best_param_dict = { + param_name: deap_cma_result[param_name] + for param_name in ["param0", "param1", "param2"] + } + print(f"Best parameters from first optimization: {best_param_dict}") + print( + f"Loss of best parameters from first optimization: {deap_cma_result['best_fitness']}" + ) + + ### print the same for the second optimization + best_param_dict = { + param_name: deap_cma_optional_result[param_name] + for param_name in ["a", "b", "c"] + } + print(f"Best parameters from second optimization: {best_param_dict}") + print( + f"Loss of best parameters from second optimization: {deap_cma_optional_result['best_fitness']}" + ) + + return 1 + + +if __name__ == "__main__": + main() diff --git a/src/CompNeuroPy/extra_functions.py b/src/CompNeuroPy/extra_functions.py index 44f529a..2da825a 100644 --- a/src/CompNeuroPy/extra_functions.py +++ 
b/src/CompNeuroPy/extra_functions.py @@ -683,13 +683,52 @@ class DeapCma: """ Class to run the deap Covariance Matrix Adaptation Evolution Strategy optimization. + Using the [CMAES](https://deap.readthedocs.io/en/master/api/algo.html#module-deap.cma) algorithm from [deap](https://github.com/deap/deap) + + * Fortin, F. A., De Rainville, F. M., Gardner, M. A. G., Parizeau, M., & Gagné, C. (2012). DEAP: Evolutionary algorithms made easy. The Journal of Machine Learning Research, 13(1), 2171-2175. [pdf](https://www.jmlr.org/papers/volume13/fortin12a/fortin12a.pdf) + Attributes: deap_dict (dict): Dictionary containing the toolbox, the hall of fame, the statistics, the lower and upper bounds, the parameter names, the inverse scaler and the strategy. + + Examples: + For complete example see [here](../examples/deap_cma.md) + ```python + from CompNeuroPy import DeapCma + import numpy as np + + + ### for DeapCma we need to define the evaluate_function + def evaluate_function(population): + loss_list = [] + ### the population is a list of individuals which are lists of parameters + for individual in population: + loss_of_individual = float(individual[0] + individual[1] + individual[2]) + loss_list.append((loss_of_individual,)) + return loss_list + + + ### define lower bounds of paramters to optimize + lb = np.array([0, 0, 0]) + + ### define upper bounds of paramters to optimize + ub = np.array([10, 10, 10]) + + ### create an "minimal" instance of the DeapCma class + deap_cma = DeapCma( + lower=lb, + upper=ub, + evaluate_function=evaluate_function, + ) + + ### run the optimization + deap_cma_result = deap_cma.run(max_evals=1000) + ``` """ + @check_types() def __init__( self, lower: np.ndarray, @@ -722,7 +761,8 @@ def __init__( Initial guess for the parameters. By default the mean of lower and upper bounds. param_names (None | list[str], optional): - Names of the parameters. By default None. + Names of the parameters. By default None, i.e. 
the parameters are named + "param0", "param1", ... learn_rate_factor (float, optional): Learning rate factor (decrease -> slower). By default 1. damping_factor (float, optional): @@ -737,7 +777,10 @@ def __init__( details source_solutions (list[tuple[np.ndarray, float]], optional): List of tuples with the parameters and losses of source solutions. These - solutions are used to initialize the covariance matrix. By default []. + solutions are used to initialize the covariance matrix. Using source + solutions ignores the initial guess p0 and sets the cma parameter + 'cmatrix' (which will also be ignored if given in cma_params_dict). By + default []. """ ### store attributes self.max_evals = max_evals @@ -893,8 +936,10 @@ def run( Returns: best (dict): - Dictionary containing the best parameters, the logbook, the last population - of individuals and the best fitness. + Dictionary containing the best parameters (as key and value pairs), + the logbook of the optimization (key = 'logbook'), the last population + of individuals (key = 'deap_pop') and the best fitness (key = + 'best_fitness'). 
""" ### get attributes @@ -2241,7 +2286,7 @@ def get_spike_features_of_chunk(chunk: int, results: CompNeuroExp._ResultsCl): results of the experiment Returns: - dict: + spike_features (dict): dictionary with the features of the spikes """ ### get number of spikes diff --git a/src/CompNeuroPy/opt_neuron.py b/src/CompNeuroPy/opt_neuron.py index 99db7f3..8100ded 100644 --- a/src/CompNeuroPy/opt_neuron.py +++ b/src/CompNeuroPy/opt_neuron.py @@ -1318,12 +1318,16 @@ def run( best (dict): dictionary containing the optimized parameters (as keys) and: - - "loss": the loss - - "all_loss": the individual losses of the get_loss_function - - "std": the SD of the loss (in case of noisy models with multiple - runs per loss calculation) - - "results": the results generated by the experiment - - "results_soll": the target results + - "loss" (float): the loss (of best run) + - "all_loss" (list): the individual losses of the get_loss_function + - "std" (float): the SD of the loss (in case of noisy models with + multiple runs per loss calculation) + - "results" (CompNeuroExp._ResultsCl): the results generated by the + experiment + - "results_soll" (as given by the user or CompNeuroExp._ResultsCl): the + target results + - "parameters" (dict): all parameters given in the variable bounds and + their optimized values """ self.verbose = False self.verbose_run = verbose From 41fa0033481f6452db89dfe9e6dca107988c798e Mon Sep 17 00:00:00 2001 From: olmai Date: Thu, 21 Mar 2024 10:17:02 +0100 Subject: [PATCH 07/39] DeapCma: - sig0 and hard_bounds can now be specified - improved verbose --- docs/examples/deap_cma.md | 93 +++++++++++++++------------- src/CompNeuroPy/examples/deap_cma.py | 64 +++++++++---------- src/CompNeuroPy/extra_functions.py | 79 +++++++++++++++++++---- 3 files changed, 145 insertions(+), 91 deletions(-) diff --git a/docs/examples/deap_cma.md b/docs/examples/deap_cma.md index 1b45ab3..7d4a1d1 100644 --- a/docs/examples/deap_cma.md +++ b/docs/examples/deap_cma.md @@ -34,7 
+34,7 @@ def evaluate_function(population): return loss_list -def get_source_solutions(): +def get_source_solutions(lb, ub): """ DeapCma can use source solutions to initialize the optimization process. This function returns an example of source solutions. @@ -47,35 +47,28 @@ def get_source_solutions(): list of tuples, where each tuple contains the parameters of an individual and its loss """ - source_solutions_parameters = np.array( - [ - [1, 2, 3], - [3, 5, 3], - [5, 7, 3], - [7, 9, 3], - [9, 10, 3], - [-1, -2, -3], - [-3, -5, -3], - [-5, -7, -3], - [-7, -9, -3], - [-9, -10, -3], - ] - ) + ### create random solutions + source_solutions_parameters = np.random.uniform(0, 1, (100, 3)) * (ub - lb) + lb + ### evaluate the random solutions source_solutions_losses = evaluate_function(source_solutions_parameters) + ### create a list of tuples, where each tuple contains the parameters of an + ### individual and its loss source_solutions = [ (source_solutions_parameters[idx], source_solutions_losses[idx][0]) for idx in range(len(source_solutions_parameters)) ] + ### only use the best 10 as source solutions + source_solutions = sorted(source_solutions, key=lambda x: x[1])[:10] return source_solutions def main(): ### define lower bounds of paramters to optimize - lb = np.array([-10, -10, -10]) + lb = np.array([-10, -10, 0]) ### define upper bounds of paramters to optimize - ub = np.array([10, 10, 10]) + ub = np.array([10, 15, 5]) ### create an "minimal" instance of the DeapCma class deap_cma = DeapCma( @@ -85,21 +78,23 @@ def main(): ) ### create an instance of the DeapCma class using all optional attributes - ### to initialize one could give a p0 array (same shape as lower and upper) or use - ### source solutions (as shown here) + ### to initialize one could give a p0 array (same shape as lower and upper) and a + ### sig0 value or use source solutions (as shown here) deap_cma_optional = DeapCma( lower=lb, upper=ub, evaluate_function=evaluate_function, max_evals=1000, 
p0=None, + sig0=None, param_names=["a", "b", "c"], - learn_rate_factor=0.5, - damping_factor=0.5, - verbose=False, + learn_rate_factor=1, + damping_factor=1, + verbose=True, plot_file="logbook_optional.png", cma_params_dict={}, - source_solutions=get_source_solutions(), + source_solutions=get_source_solutions(lb=lb, ub=ub), + hard_bounds=True, ) ### run the optimization, since max_evals was not defined during initialization of @@ -108,32 +103,29 @@ def main(): deap_cma_result = deap_cma.run(max_evals=1000) ### run the optimization with all optional attributes - deap_cma_optional_result = deap_cma_optional.run() - - ### print what deap_cma_result contains - print(f"Dict from run function contains: {list(deap_cma_result.keys())}") + deap_cma_optional_result = deap_cma_optional.run(verbose=False) ### print the best parameters and its loss, since we did not define the names of the ### parameters during initialization of the DeapCma instance, the names are param0, - ### param1, param2 + ### param1, param2, also print everything that is in the dict returned by the run best_param_dict = { param_name: deap_cma_result[param_name] for param_name in ["param0", "param1", "param2"] } - print(f"Best parameters from first optimization: {best_param_dict}") - print( - f"Loss of best parameters from first optimization: {deap_cma_result['best_fitness']}" - ) + print("\nFirst (minimal) optimization:") + print(f"Dict from run function contains: {list(deap_cma_result.keys())}") + print(f"Best parameters: {best_param_dict}") + print(f"Loss of best parameters: {deap_cma_result['best_fitness']}\n") ### print the same for the second optimization best_param_dict = { param_name: deap_cma_optional_result[param_name] for param_name in ["a", "b", "c"] } - print(f"Best parameters from second optimization: {best_param_dict}") - print( - f"Loss of best parameters from second optimization: {deap_cma_optional_result['best_fitness']}" - ) + print("Second optimization (with all optional attributes):") 
+ print(f"Dict from run function contains: {list(deap_cma_optional_result.keys())}") + print(f"Best parameters: {best_param_dict}") + print(f"Loss of best parameters: {deap_cma_optional_result['best_fitness']}") return 1 @@ -146,11 +138,28 @@ if __name__ == "__main__": ```console $ python deap_cma.py ANNarchy 4.7 (4.7.3b) on linux (posix). - 27%|█████████████████████████████████▊ | 266/1000 [00:00<00:00, 1618.63gen/s, best loss: 0.00000] - 44%|███████████████████████████████████████████████████████▎ | 436/1000 [00:00<00:00, 1576.76gen/s, best loss: 0.00000] +Starting optimization with: +centroid: [4.57628308 7.39815401 1.30602549], (scaled: [0.72881415 0.69592616 0.2612051 ]) +sigma: [2.90435163 3.63043954 0.72608791], (scaled: 0.14521758155307307) +lambda (The number of children to produce at each generation): 7 +mu (The number of parents to keep from the lambda children): 3 +weights: [0.63704257 0.28457026 0.07838717] +mueff: 2.0286114646100617 +ccum (Cumulation constant for covariance matrix.): 0.5714285714285714 +cs (Cumulation constant for step-size): 0.5017818438926943 +ccov1 (Learning rate for rank-one update): 0.09747248265066792 +ccovmu (Learning rate for rank-mu update): 0.038593139193450914 +damps (Damping for step-size): 1.5017818438926942 + 24%|██████████████████████████████▏ | 238/1000 [00:00<00:00, 1265.35gen/s, best loss: 0.00000] + 17%|█████████████████████ | 166/1000 [00:00<00:00, 1369.98gen/s, best loss: 4.00000] + +First (minimal) optimization: Dict from run function contains: ['param0', 'param1', 'param2', 'logbook', 'deap_pop', 'best_fitness'] -Best parameters from first optimization: {'param0': 3.0, 'param1': 7.0, 'param2': -2.0} -Loss of best parameters from first optimization: 0.0 -Best parameters from second optimization: {'a': 3.0, 'b': 7.0, 'c': -2.0} -Loss of best parameters from second optimization: 0.0 +Best parameters: {'param0': 3.0, 'param1': 7.0, 'param2': -2.0} +Loss of best parameters: 0.0 + +Second optimization (with all 
optional attributes): +Dict from run function contains: ['a', 'b', 'c', 'logbook', 'deap_pop', 'best_fitness'] +Best parameters: {'a': 3.000000004587328, 'b': 6.999999980571925, 'c': 0.0} +Loss of best parameters: 4.0 ``` \ No newline at end of file diff --git a/src/CompNeuroPy/examples/deap_cma.py b/src/CompNeuroPy/examples/deap_cma.py index 54b1649..14bb8e7 100644 --- a/src/CompNeuroPy/examples/deap_cma.py +++ b/src/CompNeuroPy/examples/deap_cma.py @@ -33,7 +33,7 @@ def evaluate_function(population): return loss_list -def get_source_solutions(): +def get_source_solutions(lb, ub): """ DeapCma can use source solutions to initialize the optimization process. This function returns an example of source solutions. @@ -46,35 +46,28 @@ def get_source_solutions(): list of tuples, where each tuple contains the parameters of an individual and its loss """ - source_solutions_parameters = np.array( - [ - [1, 2, 3], - [3, 5, 3], - [5, 7, 3], - [7, 9, 3], - [9, 10, 3], - [-1, -2, -3], - [-3, -5, -3], - [-5, -7, -3], - [-7, -9, -3], - [-9, -10, -3], - ] - ) + ### create random solutions + source_solutions_parameters = np.random.uniform(0, 1, (100, 3)) * (ub - lb) + lb + ### evaluate the random solutions source_solutions_losses = evaluate_function(source_solutions_parameters) + ### create a list of tuples, where each tuple contains the parameters of an + ### individual and its loss source_solutions = [ (source_solutions_parameters[idx], source_solutions_losses[idx][0]) for idx in range(len(source_solutions_parameters)) ] + ### only use the best 10 as source solutions + source_solutions = sorted(source_solutions, key=lambda x: x[1])[:10] return source_solutions def main(): ### define lower bounds of paramters to optimize - lb = np.array([-10, -10, -10]) + lb = np.array([-10, -10, 0]) ### define upper bounds of paramters to optimize - ub = np.array([10, 10, 10]) + ub = np.array([10, 15, 5]) ### create an "minimal" instance of the DeapCma class deap_cma = DeapCma( @@ -84,21 +77,23 
@@ def main(): ) ### create an instance of the DeapCma class using all optional attributes - ### to initialize one could give a p0 array (same shape as lower and upper) or use - ### source solutions (as shown here) + ### to initialize one could give a p0 array (same shape as lower and upper) and a + ### sig0 value or use source solutions (as shown here) deap_cma_optional = DeapCma( lower=lb, upper=ub, evaluate_function=evaluate_function, max_evals=1000, p0=None, + sig0=None, param_names=["a", "b", "c"], - learn_rate_factor=0.5, - damping_factor=0.5, - verbose=False, + learn_rate_factor=1, + damping_factor=1, + verbose=True, plot_file="logbook_optional.png", cma_params_dict={}, - source_solutions=get_source_solutions(), + source_solutions=get_source_solutions(lb=lb, ub=ub), + hard_bounds=True, ) ### run the optimization, since max_evals was not defined during initialization of @@ -107,32 +102,29 @@ def main(): deap_cma_result = deap_cma.run(max_evals=1000) ### run the optimization with all optional attributes - deap_cma_optional_result = deap_cma_optional.run() - - ### print what deap_cma_result contains - print(f"Dict from run function contains: {list(deap_cma_result.keys())}") + deap_cma_optional_result = deap_cma_optional.run(verbose=False) ### print the best parameters and its loss, since we did not define the names of the ### parameters during initialization of the DeapCma instance, the names are param0, - ### param1, param2 + ### param1, param2, also print everything that is in the dict returned by the run best_param_dict = { param_name: deap_cma_result[param_name] for param_name in ["param0", "param1", "param2"] } - print(f"Best parameters from first optimization: {best_param_dict}") - print( - f"Loss of best parameters from first optimization: {deap_cma_result['best_fitness']}" - ) + print("\nFirst (minimal) optimization:") + print(f"Dict from run function contains: {list(deap_cma_result.keys())}") + print(f"Best parameters: {best_param_dict}") + 
print(f"Loss of best parameters: {deap_cma_result['best_fitness']}\n") ### print the same for the second optimization best_param_dict = { param_name: deap_cma_optional_result[param_name] for param_name in ["a", "b", "c"] } - print(f"Best parameters from second optimization: {best_param_dict}") - print( - f"Loss of best parameters from second optimization: {deap_cma_optional_result['best_fitness']}" - ) + print("Second optimization (with all optional attributes):") + print(f"Dict from run function contains: {list(deap_cma_optional_result.keys())}") + print(f"Best parameters: {best_param_dict}") + print(f"Loss of best parameters: {deap_cma_optional_result['best_fitness']}") return 1 diff --git a/src/CompNeuroPy/extra_functions.py b/src/CompNeuroPy/extra_functions.py index 2da825a..1ae6deb 100644 --- a/src/CompNeuroPy/extra_functions.py +++ b/src/CompNeuroPy/extra_functions.py @@ -736,6 +736,7 @@ def __init__( evaluate_function: Callable, max_evals: None | int = None, p0: None | np.ndarray = None, + sig0: None | float = None, param_names: None | list[str] = None, learn_rate_factor: float = 1, damping_factor: float = 1, @@ -743,6 +744,7 @@ def __init__( plot_file: None | str = "logbook.png", cma_params_dict: dict = {}, source_solutions: list[tuple[np.ndarray, float]] = [], + hard_bounds: bool = False, ): """ @@ -760,6 +762,10 @@ def __init__( p0 (None | np.ndarray, optional): Initial guess for the parameters. By default the mean of lower and upper bounds. + sig0 (None | float, optional): + Initial guess for the standard deviation of the parameters. It will be + scaled by the range of the parameters. By default 0.25, i.e. 25% of the + range (for each parameter). param_names (None | list[str], optional): Names of the parameters. By default None, i.e. the parameters are named "param0", "param1", ... @@ -781,6 +787,9 @@ def __init__( solutions ignores the initial guess p0 and sets the cma parameter 'cmatrix' (which will also be ignored if given in cma_params_dict). 
By default []. + hard_bounds (bool, optional): + Whether or not to use hard bounds (parmeters are clipped to lower and + upper bounds). By default False. """ ### store attributes self.max_evals = max_evals @@ -788,6 +797,7 @@ def __init__( self.upper = upper self.evaluate_function = evaluate_function self.p0 = p0 + self.sig0 = sig0 self.param_names = param_names self.learn_rate_factor = learn_rate_factor self.damping_factor = damping_factor @@ -795,6 +805,7 @@ def __init__( self.plot_file = plot_file self.cma_params_dict = cma_params_dict self.source_solutions = source_solutions + self.hard_bounds = hard_bounds ### prepare the optimization self.deap_dict = self._prepare() @@ -815,6 +826,7 @@ def _prepare(self): upper = self.upper evaluate_function = self.evaluate_function p0 = self.p0 + sig0 = self.sig0 param_names = self.param_names learn_rate_factor = self.learn_rate_factor damping_factor = self.damping_factor @@ -825,12 +837,18 @@ def _prepare(self): upper_orig = deepcopy(upper) lower_orig = deepcopy(lower) - def scaler(x): - return (x - lower_orig) / (upper_orig - lower_orig) + def scaler(x, diff=False): + if not diff: + return (x - lower_orig) / (upper_orig - lower_orig) + else: + return x / (upper_orig - lower_orig) ### create inverse scaler to scale parameters back into original range [lower,upper] - def inv_scaler(x): - return x * (upper_orig - lower_orig) + lower_orig + def inv_scaler(x, diff=False): + if not diff: + return x * (upper_orig - lower_orig) + lower_orig + else: + return x * (upper_orig - lower_orig) ### scale upper and lower bounds lower = scaler(lower) @@ -861,9 +879,22 @@ def inv_scaler(x): gamma=1, ) cma_params_dict["cmatrix"] = cmatrix + + if self.hard_bounds: + ### clip centroid to [0,1] + centroid = np.clip(centroid, 0, 1) else: - centroid = (lower + upper) / 2 if isinstance(p0, type(None)) else scaler(p0) - sigma = (upper - lower) / 4 + ### lower + upper / 2 is always 0.5 since lower and upper are scaled + centroid = ( + (lower + upper) 
/ 2 + if isinstance(p0, type(None)) + else ( + scaler(np.clip(p0, lower, upper)) + if self.hard_bounds + else scaler(p0) + ) + ) + sigma = 0.25 if isinstance(sig0, type(None)) else sig0 ### create the strategy strategy = cma.Strategy( @@ -872,6 +903,11 @@ def inv_scaler(x): **cma_params_dict, ) + if verbose: + print( + f"Starting optimization with:\ncentroid: {inv_scaler(strategy.centroid)}, (scaled: {strategy.centroid})\nsigma: {inv_scaler(strategy.sigma,diff=True)}, (scaled: {strategy.sigma})" + ) + ### slow down the learning rate and increase the damping strategy.ccov1 *= learn_rate_factor strategy.ccovmu *= learn_rate_factor @@ -912,6 +948,7 @@ def inv_scaler(x): "param_names": param_names, "inv_scaler": inv_scaler, "strategy": strategy, + "hard_bounds": self.hard_bounds, } def run( @@ -1027,13 +1064,17 @@ def _deap_ea_generate_update( stats = deap_dict["stats"] halloffame = deap_dict["hof"] strategy = deap_dict["strategy"] + hard_bounds = deap_dict["hard_bounds"] ### init logbook logbook = tools.Logbook() logbook.header = ["gen", "nevals"] + (stats.fields if stats else []) ### define progress bar - progress_bar = tqdm(range(ngen), total=ngen, unit="gen") + if verbose: + progress_bar = range(ngen) + else: + progress_bar = tqdm(range(ngen), total=ngen, unit="gen") early_stop = False ### loop over generations @@ -1042,9 +1083,10 @@ def _deap_ea_generate_update( population = toolbox.generate() ### clip individuals of population to variable bounds ### TODO only if bounds are hard - for ind in population: - for idx, val in enumerate(ind): - ind[idx] = np.clip(val, lower[idx], upper[idx]) + if hard_bounds: + for ind in population: + for idx, val in enumerate(ind): + ind[idx] = np.clip(val, lower[idx], upper[idx]) ### Evaluate the individuals (here whole population at once) ### scale parameters back into original range [lower,upper] population_inv_scaled = [inv_scaler(ind) for ind in deepcopy(population)] @@ -1074,12 +1116,23 @@ def _deap_ea_generate_update( record = 
stats.compile(population) if stats is not None else {} logbook.record(gen=gen, nevals=len(population), **record) if verbose: + ### print logbook print(logbook.stream) + ### print evaluated individuals and their fitnesses + print_dict = { + f"ind_{idx}": list(ind) + for idx, ind in enumerate(deepcopy(population_inv_scaled)) + } + for idx, key in enumerate(print_dict): + print_dict[key].append(fitnesses[idx][0]) + print_df(print_dict) + print("") ### update progress bar with current best loss - progress_bar.set_postfix_str( - f"best loss: {halloffame[0].fitness.values[0]:.5f}" - ) + if not verbose: + progress_bar.set_postfix_str( + f"best loss: {halloffame[0].fitness.values[0]:.5f}" + ) if early_stop and verbose: print("Stopping because convergence is reached.") From efa030c7e902a4e886cd6ce5d5df8c35d50286ab Mon Sep 17 00:00:00 2001 From: olimaol Date: Fri, 22 Mar 2024 15:08:23 +0100 Subject: [PATCH 08/39] implemented test for model_configurator adjusted interactive_plot --- .../examples/model_configurator/test.py | 193 ++++++++++++++++++ src/CompNeuroPy/extra_functions.py | 32 ++- 2 files changed, 221 insertions(+), 4 deletions(-) create mode 100644 src/CompNeuroPy/examples/model_configurator/test.py diff --git a/src/CompNeuroPy/examples/model_configurator/test.py b/src/CompNeuroPy/examples/model_configurator/test.py new file mode 100644 index 0000000..947dd52 --- /dev/null +++ b/src/CompNeuroPy/examples/model_configurator/test.py @@ -0,0 +1,193 @@ +from ANNarchy import Neuron, Population, compile, simulate, get_time, setup, dt +from CompNeuroPy import ( + CompNeuroMonitors, + PlotRecordings, + interactive_plot, + timing_decorator, +) +from CompNeuroPy.neuron_models import Izhikevich2007 +import numpy as np + +setup(dt=0.1) + +neuron = Neuron( + parameters=""" + C = 100.0, + k = 0.7, + v_r = -60.0, + v_t = -40.0, + a = 0.03, + b = -2.0, + c = -50.0, + d = 100.0, + v_peak = 35.0, + I_app = 0.0, + tau = 300 + """, + equations=""" + ### Izhikevich spiking + I_v = I_app 
+ C * dv/dt = k*(v - v_r)*(v - v_t) - u + I_v + du/dt = a*(b*(v - v_r) - u) + ### Spike tracking + tau * dspike_track/dt = - spike_track + ### I tracking + tau * dI_track/dt = - I_track + I_v + """, + spike="v >= v_peak", + reset=""" + v = c + u = u + d + spike_track = 1.0 + """, +) + +pop = Population(100, neuron=neuron, name="pop") + + +monitors = CompNeuroMonitors( + mon_dict={"pop;1": ["I_v", "spike_track", "I_track", "spike"]} +) + +compile() + +monitors.start() + +### create an array with amplitudes between -200 and 200 +I_app_arr = np.arange(-200, 200, 5) + +### create an array with durations between 10 ms and 200 ms +duration_arr = np.arange(10, 200 + 10, 10) + +### TODO alwys draw random duration from duration_arr and set I_app of the whole population to the shuffeled I_app_arr +### --> I_app_arr is the size of the population +### --> or just draw from I_app_arr for each neuron... and the population is as large as we want... maybe better +total_duration = 1000 +duration_list = [] +while sum(duration_list) < total_duration: + duration_list.append(np.random.choice(duration_arr)) + +for duration in duration_list: + pop.I_app = np.random.choice(I_app_arr, size=pop.size) + simulate(duration) + +recordings = monitors.get_recordings() + +### concatenate the recorded arrays of all neurons +I_v = np.concatenate(recordings[0]["pop;I_v"].T) +spike_track = np.concatenate(recordings[0]["pop;spike_track"].T) +I_track = np.concatenate(recordings[0]["pop;I_track"].T) +### spikesw vom ersten neuron dann spiekes vom zweiten + simulierte gesamtzeit dann spikes vom dritten + simulierte gesamtzeit *2 +spike_times = np.concatenate( + [ + dt() * np.array(recordings[0]["pop;spike"][i]) + i * sum(duration_list) + for i in range(pop.size) + ] +) +### round spike times to full ms to be compatible with the other recording arrays +spike_times = np.round(spike_times, 0) +spikes_onehot = np.zeros(I_track.size) +spikes_onehot[spike_times.astype(int)] = 1 + + +def create_plot(axs, 
sliders): + + end_time = int(sliders[0]["slider"].val) + print(end_time) + + ### plot the variables + ### I tracking + axs[0].plot(I_v[end_time - 1000 : end_time], label="I_v") + axs[0].plot(I_track[end_time - 1000 : end_time], label="I_track") + axs[0].set_ylim(-200, 200) + axs[0].set_xlim(0, 1000) + axs[0].legend(loc="upper left") + ### spike tracking + axs[1].plot(spike_track[end_time - 1000 : end_time], label="spike_track") + axs[1].plot(spikes_onehot[end_time - 1000 : end_time], label="spikes") + axs[1].set_ylim(0, 1) + axs[1].set_xlim(0, 1000) + axs[1].legend(loc="upper left") + + +interactive_plot( + nrows=2, + ncols=1, + sliders=[ + {"label": "end time", "valmin": 1000, "valmax": I_track.size, "valinit": 1000}, + ], + create_plot=create_plot, +) + +### TODO: data looks good now train categorization model to predict spikes + + +# max_len = 1000 +# t_list = list(range(-max_len, 0, 1)) +# spike_track_list = [0] * max_len +# I_track_list = [0] * max_len +# I_v_list = [0] * max_len + + +# def track_var(var_list, var_name): +# """ +# Track a variable of the population pop. The current variable value is stored in the +# last element of the var_list. The first element is removed. 
+# """ +# var_list.append(getattr(pop, var_name)[0]) +# var_list.pop(0) + + +# def create_plot(axs, sliders, **kwargs): + +# ### update the rates variable +# pop.I_app = sliders[0]["slider"].val + +# ### plot the variables +# ### spike tracking +# axs[0].plot(kwargs["t_list"], kwargs["spike_track_list"], label="f") +# axs[0].set_ylim(0, 1) +# axs[0].set_xlim(kwargs["t_list"][0], kwargs["t_list"][-1]) +# ### I tracking +# axs[1].plot(kwargs["t_list"], kwargs["I_track_list"], label="f_0") +# axs[1].plot(kwargs["t_list"], kwargs["I_v_list"], label="f_1") +# axs[1].set_ylim(0, sliders[0]["slider"].val + 20) +# axs[1].set_xlim(kwargs["t_list"][0], kwargs["t_list"][-1]) +# ### legend +# axs[1].legend(loc="upper left") + + +# def update_loop(**kwargs): +# simulate(1.0) +# ### update the variable lists +# track_var(kwargs["spike_track_list"], "spike_track") +# track_var(kwargs["I_track_list"], "I_track") +# track_var(kwargs["I_v_list"], "I_v") +# ### update the time list +# kwargs["t_list"].append(get_time()) +# kwargs["t_list"].pop(0) + + +# interactive_plot( +# nrows=2, +# ncols=1, +# sliders=[ +# {"label": "I_app", "valmin": 0.0, "valmax": 200.0, "valinit": 0.0}, +# ], +# create_plot=lambda axs, sliders: create_plot( +# axs, +# sliders, +# spike_track_list=spike_track_list, +# I_track_list=I_track_list, +# I_v_list=I_v_list, +# t_list=t_list, +# ), +# update_loop=lambda: update_loop( +# spike_track_list=spike_track_list, +# I_track_list=I_track_list, +# I_v_list=I_v_list, +# t_list=t_list, +# ), +# figure_frequency=20, +# update_frequency=100, +# ) diff --git a/src/CompNeuroPy/extra_functions.py b/src/CompNeuroPy/extra_functions.py index 1ae6deb..36a3f9d 100644 --- a/src/CompNeuroPy/extra_functions.py +++ b/src/CompNeuroPy/extra_functions.py @@ -32,6 +32,7 @@ from screeninfo import get_monitors import cmaes import efel +from time import time def print_df(df: pd.DataFrame | dict, **kwargs): @@ -2087,6 +2088,9 @@ def interactive_plot( ncols: int, sliders: list[dict], 
create_plot: Callable, + update_loop: Callable | None = None, + figure_frequency: float = 20.0, + update_frequency: float = np.inf, ): """ Create an interactive plot with sliders. @@ -2111,6 +2115,13 @@ def interactive_plot( and sliders is the given sliders list with newly added keys "ax" (axes of the slider) and "slider" (the Slider object itself, so that you can access the slider values in the create_plot function using the .val attribute) + update_loop (Callable, optional): + Function which is called periodically. After each call the plot is updated. + If None, the plot is only updated when a slider is changed. Default is None. + figure_frequency (float, optional): + Frequency of the figure update in Hz. Default is 20.0. + update_frequency (float, optional): + Frequency of the update loop in Hz. Default is np.inf. Examples: ```python @@ -2137,8 +2148,6 @@ def update(axs, sliders): ax.cla() ### recreate the plot create_plot(axs, sliders) - ### redraw the canvas - fig.canvas.draw_idle() ### create the figure as large as the screen screen_width, screen_height = get_monitors()[0].width, get_monitors()[0].height @@ -2194,8 +2203,23 @@ def update(axs, sliders): ] ) - ### show the plot - plt.show() + if update_loop is None: + ### show the plot + plt.show() + else: + ### run update loop until figure is closed + figure_pause = 1 / figure_frequency + max_updates_per_pause = update_frequency / figure_frequency + while plt.fignum_exists(fig.number): + ### update figure + update(axs, sliders) + plt.pause(figure_pause) + ### in between do the update loop multiple times + start = time() + nr_updates = 0 + while time() - start < figure_pause and nr_updates < max_updates_per_pause: + update_loop() + nr_updates += 1 def efel_loss(trace1, trace2, feature_list): From a8c165d502fae1f31be6bf194719b8b207ab595b Mon Sep 17 00:00:00 2001 From: olmai Date: Thu, 28 Mar 2024 15:57:09 +0100 Subject: [PATCH 09/39] simulation_funcitons: new class SimulationEvents extra_functions: reqorked 
InteractivePlot --- src/CompNeuroPy/__init__.py | 4 +- src/CompNeuroPy/dbs.py | 62 ++- src/CompNeuroPy/experiment.py | 2 +- src/CompNeuroPy/extra_functions.py | 342 +++++++----- .../final_models/artificial_nm.py | 4 +- src/CompNeuroPy/simulation_functions.py | 498 +++++++++++++++++- src/CompNeuroPy/system_functions.py | 6 +- 7 files changed, 748 insertions(+), 170 deletions(-) diff --git a/src/CompNeuroPy/__init__.py b/src/CompNeuroPy/__init__.py index 2ba694e..f389456 100644 --- a/src/CompNeuroPy/__init__.py +++ b/src/CompNeuroPy/__init__.py @@ -30,7 +30,8 @@ evaluate_expression_with_dict, VClampParamSearch, DeapCma, - interactive_plot, + interactive_plot, # TODO remove + InteractivePlot, data_obj, # TODO remove my_linear_cmap_obj, # TODO remove decision_tree, # TODO remove @@ -51,6 +52,7 @@ attribute_step, attr_ramp, increasing_attr, + SimulationEvents, ) from CompNeuroPy.system_functions import ( clear_dir, diff --git a/src/CompNeuroPy/dbs.py b/src/CompNeuroPy/dbs.py index e60d810..e0643d8 100644 --- a/src/CompNeuroPy/dbs.py +++ b/src/CompNeuroPy/dbs.py @@ -102,9 +102,11 @@ def __init__( ### self.axon_rate_amp is None --> use the axon_rate_amp_pop_name_list and axon_rate_amp_value_list to create the dict self.axon_rate_amp: dict[Population | str, float] = { ### key is either a Populaiton or the string "default" - get_population(pop_name[4:]) - if pop_name.startswith("pop;") - else pop_name: axon_rate_amp_val + ( + get_population(pop_name[4:]) + if pop_name.startswith("pop;") + else pop_name + ): axon_rate_amp_val for pop_name, axon_rate_amp_val in zip( self.axon_rate_amp_pop_name_list, self.axon_rate_amp_value_list ) @@ -148,9 +150,11 @@ def analyze_model( ### if key is a Population, use the name of the Population and prepend pop; ### if key is the string "default", use the string self.axon_rate_amp_pop_name_list = [ - f"pop;{axon_rate_amp_key.name}" - if isinstance(axon_rate_amp_key, Population) - else axon_rate_amp_key + ( + f"pop;{axon_rate_amp_key.name}" + 
if isinstance(axon_rate_amp_key, Population) + else axon_rate_amp_key + ) for axon_rate_amp_key in axon_rate_amp.keys() ] self.axon_rate_amp_value_list = list(axon_rate_amp.values()) @@ -284,9 +288,9 @@ def analyze_projections(self): ] ### get the parameters of the connector function - connector_function_parameter_dict[ - proj.name - ] = self.get_connector_parameters(proj) + connector_function_parameter_dict[proj.name] = ( + self.get_connector_parameters(proj) + ) ### get the names of the pre- and post-synaptic populations pre_post_pop_name_dict[proj.name] = (proj.pre.name, proj.post.name) @@ -531,9 +535,9 @@ def add_DBS_to_spiking_neuron_model(self, neuron_model_init_parameter_dict): ) ### 3rd add axon spike term - neuron_model_init_parameter_dict[ - "axon_spike" - ] = "pulse(t)*dbs_on*unif_var_dbs1 > 1-prob_axon_spike" + neuron_model_init_parameter_dict["axon_spike"] = ( + "pulse(t)*dbs_on*unif_var_dbs1 > 1-prob_axon_spike" + ) ### 4th add axon reset term neuron_model_init_parameter_dict[ @@ -544,9 +548,9 @@ def add_DBS_to_spiking_neuron_model(self, neuron_model_init_parameter_dict): """ ### 5th extend description - neuron_model_init_parameter_dict[ - "description" - ] = f"{neuron_model_init_parameter_dict['description']}\nWith DBS mechanisms implemented." + neuron_model_init_parameter_dict["description"] = ( + f"{neuron_model_init_parameter_dict['description']}\nWith DBS mechanisms implemented." + ) return neuron_model_init_parameter_dict @@ -677,9 +681,9 @@ def add_DBS_to_rate_coded_neuron_model(self, neuron_model_init_parameter_dict): ) ### 3rd extend description - neuron_model_init_parameter_dict[ - "description" - ] = f"{neuron_model_init_parameter_dict['description']}\nWith DBS mechanisms implemented." + neuron_model_init_parameter_dict["description"] = ( + f"{neuron_model_init_parameter_dict['description']}\nWith DBS mechanisms implemented." 
+ ) return neuron_model_init_parameter_dict @@ -795,14 +799,14 @@ def add_DBS_to_spiking_synapse_model(self, synapse_init_parameter_dict): synapse_init_parameter_dict["equations"] = "\n".join(equations_line_split_list) ### 3rd add pre_axon_spike - synapse_init_parameter_dict[ - "pre_axon_spike" - ] = "g_target+=ite(unif_var_dbs>> + # create DBS stimulator dbs = DBSstimulator( stimulated_population=population1, diff --git a/src/CompNeuroPy/experiment.py b/src/CompNeuroPy/experiment.py index 30b22ce..39ac3b7 100644 --- a/src/CompNeuroPy/experiment.py +++ b/src/CompNeuroPy/experiment.py @@ -20,7 +20,7 @@ class CompNeuroExp: data (dict): dict for storing optional data - Examples: + Example: ```python from CompNeuroPy import CompNeuroExp from ANNarchy import simulate diff --git a/src/CompNeuroPy/extra_functions.py b/src/CompNeuroPy/extra_functions.py index 36a3f9d..da2f144 100644 --- a/src/CompNeuroPy/extra_functions.py +++ b/src/CompNeuroPy/extra_functions.py @@ -32,7 +32,9 @@ from screeninfo import get_monitors import cmaes import efel -from time import time +import time +import threading +from matplotlib.animation import FuncAnimation def print_df(df: pd.DataFrame | dict, **kwargs): @@ -106,7 +108,7 @@ def suppress_stdout(): """ Suppresses the print output of a function - Examples: + Example: ```python with suppress_stdout(): print("this will not be printed") @@ -694,8 +696,8 @@ class DeapCma: lower and upper bounds, the parameter names, the inverse scaler and the strategy. 
- Examples: - For complete example see [here](../examples/deap_cma.md) + Example: + For complete example see [here](../examples/deap_cma.md) ```python from CompNeuroPy import DeapCma import numpy as np @@ -2083,143 +2085,144 @@ def _get_line_is_v(self, line: str): return False -def interactive_plot( - nrows: int, - ncols: int, - sliders: list[dict], - create_plot: Callable, - update_loop: Callable | None = None, - figure_frequency: float = 20.0, - update_frequency: float = np.inf, -): - """ - Create an interactive plot with sliders. +class InteractivePlot: - Args: - nrows (int): - number of rows of subplots - ncols (int): - number of columns of subplots - sliders (list): - list of dictionaries with slider kwargs (see matplotlib.widgets.Slider), at - least the following keys have to be present: - - label (str): - label of the slider - - valmin (float): - minimum value of the slider - - valmax (float): - maximum value of the slider - create_plot (Callable): - function which fills the subplots, has to have the signature - create_plot(axs, sliders), where axs is a list of axes (for each subplot) - and sliders is the given sliders list with newly added keys "ax" (axes of - the slider) and "slider" (the Slider object itself, so that you can access - the slider values in the create_plot function using the .val attribute) - update_loop (Callable, optional): - Function which is called periodically. After each call the plot is updated. - If None, the plot is only updated when a slider is changed. Default is None. - figure_frequency (float, optional): - Frequency of the figure update in Hz. Default is 20.0. - update_frequency (float, optional): - Frequency of the update loop in Hz. Default is np.inf. + def __init__( + self, + nrows: int, + ncols: int, + sliders: list[dict], + create_plot: Callable, + update_loop: Callable | None = None, + figure_frequency: float = 20.0, + update_frequency: float = np.inf, + ): + """ + Create an interactive plot with sliders. 
- Examples: - ```python - def create_plot(axs, sliders): - axs[0].axhline(sliders[0]["slider"].val, color="r") - axs[1].axvline(sliders[1]["slider"].val, color="r") - - interactive_plot( - nrows=2, - ncols=1, - sliders=[ - {"label": "a", "valmin": 0.0, "valmax": 1.0, "valinit": 0.3}, - {"label": "b", "valmin": 0.0, "valmax": 1.0, "valinit": 0.7}, - ], - create_plot=create_plot, + Args: + nrows (int): + number of rows of subplots + ncols (int): + number of columns of subplots + sliders (list): + list of dictionaries with slider kwargs (see matplotlib.widgets.Slider), at + least the following keys have to be present: + - label (str): + label of the slider + - valmin (float): + minimum value of the slider + - valmax (float): + maximum value of the slider + create_plot (Callable): + function which fills the subplots, has to have the signature + create_plot(axs, sliders), where axs is a list of axes (for each subplot) + and sliders is the given sliders list with newly added keys "ax" (axes of + the slider) and "slider" (the Slider object itself, so that you can access + the slider values in the create_plot function using the .val attribute) + update_loop (Callable, optional): + Function which is called periodically. After each call the plot is updated. + If None, the plot is only updated when a slider is changed. Default is None. + figure_frequency (float, optional): + Frequency of the figure update in Hz. Default is 20.0. + update_frequency (float, optional): + Frequency of the update loop in Hz. Default is np.inf. 
+ + Example: + ```python + def create_plot(axs, sliders): + axs[0].axhline(sliders[0]["slider"].val, color="r") + axs[1].axvline(sliders[1]["slider"].val, color="r") + + interactive_plot( + nrows=2, + ncols=1, + sliders=[ + {"label": "a", "valmin": 0.0, "valmax": 1.0, "valinit": 0.3}, + {"label": "b", "valmin": 0.0, "valmax": 1.0, "valinit": 0.7}, + ], + create_plot=create_plot, + ) + ``` + """ + self.create_plot = create_plot + self._waiter = _Waiter(duration=2.0, on_finish=self._recreate_plot) + plt.close("all") + + ### create the figure as large as the screen + screen_width, screen_height = get_monitors()[0].width, get_monitors()[0].height + figsize = (screen_width / 100, screen_height / 100) + fig, axs = plt.subplots(nrows, ncols, figsize=figsize) + self.fig = fig + self.axs = axs + + ### create the sliders figure, set the axes for the sliders + fig_sliders, axs_sliders = plt.subplots( + len(sliders), 1, figsize=(6.4, 4.8 * len(sliders)) ) - ``` - """ - - def update(axs, sliders): - ### remove everything from all axes except the sliders axes - for ax in axs: - if ax not in [slider["ax"] for slider in sliders]: - ax.cla() - ### recreate the plot + if len(sliders) == 1: + axs_sliders = [axs_sliders] + for slider_idx in range(len(sliders)): + sliders[slider_idx]["ax"] = axs_sliders[slider_idx] + + ### initialize the sliders + for slider_idx, slider_kwargs in enumerate(sliders): + ### if init out of min max, change min max + if "valinit" in slider_kwargs: + if slider_kwargs["valinit"] < slider_kwargs["valmin"]: + slider_kwargs["valmin"] = slider_kwargs["valinit"] + elif slider_kwargs["valinit"] > slider_kwargs["valmax"]: + slider_kwargs["valmax"] = slider_kwargs["valinit"] + slider = Slider(**slider_kwargs) + slider.on_changed(lambda val: self._waiter.start()) + sliders[slider_idx]["slider"] = slider + + self.sliders = sliders + + ### create the plot create_plot(axs, sliders) - ### create the figure as large as the screen - screen_width, screen_height = 
get_monitors()[0].width, get_monitors()[0].height - figsize = (screen_width / 100, screen_height / 100) - fig = plt.figure(figsize=figsize) - - ### create the axes filled with the create_plot function - grid = GridSpec((nrows + 1) * len(sliders), ncols * len(sliders), figure=fig) - axs = [] - for row_idx in range(nrows): - for col_idx in range(ncols): - ax = fig.add_subplot( - grid[ - row_idx * len(sliders) : (row_idx + 1) * len(sliders), - col_idx * len(sliders) : (col_idx + 1) * len(sliders), - ] + if update_loop is None: + ### show the plot + self.ani = FuncAnimation( + self.fig, + func=lambda frame: 0, + frames=10, + interval=(1.0 / figure_frequency) * 1000, + repeat=True, ) - axs.append(ax) - - ### create the sliders axes - for slider_idx, slider_kwargs in enumerate(sliders): - sliders[slider_idx]["ax"] = fig.add_subplot( - grid[nrows * len(sliders) + slider_idx, :] - ) + plt.show() + else: + ### run update loop until figure is closed + figure_pause = 1 / figure_frequency + max_updates_per_pause = update_frequency / figure_frequency + while plt.fignum_exists(fig.number): + ### update figure + self._recreate_plot + plt.pause(figure_pause) + ### in between do the update loop multiple times + start = time.time() + nr_updates = 0 + while ( + time.time() - start < figure_pause + and nr_updates < max_updates_per_pause + ): + update_loop() + nr_updates += 1 + + def _recreate_plot(self): + ### pause the animation + self.ani.event_source.stop() + ### clear the axes + for ax in self.axs.flatten(): + ax.cla() + ### recreate the plot + self.create_plot(self.axs, self.sliders) + ### restart the animation + self.ani.event_source.start() - ### initialize the sliders to their axes - for slider_idx, slider_kwargs in enumerate(sliders): - ### if init out of min max, change min max - if "valinit" in slider_kwargs: - if slider_kwargs["valinit"] < slider_kwargs["valmin"]: - slider_kwargs["valmin"] = slider_kwargs["valinit"] - elif slider_kwargs["valinit"] > 
slider_kwargs["valmax"]: - slider_kwargs["valmax"] = slider_kwargs["valinit"] - slider = Slider(**slider_kwargs) - slider.on_changed(lambda val: update(axs, sliders)) - sliders[slider_idx]["slider"] = slider - - ### create the plot - create_plot(axs, sliders) - ### arange subplots - plt.tight_layout() - new_right_border = 0.85 - new_left_border = 0.15 - for slider_idx, slider_kwargs in enumerate(sliders): - ax = sliders[slider_idx]["ax"] - ### set new borders - ax.set_position( - [ - new_left_border, - ax.get_position().y0, - new_right_border - new_left_border, - ax.get_position().height, - ] - ) - if update_loop is None: - ### show the plot - plt.show() - else: - ### run update loop until figure is closed - figure_pause = 1 / figure_frequency - max_updates_per_pause = update_frequency / figure_frequency - while plt.fignum_exists(fig.number): - ### update figure - update(axs, sliders) - plt.pause(figure_pause) - ### in between do the update loop multiple times - start = time() - nr_updates = 0 - while time() - start < figure_pause and nr_updates < max_updates_per_pause: - update_loop() - nr_updates += 1 +interactive_plot = InteractivePlot def efel_loss(trace1, trace2, feature_list): @@ -2523,3 +2526,76 @@ def get_spike_features_loss_of_chunk( if verbose: print(f"loss: {loss}") return loss + + +class _Waiter: + """ + Class that waits for a certain duration while the rest of the code continues to run. + + Attributes: + finished (bool): + True if the waiting is finished, False otherwise. + """ + + def __init__(self, duration=5, on_finish=None): + """ + Args: + duration (float): + The duration in seconds after which Waiter.finished will return True. + on_finish (callable): + A callable that will be called when the counter finishes. 
+ """ + self.duration = duration + self.on_finish = on_finish + self._finished = False + self._running = False + self._lock = threading.Lock() + self._threads = {} + + def _start_waiting(self): + """ + The function that will be run in a separate thread to wait for the duration. It + will set finished to True when the duration is reached. It will also call the + on_finish callable if it is not None. + """ + ### at the beginning of the thread set the stop flags for all other threads + for thread_id, thread in self._threads.items(): + if thread[0].ident != threading.get_ident(): + thread[1].set() + ### wait duration + time.sleep(self.duration) + ### check if the current thread was already stopped, if not set finished to True + if not (self._threads[threading.get_ident()][1].is_set()): + with self._lock: + ### set finished to True + self._finished = True + ### remove the current thread from the threads dict + self._threads.pop(threading.get_ident()) + ### call the on_finish callable in the main thread + if self.on_finish is not None: + threading.Timer(0.01, self.on_finish).start() + else: + with self._lock: + ### do not set finished to True and remove the current thread from the + ### threads dict + self._threads.pop(threading.get_ident()) + + def start(self): + """ + Start the waiting process in a separate thread. The waiting will last for the + duration specified in the constructor. If the waiting is already running, it + will be stopped and restarted. 
+ """ + ### start new waiting thread + thread = threading.Thread(target=self._start_waiting, daemon=True) + stop_flag = threading.Event() + ### start the thread + thread.start() + ### store the thread and the stop flag + with self._lock: + self._threads[thread.ident] = [thread, stop_flag] + + @property + def finished(self): + with self._lock: + return self._finished diff --git a/src/CompNeuroPy/neuron_models/final_models/artificial_nm.py b/src/CompNeuroPy/neuron_models/final_models/artificial_nm.py index 88e001d..9766a69 100644 --- a/src/CompNeuroPy/neuron_models/final_models/artificial_nm.py +++ b/src/CompNeuroPy/neuron_models/final_models/artificial_nm.py @@ -27,7 +27,7 @@ class IntegratorNeuron(Neuron): threshold (float, optional): Threshold for the decision g_ampa has to reach. Default: 1. - Examples: + Example: ```python from ANNarchy import Population, simulate_until from CompNeuroPy.neuron_models import Integrator @@ -116,7 +116,7 @@ class IntegratorNeuronSimple(Neuron): tau (float, optional): Time constant in ms of the neuron. Default: 1. 
- Examples: + Example: ```python from ANNarchy import Population, simulate_until from CompNeuroPy.neuron_models import Integrator diff --git a/src/CompNeuroPy/simulation_functions.py b/src/CompNeuroPy/simulation_functions.py index 1f8c2da..7f8734a 100644 --- a/src/CompNeuroPy/simulation_functions.py +++ b/src/CompNeuroPy/simulation_functions.py @@ -1,4 +1,14 @@ -from ANNarchy import simulate, get_population, dt +from ANNarchy import ( + simulate, + get_population, + dt, + simulate_until, + get_current_step, + get_time, +) +from CompNeuroPy import analysis_functions as af +import numpy as np +from copy import deepcopy def attr_sim(pop: str, attr_dict, t=500): @@ -265,3 +275,489 @@ def increasing_current(pop: str, a0, da, nr_steps, dur_step): """ increasing_attr_return = increasing_attr(pop, "I_app", a0, da, nr_steps, dur_step) return {"current_list": increasing_attr_return["attr_list"]} + + +class SimulationEvents: + """ + Class to create a Simulation consiting of multiple events. Add the effects + (functions) of the events in a class which inherits from SimulationEvents. Within + the effect functions you can use the attributes of the class which inherits from + SimulationEvents. Do never simulate within the effect functions of the events. The + simulation is done between the events. 
+ + Example: + ```python + from CompNeuroPy import SimulationEvents + + ### define a class which inherits from SimulationEvents + ### define the effects of the events in the class + class MySim(SimulationEvents): + + def __init__( + self, + p=0.8, + verbose=False, + ): + ### set attributes which should be used in the effect functions + self.p = p + super().__init__(verbose=verbose) + + def effect1(self): + ### set the parameter of a population to the value of p + pop.parameter = self.p + + def effect2(self): + ### set the parameter of a population to 0 + pop.parameter = 0 + + ### create the simulation object + my_sim = MySim() + + ### add events to the simulation + ### start event right at the beginning which triggers event1 after 100 ms + my_sim.add_event(name="start", trigger={"event1": 100}) + ### event1 causes effect1 and triggers event2 after 200 ms + my_sim.add_event(name="event1", effect=my_sim.effect1, trigger={"event2": 200}) + ### event2 causes effect2 and triggers end event after 300 ms + my_sim.add_event(name="event2", effect=my_sim.effect2, trigger={"end": 300}) + + ### run the simulation + my_sim.run() + ``` + """ + + def __init__(self, verbose=False): + """ + Args: + verbose (bool): + if True, additional information is printed during simulation + """ + ### set verbose + self.verbose = verbose + ### initialize events + self._initialize() + ### list for storing added events, without changing them + self.stored_event_list = [] + self.called_during_restore = False + ### add the end event + self.add_event(name="end", effect=self._end_sim) + + def _initialize(self): + """ + initialize locals + """ + if self.verbose: + print("initialize locals") + ### list of events + self.event_list = [] + self.event_name_list = [] + ### as long as end == False simulation runs + self.end = False + ### if events occur depends on happened events + self.happened_event_list = [] + ### initialize model triggers empty, before first simulation, there should not be 
model_trigger_events + ### model_trigger_list = name of populations of which the decision should be checked + self.model_trigger_list = [] + self.past_model_trigger_list = [] + + def add_event( + self, + name: str, + onset: int = None, + model_trigger: str = None, + requirement_string: str = None, + effect=None, + trigger: dict[str, int] = None, + ): + """ + Adds an event to the simulation. You always have to trigger the end event to end + the simulation. + + Args: + name (str): + name of the event + onset (int): + time in simulation steps when the event should occur + model_trigger (str): + name of population which can trigger the event (by setting variable + decision to -1) + requirement_string (str): + string containing the requirements for the event to occur TODO: replace with function + effect (function): + Function which is executed during the event. Within the effect function + you can use the attributes of the class which inherits from + SimulationEvents. + trigger (dict): + dictionary containing the names of other events as keys and the + relative time in simulation steps to the onset of the current event as + values + """ + self.event_list.append( + self._Event( + trial_procedure=self, + name=name, + onset=onset, + model_trigger=model_trigger, + requirement_string=requirement_string, + effect=effect, + trigger=trigger, + ) + ) + self.event_name_list.append(name) + + if not self.called_during_restore: + self.stored_event_list.append( + { + "name": name, + "onset": onset, + "model_trigger": model_trigger, + "requirement_string": requirement_string, + "effect": effect, + "trigger": trigger, + } + ) + + def _restore_event_list(self): + """ + Restore the event list after simulation to the state before the first call of + run. To be able to run the simulation multiple times. 
+ """ + self.called_during_restore = True + for event in self.stored_event_list: + self.add_event(**event) + self.called_during_restore = False + + def run(self): + """ + Run the simulation. The simulation runs until the end event is triggered. The + simulation can be run multiple times by calling this function multiple times. + """ + ### check if there are events which have no onset and are not triggered by other + ### events and have no model_trigger --> they would never start + ### --> set their onset to current step --> they ar run directly after calling run + triggered_events = [] + for event in self.event_list: + if event.trigger is not None: + triggered_events.extend(list(event.trigger.keys())) + for event in self.event_list: + if ( + event.onset is None + and event.model_trigger is None + and event.name not in triggered_events + ): + event.onset = get_current_step() + if self.verbose: + print(event.name, "set onset to start of run") + + ### run simulation + while not (self.end): + ### check if model triggers were activated --> if yes run the corresponding events, model_trigger events can trigger other events (with onset) --> run current_step events after model trigger events + ### if that's the case --> model trigger event would run twice (because during first run it gets an onset) --> define here run_event_list which prevents events run twice + self.run_event_list = [] + self._run_model_trigger_events() + ### run the events of the current time, based on mode and happened events + self._run_current_events() + ### if event triggered end --> end simulation / skip rest + if self.end: + if self.verbose: + print("end event triggered --> end simulation") + continue + ### check then next events occur + next_events_time = self._get_next_events_time() + ### check if there are model triggers + self.model_trigger_list = self._get_model_trigger_list() + ### simulate until next event(s) or model triggers + if self.verbose: + print("check_triggers:", 
self.model_trigger_list) + if len(self.model_trigger_list) > 1: + ### multiple model triggers + simulate_until( + max_duration=next_events_time, + population=[ + get_population(pop_name) for pop_name in self.model_trigger_list + ], + operator="or", + ) + elif len(self.model_trigger_list) > 0: + ### a single model trigger + simulate_until( + max_duration=next_events_time, + population=get_population(self.model_trigger_list[0]), + ) + else: + ### no model_triggers + simulate(next_events_time) + + ### after run finishes initialize again + self._initialize() + + ### restore event_list + self._restore_event_list() + + def _run_current_events(self): + """ + Run all events with start == current step + """ + ### run all events of the current step + ### repeat this until no event was run, because events can set the onset of other events to the current step + ### due to repeat --> prevent that same event is run twice + event_run = True + while event_run: + event_run = False + for event in self.event_list: + if ( + event.onset == get_current_step() + and not (event.name in self.run_event_list) + and event._check_requirements() + ): + event.run() + event_run = True + self.run_event_list.append(event.name) + + def _run_model_trigger_events(self): + """ + check the current model triggers stored in self.model_trigger_list + if they are activated --> run corresponding events + prevent that these model triggers are stored again in self.model_trigger_list + """ + ### loop to check if model trigger got active + for model_trigger in self.model_trigger_list: + if ( + int(get_population(model_trigger).decision[0]) == -1 + ): ### TODO this is not generalized yet, only works if the model_trigger populations have the variable decision which is set to -1 if the model trigger is active + ### -1 means got active + ### find the events triggerd by the model_trigger and run them + for event in self.event_list: + if event.model_trigger == model_trigger: + event.run() + 
self.run_event_list.append(event.name) + ### prevent that these model_triggers are used again + self.past_model_trigger_list.append(model_trigger) + + def _get_next_events_time(self): + """ + go through all events and get onsets + get onset which are > current_step + return smallest diff in ms (ms value = full timesteps!) + + Returns: + time (float): + time in ms until the next event, rounded to full timesteps + """ + next_event_time = np.inf + for event in self.event_list: + ### skip events without onset + if event.onset == None: + continue + ### check if onset in the future and nearest + if ( + event.onset > get_current_step() + and (event.onset - get_current_step()) < next_event_time + ): + next_event_time = event.onset - get_current_step() + ### return difference (simulation duration until nearest next event) in ms, round to full timesteps + return round(next_event_time * dt(), af.get_number_of_decimals(dt())) + + def _get_model_trigger_list(self): + """ + check if there are events with model_triggers + check if these model triggers already happened + check if the requirements of the events are met + not happend + requirements met --> add model_trigger to model_trigger_list + returns the (new) model_trigger_list + + Returns: + model_trigger_list (list): + list of model triggers which are not in past_model_trigger_list and + have their requirements met + """ + ret = [] + for event in self.event_list: + if event.model_trigger != None: + if ( + not (event.model_trigger in self.past_model_trigger_list) + and event._check_requirements() + ): + ret.append(event.model_trigger) + return ret + + def _end_sim(self): + """ + Event to end the simulation + """ + self.end = True + + class _Event: + """ + Class for events in the simulation + """ + + def __init__( + self, + trial_procedure, + name, + onset=None, + model_trigger=None, + requirement_string=None, + effect=None, + trigger=None, + ): + """ + Args: + trial_procedure (SimulationEvents): + SimulationEvents object + 
name (str): + name of the event + onset (int): + time in simulation steps when the event should occur + model_trigger (str): + name of population which can trigger the event (by setting variable + decision to -1) + requirement_string (str): + string containing the requirements for the event to occur TODO: replace with function + effect (function): + function which is executed during the event + trigger (dict): + dictionary containing the names of other events as keys and the + relative time in simulation steps to the onset of the current event as + values + """ + self.trial_procedure = trial_procedure + self.name = name + self.onset = onset + self.model_trigger = model_trigger + self.requirement_string = requirement_string + self.effect = effect + self.trigger = trigger + + def run(self): + """ + Run the event i.e. execute the effect of the event and trigger other events + """ + ### check requirements + if self._check_requirements(): + ### run the event + if self.trial_procedure.verbose: + print("run event:", self.name, get_time()) + ### for events which are triggered by model --> set onset + if self.onset == None: + self.onset = get_current_step() + ### run the effect + if self.effect is not None: + self.effect() + ### trigger other events + if self.trigger is not None: + ### loop over all triggered events + for name, delay in self.trigger.items(): + ### get the other event + event_idx = self.trial_procedure.event_name_list.index(name) + ### set onset of other event + self.trial_procedure.event_list[event_idx].onset = ( + self.onset + delay + ) + ### store event in happened events + self.trial_procedure.happened_event_list.append(self.name) + + ### TODO replace requirement_string with a function (which has access to the + ### attributes) checking the requirements + def _check_requirements(self): + """ + Check if the requirements for the event are met + + Returns: + met (bool): + True if requirements are met, False otherwise + """ + if self.requirement_string != 
None: + ### check requirement with requirement string + return self._eval_requirement_string() + else: + ### no requirement + return True + + def _eval_requirement_string(self): + """ + evaluates a condition string in format like 'XXX==XXX and (XXX==XXX or + XXX==XXX)' + + Returns: + met (bool): + True if requirements are met, False otherwise + """ + ### split condition string + string = self.requirement_string + string = string.split(" and ") + string = [sub_string.split(" or ") for sub_string in string] + + ### loop over string splitted string parts + final_string = [] + for sub_idx, sub_string in enumerate(string): + ### combine outer list eelemts with and + ### and combine inner list elements with or + if len(sub_string) == 1: + if sub_idx < len(string) - 1: + final_string.append( + self._get_condition_part(sub_string[0]) + " and " + ) + else: + final_string.append(self._get_condition_part(sub_string[0])) + else: + for sub_sub_idx, sub_sub_string in enumerate(sub_string): + if sub_sub_idx < len(sub_string) - 1: + final_string.append( + self._get_condition_part(sub_sub_string) + " or " + ) + elif sub_idx < len(string) - 1: + final_string.append( + self._get_condition_part(sub_sub_string) + " and " + ) + else: + final_string.append( + self._get_condition_part(sub_sub_string) + ) + return eval("".join(final_string)) + + def _get_condition_part(self, string): + """ + converts a string in format like '((XXX==XXX)' into '((True)' + """ + ### remove spaces from string + string = string.strip() + string = string.split() + string = "".join(string) + + ### recursively remove brackets + ### at the end evaluate term (without brackets) and then return the evaluated value with the former brackets + if string[0] == "(": + return "(" + self._get_condition_part(string[1:]) + elif string[-1] == ")": + return self._get_condition_part(string[:-1]) + ")" + else: + return str(self._eval_condition_part(string)) + + def _eval_condition_part(self, string): + """ + gets string in format 
'XXX==XXX' + + evaluates the term for mode and happened events + + returns True/False + """ + + var = string.split("==")[0] + val = string.split("==")[1] + if var == "mode": + test = self.trial_procedure.mode == val + elif var == "happened_event_list": + ### remove brackets + val = val.strip("[]") + ### split entries + val = val.split(",") + ### remove spaces from entries + happened_event_list_from_string = [val_val.strip() for val_val in val] + ### check if all events are in happened_event_list, if not --> return False + test = True + for event in happened_event_list_from_string: + if not (event in self.trial_procedure.happened_event_list): + test = False + return test diff --git a/src/CompNeuroPy/system_functions.py b/src/CompNeuroPy/system_functions.py index 799efae..b497a4f 100644 --- a/src/CompNeuroPy/system_functions.py +++ b/src/CompNeuroPy/system_functions.py @@ -93,7 +93,7 @@ def save_variables(variable_list: list, name_list: list, path: str | list = "./" save path for all variables, or save path for each variable of the variable_list. Default: "./" - Examples: + Example: ```python import numpy as np from CompNeuroPy import save_variables, load_variables @@ -146,7 +146,7 @@ def load_variables(name_list: list, path: str | list = "./"): dictionary with the loaded variables, keys are the names of the files, values are the loaded variables - Examples: + Example: ```python import numpy as np from CompNeuroPy import save_variables, load_variables @@ -292,7 +292,7 @@ def create_data_raw_folder( **kwargs (Any, optional): Global variables of the caller script. 
- Examples: + Example: ```python from CompNeuroPy import create_data_raw_folder From 1bd6ceb8bac34b273e947f253fa302c73d89f527 Mon Sep 17 00:00:00 2001 From: olmai Date: Thu, 28 Mar 2024 17:28:20 +0100 Subject: [PATCH 10/39] extra_functions: new class RNG simulation_functions: SimulationEvents allows callable trigger times --- src/CompNeuroPy/__init__.py | 1 + src/CompNeuroPy/extra_functions.py | 27 +++++++++++++++++++++++++ src/CompNeuroPy/simulation_functions.py | 24 ++++++++++++++-------- 3 files changed, 44 insertions(+), 8 deletions(-) diff --git a/src/CompNeuroPy/__init__.py b/src/CompNeuroPy/__init__.py index f389456..38c8980 100644 --- a/src/CompNeuroPy/__init__.py +++ b/src/CompNeuroPy/__init__.py @@ -37,6 +37,7 @@ decision_tree, # TODO remove node_cl, # TODO remove efel_loss, + RNG, ) from CompNeuroPy.model_functions import ( compile_in_folder, diff --git a/src/CompNeuroPy/extra_functions.py b/src/CompNeuroPy/extra_functions.py index da2f144..1357280 100644 --- a/src/CompNeuroPy/extra_functions.py +++ b/src/CompNeuroPy/extra_functions.py @@ -2599,3 +2599,30 @@ def start(self): def finished(self): with self._lock: return self._finished + + +class RNG: + """ + Resettable random number generator. + + Attributes: + rng (np.random.Generator): + Random number generator. 
+ + Example: + ```python + rng = RNG(seed=1234) + print(rng.rng.integers(0, 10, 5)) + rng.reset() + print(rng.rng.integers(0, 10, 5)) + ``` + """ + + def __init__(self, seed): + self.rng = np.random.default_rng(seed=seed) + self._original_seed = seed + + def reset(self): + self.rng.bit_generator.state = np.random.default_rng( + seed=self._original_seed + ).bit_generator.state diff --git a/src/CompNeuroPy/simulation_functions.py b/src/CompNeuroPy/simulation_functions.py index 7f8734a..a674150 100644 --- a/src/CompNeuroPy/simulation_functions.py +++ b/src/CompNeuroPy/simulation_functions.py @@ -8,7 +8,7 @@ ) from CompNeuroPy import analysis_functions as af import numpy as np -from copy import deepcopy +from typing import Callable def attr_sim(pop: str, attr_dict, t=500): @@ -366,8 +366,8 @@ def add_event( onset: int = None, model_trigger: str = None, requirement_string: str = None, - effect=None, - trigger: dict[str, int] = None, + effect: Callable = None, + trigger: dict[str, int | Callable[[], int]] = None, ): """ Adds an event to the simulation. You always have to trigger the end event to end @@ -390,7 +390,8 @@ def add_event( trigger (dict): dictionary containing the names of other events as keys and the relative time in simulation steps to the onset of the current event as - values + values. The values can also be callable functions which return the + time (without any aruments). They are called when this event is triggered. """ self.event_list.append( self._Event( @@ -620,7 +621,8 @@ def __init__( trigger (dict): dictionary containing the names of other events as keys and the relative time in simulation steps to the onset of the current event as - values + values. The values can also be callable functions which return the + time (without any aruments). They are called when this event is triggered. 
""" self.trial_procedure = trial_procedure self.name = name @@ -652,9 +654,15 @@ def run(self): ### get the other event event_idx = self.trial_procedure.event_name_list.index(name) ### set onset of other event - self.trial_procedure.event_list[event_idx].onset = ( - self.onset + delay - ) + if callable(delay): + add = delay() + self.trial_procedure.event_list[event_idx].onset = ( + self.onset + add + ) + else: + self.trial_procedure.event_list[event_idx].onset = ( + self.onset + delay + ) ### store event in happened events self.trial_procedure.happened_event_list.append(self.name) From 2788c3023fc49df7ce2f8f0f757bb1bad5d32ced Mon Sep 17 00:00:00 2001 From: olmai Date: Mon, 8 Apr 2024 15:32:20 +0200 Subject: [PATCH 11/39] RecordingTimes.combine_periods: returns np.nan array if variable was not recorded experimetnal neuron models: adjusted fit_Corbit_13 --- src/CompNeuroPy/monitors.py | 10 +++++++--- .../neuron_models/experimental_models/fit_Corbit_nm.py | 2 +- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/src/CompNeuroPy/monitors.py b/src/CompNeuroPy/monitors.py index 3bcefab..75f09de 100644 --- a/src/CompNeuroPy/monitors.py +++ b/src/CompNeuroPy/monitors.py @@ -754,9 +754,6 @@ def combine_periods( time_step = recordings[0]["dt"] time_list = [] - ### get data arr - data_arr = recordings[chunk][recording_data_str] - ### get time arr for period in range(nr_periods): start_time, end_time = self.time_lims( @@ -768,6 +765,13 @@ def combine_periods( time_list.append(times) time_arr = np.concatenate(time_list, 0) + ### get data arr + try: + data_arr = recordings[chunk][recording_data_str] + except: + ### create an nan array with the same length as time_arr + data_arr = np.full(time_arr.shape, np.nan) + ### fill gaps with nan or interpolate if fill == "nan": time_arr, data_arr = af.time_data_add_nan( diff --git a/src/CompNeuroPy/neuron_models/experimental_models/fit_Corbit_nm.py b/src/CompNeuroPy/neuron_models/experimental_models/fit_Corbit_nm.py index 
bc66f24..e13303a 100644 --- a/src/CompNeuroPy/neuron_models/experimental_models/fit_Corbit_nm.py +++ b/src/CompNeuroPy/neuron_models/experimental_models/fit_Corbit_nm.py @@ -467,7 +467,7 @@ dg_gaba/dt = -g_gaba/tau_gaba I = a_I*(I_app - neg(g_ampa*(v - E_ampa)) - pos(g_gaba*(v - E_gaba))) - C * dv/dt = k*(v - v_r)*(v - v_t) - u - pos(uu*(v - E_gaba)) - n + I + C * dv/dt = k*(v - v_r)*(v - v_t) - u - pos(uu*(v - E_gaba)) - pos(n) + I du/dt = a*(b*(v - v_r) - u) duu/dt = -a_uu*uu From 7ecde34c49fd668e6ed93025f4ce7333430f821a Mon Sep 17 00:00:00 2001 From: olmai Date: Tue, 9 Apr 2024 13:58:59 +0200 Subject: [PATCH 12/39] new final neuron model IzhikevichGolomb --- src/CompNeuroPy/neuron_models/__init__.py | 1 + .../final_models/izhikevich_2007_like_nm.py | 135 ++++++++++++++++++ 2 files changed, 136 insertions(+) diff --git a/src/CompNeuroPy/neuron_models/__init__.py b/src/CompNeuroPy/neuron_models/__init__.py index 3733d12..d371537 100644 --- a/src/CompNeuroPy/neuron_models/__init__.py +++ b/src/CompNeuroPy/neuron_models/__init__.py @@ -44,6 +44,7 @@ Izhikevich2007CorbitFsiNoisyAmpa, Izhikevich2007CorbitFsiNoisyBase, Izhikevich2007NoisyAmpaOscillating, + IzhikevichGolomb, ) from .final_models.artificial_nm import ( integrator_neuron, diff --git a/src/CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py b/src/CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py index 0f84d20..b495c00 100644 --- a/src/CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py +++ b/src/CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py @@ -1372,6 +1372,141 @@ def __init__( self._instantiated.append(True) +class IzhikevichGolomb(Neuron): + """ + PREDEFINED + + [Izhikevich (2007)](https://isbnsearch.org/isbn/9780262090438)-like neuron model + with conductance-based AMPA and GABA synapses, noise in the baseline current, a + separated after-spike hyperpolarization and an inductive-like current causing late + spiking. 
Mechanisms and parameters were adjusted to fit the striatal FSI neuron + model from [Golomb et al. (2007)](https://doi.org/10.1371/journal.pcbi.0030156) + also used by [Corbit et al. (2016)](https://doi.org/10.1523/JNEUROSCI.0339-16.2016) + as striatal FSI neuron. + + Parameters: + I_app (float, optional): + External applied input current. + tau_ampa (float, optional): + Time constant of the AMPA synapse. + tau_gaba (float, optional): + Time constant of the GABA synapse. + E_ampa (float, optional): + Reversal potential of the AMPA synapse. + E_gaba (float, optional): + Reversal potential of the GABA synapse. + base_mean (float, optional): + Mean of the baseline current. + base_noise (float, optional): + Standard deviation of the baseline current noise. + rate_base_noise (float, optional): + Rate of the noise update (Poisson distributed) in the baseline current. + params_for_pop (bool, optional): + If True, the parameters are population-wide and not neuron-specific. + init (dict, optional): + Initial values for the variables. 
+ + Variables to record: + - offset_base + - I_base + - g_ampa + - g_gaba + - I_v + - v + - u + - uu + - s + - n + """ + + # For reporting + _instantiated = [] + + def __init__( + self, + I_app: float = 0.0, + tau_ampa: float = 10.0, + tau_gaba: float = 10.0, + E_ampa: float = 0.0, + E_gaba: float = -90.0, + base_mean: float = 0.0, + base_noise: float = 0.0, + rate_base_noise: float = 0.0, + params_for_pop: bool = False, + init: dict = {}, + ): + # Create the arguments + parameters = f""" + ### base parameters + C = 100 + k = 2.3422021975590845 + v_r = -70 + v_t = -50 + a = 0.4077132173988824 + b = 37.027824808742196 + c = -50 + d = 0 + v_peak = 0 + ### after-spike current parameters + a_uu = 0.4077132173988824 + dd = 819.0218598481788 + ### slow currents parameters + a_s = 0.19087175635342485 + a_n = 0.008987424013380247 + b_n = 2.9609600149723434 + ### input current + I_app = {I_app} + ### synaptic current parameters + tau_ampa = {tau_ampa} {': population' if params_for_pop else ''} + tau_gaba = {tau_gaba} {': population' if params_for_pop else ''} + E_ampa = {E_ampa} {': population' if params_for_pop else ''} + E_gaba = {E_gaba} {': population' if params_for_pop else ''} + ### input current scaling + a_I = 223.0822501641062 + ### baseline current parameters + base_mean = {base_mean} + base_noise = {base_noise} + rate_base_noise = {rate_base_noise} + """ + + prefix = _I_base_noise + syn = _syn_default + i_v = f"a_I*(I_app {_I_syn} + I_base)" + dv = f"{_dv_default} - pos(uu*(v - E_gaba)) - pos(n)" + affix = """ + duu/dt = -a_uu*uu + ds/dt = a_s*(I_v - s) + dn/dt = a_n*(b_n*(I_v - s) - n) + """ + + # get equations + equations = _get_equation_izhikevich_2007( + syn=syn, i_v=i_v, dv=dv, prefix=prefix, affix=affix + ) + + # set initial values + equations = _set_init(equations, init) + + super().__init__( + parameters=parameters, + equations=equations, + spike="v >= v_peak", + reset=""" + v = c + u = u + d + uu = uu + dd + """, + name="IzhikevichGolomb", + 
description=""" + Izhikevich (2007)-like neuron model fitted to the FSI neuron model + from Golomb et al. (2007) and Corbit et al. (2016). + """, + ) + + # For reporting + self._instantiated.append(True) + + ### create objects for backwards compatibility Izhikevich2007_record_currents = Izhikevich2007RecCur() Izhikevich2007_voltage_clamp = Izhikevich2007VoltageClamp() From 669711a09dcf53abfb03341025966f6e4d2ad6e2 Mon Sep 17 00:00:00 2001 From: olimaol Date: Wed, 10 Apr 2024 13:50:49 +0200 Subject: [PATCH 13/39] removed not used import --- src/CompNeuroPy/extra_functions.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/CompNeuroPy/extra_functions.py b/src/CompNeuroPy/extra_functions.py index 1357280..118d817 100644 --- a/src/CompNeuroPy/extra_functions.py +++ b/src/CompNeuroPy/extra_functions.py @@ -28,7 +28,6 @@ import warnings import json from matplotlib.widgets import Slider -from matplotlib.gridspec import GridSpec from screeninfo import get_monitors import cmaes import efel From 463ace9a1bc1e97d4eceac00eb55ef8073224916 Mon Sep 17 00:00:00 2001 From: olmai Date: Mon, 15 Apr 2024 17:17:09 +0200 Subject: [PATCH 14/39] new distributions test for model_configurator --- .../examples/model_configurator/test.py | 432 ++++++++++++------ 1 file changed, 280 insertions(+), 152 deletions(-) diff --git a/src/CompNeuroPy/examples/model_configurator/test.py b/src/CompNeuroPy/examples/model_configurator/test.py index 947dd52..69bb7c5 100644 --- a/src/CompNeuroPy/examples/model_configurator/test.py +++ b/src/CompNeuroPy/examples/model_configurator/test.py @@ -1,193 +1,321 @@ -from ANNarchy import Neuron, Population, compile, simulate, get_time, setup, dt +from ANNarchy import ( + Neuron, + Population, + compile, + simulate, + get_time, + setup, + dt, + Projection, +) from CompNeuroPy import ( CompNeuroMonitors, PlotRecordings, interactive_plot, timing_decorator, ) -from CompNeuroPy.neuron_models import Izhikevich2007 +from CompNeuroPy.neuron_models import PoissonNeuron 
import numpy as np +from sklearn.neighbors import KernelDensity +import matplotlib.pyplot as plt setup(dt=0.1) -neuron = Neuron( +neuron_izh = Neuron( parameters=""" - C = 100.0, - k = 0.7, - v_r = -60.0, - v_t = -40.0, - a = 0.03, - b = -2.0, - c = -50.0, - d = 100.0, - v_peak = 35.0, - I_app = 0.0, - tau = 300 + C = 100.0 + k = 0.7 + v_r = -60.0 + v_t = -40.0 + a = 0.03 + b = -2.0 + c = -50.0 + d = 100.0 + v_peak = 35.0 + I_app = 0.0 + E_ampa = 0.0 """, equations=""" + ### synaptic current + I_ampa = -neg(g_ampa*(v - E_ampa)) ### Izhikevich spiking - I_v = I_app + I_v = I_app + I_ampa C * dv/dt = k*(v - v_r)*(v - v_t) - u + I_v du/dt = a*(b*(v - v_r) - u) - ### Spike tracking - tau * dspike_track/dt = - spike_track - ### I tracking - tau * dI_track/dt = - I_track + I_v """, spike="v >= v_peak", reset=""" v = c u = u + d - spike_track = 1.0 """, ) -pop = Population(100, neuron=neuron, name="pop") +pop_pre = Population(100, neuron=PoissonNeuron(rates=10.0), name="pre") +pop_post = Population(100, neuron=neuron_izh, name="post") + +proj = Projection(pre=pop_pre, post=pop_post, target="ampa", name="proj") +proj.connect_fixed_probability(weights=10, probability=0.1) monitors = CompNeuroMonitors( - mon_dict={"pop;1": ["I_v", "spike_track", "I_track", "spike"]} + mon_dict={"pre": ["spike"], "post": ["v", "spike", "I_ampa"]} ) compile() monitors.start() -### create an array with amplitudes between -200 and 200 -I_app_arr = np.arange(-200, 200, 5) +simulate(100.0) +pop_pre.rates = 200.0 +simulate(100.0) + +recordings = monitors.get_recordings() +recording_times = monitors.get_recording_times() + +PlotRecordings( + figname="test.png", + recordings=recordings, + recording_times=recording_times, + shape=(2, 2), + plan={ + "position": [1, 3, 4], + "compartment": ["pre", "post", "post"], + "variable": ["spike", "spike", "v"], + "format": ["hybrid", "hybrid", "line"], + }, +) + -### create an array with durations between 10 ms and 200 ms -duration_arr = np.arange(10, 200 + 10, 
10) +def get_binned_spikes( + spikes_dict: dict, time_lims_steps: tuple[int, int], BIN_SIZE_STEPS: int +): + """ + Bin the given spike dictionary into time bins of the given size. + + Args: + spikes_dict (dict): + A dictionary of spike times for each neuron. + time_lims_steps (tuple[int, int]): + The time limits of the spike_dict in steps. + BIN_SIZE_STEPS (int): + The size of the bins in steps. + + Returns: + counts_matrix (np.ndarray): + The binned spike counts for each neuron with shape (n_bins, n_neurons, 1). + """ + ### get the spike distribution of each time bin + bins_array = np.arange( + time_lims_steps[0], + (time_lims_steps[1] - time_lims_steps[0]) + BIN_SIZE_STEPS // 2, + BIN_SIZE_STEPS, + ).astype(int) + + counts_array_list = [ + np.histogram(spikes_list, bins=bins_array)[0] + for spikes_list in spikes_dict.values() + ] + counts_matrix = np.stack(counts_array_list, axis=1) + counts_matrix = counts_matrix.reshape(counts_matrix.shape + (1,)) -### TODO alwys draw random duration from duration_arr and set I_app of the whole population to the shuffeled I_app_arr -### --> I_app_arr is the size of the population -### --> or just draw from I_app_arr for each neuron... and the population is as large as we want... maybe better -total_duration = 1000 -duration_list = [] -while sum(duration_list) < total_duration: - duration_list.append(np.random.choice(duration_arr)) + return counts_matrix -for duration in duration_list: - pop.I_app = np.random.choice(I_app_arr, size=pop.size) - simulate(duration) -recordings = monitors.get_recordings() +def get_binned_variable(var_array: np.ndarray, BIN_SIZE_STEPS: int): + """ + Bin the given variable array into time bins of the given size. Bins are created on + the first axis of the array. The values of the bins are the average of the original + values in the bin. 
-### concatenate the recorded arrays of all neurons -I_v = np.concatenate(recordings[0]["pop;I_v"].T) -spike_track = np.concatenate(recordings[0]["pop;spike_track"].T) -I_track = np.concatenate(recordings[0]["pop;I_track"].T) -### spikesw vom ersten neuron dann spiekes vom zweiten + simulierte gesamtzeit dann spikes vom dritten + simulierte gesamtzeit *2 -spike_times = np.concatenate( - [ - dt() * np.array(recordings[0]["pop;spike"][i]) + i * sum(duration_list) - for i in range(pop.size) - ] + Args: + var_array (np.ndarray): + The variable array to bin. First axis is the time axis and should be + divisible by the bin size. Second axis is the number of time serieses. + BIN_SIZE_STEPS (int): + The size of the bins in steps. + + Returns: + np.ndarray: + The binned variable array with shape (n_bins, n_time_serieses, 1). + """ + ### reshape the array to bin the first axis + reshaped = var_array.reshape( + var_array.shape[0] // BIN_SIZE_STEPS, BIN_SIZE_STEPS, var_array.shape[1] + ) + ### get the average of the values in each bin + averages: np.ndarray = np.mean(reshaped, axis=1) + averages = averages.reshape(averages.shape + (1,)) + + return averages + + +BIN_SIZE_STEPS = int(round(5 / dt())) + + +class DistPreSpikes: + + def __init__(self, spikes_dict, time_lims_steps): + + self.spikes_binned = get_binned_spikes( + spikes_dict=spikes_dict, + time_lims_steps=time_lims_steps, + BIN_SIZE_STEPS=BIN_SIZE_STEPS, + ) + self._pdf_dict = {} + + @property + def pdf(self, time_bin=0): + return self._pdf_dict.get(time_bin, self._get_pdf(time_bin)) + + def _get_pdf(self, time_bin=0): + # Create the KDE object + kde = KernelDensity() + + # Fit the data to the KDE + kde.fit(self.spikes_binned[time_bin]) + + # Create points for which to estimate the PDF + x = np.linspace(0, 10, 100).reshape(-1, 1) + + # Estimate the PDF for these points + log_density = kde.score_samples(x) + pdf = np.exp(log_density) + + # spikes can only be positive + pdf[x[:, 0] >= 0] = pdf[x[:, 0] >= 0] * ( + 1 + 
np.sum(pdf[x[:, 0] < 0]) / np.sum(pdf[x[:, 0] >= 0]) + ) + pdf[x[:, 0] < 0] = 0 + + # store the pdf + self._pdf_dict[time_bin] = (x, pdf) + + return x, pdf + + +class DistPostSpikesAndVoltage: + + def __init__(self, spikes_dict, time_lims_steps, voltage_array): + + self.spikes_post_binned = get_binned_spikes( + spikes_dict=spikes_dict, + time_lims_steps=time_lims_steps, + BIN_SIZE_STEPS=BIN_SIZE_STEPS, + ) + self.voltage_binned = get_binned_variable( + voltage_array, BIN_SIZE_STEPS=BIN_SIZE_STEPS + ) + self._pdf_dict = {} + + @property + def pdf(self, time_bin=0): + return self._pdf_dict.get(time_bin, self._get_pdf(time_bin)) + + def _get_pdf(self, time_bin=0): + # Create the KDE object + kde = KernelDensity() + + # Fit the data to the KDE + kde.fit(self.spikes_binned[time_bin]) + + # Create points for which to estimate the PDF + x = np.linspace(0, 10, 100).reshape(-1, 1) + + # Estimate the PDF for these points + log_density = kde.score_samples(x) + pdf = np.exp(log_density) + + # spikes can only be positive + pdf[x[:, 0] >= 0] = pdf[x[:, 0] >= 0] * ( + 1 + np.sum(pdf[x[:, 0] < 0]) / np.sum(pdf[x[:, 0] >= 0]) + ) + pdf[x[:, 0] < 0] = 0 + + # store the pdf + self._pdf_dict[time_bin] = (x, pdf) + + return x, pdf + + +dist_pre_spikes = DistPreSpikes( + spikes_dict=recordings[0]["pre;spike"], time_lims_steps=(0, 2000) ) -### round spike times to full ms to be compatible with the other recording arrays -spike_times = np.round(spike_times, 0) -spikes_onehot = np.zeros(I_track.size) -spikes_onehot[spike_times.astype(int)] = 1 - - -def create_plot(axs, sliders): - - end_time = int(sliders[0]["slider"].val) - print(end_time) - - ### plot the variables - ### I tracking - axs[0].plot(I_v[end_time - 1000 : end_time], label="I_v") - axs[0].plot(I_track[end_time - 1000 : end_time], label="I_track") - axs[0].set_ylim(-200, 200) - axs[0].set_xlim(0, 1000) - axs[0].legend(loc="upper left") - ### spike tracking - axs[1].plot(spike_track[end_time - 1000 : end_time], 
label="spike_track") - axs[1].plot(spikes_onehot[end_time - 1000 : end_time], label="spikes") - axs[1].set_ylim(0, 1) - axs[1].set_xlim(0, 1000) - axs[1].legend(loc="upper left") - - -interactive_plot( - nrows=2, - ncols=1, - sliders=[ - {"label": "end time", "valmin": 1000, "valmax": I_track.size, "valinit": 1000}, - ], - create_plot=create_plot, + +DistPostSpikesAndVoltage( + spikes_dict=recordings[0]["post;spike"], + time_lims_steps=(0, 2000), + voltage_array=recordings[0]["post;v"], ) -### TODO: data looks good now train categorization model to predict spikes - - -# max_len = 1000 -# t_list = list(range(-max_len, 0, 1)) -# spike_track_list = [0] * max_len -# I_track_list = [0] * max_len -# I_v_list = [0] * max_len - - -# def track_var(var_list, var_name): -# """ -# Track a variable of the population pop. The current variable value is stored in the -# last element of the var_list. The first element is removed. -# """ -# var_list.append(getattr(pop, var_name)[0]) -# var_list.pop(0) - - -# def create_plot(axs, sliders, **kwargs): - -# ### update the rates variable -# pop.I_app = sliders[0]["slider"].val - -# ### plot the variables -# ### spike tracking -# axs[0].plot(kwargs["t_list"], kwargs["spike_track_list"], label="f") -# axs[0].set_ylim(0, 1) -# axs[0].set_xlim(kwargs["t_list"][0], kwargs["t_list"][-1]) -# ### I tracking -# axs[1].plot(kwargs["t_list"], kwargs["I_track_list"], label="f_0") -# axs[1].plot(kwargs["t_list"], kwargs["I_v_list"], label="f_1") -# axs[1].set_ylim(0, sliders[0]["slider"].val + 20) -# axs[1].set_xlim(kwargs["t_list"][0], kwargs["t_list"][-1]) -# ### legend -# axs[1].legend(loc="upper left") - - -# def update_loop(**kwargs): -# simulate(1.0) -# ### update the variable lists -# track_var(kwargs["spike_track_list"], "spike_track") -# track_var(kwargs["I_track_list"], "I_track") -# track_var(kwargs["I_v_list"], "I_v") -# ### update the time list -# kwargs["t_list"].append(get_time()) -# kwargs["t_list"].pop(0) - - -# interactive_plot( -# 
nrows=2, -# ncols=1, -# sliders=[ -# {"label": "I_app", "valmin": 0.0, "valmax": 200.0, "valinit": 0.0}, -# ], -# create_plot=lambda axs, sliders: create_plot( -# axs, -# sliders, -# spike_track_list=spike_track_list, -# I_track_list=I_track_list, -# I_v_list=I_v_list, -# t_list=t_list, -# ), -# update_loop=lambda: update_loop( -# spike_track_list=spike_track_list, -# I_track_list=I_track_list, -# I_v_list=I_v_list, -# t_list=t_list, -# ), -# figure_frequency=20, -# update_frequency=100, -# ) +# Plot the results +x, pdf_spikes_pre = dist_pre_spikes.pdf(time_bin=0) +plt.fill_between(x[:, 0], pdf_spikes_pre, alpha=0.5) +plt.xlabel("Value") +plt.ylabel("Density") +plt.title("Kernel Density Estimation") +plt.show() + + +def get_pdf_post(): + # Create the KDE object + kde = KernelDensity() + + time_bin = -1 + # combine the spike and voltage data + data = np.concatenate( + [spikes_post_binned[time_bin], voltage_binned[time_bin]], axis=1 + ) + + print(data) + # Fit the data to the KDE + kde.fit(data) + # Create points for which to estimate the PDF, here, 2D for spike and voltage + # spike between 0 and 10, voltage between -100 and 100 + x = np.mgrid[0:10:100j, -100:100:100j].reshape(2, -1).T + + print(x) + print(x.shape) + + # Estimate the PDF for these points + log_density = kde.score_samples(x) + pdf = np.exp(log_density) + + ### spikes can only be positive + pdf[x[:, 0] >= 0] = pdf[x[:, 0] >= 0] * ( + 1 + np.sum(pdf[x[:, 0] < 0]) / np.sum(pdf[x[:, 0] >= 0]) + ) + pdf[x[:, 0] < 0] = 0 + # print(x[18:22]) + # print(pdf[18:22]) + # print(x[118:122]) + # print(pdf[118:122]) + pdf = pdf.reshape(100, 100) + + print(np.sum(pdf) * (20 / 100) * (200 / 100)) + + return x, pdf + + +# plot the results +x, pdf_post = get_pdf_post() +plt.imshow(pdf_post, aspect="auto", extent=(-100, 100, 0, 10), origin="lower") +plt.xlabel("voltages") +plt.ylabel("spikes") +plt.title("Kernel Density Estimation") +plt.show() + + +# ### discrete because spikes are integers +# x = np.arange(-0.5, 20.5, 
1.0) +# kde_discrete = np.histogram(counts_matrix[0], x, density=True)[0] +# kde_discrete2 = np.histogram(counts_matrix[-1], x, density=True)[0] +# plt.subplot(211) +# plt.bar(np.arange(kde_discrete.size).astype(int), kde_discrete, alpha=0.5) +# plt.xlabel("Value") +# plt.ylabel("Density") +# plt.title("Kernel Density Estimation") +# plt.subplot(212) +# plt.bar(np.arange(kde_discrete.size).astype(int), kde_discrete2, alpha=0.5) +# plt.xlabel("Value") +# plt.ylabel("Density") +# plt.show() From 702a7cfc2dc9b8dd117588db0750aadd76d99823 Mon Sep 17 00:00:00 2001 From: olmai Date: Tue, 16 Apr 2024 17:00:19 +0200 Subject: [PATCH 15/39] model_configurator: further developed transformation from pre spikes to post current distribution --- .../examples/model_configurator/test.py | 493 ++++++++++++++---- 1 file changed, 398 insertions(+), 95 deletions(-) diff --git a/src/CompNeuroPy/examples/model_configurator/test.py b/src/CompNeuroPy/examples/model_configurator/test.py index 69bb7c5..a84e84c 100644 --- a/src/CompNeuroPy/examples/model_configurator/test.py +++ b/src/CompNeuroPy/examples/model_configurator/test.py @@ -18,25 +18,30 @@ import numpy as np from sklearn.neighbors import KernelDensity import matplotlib.pyplot as plt +from scipy.stats import binom +from functools import wraps +import time setup(dt=0.1) neuron_izh = Neuron( parameters=""" - C = 100.0 - k = 0.7 - v_r = -60.0 - v_t = -40.0 - a = 0.03 - b = -2.0 - c = -50.0 - d = 100.0 - v_peak = 35.0 + C = 100.0 : population + k = 0.7 : population + v_r = -60.0 : population + v_t = -40.0 : population + a = 0.03 : population + b = -2.0 : population + c = -50.0 : population + d = 100.0 : population + v_peak = 0.0 : population I_app = 0.0 - E_ampa = 0.0 + E_ampa = 0.0 : population + tau_ampa = 10.0 : population """, equations=""" ### synaptic current + tau_ampa * dg_ampa/dt = -g_ampa I_ampa = -neg(g_ampa*(v - E_ampa)) ### Izhikevich spiking I_v = I_app + I_ampa @@ -53,8 +58,10 @@ pop_pre = Population(100, 
neuron=PoissonNeuron(rates=10.0), name="pre") pop_post = Population(100, neuron=neuron_izh, name="post") +CONNECTION_PROB = 0.1 +WEIGHTS = 1.0 proj = Projection(pre=pop_pre, post=pop_post, target="ampa", name="proj") -proj.connect_fixed_probability(weights=10, probability=0.1) +proj.connect_fixed_probability(weights=WEIGHTS, probability=CONNECTION_PROB) monitors = CompNeuroMonitors( @@ -66,7 +73,7 @@ monitors.start() simulate(100.0) -pop_pre.rates = 200.0 +pop_pre.rates = 1000.0 simulate(100.0) recordings = monitors.get_recordings() @@ -149,12 +156,22 @@ def get_binned_variable(var_array: np.ndarray, BIN_SIZE_STEPS: int): return averages -BIN_SIZE_STEPS = int(round(5 / dt())) +BIN_SIZE_MS = 5 +BIN_SIZE_STEPS = int(round(BIN_SIZE_MS / dt())) class DistPreSpikes: def __init__(self, spikes_dict, time_lims_steps): + """ + Create a distribution object for the given spike dictionary. + + Args: + spikes_dict (dict): + A dictionary of spike times for each neuron. + time_lims_steps (tuple[int, int]): + The time limits of the spike_dict in steps. + """ self.spikes_binned = get_binned_spikes( spikes_dict=spikes_dict, @@ -163,40 +180,86 @@ def __init__(self, spikes_dict, time_lims_steps): ) self._pdf_dict = {} - @property def pdf(self, time_bin=0): - return self._pdf_dict.get(time_bin, self._get_pdf(time_bin)) + """ + Get the PDF of the spike counts over the population for the given time bin. + + Args: + time_bin (int): + The time bin to get the PDF for. + + Returns: + x (np.ndarray): + The spike count values of the PDF. + pdf (np.ndarray): + The PDF values for the corresponding spike count values. 
+ """ + ret = self._pdf_dict.get(time_bin) + if ret is not None: + return ret - def _get_pdf(self, time_bin=0): # Create the KDE object kde = KernelDensity() - # Fit the data to the KDE kde.fit(self.spikes_binned[time_bin]) - # Create points for which to estimate the PDF - x = np.linspace(0, 10, 100).reshape(-1, 1) - + x = np.linspace(-10, 10, 200).reshape(-1, 1) # Estimate the PDF for these points log_density = kde.score_samples(x) pdf = np.exp(log_density) - # spikes can only be positive pdf[x[:, 0] >= 0] = pdf[x[:, 0] >= 0] * ( 1 + np.sum(pdf[x[:, 0] < 0]) / np.sum(pdf[x[:, 0] >= 0]) ) - pdf[x[:, 0] < 0] = 0 - + pdf = pdf[x[:, 0] >= 0] + x = x[x[:, 0] >= 0] + # spikes are discrete, sum values between 0 and 1, and between 1 and 2, etc. + pdf_discrete = np.zeros(10) + for i in range(10): + pdf_discrete[i] = np.sum(pdf[(x[:, 0] >= i) & (x[:, 0] < i + 1)]) + # to keep the density, divide by 10 (before step size was 10/100, now 10/10) + pdf_discrete = pdf_discrete / 10 + # sum is almost 1, but not exactly, so normalize + pdf_discrete = pdf_discrete / np.sum(pdf_discrete) + x_discrete = np.arange(0, 10) # store the pdf - self._pdf_dict[time_bin] = (x, pdf) - - return x, pdf + ret = (x_discrete, pdf_discrete) + self._pdf_dict[time_bin] = ret + # return the pdf + return ret + + def show_dist(self, time_bin=0): + """ + Show the distribution of the spike counts over the population for the given time + bin. + + Args: + time_bin (int): + The time bin to show the distribution for. + """ + x, pdf = self.pdf(time_bin=time_bin) + plt.bar(x, pdf, alpha=0.5) + plt.xlabel("Spikes") + plt.ylabel("Density") + plt.title("Spikes Distribution") + plt.show() class DistPostSpikesAndVoltage: def __init__(self, spikes_dict, time_lims_steps, voltage_array): - + """ + Create a distribution object for the given spike dictionary and voltage array. + + Args: + spikes_dict (dict): + A dictionary of spike times for each neuron of the post population. 
+ time_lims_steps (tuple[int, int]): + The time limits of the spike_dict in steps. + voltage_array (np.ndarray): + The voltage array of the post population. + """ + ### bin spikes and voltage over time self.spikes_post_binned = get_binned_spikes( spikes_dict=spikes_dict, time_lims_steps=time_lims_steps, @@ -205,104 +268,344 @@ def __init__(self, spikes_dict, time_lims_steps, voltage_array): self.voltage_binned = get_binned_variable( voltage_array, BIN_SIZE_STEPS=BIN_SIZE_STEPS ) + ### initial pdf dict self._pdf_dict = {} - @property def pdf(self, time_bin=0): - return self._pdf_dict.get(time_bin, self._get_pdf(time_bin)) + """ + Get the PDF of the spike counts and voltage values over the population for the + given time bin. + + Args: + time_bin (int): + The time bin to get the PDF for. + + Returns: + x (np.ndarray): + The spike and voltage values of the PDF. Array of shape (100**2, 2). + (:, 0) are the spike values and (:, 1) are the voltage values. + pdf (np.ndarray): + The PDF values for the corresponding spike and voltage value pairs. 
+ """ + ret = self._pdf_dict.get(time_bin) + if ret is not None: + return ret - def _get_pdf(self, time_bin=0): # Create the KDE object kde = KernelDensity() - + # combine the spike and voltage data + data = np.concatenate( + [self.spikes_post_binned[time_bin], self.voltage_binned[time_bin]], axis=1 + ) # Fit the data to the KDE - kde.fit(self.spikes_binned[time_bin]) - - # Create points for which to estimate the PDF - x = np.linspace(0, 10, 100).reshape(-1, 1) - + kde.fit(data) + # Create points for which to estimate the PDF, here, 2D for spike and voltage + # spike between -10 and 10 (will later be changed to 0-10), voltage between + # -100 and 0 + x = np.mgrid[-10:10:200j, -100:0:100j].reshape(2, -1).T # Estimate the PDF for these points log_density = kde.score_samples(x) pdf = np.exp(log_density) - - # spikes can only be positive + ### spikes can only be positive (only use positive side of distribution) pdf[x[:, 0] >= 0] = pdf[x[:, 0] >= 0] * ( 1 + np.sum(pdf[x[:, 0] < 0]) / np.sum(pdf[x[:, 0] >= 0]) ) - pdf[x[:, 0] < 0] = 0 - + pdf = pdf[x[:, 0] >= 0] + x = x[x[:, 0] >= 0] # store the pdf self._pdf_dict[time_bin] = (x, pdf) - + # return the pdf return x, pdf + def show_dist(self, time_bin=0): + """ + Show the distribution of the spike counts and voltage values over the population + for the given time bin. + + Args: + time_bin (int): + The time bin to show the distribution for. + """ + x, pdf = self.pdf(time_bin=time_bin) + pdf = pdf.reshape(100, 100) + extend = [x[:, 1].min(), x[:, 1].max(), x[:, 0].max(), x[:, 0].min()] + plt.imshow(pdf, extent=extend, aspect="auto") + plt.xlabel("Voltage") + plt.ylabel("Spikes") + plt.title("Voltage-Spikes Distribution") + plt.show() + + +class DistSynapses: + def __init__(self, pre_pop_size, connection_probability): + """ + Create a distribution object for the number of synapses of the post population + neurons. + + Args: + pre_pop_size (int): + The size of the pre population. 
+ connection_probability (float): + The probability of connection between the pre and post populations. + """ + number_synapses = binom(pre_pop_size, connection_probability) + self._x = np.arange( + number_synapses.ppf(0.05), number_synapses.ppf(0.95) + 1 + ).astype(int) + self._pdf = number_synapses.pmf(self._x) + ### normalize the pdf to sum to 1 (sum is already almost 1) + self._pdf = self._pdf / np.sum(self._pdf) + + def pdf(self): + """ + Get the PDF of the number of synapses of the post population neurons. + + Returns: + x (np.ndarray): + The synapse count values of the PDF. + pdf (np.ndarray): + The PDF values for the corresponding synapse count values. + """ + return self._x, self._pdf + + def show_dist(self): + """ + Show the distribution of the number of synapses of the post population neurons. + """ + x, pdf = self.pdf() + plt.bar(x, pdf, alpha=0.5) + plt.xlabel("Synapses") + plt.ylabel("Density") + plt.title("Synapses Distribution") + plt.show() + + +class DistIncomingSpikes: + + def __init__(self, dist_pre_spikes: DistPreSpikes, dist_synapses: DistSynapses): + """ + Create a distribution object for the incoming spike counts over the post + population for the given pre spike and synapse distributions. + + Args: + dist_pre_spikes (DistPreSpikes): + The distribution of the pre spike counts. + dist_synapses (DistSynapses): + The distribution of the number of synapses of the post population neurons. + """ + self.dist_pre_spikes = dist_pre_spikes + self.dist_synapses = dist_synapses + self._pdf_dict = {} + + def pdf(self, time_bin=0): + """ + Get the PDF of the incoming spike counts over the post population for the given + time bin. + + Args: + time_bin (int): + The time bin to get the PDF for. + + Returns: + x (np.ndarray): + The incoming spike count values of the PDF. + pdf (np.ndarray): + The PDF values for the corresponding incoming spike count values. 
+ """ + ret = self._pdf_dict.get(time_bin) + if ret is not None: + return ret + + ### get pdfs of pre spikes and synapses + spike_count_arr, pdf_spike_count_arr = self.dist_pre_spikes.pdf( + time_bin=time_bin + ) + synapse_count_arr, pdf_synapse_count_arr = self.dist_synapses.pdf() + ### calculate the incoming spike count array and the corresponding pdf values + incoming_spike_count_arr = np.outer( + spike_count_arr, synapse_count_arr + ).flatten() + pdf_incoming_spike_count_arr = np.outer( + pdf_spike_count_arr, pdf_synapse_count_arr + ).flatten() + ### sort the incoming spike count array (for later combining unique values) + sort_indices = np.argsort(incoming_spike_count_arr) + incoming_spike_count_arr = incoming_spike_count_arr[sort_indices] + pdf_incoming_spike_count_arr = pdf_incoming_spike_count_arr[sort_indices] + ### combine unique values of incoming spikes and sum the corresponding pdf values (slice + ### the pdf array at the unique indices and sum the values between the indices) + incoming_spike_count_arr, unique_indices = np.unique( + incoming_spike_count_arr, return_index=True + ) + pdf_incoming_spike_count_arr = np.add.reduceat( + pdf_incoming_spike_count_arr, unique_indices + ) + ### store the pdf + ret = (incoming_spike_count_arr, pdf_incoming_spike_count_arr) + self._pdf_dict[time_bin] = ret + ### return the pdf + return ret + + def show_dist(self, time_bin=0): + """ + Show the distribution of the incoming spike counts over the post population for + the given time bin. + """ + x, pdf = self.pdf(time_bin=time_bin) + plt.bar(x, pdf, alpha=0.5) + plt.xlabel("Incoming Spikes") + plt.ylabel("Density") + plt.title("Incoming Spikes Distribution") + plt.show() + + +class ConductanceCalc: + + def __init__(self, tau, w): + """ + Create a conductance calculator object with the given synaptic decay time + constant and weight. + + Args: + tau (float): + The synaptic decay time constant. + w (float): + The synaptic weight. 
+ """ + self.tau = tau + self.w = w + + def g_mean(self, nbr_spikes: int | np.ndarray, g_init: np.ndarray): + """ + Calculate the mean conductance of the post population neurons for the given number + of incoming spikes and initial (prev) conductances. + + Args: + nbr_spikes (int | np.ndarray): + The number of incoming spikes. + g_init (np.ndarray): + The initial (prev) conductances of the post population neurons. + + Returns: + np.ndarray: + The mean conductance values for the given number of spikes and initial + (prev) conductances. First axis is the number of spikes and second axis + is the initial conductance values. + """ + + # initial (prev) conductance + self.g_init = g_init + # single number of spikes + if isinstance(nbr_spikes, int): + # isi duration in ms if spikes are evenly distributed + self.d = BIN_SIZE_MS / (nbr_spikes + 1) + # mean exp for calculating the mean conductance + self.mean_exp = np.mean(np.exp(-np.linspace(0, self.d, 100) / self.tau)) + # calculate the mean conductance + g_mean_arr = np.zeros((1, g_init.size)) + g_mean_arr[0] = self.mean_exp * np.mean( + np.stack(self._g_mean_recursive(lvl=nbr_spikes)), axis=0 + ) + return g_mean_arr + # multiple number of spikes + else: + g_mean_arr = np.zeros((nbr_spikes.size, g_init.size)) + for lvl_idx, lvl in enumerate(nbr_spikes): + # isi duration in ms if spikes are evenly distributed + self.d = BIN_SIZE_MS / (lvl + 1) + # mean exp for calculating the mean conductance + self.mean_exp = np.mean(np.exp(-np.linspace(0, self.d, 100) / self.tau)) + # calculate the mean conductance + g_mean_arr[lvl_idx] = self.mean_exp * np.mean( + np.stack(self._g_mean_recursive(lvl=lvl)), axis=0 + ) + print(lvl, g_mean_arr[lvl_idx]) + return g_mean_arr + + def _foo(self, lvl): + if lvl == 0: + ret = self.g_init * np.exp(-self.d / self.tau) + self.w + return ret + return self._foo(lvl - 1) * np.exp(-self.d / self.tau) + self.w + + def _g_mean_recursive(self, lvl): + if lvl == 0: + return [self.g_init] + ret_rec = 
self._g_mean_recursive(lvl - 1) + ret_rec.append(self._foo(lvl - 1)) + return ret_rec + + def show_conductance(self, nbr_spikes: int, g_init: np.ndarray): + """ + Show the conductance of the post population neurons for the given number of spikes + and initial (prev) conductances. + + Args: + nbr_spikes (int): + The number of incoming spikes. + g_init (np.ndarray): + The initial (prev) conductances of the post population neurons. + """ + timestep = 0.0001 + # time over bin + t_arr = np.arange(0, BIN_SIZE_MS, timestep) + # when spikes occur + spike_idx = np.arange( + t_arr.size // (nbr_spikes + 1), + t_arr.size - (t_arr.size // (nbr_spikes + 1)) / 2, + t_arr.size // (nbr_spikes + 1), + ) + # loop over time and calculate conductance + g = g_init + g_list = [] + for t_idx, t in enumerate(t_arr): + g = g - (g / self.tau) * timestep + if t_idx in spike_idx: + g = g + self.w + g_list.append(g) + # plot the conductance + g_mean = np.mean(np.stack(g_list), axis=0) + plt.title(g_mean) + plt.plot(t_arr, g_list) + plt.show() + dist_pre_spikes = DistPreSpikes( spikes_dict=recordings[0]["pre;spike"], time_lims_steps=(0, 2000) ) -DistPostSpikesAndVoltage( +dist_post = DistPostSpikesAndVoltage( spikes_dict=recordings[0]["post;spike"], time_lims_steps=(0, 2000), voltage_array=recordings[0]["post;v"], ) -# Plot the results -x, pdf_spikes_pre = dist_pre_spikes.pdf(time_bin=0) -plt.fill_between(x[:, 0], pdf_spikes_pre, alpha=0.5) -plt.xlabel("Value") -plt.ylabel("Density") -plt.title("Kernel Density Estimation") -plt.show() - - -def get_pdf_post(): - # Create the KDE object - kde = KernelDensity() - - time_bin = -1 - # combine the spike and voltage data - data = np.concatenate( - [spikes_post_binned[time_bin], voltage_binned[time_bin]], axis=1 - ) - - print(data) - # Fit the data to the KDE - kde.fit(data) - # Create points for which to estimate the PDF, here, 2D for spike and voltage - # spike between 0 and 10, voltage between -100 and 100 - x = np.mgrid[0:10:100j, 
-100:100:100j].reshape(2, -1).T - - print(x) - print(x.shape) +dist_synapses = DistSynapses( + pre_pop_size=pop_pre.size, connection_probability=CONNECTION_PROB +) - # Estimate the PDF for these points - log_density = kde.score_samples(x) - pdf = np.exp(log_density) +dist_incoming_spikes = DistIncomingSpikes( + dist_pre_spikes=dist_pre_spikes, dist_synapses=dist_synapses +) - ### spikes can only be positive - pdf[x[:, 0] >= 0] = pdf[x[:, 0] >= 0] * ( - 1 + np.sum(pdf[x[:, 0] < 0]) / np.sum(pdf[x[:, 0] >= 0]) +# Plot the results +dist_pre_spikes.show_dist(time_bin=10) +dist_pre_spikes.show_dist(time_bin=-1) +dist_post.show_dist(time_bin=10) +dist_post.show_dist(time_bin=-1) +dist_synapses.show_dist() +dist_incoming_spikes.show_dist(time_bin=10) +dist_incoming_spikes.show_dist(time_bin=-1) + + +conductance_calc = ConductanceCalc(tau=pop_post.tau_ampa, w=WEIGHTS) +print( + conductance_calc.g_mean( + nbr_spikes=np.array([5, 10]), g_init=np.array([0, 0.5, 1.0]) ) - pdf[x[:, 0] < 0] = 0 - # print(x[18:22]) - # print(pdf[18:22]) - # print(x[118:122]) - # print(pdf[118:122]) - pdf = pdf.reshape(100, 100) - - print(np.sum(pdf) * (20 / 100) * (200 / 100)) - - return x, pdf - - -# plot the results -x, pdf_post = get_pdf_post() -plt.imshow(pdf_post, aspect="auto", extent=(-100, 100, 0, 10), origin="lower") -plt.xlabel("voltages") -plt.ylabel("spikes") -plt.title("Kernel Density Estimation") -plt.show() +) +conductance_calc.show_conductance(nbr_spikes=5, g_init=np.array([0, 0.5, 1.0])) +conductance_calc.show_conductance(nbr_spikes=10, g_init=np.array([0, 0.5, 1.0])) # ### discrete because spikes are integers From 1166622b69b1faa15b247e1db8e349821941f3d4 Mon Sep 17 00:00:00 2001 From: olimaol Date: Wed, 17 Apr 2024 15:01:50 +0200 Subject: [PATCH 16/39] implemented method to obtain current conductance distribution from previous conductance distribution and incoming spikes --- .../examples/model_configurator/test.py | 326 ++++++++++++++---- 1 file changed, 250 insertions(+), 76 
deletions(-) diff --git a/src/CompNeuroPy/examples/model_configurator/test.py b/src/CompNeuroPy/examples/model_configurator/test.py index a84e84c..66bb8e2 100644 --- a/src/CompNeuroPy/examples/model_configurator/test.py +++ b/src/CompNeuroPy/examples/model_configurator/test.py @@ -21,6 +21,7 @@ from scipy.stats import binom from functools import wraps import time +from collections.abc import Iterable setup(dt=0.1) @@ -199,29 +200,36 @@ def pdf(self, time_bin=0): return ret # Create the KDE object - kde = KernelDensity() + kde = KernelDensity(kernel="linear") + # scale data to have standard deviation of 1 + # if all values are the same return pdf with 1 + if np.std(self.spikes_binned[time_bin]) == 0: + return (np.array([self.spikes_binned[time_bin][0]]), np.array([1])) + else: + scale = 1 / np.std(self.spikes_binned[time_bin]) # Fit the data to the KDE - kde.fit(self.spikes_binned[time_bin]) + kde.fit(scale * self.spikes_binned[time_bin]) # Create points for which to estimate the PDF - x = np.linspace(-10, 10, 200).reshape(-1, 1) - # Estimate the PDF for these points - log_density = kde.score_samples(x) - pdf = np.exp(log_density) # spikes can only be positive - pdf[x[:, 0] >= 0] = pdf[x[:, 0] >= 0] * ( - 1 + np.sum(pdf[x[:, 0] < 0]) / np.sum(pdf[x[:, 0] >= 0]) + pdf_min = 0 + pdf_max = int( + round( + np.max(self.spikes_binned[time_bin]) + + np.std(self.spikes_binned[time_bin]) + ) ) - pdf = pdf[x[:, 0] >= 0] - x = x[x[:, 0] >= 0] + x = np.linspace(pdf_min, pdf_max, 100).reshape(-1, 1) + # Estimate the PDF for these points + log_density = kde.score_samples(scale * x) + pdf = np.exp(log_density) # spikes are discrete, sum values between 0 and 1, and between 1 and 2, etc. 
- pdf_discrete = np.zeros(10) - for i in range(10): + pdf_discrete_size = int(round(pdf_max - pdf_min)) + pdf_discrete = np.zeros(pdf_discrete_size) + for i in range(pdf_discrete_size): pdf_discrete[i] = np.sum(pdf[(x[:, 0] >= i) & (x[:, 0] < i + 1)]) - # to keep the density, divide by 10 (before step size was 10/100, now 10/10) - pdf_discrete = pdf_discrete / 10 - # sum is almost 1, but not exactly, so normalize + # stepsize is now 1 --> normalize to sum to 1 pdf_discrete = pdf_discrete / np.sum(pdf_discrete) - x_discrete = np.arange(0, 10) + x_discrete = np.arange(pdf_min, pdf_max).astype(int) # store the pdf ret = (x_discrete, pdf_discrete) self._pdf_dict[time_bin] = ret @@ -238,7 +246,7 @@ def show_dist(self, time_bin=0): The time bin to show the distribution for. """ x, pdf = self.pdf(time_bin=time_bin) - plt.bar(x, pdf, alpha=0.5) + plt.bar(x, pdf, alpha=0.5, width=1) plt.xlabel("Spikes") plt.ylabel("Density") plt.title("Spikes Distribution") @@ -292,30 +300,69 @@ def pdf(self, time_bin=0): return ret # Create the KDE object - kde = KernelDensity() + kde = KernelDensity(kernel="linear") + # scale data to have standard deviation of 1 + # if all values are the same, scale is 1 + if np.std(self.spikes_post_binned[time_bin]) == 0: + scale_spikes = 1 + else: + scale_spikes = 1 / np.std(self.spikes_post_binned[time_bin]) + if np.std(self.voltage_binned[time_bin]) == 0: + scale_voltage = 1 + else: + scale_voltage = 1 / np.std(self.voltage_binned[time_bin]) # combine the spike and voltage data - data = np.concatenate( - [self.spikes_post_binned[time_bin], self.voltage_binned[time_bin]], axis=1 + train_data = np.concatenate( + [ + scale_spikes * self.spikes_post_binned[time_bin], + scale_voltage * self.voltage_binned[time_bin], + ], + axis=1, ) # Fit the data to the KDE - kde.fit(data) + kde.fit(train_data) # Create points for which to estimate the PDF, here, 2D for spike and voltage - # spike between -10 and 10 (will later be changed to 0-10), voltage between - # 
-100 and 0 - x = np.mgrid[-10:10:200j, -100:0:100j].reshape(2, -1).T + # spike between 0 and pdf_spikes_max (depends on data), voltage between -100 and 0 + pdf_spikes_min = 0 + pdf_spikes_max = int( + round( + np.max(self.spikes_post_binned[time_bin]) + + np.std(self.spikes_post_binned[time_bin]) + ) + ) + pdf_spikes_max = max(pdf_spikes_max, 1) + x = np.mgrid[pdf_spikes_min:pdf_spikes_max:100j, -100:0:100j].reshape(2, -1).T # Estimate the PDF for these points - log_density = kde.score_samples(x) + x_estimate = np.copy(x) + x_estimate[:, 0] = scale_spikes * x_estimate[:, 0] + x_estimate[:, 1] = scale_voltage * x_estimate[:, 1] + log_density = kde.score_samples(x_estimate) pdf = np.exp(log_density) - ### spikes can only be positive (only use positive side of distribution) - pdf[x[:, 0] >= 0] = pdf[x[:, 0] >= 0] * ( - 1 + np.sum(pdf[x[:, 0] < 0]) / np.sum(pdf[x[:, 0] >= 0]) - ) - pdf = pdf[x[:, 0] >= 0] - x = x[x[:, 0] >= 0] + # spikes are discrete, sum values between 0 and 1, and between 1 and 2, etc. 
+ pdf = pdf.reshape(100, 100) + x = x.reshape(100, 100, 2) + pdf_discrete_size = int(round(pdf_spikes_max - pdf_spikes_min)) + pdf_discrete = np.zeros((pdf_discrete_size, 100)) + for i in range(pdf_discrete_size): + pdf_discrete[i] = np.sum( + pdf[(x[:, 0, 0] >= i) & (x[:, 0, 0] < i + 1)], axis=0 + ) + x_discrete_spikes = np.arange(pdf_spikes_min, pdf_spikes_max).astype(int) + x_discrete_voltage = np.linspace(-100, 0, 100) + # x_discrete are all combinations of x_discrete_spikes and x_discrete_voltage + x_discrete = np.zeros((pdf_discrete_size * 100, 2)) + for i in range(pdf_discrete_size): + x_discrete[i * 100 : (i + 1) * 100, 0] = x_discrete_spikes[i] + x_discrete[i * 100 : (i + 1) * 100, 1] = x_discrete_voltage + x_discrete = x_discrete.reshape(pdf_discrete_size, 100, 2) + ### normalize so that sum*stepsize = 1 (stepsize of spikes is 1) + stepsize = 1 * (x_discrete[0, 1, 1] - x_discrete[0, 0, 1]) + pdf_discrete = pdf_discrete / np.sum(pdf_discrete) / stepsize # store the pdf - self._pdf_dict[time_bin] = (x, pdf) + ret = (x_discrete, pdf_discrete) + self._pdf_dict[time_bin] = ret # return the pdf - return x, pdf + return ret def show_dist(self, time_bin=0): """ @@ -327,8 +374,7 @@ def show_dist(self, time_bin=0): The time bin to show the distribution for. 
""" x, pdf = self.pdf(time_bin=time_bin) - pdf = pdf.reshape(100, 100) - extend = [x[:, 1].min(), x[:, 1].max(), x[:, 0].max(), x[:, 0].min()] + extend = [x[0, 0, 1], x[0, -1, 1], x[-1, 0, 0] + 0.5, x[0, 0, 0] - 0.5] plt.imshow(pdf, extent=extend, aspect="auto") plt.xlabel("Voltage") plt.ylabel("Spikes") @@ -353,7 +399,7 @@ def __init__(self, pre_pop_size, connection_probability): number_synapses.ppf(0.05), number_synapses.ppf(0.95) + 1 ).astype(int) self._pdf = number_synapses.pmf(self._x) - ### normalize the pdf to sum to 1 (sum is already almost 1) + ### normalize the pdf to sum to 1 (since stepsize is 1) self._pdf = self._pdf / np.sum(self._pdf) def pdf(self): @@ -373,7 +419,7 @@ def show_dist(self): Show the distribution of the number of synapses of the post population neurons. """ x, pdf = self.pdf() - plt.bar(x, pdf, alpha=0.5) + plt.bar(x, pdf, alpha=0.5, width=1) plt.xlabel("Synapses") plt.ylabel("Density") plt.title("Synapses Distribution") @@ -440,6 +486,11 @@ def pdf(self, time_bin=0): pdf_incoming_spike_count_arr = np.add.reduceat( pdf_incoming_spike_count_arr, unique_indices ) + ### normalize the pdf to sum to 1 (since stepsize is 1) (it is already + ### normalized but maybe rounding errors) + pdf_incoming_spike_count_arr = pdf_incoming_spike_count_arr / np.sum( + pdf_incoming_spike_count_arr + ) ### store the pdf ret = (incoming_spike_count_arr, pdf_incoming_spike_count_arr) self._pdf_dict[time_bin] = ret @@ -452,7 +503,7 @@ def show_dist(self, time_bin=0): the given time bin. 
""" x, pdf = self.pdf(time_bin=time_bin) - plt.bar(x, pdf, alpha=0.5) + plt.bar(x, pdf, alpha=0.5, width=1) plt.xlabel("Incoming Spikes") plt.ylabel("Density") plt.title("Incoming Spikes Distribution") @@ -495,16 +546,16 @@ def g_mean(self, nbr_spikes: int | np.ndarray, g_init: np.ndarray): # initial (prev) conductance self.g_init = g_init - # single number of spikes - if isinstance(nbr_spikes, int): + # single number of spikes (check if nbr_spikes is iterable) + if not isinstance(nbr_spikes, Iterable): # isi duration in ms if spikes are evenly distributed - self.d = BIN_SIZE_MS / (nbr_spikes + 1) + self.d = BIN_SIZE_MS / (int(nbr_spikes) + 1) # mean exp for calculating the mean conductance self.mean_exp = np.mean(np.exp(-np.linspace(0, self.d, 100) / self.tau)) # calculate the mean conductance g_mean_arr = np.zeros((1, g_init.size)) g_mean_arr[0] = self.mean_exp * np.mean( - np.stack(self._g_mean_recursive(lvl=nbr_spikes)), axis=0 + np.stack(self._g_mean_recursive(lvl=int(nbr_spikes))), axis=0 ) return g_mean_arr # multiple number of spikes @@ -512,14 +563,13 @@ def g_mean(self, nbr_spikes: int | np.ndarray, g_init: np.ndarray): g_mean_arr = np.zeros((nbr_spikes.size, g_init.size)) for lvl_idx, lvl in enumerate(nbr_spikes): # isi duration in ms if spikes are evenly distributed - self.d = BIN_SIZE_MS / (lvl + 1) + self.d = BIN_SIZE_MS / (int(lvl) + 1) # mean exp for calculating the mean conductance self.mean_exp = np.mean(np.exp(-np.linspace(0, self.d, 100) / self.tau)) # calculate the mean conductance g_mean_arr[lvl_idx] = self.mean_exp * np.mean( - np.stack(self._g_mean_recursive(lvl=lvl)), axis=0 + np.stack(self._g_mean_recursive(lvl=int(lvl))), axis=0 ) - print(lvl, g_mean_arr[lvl_idx]) return g_mean_arr def _foo(self, lvl): @@ -570,6 +620,140 @@ def show_conductance(self, nbr_spikes: int, g_init: np.ndarray): plt.show() +class DistCurrentConductance: + + def __init__(self, tau, w): + """ + Create a current conductance object with the given synaptic decay time 
constant + and weight. + + Args: + tau (float): + The synaptic decay time constant. + w (float): + The synaptic weight. + """ + self.conductance_calc = ConductanceCalc(tau=tau, w=w) + + def pdf( + self, + incoming_spikes_count_arr, + pdf_incoming_spikes_count_arr, + prev_g_arr=np.array([0, 10]), + pdf_prev_g_arr=np.array([0.8, 0.2]), + ): + """ + Get the PDF of the current conductances of the post population for the given + incoming spikes distribution and previous conductances distribution. + + Args: + incoming_spikes_count_arr (np.ndarray): + The incoming spike count values. + pdf_incoming_spikes_count_arr (np.ndarray): + The PDF values for the corresponding incoming spike count values. + prev_g_arr (np.ndarray): + The previous conductance values. + pdf_prev_g_arr (np.ndarray): + The PDF values for the corresponding previous conductance values. + + Returns: + x (np.ndarray): + The current conductance values of the PDF. + pdf (np.ndarray): + The PDF values for the corresponding current conductance values. 
+ """ + ### get the pdf by combining the pdfs of the incoming spikes and previous conductances + pdf_current_g_arr = np.outer(pdf_incoming_spikes_count_arr, pdf_prev_g_arr) + print(pdf_current_g_arr) + + current_g_arr = np.empty((incoming_spikes_count_arr.size, prev_g_arr.size)) + for incoming_spikes_count_idx, incoming_spikes_count in enumerate( + incoming_spikes_count_arr + ): + current_g_arr[incoming_spikes_count_idx] = conductance_calc.g_mean( + nbr_spikes=incoming_spikes_count, g_init=prev_g_arr + ) + print(current_g_arr) + + ### use the conductance and corresponding pdf samples to estimate the density + pdf_current_g_arr = pdf_current_g_arr.flatten() + current_g_arr = current_g_arr.flatten() + + # scale the current_g_arr so that the standard deviation is 1 + # if all values are the same, scale is 1 + if np.std(current_g_arr) == 0: + scale = 1 + else: + scale = 1 / np.std(current_g_arr) + # Create the KDE object + kde = KernelDensity(kernel="linear") + # Fit the data to the KDE only use the samples with non-zero pdf + kde.fit( + X=scale * current_g_arr[pdf_current_g_arr > 0].reshape(-1, 1), + sample_weight=pdf_current_g_arr[pdf_current_g_arr > 0], + ) + # Create points for which to estimate the PDF, values can only be between 0 and max + x = np.linspace(0, current_g_arr.max(), 200).reshape(-1, 1) + # Estimate the PDF for these points + log_density = kde.score_samples(x * scale) + pdf = np.exp(log_density) + + print(np.sum(pdf)) + ### normalize so that sum*stepsize = 1 + stepsize = x[1, 0] - x[0, 0] + pdf = pdf / np.sum(pdf) / stepsize + print(np.sum(pdf) * stepsize) + print(stepsize) + ret = (x[:, 0], pdf) + + return ret + + def show_dist( + self, + incoming_spikes_count_arr, + pdf_incoming_spikes_count_arr, + prev_g_arr, + pdf_prev_g_arr, + ): + """ + Show the distribution of the current conductances of the post population for the + given incoming spikes distribution and previous conductances distribution. 
+ """ + + x, pdf = self.pdf( + incoming_spikes_count_arr=incoming_spikes_count_arr, + pdf_incoming_spikes_count_arr=pdf_incoming_spikes_count_arr, + prev_g_arr=prev_g_arr, + pdf_prev_g_arr=pdf_prev_g_arr, + ) + + plt.subplot(311) + plt.bar( + incoming_spikes_count_arr, pdf_incoming_spikes_count_arr, alpha=0.5, width=1 + ) + plt.xlabel("Incoming Spikes") + plt.ylabel("Density") + plt.title("Incoming Spikes Distribution") + + plt.subplot(312) + if len(prev_g_arr) > 1: + width = prev_g_arr[1] - prev_g_arr[0] + else: + width = 1 + plt.bar(prev_g_arr, pdf_prev_g_arr, alpha=0.5, width=width) + plt.xlabel("Conductance") + plt.ylabel("Density") + plt.title("Previous Conductance Distribution") + + plt.subplot(313) + plt.bar(x, pdf, alpha=0.5, width=x[1] - x[0]) + plt.xlabel("Conductance") + plt.ylabel("Density") + plt.title("Conductance Distribution") + plt.tight_layout() + plt.show() + + dist_pre_spikes = DistPreSpikes( spikes_dict=recordings[0]["pre;spike"], time_lims_steps=(0, 2000) ) @@ -588,37 +772,27 @@ def show_conductance(self, nbr_spikes: int, g_init: np.ndarray): dist_pre_spikes=dist_pre_spikes, dist_synapses=dist_synapses ) -# Plot the results -dist_pre_spikes.show_dist(time_bin=10) -dist_pre_spikes.show_dist(time_bin=-1) -dist_post.show_dist(time_bin=10) -dist_post.show_dist(time_bin=-1) -dist_synapses.show_dist() -dist_incoming_spikes.show_dist(time_bin=10) -dist_incoming_spikes.show_dist(time_bin=-1) +conductance_calc = ConductanceCalc(tau=pop_post.tau_ampa, w=WEIGHTS) +dist_current_conductance = DistCurrentConductance(tau=pop_post.tau_ampa, w=WEIGHTS) -conductance_calc = ConductanceCalc(tau=pop_post.tau_ampa, w=WEIGHTS) -print( - conductance_calc.g_mean( - nbr_spikes=np.array([5, 10]), g_init=np.array([0, 0.5, 1.0]) +# Plot the results +PLOT_EXAMPLES = True +if PLOT_EXAMPLES: + # dist_pre_spikes.show_dist(time_bin=10) + # dist_pre_spikes.show_dist(time_bin=-1) + # dist_post.show_dist(time_bin=10) + # dist_post.show_dist(time_bin=-1) + # 
dist_synapses.show_dist() + # dist_incoming_spikes.show_dist(time_bin=10) + # dist_incoming_spikes.show_dist(time_bin=-1) + # conductance_calc.show_conductance(nbr_spikes=5, g_init=np.array([0, 0.5, 1.0, 8.0])) + incoming_spikes_count_arr, pdf_incoming_spikes_count_arr = dist_incoming_spikes.pdf( + time_bin=-1 + ) + dist_current_conductance.show_dist( + incoming_spikes_count_arr=incoming_spikes_count_arr, + pdf_incoming_spikes_count_arr=pdf_incoming_spikes_count_arr, + prev_g_arr=np.array([0, 80.0]), + pdf_prev_g_arr=np.array([0.5, 0.5]), ) -) -conductance_calc.show_conductance(nbr_spikes=5, g_init=np.array([0, 0.5, 1.0])) -conductance_calc.show_conductance(nbr_spikes=10, g_init=np.array([0, 0.5, 1.0])) - - -# ### discrete because spikes are integers -# x = np.arange(-0.5, 20.5, 1.0) -# kde_discrete = np.histogram(counts_matrix[0], x, density=True)[0] -# kde_discrete2 = np.histogram(counts_matrix[-1], x, density=True)[0] -# plt.subplot(211) -# plt.bar(np.arange(kde_discrete.size).astype(int), kde_discrete, alpha=0.5) -# plt.xlabel("Value") -# plt.ylabel("Density") -# plt.title("Kernel Density Estimation") -# plt.subplot(212) -# plt.bar(np.arange(kde_discrete.size).astype(int), kde_discrete2, alpha=0.5) -# plt.xlabel("Value") -# plt.ylabel("Density") -# plt.show() From c06cedd5c5730be478b1fa12e3f5e6e192c41fce Mon Sep 17 00:00:00 2001 From: olmai Date: Thu, 18 Apr 2024 14:40:40 +0200 Subject: [PATCH 17/39] . 
--- .../examples/model_configurator/test.py | 255 +++++++++++++++--- 1 file changed, 216 insertions(+), 39 deletions(-) diff --git a/src/CompNeuroPy/examples/model_configurator/test.py b/src/CompNeuroPy/examples/model_configurator/test.py index 66bb8e2..f1cab4b 100644 --- a/src/CompNeuroPy/examples/model_configurator/test.py +++ b/src/CompNeuroPy/examples/model_configurator/test.py @@ -66,7 +66,7 @@ monitors = CompNeuroMonitors( - mon_dict={"pre": ["spike"], "post": ["v", "spike", "I_ampa"]} + mon_dict={"pre": ["spike"], "post": ["v", "spike", "I_ampa", "g_ampa"]} ) compile() @@ -89,7 +89,7 @@ "position": [1, 3, 4], "compartment": ["pre", "post", "post"], "variable": ["spike", "spike", "v"], - "format": ["hybrid", "hybrid", "line"], + "format": ["hybrid", "hybrid", "line_mean"], }, ) @@ -223,13 +223,13 @@ def pdf(self, time_bin=0): log_density = kde.score_samples(scale * x) pdf = np.exp(log_density) # spikes are discrete, sum values between 0 and 1, and between 1 and 2, etc. - pdf_discrete_size = int(round(pdf_max - pdf_min)) + pdf_discrete_size = int(round(pdf_max - pdf_min) + 1) pdf_discrete = np.zeros(pdf_discrete_size) for i in range(pdf_discrete_size): pdf_discrete[i] = np.sum(pdf[(x[:, 0] >= i) & (x[:, 0] < i + 1)]) # stepsize is now 1 --> normalize to sum to 1 pdf_discrete = pdf_discrete / np.sum(pdf_discrete) - x_discrete = np.arange(pdf_min, pdf_max).astype(int) + x_discrete = np.arange(pdf_min, pdf_max + 0.5).astype(int) # store the pdf ret = (x_discrete, pdf_discrete) self._pdf_dict[time_bin] = ret @@ -341,13 +341,13 @@ def pdf(self, time_bin=0): # spikes are discrete, sum values between 0 and 1, and between 1 and 2, etc. 
pdf = pdf.reshape(100, 100) x = x.reshape(100, 100, 2) - pdf_discrete_size = int(round(pdf_spikes_max - pdf_spikes_min)) + pdf_discrete_size = int(round(pdf_spikes_max - pdf_spikes_min) + 1) pdf_discrete = np.zeros((pdf_discrete_size, 100)) for i in range(pdf_discrete_size): pdf_discrete[i] = np.sum( pdf[(x[:, 0, 0] >= i) & (x[:, 0, 0] < i + 1)], axis=0 ) - x_discrete_spikes = np.arange(pdf_spikes_min, pdf_spikes_max).astype(int) + x_discrete_spikes = np.arange(pdf_spikes_min, pdf_spikes_max + 0.5).astype(int) x_discrete_voltage = np.linspace(-100, 0, 100) # x_discrete are all combinations of x_discrete_spikes and x_discrete_voltage x_discrete = np.zeros((pdf_discrete_size * 100, 2)) @@ -358,6 +358,12 @@ def pdf(self, time_bin=0): ### normalize so that sum*stepsize = 1 (stepsize of spikes is 1) stepsize = 1 * (x_discrete[0, 1, 1] - x_discrete[0, 0, 1]) pdf_discrete = pdf_discrete / np.sum(pdf_discrete) / stepsize + # get indices of rows and columns where sum of pdf is not 0 + row_indices = np.where(np.sum(pdf_discrete, axis=1) > 0)[0] + col_indices = np.where(np.sum(pdf_discrete, axis=0) > 0)[0] + # get the pdf values and corresponding x values for the non-zero values + pdf_discrete = pdf_discrete[row_indices][:, col_indices] + x_discrete = x_discrete[row_indices][:, col_indices] # store the pdf ret = (x_discrete, pdf_discrete) self._pdf_dict[time_bin] = ret @@ -374,11 +380,92 @@ def show_dist(self, time_bin=0): The time bin to show the distribution for. 
""" x, pdf = self.pdf(time_bin=time_bin) - extend = [x[0, 0, 1], x[0, -1, 1], x[-1, 0, 0] + 0.5, x[0, 0, 0] - 0.5] - plt.imshow(pdf, extent=extend, aspect="auto") - plt.xlabel("Voltage") - plt.ylabel("Spikes") - plt.title("Voltage-Spikes Distribution") + if x.shape[0] == 1: + plt.bar(x[0, :, 1], pdf[0, :]) + plt.xlabel("Voltage") + plt.ylabel("Density") + plt.title(f"Voltage Distribution, Spikes={x[0, 0, 0]}") + plt.show() + else: + plt.pcolormesh(x[:, :, 0], x[:, :, 1], pdf) + plt.xlabel("Spikes") + plt.ylabel("Voltage") + plt.title("Voltage-Spikes Distribution") + plt.show() + + +class DistPostConductance: + + def __init__(self, conductance_array): + """ + Create a distribution object for the given conductance array. + + Args: + conductance_array (np.ndarray): + The voltage array of the post population. + """ + ### bin conductance over time + self.g_binned = get_binned_variable( + conductance_array, BIN_SIZE_STEPS=BIN_SIZE_STEPS + ) + ### initial pdf dict + self._pdf_dict = {} + + def pdf(self, time_bin=0): + """ + Get the PDF of the conductance values over the population for the given time bin. + + Args: + time_bin (int): + The time bin to get the PDF for. + + Returns: + x (np.ndarray): + The conductance values of the PDF. Array of shape (100,). + pdf (np.ndarray): + The PDF values for the corresponding conductance values. 
+ """ + ret = self._pdf_dict.get(time_bin) + if ret is not None: + return ret + + # Create the KDE object + kde = KernelDensity(kernel="linear") + # scale data to have standard deviation of 1 + # if all values are the same, return this value with pdf 1 + if np.std(self.g_binned[time_bin]) == 0: + return (np.array([self.g_binned[time_bin][0]]), np.array([1])) + else: + scale = 1 / np.std(self.g_binned[time_bin]) + # Fit the data to the KDE + kde.fit(scale * self.g_binned[time_bin].reshape(-1, 1)) + # Create points for which to estimate the PDF, values can only be between 0 and max + pdf_max = self.g_binned[time_bin].max() + np.std(self.g_binned[time_bin]) + x = np.linspace(0, pdf_max, 100).reshape(-1, 1) + # Estimate the PDF for these points + log_density = kde.score_samples(x * scale) + pdf = np.exp(log_density) + ### normalize so that sum*stepsize = 1 + stepsize = x[1, 0] - x[0, 0] + pdf = pdf / np.sum(pdf) / stepsize + ret = (x[:, 0], pdf) + + return ret + + def show_dist(self, time_bin=0): + """ + Show the distribution of the conductances over the population for the given time + bin. + + Args: + time_bin (int): + The time bin to show the distribution for. + """ + x, pdf = self.pdf(time_bin=time_bin) + plt.bar(x, pdf, alpha=0.5, width=x[1] - x[0]) + plt.xlabel("Conductance") + plt.ylabel("Density") + plt.title("Conductance Distribution") plt.show() @@ -538,10 +625,14 @@ def g_mean(self, nbr_spikes: int | np.ndarray, g_init: np.ndarray): The initial (prev) conductances of the post population neurons. Returns: - np.ndarray: + g_mean_arr (np.ndarray): The mean conductance values for the given number of spikes and initial (prev) conductances. First axis is the number of spikes and second axis is the initial conductance values. + g_end_arr (np.ndarray): + The end conductance values for the given number of spikes and initial + (prev) conductances. First axis is the number of spikes and second axis + is the initial conductance values. 
""" # initial (prev) conductance @@ -554,13 +645,16 @@ def g_mean(self, nbr_spikes: int | np.ndarray, g_init: np.ndarray): self.mean_exp = np.mean(np.exp(-np.linspace(0, self.d, 100) / self.tau)) # calculate the mean conductance g_mean_arr = np.zeros((1, g_init.size)) + g_end_arr = np.zeros((1, g_init.size)) g_mean_arr[0] = self.mean_exp * np.mean( np.stack(self._g_mean_recursive(lvl=int(nbr_spikes))), axis=0 ) - return g_mean_arr + g_end_arr[0] = self._foo(lvl=int(nbr_spikes)) - self.w + return g_mean_arr, g_end_arr # multiple number of spikes else: g_mean_arr = np.zeros((nbr_spikes.size, g_init.size)) + g_end_arr = np.zeros((nbr_spikes.size, g_init.size)) for lvl_idx, lvl in enumerate(nbr_spikes): # isi duration in ms if spikes are evenly distributed self.d = BIN_SIZE_MS / (int(lvl) + 1) @@ -570,7 +664,8 @@ def g_mean(self, nbr_spikes: int | np.ndarray, g_init: np.ndarray): g_mean_arr[lvl_idx] = self.mean_exp * np.mean( np.stack(self._g_mean_recursive(lvl=int(lvl))), axis=0 ) - return g_mean_arr + g_end_arr[lvl_idx] = self._foo(lvl=int(lvl)) - self.w + return g_mean_arr, g_end_arr def _foo(self, lvl): if lvl == 0: @@ -664,25 +759,35 @@ def pdf( """ ### get the pdf by combining the pdfs of the incoming spikes and previous conductances pdf_current_g_arr = np.outer(pdf_incoming_spikes_count_arr, pdf_prev_g_arr) - print(pdf_current_g_arr) current_g_arr = np.empty((incoming_spikes_count_arr.size, prev_g_arr.size)) + current_g_end_arr = np.empty((incoming_spikes_count_arr.size, prev_g_arr.size)) for incoming_spikes_count_idx, incoming_spikes_count in enumerate( incoming_spikes_count_arr ): - current_g_arr[incoming_spikes_count_idx] = conductance_calc.g_mean( + ( + current_g_arr[incoming_spikes_count_idx], + current_g_end_arr[incoming_spikes_count_idx], + ) = conductance_calc.g_mean( nbr_spikes=incoming_spikes_count, g_init=prev_g_arr ) - print(current_g_arr) + + ### get distribution for g_mean and g_end + ret_g_mean = self._get_pdf_of_g_arr(pdf_current_g_arr, 
current_g_arr) + ret_g_end = self._get_pdf_of_g_arr(pdf_current_g_arr, current_g_end_arr) + + return ret_g_mean, ret_g_end + + def _get_pdf_of_g_arr(self, pdf_current_g_arr, current_g_arr): ### use the conductance and corresponding pdf samples to estimate the density pdf_current_g_arr = pdf_current_g_arr.flatten() current_g_arr = current_g_arr.flatten() # scale the current_g_arr so that the standard deviation is 1 - # if all values are the same, scale is 1 + # if all values are the same, return this value with pdf 1 if np.std(current_g_arr) == 0: - scale = 1 + return (np.array([current_g_arr[0]]), np.array([1])) else: scale = 1 / np.std(current_g_arr) # Create the KDE object @@ -693,17 +798,13 @@ def pdf( sample_weight=pdf_current_g_arr[pdf_current_g_arr > 0], ) # Create points for which to estimate the PDF, values can only be between 0 and max - x = np.linspace(0, current_g_arr.max(), 200).reshape(-1, 1) + x = np.linspace(0, current_g_arr.max(), 100).reshape(-1, 1) # Estimate the PDF for these points log_density = kde.score_samples(x * scale) pdf = np.exp(log_density) - - print(np.sum(pdf)) ### normalize so that sum*stepsize = 1 stepsize = x[1, 0] - x[0, 0] pdf = pdf / np.sum(pdf) / stepsize - print(np.sum(pdf) * stepsize) - print(stepsize) ret = (x[:, 0], pdf) return ret @@ -720,14 +821,14 @@ def show_dist( given incoming spikes distribution and previous conductances distribution. 
""" - x, pdf = self.pdf( + dist_mean, dist_end = self.pdf( incoming_spikes_count_arr=incoming_spikes_count_arr, pdf_incoming_spikes_count_arr=pdf_incoming_spikes_count_arr, prev_g_arr=prev_g_arr, pdf_prev_g_arr=pdf_prev_g_arr, ) - - plt.subplot(311) + x, pdf = dist_mean + plt.subplot(411) plt.bar( incoming_spikes_count_arr, pdf_incoming_spikes_count_arr, alpha=0.5, width=1 ) @@ -735,7 +836,7 @@ def show_dist( plt.ylabel("Density") plt.title("Incoming Spikes Distribution") - plt.subplot(312) + plt.subplot(412) if len(prev_g_arr) > 1: width = prev_g_arr[1] - prev_g_arr[0] else: @@ -745,11 +846,24 @@ def show_dist( plt.ylabel("Density") plt.title("Previous Conductance Distribution") - plt.subplot(313) - plt.bar(x, pdf, alpha=0.5, width=x[1] - x[0]) - plt.xlabel("Conductance") + plt.subplot(413) + plt.bar( + dist_mean[0], + dist_mean[1], + alpha=0.5, + width=dist_mean[0][1] - dist_mean[0][0], + ) + plt.xlabel("Conductance Mean") plt.ylabel("Density") - plt.title("Conductance Distribution") + plt.title("Conductance Mean Distribution") + + plt.subplot(414) + plt.bar( + dist_end[0], dist_end[1], alpha=0.5, width=dist_end[0][1] - dist_end[0][0] + ) + plt.xlabel("Conductance End") + plt.ylabel("Density") + plt.title("Conductance End Distribution") plt.tight_layout() plt.show() @@ -776,16 +890,20 @@ def show_dist( dist_current_conductance = DistCurrentConductance(tau=pop_post.tau_ampa, w=WEIGHTS) +dist_post_conductance = DistPostConductance( + conductance_array=recordings[0]["post;g_ampa"] +) + # Plot the results PLOT_EXAMPLES = True if PLOT_EXAMPLES: - # dist_pre_spikes.show_dist(time_bin=10) - # dist_pre_spikes.show_dist(time_bin=-1) - # dist_post.show_dist(time_bin=10) - # dist_post.show_dist(time_bin=-1) - # dist_synapses.show_dist() - # dist_incoming_spikes.show_dist(time_bin=10) - # dist_incoming_spikes.show_dist(time_bin=-1) + dist_pre_spikes.show_dist(time_bin=10) + dist_pre_spikes.show_dist(time_bin=-1) + dist_post.show_dist(time_bin=10) + 
dist_post.show_dist(time_bin=-1) + dist_synapses.show_dist() + dist_incoming_spikes.show_dist(time_bin=10) + dist_incoming_spikes.show_dist(time_bin=-1) # conductance_calc.show_conductance(nbr_spikes=5, g_init=np.array([0, 0.5, 1.0, 8.0])) incoming_spikes_count_arr, pdf_incoming_spikes_count_arr = dist_incoming_spikes.pdf( time_bin=-1 @@ -796,3 +914,62 @@ def show_dist( prev_g_arr=np.array([0, 80.0]), pdf_prev_g_arr=np.array([0.5, 0.5]), ) + dist_post_conductance.show_dist(time_bin=10) + dist_post_conductance.show_dist(time_bin=-1) + + +nbr_bins = dist_pre_spikes.spikes_binned.shape[0] +g_end_dist = (np.array([0.0]), np.array([1.0])) +g_arr_list = [] +pdf_g_arr_list = [] +g_model_arr_list = [] +pdf_g_model_arr_list = [] +start = time.time() +for bin in range(nbr_bins): + ### get incoming spikes distribution + incoming_spikes_count_arr, pdf_incoming_spikes_count_arr = dist_incoming_spikes.pdf( + time_bin=bin + ) + ### get current conductance distribution + g_mean_dist, g_end_dist = dist_current_conductance.pdf( + incoming_spikes_count_arr=incoming_spikes_count_arr, + pdf_incoming_spikes_count_arr=pdf_incoming_spikes_count_arr, + prev_g_arr=g_end_dist[0], + pdf_prev_g_arr=g_end_dist[1], + ) + ### get current conductance distribution from model + g_model_arr, pdf_g_model_arr = dist_post_conductance.pdf(time_bin=bin) + ### store the conductance distribution + g_arr_list.append(g_mean_dist[0]) + pdf_g_arr_list.append(g_mean_dist[1]) + g_model_arr_list.append(g_model_arr) + pdf_g_model_arr_list.append(pdf_g_model_arr) +print("loop time:", time.time() - start) + +start = time.time() +# Plotting +plt.subplot(211) +for x, y, z in zip(range(len(g_arr_list)), g_arr_list, pdf_g_arr_list): + norm = plt.Normalize(z.min(), z.max()) + cmap = plt.get_cmap("viridis") + colors = cmap(norm(z)) + plt.hlines(y=y, xmin=x - norm(z) * 0.5, xmax=x + norm(z) * 0.5, color=colors) +plt.ylim(0, 200) +plt.xlabel("Time Bins") +plt.ylabel("Conductance") +plt.title("Conductance Distribution over 
Time Bins") +plt.subplot(212) +for x, y, z in zip( + range(len(g_model_arr_list)), g_model_arr_list, pdf_g_model_arr_list +): + norm = plt.Normalize(z.min(), z.max()) + cmap = plt.get_cmap("viridis") + colors = cmap(norm(z)) + plt.hlines(y=y, xmin=x - norm(z) * 0.5, xmax=x + norm(z) * 0.5, color=colors) +plt.ylim(0, 200) +plt.xlabel("Time Bins") +plt.ylabel("Conductance") +plt.title("Model Conductance Distribution over Time Bins") +plt.tight_layout() +print("plot time:", time.time() - start) +plt.show() From 8ddb0845f292984799f16a442fbd97cab87e05fe Mon Sep 17 00:00:00 2001 From: olimaol Date: Thu, 2 May 2024 09:22:13 +0200 Subject: [PATCH 18/39] changed DistPreSpikes pdf calculation to simple histogram (before kde) added DistIncomingSpikesFromModel (to compare with DistIncomingSpikes using the pre spikes and the synapse count pdf) changed how DistIncomingSpikes is calculated (before it was only an outer product, now it's the PDF of the n-th sum of a random variable (here n is the number of synapses and the random variable is the pre spikes) --- .../examples/model_configurator/test.py | 457 +++++++++++++----- 1 file changed, 347 insertions(+), 110 deletions(-) diff --git a/src/CompNeuroPy/examples/model_configurator/test.py b/src/CompNeuroPy/examples/model_configurator/test.py index f1cab4b..9927667 100644 --- a/src/CompNeuroPy/examples/model_configurator/test.py +++ b/src/CompNeuroPy/examples/model_configurator/test.py @@ -57,7 +57,7 @@ ) pop_pre = Population(100, neuron=PoissonNeuron(rates=10.0), name="pre") -pop_post = Population(100, neuron=neuron_izh, name="post") +pop_post = Population(1000, neuron=neuron_izh, name="post") CONNECTION_PROB = 0.1 WEIGHTS = 1.0 @@ -206,35 +206,122 @@ def pdf(self, time_bin=0): if np.std(self.spikes_binned[time_bin]) == 0: return (np.array([self.spikes_binned[time_bin][0]]), np.array([1])) else: - scale = 1 / np.std(self.spikes_binned[time_bin]) - # Fit the data to the KDE - kde.fit(scale * self.spikes_binned[time_bin]) - # Create 
points for which to estimate the PDF - # spikes can only be positive - pdf_min = 0 - pdf_max = int( - round( - np.max(self.spikes_binned[time_bin]) - + np.std(self.spikes_binned[time_bin]) + # create histogram from 0 to pdf_max with stepsize 1 + pdf_max = ( + int( + round( + np.max(self.spikes_binned[time_bin]) + + np.std(self.spikes_binned[time_bin]) + ) + ) + + 1 ) + ret = np.histogram( + self.spikes_binned[time_bin], + range=(0, pdf_max), + bins=np.arange(pdf_max + 1), + density=True, + ) + return (ret[1][:-1], ret[0]) + + def show_dist(self, time_bin=0): + """ + Show the distribution of the spike counts over the population for the given time + bin. + + Args: + time_bin (int): + The time bin to show the distribution for. + """ + x, pdf = self.pdf(time_bin=time_bin) + plt.bar(x, pdf, alpha=0.5, width=1) + plt.xlabel("Spikes") + plt.ylabel("Density") + plt.title("Spikes Distribution") + plt.show() + + +class DistIncomingSpikesFromModel: + + def __init__(self, pre_spikes_dict, proj, time_lims_steps): + """ + Create a distribution object for the incoming spikes. + + Args: + pre_spikes_dict (dict): + A dictionary of spike times for each neuron of the pre population. + proj (Projection): + The projection object from the pre to the post population. + time_lims_steps (tuple[int, int]): + The time limits of the pre_spikes_dict in steps. 
+ """ + ### TODO check if calculated incoming spikes distribution (calculated using pre spikes distribution + synapse distribution) is correct + ### obtain this distribution from the model + ### use the spike dictionary of the pre pop and then transform it into incoming spikee dict for the post pop and then create a distribution + ### to transform the spike dict into an incoming spikes dict, wee need to know for each post neuron, to which pre neurons it is connected + + incoming_spikes_dict = {} + ### loop over each post neuron + for post_neuron_idx, post_neuron in enumerate(proj): + incoming_spikes_dict[post_neuron_idx] = [] + if post_neuron is not None: + ### loop over each pre neuron connected to the post neuron + for pre_neuron in post_neuron.pre_ranks: + ### store the spikes emitted by the pre neuron as incoming spikes for the post neuron + incoming_spikes_dict[post_neuron_idx].extend( + pre_spikes_dict[pre_neuron] + ) + ### sort the incoming spikes + incoming_spikes_dict[post_neuron_idx].sort() + + ### bin the incoming spikes + self.incoming_spikes_binned = get_binned_spikes( + spikes_dict=incoming_spikes_dict, + time_lims_steps=time_lims_steps, + BIN_SIZE_STEPS=BIN_SIZE_STEPS, ) - x = np.linspace(pdf_min, pdf_max, 100).reshape(-1, 1) - # Estimate the PDF for these points - log_density = kde.score_samples(scale * x) - pdf = np.exp(log_density) - # spikes are discrete, sum values between 0 and 1, and between 1 and 2, etc. 
- pdf_discrete_size = int(round(pdf_max - pdf_min) + 1) - pdf_discrete = np.zeros(pdf_discrete_size) - for i in range(pdf_discrete_size): - pdf_discrete[i] = np.sum(pdf[(x[:, 0] >= i) & (x[:, 0] < i + 1)]) - # stepsize is now 1 --> normalize to sum to 1 - pdf_discrete = pdf_discrete / np.sum(pdf_discrete) - x_discrete = np.arange(pdf_min, pdf_max + 0.5).astype(int) - # store the pdf - ret = (x_discrete, pdf_discrete) - self._pdf_dict[time_bin] = ret - # return the pdf - return ret + self._pdf_dict = {} + + def pdf(self, time_bin=0): + """ + Get the PDF of the incoming spike counts over the (post) population for the + given time bin. + + Args: + time_bin (int): + The time bin to get the PDF for. + + Returns: + x (np.ndarray): + The incoming spike count values of the PDF. + pdf (np.ndarray): + The PDF values for the corresponding incoming spike count values. + """ + ret = self._pdf_dict.get(time_bin) + if ret is not None: + return ret + + # if all values are the same return pdf with 1 + if np.std(self.incoming_spikes_binned[time_bin]) == 0: + return (np.array([self.incoming_spikes_binned[time_bin][0]]), np.array([1])) + else: + # create histogram from 0 to pdf_max with stepsize 1 + pdf_max = ( + int( + round( + np.max(self.incoming_spikes_binned[time_bin]) + + np.std(self.incoming_spikes_binned[time_bin]) + ) + ) + + 1 + ) + ret = np.histogram( + self.incoming_spikes_binned[time_bin], + range=(0, pdf_max), + bins=np.arange(pdf_max + 1), + density=True, + ) + return (ret[1][:-1], ret[0]) def show_dist(self, time_bin=0): """ @@ -554,14 +641,24 @@ def pdf(self, time_bin=0): time_bin=time_bin ) synapse_count_arr, pdf_synapse_count_arr = self.dist_synapses.pdf() - ### calculate the incoming spike count array and the corresponding pdf values - incoming_spike_count_arr = np.outer( - spike_count_arr, synapse_count_arr - ).flatten() - pdf_incoming_spike_count_arr = np.outer( - pdf_spike_count_arr, pdf_synapse_count_arr - ).flatten() - ### sort the incoming spike count array 
(for later combining unique values) + ### for each possible number of synapses, calculate the pdf of the sum of + ### incoming spikes and weight it with the probability of the corresponding + ### number of synapses + incoming_spike_count_list = [] + pdf_incoming_spike_count_list = [] + for synapse_count, pdf_synapse_count in zip( + synapse_count_arr, pdf_synapse_count_arr + ): + incoming_spike_count, pdf_incoming_spike_count = self.pdf_of_sum( + spike_count_arr, pdf_spike_count_arr, n=synapse_count + ) + incoming_spike_count_list.extend(incoming_spike_count) + pdf_incoming_spike_count_list.extend( + pdf_incoming_spike_count * pdf_synapse_count + ) + ### combine the pdf of unique values in incoming_spike_count_list + incoming_spike_count_arr = np.array(incoming_spike_count_list) + pdf_incoming_spike_count_arr = np.array(pdf_incoming_spike_count_list) sort_indices = np.argsort(incoming_spike_count_arr) incoming_spike_count_arr = incoming_spike_count_arr[sort_indices] pdf_incoming_spike_count_arr = pdf_incoming_spike_count_arr[sort_indices] @@ -573,8 +670,13 @@ def pdf(self, time_bin=0): pdf_incoming_spike_count_arr = np.add.reduceat( pdf_incoming_spike_count_arr, unique_indices ) + ### sort pdf and values based on pdf + sort_indices = np.argsort(pdf_incoming_spike_count_arr) + incoming_spike_count_arr = incoming_spike_count_arr[sort_indices] + pdf_incoming_spike_count_arr = pdf_incoming_spike_count_arr[sort_indices] + ### get cu ### normalize the pdf to sum to 1 (since stepsize is 1) (it is already - ### normalized but maybe rounding errors) + ### normalized but to not accumulate errors, normalize it again) pdf_incoming_spike_count_arr = pdf_incoming_spike_count_arr / np.sum( pdf_incoming_spike_count_arr ) @@ -596,6 +698,35 @@ def show_dist(self, time_bin=0): plt.title("Incoming Spikes Distribution") plt.show() + def pdf_of_sum(self, x_values, pdf_values, n): + """ + Calculate the PDF of the n-th sum of a random variable X, where X has the PDF given by + x_values and 
pdf_values. + + Args: + x_values(np.ndarray): + The values of the random variable X. + pdf_values(np.ndarray): + The PDF values for the corresponding values of the random variable X. + """ + result_x = x_values.copy() + result_pdf = pdf_values.copy() + for _ in range(n - 1): + result_x_new = [] + result_pdf_new = [] + # multiply combinations for pdf_values and add combinations for x_values + result_x_new = np.add.outer(result_x, x_values).flatten() + result_pdf_new = np.outer(result_pdf, pdf_values).flatten() + # sort based on x values + sort_indices = np.argsort(result_x_new) + result_x_new = result_x_new[sort_indices] + result_pdf_new = result_pdf_new[sort_indices] + # sum the pdf_values for each unique value in x_sum + result_x, unique_indices = np.unique(result_x_new, return_index=True) + result_pdf = np.add.reduceat(result_pdf_new, unique_indices) + + return result_x, result_pdf + class ConductanceCalc: @@ -691,6 +822,10 @@ def show_conductance(self, nbr_spikes: int, g_init: np.ndarray): g_init (np.ndarray): The initial (prev) conductances of the post population neurons. 
""" + # calculate g_mean and g_end and print values + g_mean, g_end = self.g_mean(nbr_spikes=nbr_spikes, g_init=g_init) + print(f"Mean Conductance: {g_mean}\nEnd Conductance: {g_end}") + # generate conductance over time timestep = 0.0001 # time over bin t_arr = np.arange(0, BIN_SIZE_MS, timestep) @@ -710,8 +845,14 @@ def show_conductance(self, nbr_spikes: int, g_init: np.ndarray): g_list.append(g) # plot the conductance g_mean = np.mean(np.stack(g_list), axis=0) - plt.title(g_mean) + g_end = g_list[-1] + for g_value in g_end: + plt.axhline(y=g_value, color="r", linestyle="--") + plt.title(f"Mean Conductance: {g_mean}\nEnd Conductance: {g_end}") plt.plot(t_arr, g_list) + plt.xlabel("Time (ms)") + plt.ylabel("Conductance") + plt.tight_layout() plt.show() @@ -894,82 +1035,178 @@ def show_dist( conductance_array=recordings[0]["post;g_ampa"] ) +dist_incoming_spikes_from_model = DistIncomingSpikesFromModel( + pre_spikes_dict=recordings[0]["pre;spike"], + proj=proj, + time_lims_steps=(0, 2000), +) + # Plot the results PLOT_EXAMPLES = True if PLOT_EXAMPLES: - dist_pre_spikes.show_dist(time_bin=10) - dist_pre_spikes.show_dist(time_bin=-1) - dist_post.show_dist(time_bin=10) - dist_post.show_dist(time_bin=-1) - dist_synapses.show_dist() - dist_incoming_spikes.show_dist(time_bin=10) - dist_incoming_spikes.show_dist(time_bin=-1) - # conductance_calc.show_conductance(nbr_spikes=5, g_init=np.array([0, 0.5, 1.0, 8.0])) - incoming_spikes_count_arr, pdf_incoming_spikes_count_arr = dist_incoming_spikes.pdf( - time_bin=-1 + # dist_pre_spikes.show_dist(time_bin=10) + # dist_pre_spikes.show_dist(time_bin=-1) + # dist_post.show_dist(time_bin=10) + # dist_post.show_dist(time_bin=-1) + # dist_synapses.show_dist() + dist_incoming_start = dist_incoming_spikes.pdf(time_bin=10) + dist_incoming_end = dist_incoming_spikes.pdf(time_bin=-1) + print(dist_incoming_end) + dist_incoming_from_model_start = dist_incoming_spikes_from_model.pdf(time_bin=10) + dist_incoming_from_model_end = 
dist_incoming_spikes_from_model.pdf(time_bin=-1) + plt.subplot(211) + plt.bar( + dist_incoming_start[0], + dist_incoming_start[1], + alpha=0.5, + width=1, + color="b", + label="calculated", ) - dist_current_conductance.show_dist( - incoming_spikes_count_arr=incoming_spikes_count_arr, - pdf_incoming_spikes_count_arr=pdf_incoming_spikes_count_arr, - prev_g_arr=np.array([0, 80.0]), - pdf_prev_g_arr=np.array([0.5, 0.5]), + plt.bar( + dist_incoming_from_model_start[0], + dist_incoming_from_model_start[1], + alpha=0.5, + width=1, + color="r", + label="model", ) - dist_post_conductance.show_dist(time_bin=10) - dist_post_conductance.show_dist(time_bin=-1) - - -nbr_bins = dist_pre_spikes.spikes_binned.shape[0] -g_end_dist = (np.array([0.0]), np.array([1.0])) -g_arr_list = [] -pdf_g_arr_list = [] -g_model_arr_list = [] -pdf_g_model_arr_list = [] -start = time.time() -for bin in range(nbr_bins): - ### get incoming spikes distribution - incoming_spikes_count_arr, pdf_incoming_spikes_count_arr = dist_incoming_spikes.pdf( - time_bin=bin + plt.legend() + plt.xlabel("Incoming Spikes") + plt.ylabel("Density") + plt.title("Incoming Spikes Distribution Start") + plt.subplot(212) + plt.bar( + dist_incoming_end[0], + dist_incoming_end[1], + alpha=0.5, + width=1, + color="b", + label="calculated", ) - ### get current conductance distribution - g_mean_dist, g_end_dist = dist_current_conductance.pdf( - incoming_spikes_count_arr=incoming_spikes_count_arr, - pdf_incoming_spikes_count_arr=pdf_incoming_spikes_count_arr, - prev_g_arr=g_end_dist[0], - pdf_prev_g_arr=g_end_dist[1], + plt.bar( + dist_incoming_from_model_end[0], + dist_incoming_from_model_end[1], + alpha=0.5, + width=1, + color="r", + label="model", ) - ### get current conductance distribution from model - g_model_arr, pdf_g_model_arr = dist_post_conductance.pdf(time_bin=bin) - ### store the conductance distribution - g_arr_list.append(g_mean_dist[0]) - pdf_g_arr_list.append(g_mean_dist[1]) - g_model_arr_list.append(g_model_arr) - 
pdf_g_model_arr_list.append(pdf_g_model_arr) -print("loop time:", time.time() - start) - -start = time.time() -# Plotting -plt.subplot(211) -for x, y, z in zip(range(len(g_arr_list)), g_arr_list, pdf_g_arr_list): - norm = plt.Normalize(z.min(), z.max()) - cmap = plt.get_cmap("viridis") - colors = cmap(norm(z)) - plt.hlines(y=y, xmin=x - norm(z) * 0.5, xmax=x + norm(z) * 0.5, color=colors) -plt.ylim(0, 200) -plt.xlabel("Time Bins") -plt.ylabel("Conductance") -plt.title("Conductance Distribution over Time Bins") -plt.subplot(212) -for x, y, z in zip( - range(len(g_model_arr_list)), g_model_arr_list, pdf_g_model_arr_list -): - norm = plt.Normalize(z.min(), z.max()) - cmap = plt.get_cmap("viridis") - colors = cmap(norm(z)) - plt.hlines(y=y, xmin=x - norm(z) * 0.5, xmax=x + norm(z) * 0.5, color=colors) -plt.ylim(0, 200) -plt.xlabel("Time Bins") -plt.ylabel("Conductance") -plt.title("Model Conductance Distribution over Time Bins") -plt.tight_layout() -print("plot time:", time.time() - start) -plt.show() + plt.legend() + plt.xlabel("Incoming Spikes") + plt.ylabel("Density") + plt.title("Incoming Spikes Distribution End") + plt.tight_layout() + plt.show() + # conductance_calc.show_conductance(nbr_spikes=5, g_init=np.array([0, 0.5, 1.0, 8.0])) + # incoming_spikes_count_arr, pdf_incoming_spikes_count_arr = dist_incoming_spikes.pdf( + # time_bin=-1 + # ) + # dist_current_conductance.show_dist( + # incoming_spikes_count_arr=incoming_spikes_count_arr, + # pdf_incoming_spikes_count_arr=pdf_incoming_spikes_count_arr, + # prev_g_arr=np.array([0, 180.0]), + # pdf_prev_g_arr=np.array([0.5, 0.5]), + # ) + # dist_post_conductance.show_dist(time_bin=10) + # dist_post_conductance.show_dist(time_bin=-1) + +CONDUCTANCE_LOOP = False +if CONDUCTANCE_LOOP: + nbr_bins = dist_pre_spikes.spikes_binned.shape[0] + g_end_dist = (np.array([0.0]), np.array([1.0])) + g_arr_list = [] + pdf_g_arr_list = [] + g_model_arr_list = [] + pdf_g_model_arr_list = [] + incoming_spikes_count_arr_list, 
pdf_incoming_spikes_count_arr_list = [], [] + incoming_spikes_count_arr_model_list, pdf_incoming_spikes_count_arr_model_list = ( + [], + [], + ) + start = time.time() + for bin in range(nbr_bins): + ### get incoming spikes distribution from pre spikes and synapse distribution + incoming_spikes_count_arr, pdf_incoming_spikes_count_arr = ( + dist_incoming_spikes.pdf(time_bin=bin) + ) + ### get incoming spikes distribution directly from model + incoming_spikes_count_arr_model, pdf_incoming_spikes_count_arr_model = ( + dist_incoming_spikes_from_model.pdf(time_bin=bin) + ) + ### get current conductance distribution + g_mean_dist, g_end_dist = dist_current_conductance.pdf( + incoming_spikes_count_arr=incoming_spikes_count_arr, + pdf_incoming_spikes_count_arr=pdf_incoming_spikes_count_arr, + prev_g_arr=g_end_dist[0], + pdf_prev_g_arr=g_end_dist[1], + ) + ### get current conductance distribution from model + g_model_arr, pdf_g_model_arr = dist_post_conductance.pdf(time_bin=bin) + ### store the conductance distribution + g_arr_list.append(g_mean_dist[0]) + pdf_g_arr_list.append(g_mean_dist[1]) + g_model_arr_list.append(g_model_arr) + pdf_g_model_arr_list.append(pdf_g_model_arr) + ### store the incoming spikes distribution + incoming_spikes_count_arr_list.append(incoming_spikes_count_arr) + pdf_incoming_spikes_count_arr_list.append(pdf_incoming_spikes_count_arr) + incoming_spikes_count_arr_model_list.append(incoming_spikes_count_arr_model) + pdf_incoming_spikes_count_arr_model_list.append( + pdf_incoming_spikes_count_arr_model + ) + print("loop time:", time.time() - start) + + start = time.time() + # Plotting + plt.subplot(411) + for x, y, z in zip(range(len(g_arr_list)), g_arr_list, pdf_g_arr_list): + norm = plt.Normalize(z.min(), z.max()) + cmap = plt.get_cmap("viridis") + colors = cmap(norm(z)) + plt.hlines(y=y, xmin=x - norm(z) * 0.5, xmax=x + norm(z) * 0.5, color=colors) + plt.ylim(0, 200) + plt.xlabel("Time Bins") + plt.ylabel("Conductance") + plt.title("Conductance 
Distribution over Time Bins") + plt.subplot(412) + for x, y, z in zip( + range(len(g_model_arr_list)), g_model_arr_list, pdf_g_model_arr_list + ): + norm = plt.Normalize(z.min(), z.max()) + cmap = plt.get_cmap("viridis") + colors = cmap(norm(z)) + plt.hlines(y=y, xmin=x - norm(z) * 0.5, xmax=x + norm(z) * 0.5, color=colors) + plt.ylim(0, 200) + plt.xlabel("Time Bins") + plt.ylabel("Conductance") + plt.title("Model Conductance Distribution over Time Bins") + plt.subplot(413) + for x, y, z in zip( + range(len(incoming_spikes_count_arr_list)), + incoming_spikes_count_arr_list, + pdf_incoming_spikes_count_arr_list, + ): + norm = plt.Normalize(z.min(), z.max()) + cmap = plt.get_cmap("viridis") + colors = cmap(norm(z)) + plt.hlines(y=y, xmin=x - norm(z) * 0.5, xmax=x + norm(z) * 0.5, color=colors) + plt.xlabel("Time Bins") + plt.ylabel("Incoming Spikes") + plt.title("Incoming Spikes Distribution over Time Bins") + plt.subplot(414) + for x, y, z in zip( + range(len(incoming_spikes_count_arr_model_list)), + incoming_spikes_count_arr_model_list, + pdf_incoming_spikes_count_arr_model_list, + ): + norm = plt.Normalize(z.min(), z.max()) + cmap = plt.get_cmap("viridis") + colors = cmap(norm(z)) + plt.hlines(y=y, xmin=x - norm(z) * 0.5, xmax=x + norm(z) * 0.5, color=colors) + plt.xlabel("Time Bins") + plt.ylabel("Incoming Spikes") + plt.title("Model Incoming Spikes Distribution over Time Bins") + plt.tight_layout() + print("plot time:", time.time() - start) + plt.show() From e702e9bbb0a597203329631cb302468c669b7e5a Mon Sep 17 00:00:00 2001 From: olimaol Date: Thu, 23 May 2024 11:53:42 +0200 Subject: [PATCH 19/39] generate_model: added warn argument for create() and compile() --- src/CompNeuroPy/generate_model.py | 31 +++++++++++++++++++------------ 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/src/CompNeuroPy/generate_model.py b/src/CompNeuroPy/generate_model.py index b8e6035..21bc097 100644 --- a/src/CompNeuroPy/generate_model.py +++ 
b/src/CompNeuroPy/generate_model.py @@ -145,7 +145,7 @@ def _update_attribute_df_weights(self): ) self._attribute_df_compiled = True - def compile(self, compile_folder_name=None): + def compile(self, compile_folder_name=None, warn=True): """ Compiles a created model. @@ -153,6 +153,9 @@ def compile(self, compile_folder_name=None): compile_folder_name (str, optional): Name of the folder in which the model is compiled. Default: value from initialization. + warn (bool, optional): + If True a warning is printed if other models are initialized but not + created (they will not be compiled). Default: True. """ ### check if this model is created if self.created: @@ -160,15 +163,16 @@ def compile(self, compile_folder_name=None): compile_folder_name = self.compile_folder_name ### check if other models were initialized but not created --> warn that they are not compiled - not_created_model_list = self._check_if_models_created() - if len(not_created_model_list) > 0: - print( - "\nWARNING during compile of model " - + self.name - + ": There are initialized models which are not created, thus not compiled! models:\n" - + "\n".join(not_created_model_list) - + "\n" - ) + if warn: + not_created_model_list = self._check_if_models_created() + if len(not_created_model_list) > 0: + print( + "\nWARNING during compile of model " + + self.name + + ": There are initialized models which are not created, thus not compiled! models:\n" + + "\n".join(not_created_model_list) + + "\n" + ) mf.compile_in_folder(compile_folder_name, silent=True) self.compiled = True @@ -183,7 +187,7 @@ def compile(self, compile_folder_name=None): + ": Only compile the model after it has been created!" ) - def create(self, do_compile=True, compile_folder_name=None): + def create(self, do_compile=True, compile_folder_name=None, warn=True): """ Creates a model and optionally compiles it directly. 
@@ -193,6 +197,9 @@ def create(self, do_compile=True, compile_folder_name=None): compile_folder_name (str, optional): Name of the folder in which the model is compiled. Default: value from initialization. + warn (bool, optional): + If True a warning is printed during compilation if other models are + initialized but not created (they will not be compiled). Default: True. """ if self.created: print("model", self.name, "already created!") @@ -222,7 +229,7 @@ def create(self, do_compile=True, compile_folder_name=None): self._attribute_df = self._get_attribute_df() if do_compile: - self.compile(compile_folder_name) + self.compile(compile_folder_name, warn) def _check_if_models_created(self): """ From b17f1742008eba5bb36bd4fe254c81270758a56c Mon Sep 17 00:00:00 2001 From: olmai Date: Fri, 24 May 2024 14:37:13 +0200 Subject: [PATCH 20/39] fixed sci function --- src/CompNeuroPy/extra_functions.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/CompNeuroPy/extra_functions.py b/src/CompNeuroPy/extra_functions.py index 118d817..f7841ae 100644 --- a/src/CompNeuroPy/extra_functions.py +++ b/src/CompNeuroPy/extra_functions.py @@ -125,7 +125,7 @@ def suppress_stdout(): def sci(nr): """ Rounds a number to a single decimal. - If number is smaller than 0 it is converted to scientific notation with 1 decimal. + If number is smaller than 1 it is converted to scientific notation with 1 decimal. 
Args: nr (float or int): @@ -145,10 +145,10 @@ def sci(nr): >>> sci(177.22) '177.2' """ - if af.get_number_of_zero_decimals(nr) == 0: + if nr >= 1: return str(round(nr, 1)) else: - return f"{nr*10**af.get_number_of_zero_decimals(nr):.1f}e-{af.get_number_of_zero_decimals(nr)}" + return f"{nr:.1e}" class Cmap: @@ -2190,6 +2190,7 @@ def create_plot(axs, sliders): interval=(1.0 / figure_frequency) * 1000, repeat=True, ) + self.fig.tight_layout() plt.show() else: ### run update loop until figure is closed From 7a1d7dc352797f401c809196c3296e741c2ae29d Mon Sep 17 00:00:00 2001 From: olmai Date: Tue, 28 May 2024 16:23:38 +0200 Subject: [PATCH 21/39] model_configurator: percentile for distributions can now be used --- .../model_configurator_user.py | 16 +- .../examples/model_configurator/test.py | 184 +++++++++++++----- 2 files changed, 144 insertions(+), 56 deletions(-) diff --git a/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py b/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py index 00fcaa6..ba8c130 100644 --- a/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py +++ b/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py @@ -12,7 +12,9 @@ Izhikevich2003_flexible_noisy_I, ) from CompNeuroPy import generate_model, Monitors, plot_recordings, my_raster_plot -from model_configurator_cnp import model_configurator +from CompNeuroPy.examples.model_configurator.model_configurator_cnp import ( + model_configurator, +) import matplotlib.pyplot as plt import numpy as np @@ -332,12 +334,12 @@ def BGM_part_function(params): ### do a test simulation mon = Monitors( { - "pop;cor_exc": ["spike"], - "pop;cor_inh": ["spike"], - "pop;stn": ["spike", "g_ampa", "g_gaba"], - "pop;gpe": ["spike", "g_ampa", "g_gaba"], - "pop;snr": ["spike", "g_ampa", "g_gaba"], - "pop;thal": ["spike", "g_ampa", "g_gaba"], + "cor_exc": ["spike"], + "cor_inh": ["spike"], + "stn": ["spike", "g_ampa", "g_gaba"], + "gpe": ["spike", 
"g_ampa", "g_gaba"], + "snr": ["spike", "g_ampa", "g_gaba"], + "thal": ["spike", "g_ampa", "g_gaba"], } ) get_population("cor_exc").rates = target_firing_rate_dict["cor_exc"] diff --git a/src/CompNeuroPy/examples/model_configurator/test.py b/src/CompNeuroPy/examples/model_configurator/test.py index 9927667..1ab2cc8 100644 --- a/src/CompNeuroPy/examples/model_configurator/test.py +++ b/src/CompNeuroPy/examples/model_configurator/test.py @@ -22,6 +22,7 @@ from functools import wraps import time from collections.abc import Iterable +from tqdm import tqdm setup(dt=0.1) @@ -73,9 +74,11 @@ monitors.start() +start = time.time() simulate(100.0) pop_pre.rates = 1000.0 simulate(100.0) +print("simulate time:", time.time() - start) recordings = monitors.get_recordings() recording_times = monitors.get_recording_times() @@ -157,6 +160,41 @@ def get_binned_variable(var_array: np.ndarray, BIN_SIZE_STEPS: int): return averages +def get_top_n_percentile(x, pdf, percentile): + """ + Get the top n percentile of the given data. + + Args: + x (np.ndarray): + The data values. + pdf (np.ndarray): + The PDF values for the corresponding data values. + percentile (float): + The percentile to get. + + Returns: + x (np.ndarray): + The data values of the top n percentile. + pdf (np.ndarray): + The PDF values for the corresponding data values of the top n percentile. 
+ """ + ### sort the data based on the pdf values + sort_indices = np.argsort(pdf) + x = x[sort_indices] + pdf = pdf[sort_indices] + ### get the top n percentile + pdf_norm = pdf / np.sum(pdf) + pdf_cumsum = np.cumsum(pdf_norm) + top_n_percentile = np.where(pdf_cumsum > 1 - percentile / 100)[0][0] + x = x[top_n_percentile:] + pdf = pdf[top_n_percentile:] + ### sort the data based on the data values + sort_indices = np.argsort(x) + x = x[sort_indices] + pdf = pdf[sort_indices] + return x, pdf + + BIN_SIZE_MS = 5 BIN_SIZE_STEPS = int(round(BIN_SIZE_MS / dt())) @@ -199,8 +237,6 @@ def pdf(self, time_bin=0): if ret is not None: return ret - # Create the KDE object - kde = KernelDensity(kernel="linear") # scale data to have standard deviation of 1 # if all values are the same return pdf with 1 if np.std(self.spikes_binned[time_bin]) == 0: @@ -321,7 +357,9 @@ def pdf(self, time_bin=0): bins=np.arange(pdf_max + 1), density=True, ) - return (ret[1][:-1], ret[0]) + pdf = ret[0] + spike_values = ret[1][:-1] + return (spike_values[pdf > 0], pdf[pdf > 0]) def show_dist(self, time_bin=0): """ @@ -452,7 +490,7 @@ def pdf(self, time_bin=0): pdf_discrete = pdf_discrete[row_indices][:, col_indices] x_discrete = x_discrete[row_indices][:, col_indices] # store the pdf - ret = (x_discrete, pdf_discrete) + ret = (x_discrete[pdf_discrete > 0], pdf_discrete[pdf_discrete > 0]) self._pdf_dict[time_bin] = ret # return the pdf return ret @@ -535,7 +573,8 @@ def pdf(self, time_bin=0): ### normalize so that sum*stepsize = 1 stepsize = x[1, 0] - x[0, 0] pdf = pdf / np.sum(pdf) / stepsize - ret = (x[:, 0], pdf) + conductance_values = x[:, 0] + ret = (conductance_values[pdf > 0], pdf[pdf > 0]) return ret @@ -586,7 +625,7 @@ def pdf(self): pdf (np.ndarray): The PDF values for the corresponding synapse count values. 
""" - return self._x, self._pdf + return self._x[self._pdf > 0], self._pdf[self._pdf > 0] def show_dist(self): """ @@ -641,6 +680,13 @@ def pdf(self, time_bin=0): time_bin=time_bin ) synapse_count_arr, pdf_synapse_count_arr = self.dist_synapses.pdf() + ### if using pdfs to combine them and calculate new pdfs --> only use i.e. the top 95% + spike_count_arr, pdf_spike_count_arr = get_top_n_percentile( + spike_count_arr, pdf_spike_count_arr, 95 + ) + synapse_count_arr, pdf_synapse_count_arr = get_top_n_percentile( + synapse_count_arr, pdf_synapse_count_arr, 95 + ) ### for each possible number of synapses, calculate the pdf of the sum of ### incoming spikes and weight it with the probability of the corresponding ### number of synapses @@ -674,14 +720,16 @@ def pdf(self, time_bin=0): sort_indices = np.argsort(pdf_incoming_spike_count_arr) incoming_spike_count_arr = incoming_spike_count_arr[sort_indices] pdf_incoming_spike_count_arr = pdf_incoming_spike_count_arr[sort_indices] - ### get cu ### normalize the pdf to sum to 1 (since stepsize is 1) (it is already ### normalized but to not accumulate errors, normalize it again) pdf_incoming_spike_count_arr = pdf_incoming_spike_count_arr / np.sum( pdf_incoming_spike_count_arr ) ### store the pdf - ret = (incoming_spike_count_arr, pdf_incoming_spike_count_arr) + ret = ( + incoming_spike_count_arr[pdf_incoming_spike_count_arr > 0], + pdf_incoming_spike_count_arr[pdf_incoming_spike_count_arr > 0], + ) self._pdf_dict[time_bin] = ret ### return the pdf return ret @@ -869,6 +917,7 @@ def __init__(self, tau, w): w (float): The synaptic weight. """ + self.w = w self.conductance_calc = ConductanceCalc(tau=tau, w=w) def pdf( @@ -898,6 +947,13 @@ def pdf( pdf (np.ndarray): The PDF values for the corresponding current conductance values. """ + ### if using pdfs to combine them and calculate new pdfs --> only use i.e. 
the top 95% + incoming_spikes_count_arr, pdf_incoming_spikes_count_arr = get_top_n_percentile( + incoming_spikes_count_arr, pdf_incoming_spikes_count_arr, 95 + ) + prev_g_arr, pdf_prev_g_arr = get_top_n_percentile( + prev_g_arr, pdf_prev_g_arr, 95 + ) ### get the pdf by combining the pdfs of the incoming spikes and previous conductances pdf_current_g_arr = np.outer(pdf_incoming_spikes_count_arr, pdf_prev_g_arr) @@ -914,41 +970,35 @@ def pdf( ) ### get distribution for g_mean and g_end - ret_g_mean = self._get_pdf_of_g_arr(pdf_current_g_arr, current_g_arr) - ret_g_end = self._get_pdf_of_g_arr(pdf_current_g_arr, current_g_end_arr) + ret_g_mean = self._get_pdf_of_g_arr( + pdf_current_g_arr.flatten(), current_g_arr.flatten() + ) + ret_g_end = self._get_pdf_of_g_arr( + pdf_current_g_arr.flatten(), current_g_end_arr.flatten() + ) return ret_g_mean, ret_g_end def _get_pdf_of_g_arr(self, pdf_current_g_arr, current_g_arr): ### use the conductance and corresponding pdf samples to estimate the density - pdf_current_g_arr = pdf_current_g_arr.flatten() - current_g_arr = current_g_arr.flatten() - - # scale the current_g_arr so that the standard deviation is 1 - # if all values are the same, return this value with pdf 1 + # scale data to have standard deviation of 1 + # if all values are the same return pdf with 1 if np.std(current_g_arr) == 0: return (np.array([current_g_arr[0]]), np.array([1])) else: - scale = 1 / np.std(current_g_arr) - # Create the KDE object - kde = KernelDensity(kernel="linear") - # Fit the data to the KDE only use the samples with non-zero pdf - kde.fit( - X=scale * current_g_arr[pdf_current_g_arr > 0].reshape(-1, 1), - sample_weight=pdf_current_g_arr[pdf_current_g_arr > 0], - ) - # Create points for which to estimate the PDF, values can only be between 0 and max - x = np.linspace(0, current_g_arr.max(), 100).reshape(-1, 1) - # Estimate the PDF for these points - log_density = kde.score_samples(x * scale) - pdf = np.exp(log_density) - ### normalize so that 
sum*stepsize = 1 - stepsize = x[1, 0] - x[0, 0] - pdf = pdf / np.sum(pdf) / stepsize - ret = (x[:, 0], pdf) - - return ret + # create histogram from 0 to pdf_max with stepsize self.w/100 + pdf_max = np.max(current_g_arr) + ret = np.histogram( + current_g_arr, + weights=pdf_current_g_arr, + range=(0, pdf_max), + bins=np.arange(0, pdf_max + (self.w / 100) / 2, self.w / 100), + density=True, + ) + pdf = ret[0] + g_values = ret[1][:-1] + return (g_values[pdf > 0], pdf[pdf > 0]) def show_dist( self, @@ -968,7 +1018,10 @@ def show_dist( prev_g_arr=prev_g_arr, pdf_prev_g_arr=pdf_prev_g_arr, ) - x, pdf = dist_mean + mask = dist_mean[1] > 0 + x_mean = (dist_mean[0][mask], dist_mean[1][mask]) + mask = dist_end[1] > 0 + x_end = (dist_end[0][mask], dist_end[1][mask]) plt.subplot(411) plt.bar( incoming_spikes_count_arr, pdf_incoming_spikes_count_arr, alpha=0.5, width=1 @@ -989,10 +1042,16 @@ def show_dist( plt.subplot(413) plt.bar( - dist_mean[0], - dist_mean[1], + x_mean[0], + x_mean[1], alpha=0.5, - width=dist_mean[0][1] - dist_mean[0][0], + width=np.min(np.diff(x_mean[0])), + ) + plt.xlim( + np.min(np.concatenate([x_mean[0], x_end[0]])) + - np.min(np.diff(x_mean[0])) / 2, + np.max(np.concatenate([x_mean[0], x_end[0]])) + + np.min(np.diff(x_mean[0])) / 2, ) plt.xlabel("Conductance Mean") plt.ylabel("Density") @@ -1000,7 +1059,18 @@ def show_dist( plt.subplot(414) plt.bar( - dist_end[0], dist_end[1], alpha=0.5, width=dist_end[0][1] - dist_end[0][0] + x_end[0], + x_end[1], + alpha=0.5, + width=np.min(np.diff(x_end[0])), + ) + plt.xlim( + np.min( + np.concatenate([x_mean[0], x_end[0]]) - np.min(np.diff(x_end[0])) / 2 + ), + np.max( + np.concatenate([x_mean[0], x_end[0]]) + np.min(np.diff(x_end[0])) / 2 + ), ) plt.xlabel("Conductance End") plt.ylabel("Density") @@ -1042,18 +1112,21 @@ def show_dist( ) # Plot the results -PLOT_EXAMPLES = True +PLOT_EXAMPLES = False if PLOT_EXAMPLES: + ### dist pre spikes (directly from recordings) # dist_pre_spikes.show_dist(time_bin=10) # 
dist_pre_spikes.show_dist(time_bin=-1) + ### dist post spikes and voltage (directly from recordings) # dist_post.show_dist(time_bin=10) # dist_post.show_dist(time_bin=-1) + ### dist synapses (calculated from connection probability) # dist_synapses.show_dist() - dist_incoming_start = dist_incoming_spikes.pdf(time_bin=10) - dist_incoming_end = dist_incoming_spikes.pdf(time_bin=-1) - print(dist_incoming_end) - dist_incoming_from_model_start = dist_incoming_spikes_from_model.pdf(time_bin=10) - dist_incoming_from_model_end = dist_incoming_spikes_from_model.pdf(time_bin=-1) + ### dist incoming spikes (calculated from dist pre spikes and dist synapses) + dist_incoming_start = dist_incoming_spikes.pdf(time_bin=5) + dist_incoming_end = dist_incoming_spikes.pdf(time_bin=25) + dist_incoming_from_model_start = dist_incoming_spikes_from_model.pdf(time_bin=5) + dist_incoming_from_model_end = dist_incoming_spikes_from_model.pdf(time_bin=25) plt.subplot(211) plt.bar( dist_incoming_start[0], @@ -1098,7 +1171,18 @@ def show_dist( plt.title("Incoming Spikes Distribution End") plt.tight_layout() plt.show() + ### conductance calc (update conductance values based on incoming spikes) # conductance_calc.show_conductance(nbr_spikes=5, g_init=np.array([0, 0.5, 1.0, 8.0])) + ### dist current conductance (calculated from dist incoming spikes and some example previous conductance) + # incoming_spikes_count_arr, pdf_incoming_spikes_count_arr = dist_incoming_spikes.pdf( + # time_bin=10 + # ) + # dist_current_conductance.show_dist( + # incoming_spikes_count_arr=incoming_spikes_count_arr, + # pdf_incoming_spikes_count_arr=pdf_incoming_spikes_count_arr, + # prev_g_arr=np.array([0, 180.0]), + # pdf_prev_g_arr=np.array([0.5, 0.5]), + # ) # incoming_spikes_count_arr, pdf_incoming_spikes_count_arr = dist_incoming_spikes.pdf( # time_bin=-1 # ) @@ -1108,10 +1192,12 @@ def show_dist( # prev_g_arr=np.array([0, 180.0]), # pdf_prev_g_arr=np.array([0.5, 0.5]), # ) + ### dist post conductance (directly from 
recordings) # dist_post_conductance.show_dist(time_bin=10) # dist_post_conductance.show_dist(time_bin=-1) + pass -CONDUCTANCE_LOOP = False +CONDUCTANCE_LOOP = True if CONDUCTANCE_LOOP: nbr_bins = dist_pre_spikes.spikes_binned.shape[0] g_end_dist = (np.array([0.0]), np.array([1.0])) @@ -1125,8 +1211,8 @@ def show_dist( [], ) start = time.time() - for bin in range(nbr_bins): - ### get incoming spikes distribution from pre spikes and synapse distribution + for bin in tqdm(range(nbr_bins)): + ### calculate incoming spikes distribution from pre spikes and synapse distribution incoming_spikes_count_arr, pdf_incoming_spikes_count_arr = ( dist_incoming_spikes.pdf(time_bin=bin) ) @@ -1134,7 +1220,7 @@ def show_dist( incoming_spikes_count_arr_model, pdf_incoming_spikes_count_arr_model = ( dist_incoming_spikes_from_model.pdf(time_bin=bin) ) - ### get current conductance distribution + ### calculate current conductance distribution g_mean_dist, g_end_dist = dist_current_conductance.pdf( incoming_spikes_count_arr=incoming_spikes_count_arr, pdf_incoming_spikes_count_arr=pdf_incoming_spikes_count_arr, From f711458d96556588b2f126fdca7c8bb7c9e15f25 Mon Sep 17 00:00:00 2001 From: olmai Date: Tue, 28 May 2024 16:48:57 +0200 Subject: [PATCH 22/39] can run model_configurator again --- .../model_configurator_cnp.py | 43 ++++++++----------- src/CompNeuroPy/system_functions.py | 29 +++++++++++++ 2 files changed, 48 insertions(+), 24 deletions(-) diff --git a/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py b/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py index 4ba8467..d46531c 100644 --- a/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py +++ b/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py @@ -1,15 +1,15 @@ from CompNeuroPy import ( cnp_clear, compile_in_folder, - find_folder_with_prefix, data_obj, - replace_names_with_dict, + evaluate_expression_with_dict, timing_decorator, print_df, save_variables, 
load_variables, clear_dir, ) +from CompNeuroPy.system_functions import _find_folder_with_prefix from CompNeuroPy.neuron_models import poisson_neuron from ANNarchy import ( Population, @@ -211,13 +211,13 @@ def get_max_syn(self, cache=True, clear=False): self.syn_contr_dict[pop_name] = {} for target_type in ["ampa", "gaba"]: self.log(f"get synaptic contributions for {pop_name} {target_type}") - self.syn_contr_dict[pop_name][ - target_type - ] = self.get_syn_contr_dict( - pop_name=pop_name, - target_type=target_type, - use_max_weights=True, - normalize=True, + self.syn_contr_dict[pop_name][target_type] = ( + self.get_syn_contr_dict( + pop_name=pop_name, + target_type=target_type, + use_max_weights=True, + normalize=True, + ) ) ### create the synaptic load template dict @@ -350,9 +350,9 @@ def create_single_neuron_networks( ### the network with the voltage clamp version neuron if single_net_v_clamp: - self.net_single_v_clamp_dict[ - pop_name - ] = self.create_net_single_voltage_clamp(pop_name=pop_name) + self.net_single_v_clamp_dict[pop_name] = ( + self.create_net_single_voltage_clamp(pop_name=pop_name) + ) else: ### dummy network for the pop net_single_v_clamp_dummy = Network() @@ -1311,9 +1311,9 @@ def get_interpolation(self): ### with interpolation get the firing rates for all extreme values of I_app, g_ampa, g_gaba for pop_name in self.pop_name_list: - self.extreme_firing_rates_df_dict[ - pop_name - ] = self.get_extreme_firing_rates_df(pop_name) + self.extreme_firing_rates_df_dict[pop_name] = ( + self.get_extreme_firing_rates_df(pop_name) + ) def get_extreme_firing_rates_df(self, pop_name): """ @@ -2204,7 +2204,7 @@ def compile_net_many_parallel(self): ### get the name of the run folder of the network ### search for a folder which starts with run_ ### there should only be 1 --> get run_folder_name as str - run_folder_name = find_folder_with_prefix( + run_folder_name = _find_folder_with_prefix( base_path=f"annarchy_folders/many_net_{net_idx}", prefix="run_" ) 
run_folder_name = f"/scratch/olmai/Projects/PhD/CompNeuroPy/CompNeuroPy/examples/model_configurator/annarchy_folders/many_net_{net_idx}//{run_folder_name}" @@ -3304,16 +3304,11 @@ def get_voltage_clamp_equations(self, init_arguments_dict, pop_name): attributes_sympy_dict["delta_v"] = Symbol("delta_v") attributes_sympy_dict["right_side"] = Symbol("right_side") - ### now replace the symbolds in the eq_v string with the dictionary items - eq_v_replaced = replace_names_with_dict( - expression=eq_v_one_side, - name_of_dict="attributes_sympy_dict", - dictionary=attributes_sympy_dict, + ### get the sympy equation expression by evaluating the string + eq_sympy = evaluate_expression_with_dict( + expression=eq_v_one_side, value_dict=attributes_sympy_dict ) - ### from this string get the sympy equation expression - eq_sympy = eval(eq_v_replaced) - ### solve the equation to delta_v result = solve(eq_sympy, attributes_sympy_dict["delta_v"], dict=True) if len(result) != 1: diff --git a/src/CompNeuroPy/system_functions.py b/src/CompNeuroPy/system_functions.py index b497a4f..ea4a448 100644 --- a/src/CompNeuroPy/system_functions.py +++ b/src/CompNeuroPy/system_functions.py @@ -507,3 +507,32 @@ def create_data_raw_folder( f.write("# CompNeuroPy was installed locally with commit:\n") compneuropy_commit = compneuropy_git_log[0].replace("\n", "") f.write(f"# {compneuropy_commit}") + + +def _find_folder_with_prefix(base_path, prefix): + """ + Find a folder with a specified prefix in the given base path. + + Args: + base_path (str): + Path to the base directory to search in. + prefix (str): + Prefix of the folder to find. + + Returns: + str or None: + Name of the folder with the specified prefix if found, otherwise None. 
+ """ + # List all items (files and directories) in the base_path + items = os.listdir(base_path) + + # Iterate through the items to find a folder with the specified prefix + for item in items: + item_path = os.path.join(base_path, item) + + # Check if the item is a directory and its name starts with the given prefix + if os.path.isdir(item_path) and item.startswith(prefix): + return item + + # If no folder with the specified prefix is found, return None + return None From 63ff90780be96299a61d857b82a0e4fc325c6dce Mon Sep 17 00:00:00 2001 From: olimaol Date: Wed, 29 May 2024 14:54:57 +0200 Subject: [PATCH 23/39] model_configurator: tested model reduction --- .../model_configurator_user.py | 5 +- .../examples/model_configurator/test.py | 1358 ++--------------- .../examples/model_configurator/test2.py | 95 ++ 3 files changed, 254 insertions(+), 1204 deletions(-) create mode 100644 src/CompNeuroPy/examples/model_configurator/test2.py diff --git a/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py b/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py index ba8c130..c1611e5 100644 --- a/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py +++ b/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py @@ -257,7 +257,7 @@ def BGM_part_function(params): params["snr__snr.probability"] = 0.6 ### create model which should be configurated - ### if you create or compile has no effect + ### create or compile have no effect setup(dt=0.1) model = generate_model( model_creation_function=BGM_part_function, @@ -279,6 +279,9 @@ def BGM_part_function(params): } do_not_config_list = ["cor_exc", "cor_inh"] + ### TODO for the do not config populations: check if the populations have the + ### given rates, if not, maybe print warning + ### initialize model_configurator model_conf = model_configurator( model, diff --git a/src/CompNeuroPy/examples/model_configurator/test.py 
b/src/CompNeuroPy/examples/model_configurator/test.py index 1ab2cc8..0d45902 100644 --- a/src/CompNeuroPy/examples/model_configurator/test.py +++ b/src/CompNeuroPy/examples/model_configurator/test.py @@ -2,11 +2,16 @@ Neuron, Population, compile, - simulate, get_time, setup, dt, Projection, + Synapse, + Binomial, + get_projection, + get_population, + CurrentInjection, + simulate, ) from CompNeuroPy import ( CompNeuroMonitors, @@ -23,9 +28,11 @@ import time from collections.abc import Iterable from tqdm import tqdm +from math import ceil setup(dt=0.1) + neuron_izh = Neuron( parameters=""" C = 100.0 : population @@ -57,17 +64,114 @@ """, ) -pop_pre = Population(100, neuron=PoissonNeuron(rates=10.0), name="pre") -pop_post = Population(1000, neuron=neuron_izh, name="post") + +neuron_izh_aux = Neuron( + parameters=""" + C = 100.0 : population + k = 0.7 : population + v_r = -60.0 : population + v_t = -40.0 : population + a = 0.03 : population + b = -2.0 : population + c = -50.0 : population + d = 100.0 : population + v_peak = 0.0 : population + I_app = 0.0 + E_ampa = 0.0 : population + tau_ampa = 10.0 : population + """, + equations=""" + ### synaptic current + tau_ampa * dg_ampa/dt = -g_ampa + tau_ampa*g_exc/dt + I_ampa = -neg(g_ampa*(v - E_ampa)) + ### Izhikevich spiking + I_v = I_app + I_ampa + C * dv/dt = k*(v - v_r)*(v - v_t) - u + I_v + du/dt = a*(b*(v - v_r) - u) + """, + spike="v >= v_peak", + reset=""" + v = c + u = u + d + """, +) + +neuron_aux1 = Neuron( + parameters=""" + pre_size = 1 : population + """, + equations=""" + r = g_ampa/pre_size + g_ampa = 0 + """, +) + +neuron_aux2 = Neuron( + parameters=""" + number_synapses = 0 + """, + equations=""" + ### r = incoming spikes + r = Normal(0, 1) * sqrt(number_synapses * sum(spikeprob) * (1 - sum(spikeprob))) + number_synapses * sum(spikeprob) : min=0, max=number_synapses + """, +) + CONNECTION_PROB = 0.1 WEIGHTS = 1.0 +POP_PRE_SIZE = 1000 +POP_POST_SIZE = 100 +POP_REDUCED_SIZE = 100 + +### create not reduced 
model +### pre +pop_pre = Population(POP_PRE_SIZE, neuron=PoissonNeuron(rates=10.0), name="pre") +### post +pop_post = Population(POP_POST_SIZE, neuron=neuron_izh, name="post") +### pre to post proj = Projection(pre=pop_pre, post=pop_post, target="ampa", name="proj") proj.connect_fixed_probability(weights=WEIGHTS, probability=CONNECTION_PROB) +### create reduced model +### pre +pop_pre2 = Population(POP_REDUCED_SIZE, neuron=PoissonNeuron(rates=10.0), name="pre2") +### post +pop_post2 = Population(POP_REDUCED_SIZE, neuron=neuron_izh_aux, name="post2") +### aux +pop_aux1 = Population(1, neuron=neuron_aux1, name="aux1") +pop_aux1.pre_size = POP_REDUCED_SIZE +pop_aux2 = Population( + POP_REDUCED_SIZE, + neuron=neuron_aux2, + name="aux2", +) +pop_aux2.number_synapses = Binomial(n=POP_PRE_SIZE, p=CONNECTION_PROB).get_values( + POP_REDUCED_SIZE +) +### pre to aux +proj_pre__aux = Projection( + pre=pop_pre2, post=pop_aux1, target="ampa", name="proj_pre__aux" +) +proj_pre__aux.connect_all_to_all(weights=1) +### aux2 to aux2 +proj_aux__aux = Projection( + pre=pop_aux1, post=pop_aux2, target="spikeprob", name="proj_aux__aux" +) +proj_aux__aux.connect_all_to_all(weights=1) +### aux to post +proj_aux__pre = CurrentInjection(pop_aux2, pop_post2, "exc") +proj_aux__pre.connect_current() + monitors = CompNeuroMonitors( - mon_dict={"pre": ["spike"], "post": ["v", "spike", "I_ampa", "g_ampa"]} + mon_dict={ + "pre": ["spike"], + "post": ["v", "spike", "I_ampa", "g_ampa"], + "pre2": ["spike"], + "post2": ["v", "spike", "I_ampa", "g_ampa", "g_exc"], + "aux1": ["r"], + "aux2": ["r"], + } ) compile() @@ -77,6 +181,7 @@ start = time.time() simulate(100.0) pop_pre.rates = 1000.0 +pop_pre2.rates = 1000.0 simulate(100.0) print("simulate time:", time.time() - start) @@ -87,1212 +192,59 @@ figname="test.png", recordings=recordings, recording_times=recording_times, - shape=(2, 2), + shape=(4, 2), plan={ - "position": [1, 3, 4], - "compartment": ["pre", "post", "post"], - "variable": ["spike", 
"spike", "v"], - "format": ["hybrid", "hybrid", "line_mean"], + "position": [1, 3, 5, 2, 4, 6, 8], + "compartment": ["pre", "post", "post", "pre2", "post2", "post2", "aux1"], + "variable": ["spike", "spike", "g_ampa", "spike", "spike", "g_ampa", "r"], + "format": [ + "hybrid", + "hybrid", + "line_mean", + "hybrid", + "hybrid", + "line_mean", + "line", + ], }, ) - -def get_binned_spikes( - spikes_dict: dict, time_lims_steps: tuple[int, int], BIN_SIZE_STEPS: int -): - """ - Bin the given spike dictionary into time bins of the given size. - - Args: - spikes_dict (dict): - A dictionary of spike times for each neuron. - time_lims_steps (tuple[int, int]): - The time limits of the spike_dict in steps. - BIN_SIZE_STEPS (int): - The size of the bins in steps. - - Returns: - counts_matrix (np.ndarray): - The binned spike counts for each neuron with shape (n_bins, n_neurons, 1). - """ - ### get the spike distribution of each time bin - bins_array = np.arange( - time_lims_steps[0], - (time_lims_steps[1] - time_lims_steps[0]) + BIN_SIZE_STEPS // 2, - BIN_SIZE_STEPS, - ).astype(int) - - counts_array_list = [ - np.histogram(spikes_list, bins=bins_array)[0] - for spikes_list in spikes_dict.values() - ] - counts_matrix = np.stack(counts_array_list, axis=1) - counts_matrix = counts_matrix.reshape(counts_matrix.shape + (1,)) - - return counts_matrix - - -def get_binned_variable(var_array: np.ndarray, BIN_SIZE_STEPS: int): - """ - Bin the given variable array into time bins of the given size. Bins are created on - the first axis of the array. The values of the bins are the average of the original - values in the bin. - - Args: - var_array (np.ndarray): - The variable array to bin. First axis is the time axis and should be - divisible by the bin size. Second axis is the number of time serieses. - BIN_SIZE_STEPS (int): - The size of the bins in steps. - - Returns: - np.ndarray: - The binned variable array with shape (n_bins, n_time_serieses, 1). 
- """ - ### reshape the array to bin the first axis - reshaped = var_array.reshape( - var_array.shape[0] // BIN_SIZE_STEPS, BIN_SIZE_STEPS, var_array.shape[1] +### compare incoming spikes, i.e. the r of aux 2 and the incoming spikes of post +### idx: [neuron][nr_spikes] +pre_spikes = recordings[0]["pre;spike"] +incoming_spikes_dict = {} + +### loop over post neuron dendrites (only first post neuron) +for post_dendrite in proj: + incoming_spikes_dict[post_dendrite.post_rank] = [] + if post_dendrite is None: + continue + ### if post neuron has incoming synapses, loop over pre neurons + for pre_neuron in post_dendrite: + incoming_spikes_dict[post_dendrite.post_rank].extend( + pre_spikes[pre_neuron.rank] + ) + ### sort incoming spikes + incoming_spikes_dict[post_dendrite.post_rank] = np.sort( + incoming_spikes_dict[post_dendrite.post_rank] ) - ### get the average of the values in each bin - averages: np.ndarray = np.mean(reshaped, axis=1) - averages = averages.reshape(averages.shape + (1,)) - - return averages - - -def get_top_n_percentile(x, pdf, percentile): - """ - Get the top n percentile of the given data. - - Args: - x (np.ndarray): - The data values. - pdf (np.ndarray): - The PDF values for the corresponding data values. - percentile (float): - The percentile to get. - - Returns: - x (np.ndarray): - The data values of the top n percentile. - pdf (np.ndarray): - The PDF values for the corresponding data values of the top n percentile. 
- """ - ### sort the data based on the pdf values - sort_indices = np.argsort(pdf) - x = x[sort_indices] - pdf = pdf[sort_indices] - ### get the top n percentile - pdf_norm = pdf / np.sum(pdf) - pdf_cumsum = np.cumsum(pdf_norm) - top_n_percentile = np.where(pdf_cumsum > 1 - percentile / 100)[0][0] - x = x[top_n_percentile:] - pdf = pdf[top_n_percentile:] - ### sort the data based on the data values - sort_indices = np.argsort(x) - x = x[sort_indices] - pdf = pdf[sort_indices] - return x, pdf - - -BIN_SIZE_MS = 5 -BIN_SIZE_STEPS = int(round(BIN_SIZE_MS / dt())) - - -class DistPreSpikes: - - def __init__(self, spikes_dict, time_lims_steps): - """ - Create a distribution object for the given spike dictionary. - - Args: - spikes_dict (dict): - A dictionary of spike times for each neuron. - time_lims_steps (tuple[int, int]): - The time limits of the spike_dict in steps. - """ - - self.spikes_binned = get_binned_spikes( - spikes_dict=spikes_dict, - time_lims_steps=time_lims_steps, - BIN_SIZE_STEPS=BIN_SIZE_STEPS, - ) - self._pdf_dict = {} - - def pdf(self, time_bin=0): - """ - Get the PDF of the spike counts over the population for the given time bin. - - Args: - time_bin (int): - The time bin to get the PDF for. - - Returns: - x (np.ndarray): - The spike count values of the PDF. - pdf (np.ndarray): - The PDF values for the corresponding spike count values. 
- """ - ret = self._pdf_dict.get(time_bin) - if ret is not None: - return ret - - # scale data to have standard deviation of 1 - # if all values are the same return pdf with 1 - if np.std(self.spikes_binned[time_bin]) == 0: - return (np.array([self.spikes_binned[time_bin][0]]), np.array([1])) - else: - # create histogram from 0 to pdf_max with stepsize 1 - pdf_max = ( - int( - round( - np.max(self.spikes_binned[time_bin]) - + np.std(self.spikes_binned[time_bin]) - ) - ) - + 1 - ) - ret = np.histogram( - self.spikes_binned[time_bin], - range=(0, pdf_max), - bins=np.arange(pdf_max + 1), - density=True, - ) - return (ret[1][:-1], ret[0]) - - def show_dist(self, time_bin=0): - """ - Show the distribution of the spike counts over the population for the given time - bin. - - Args: - time_bin (int): - The time bin to show the distribution for. - """ - x, pdf = self.pdf(time_bin=time_bin) - plt.bar(x, pdf, alpha=0.5, width=1) - plt.xlabel("Spikes") - plt.ylabel("Density") - plt.title("Spikes Distribution") - plt.show() - - -class DistIncomingSpikesFromModel: - - def __init__(self, pre_spikes_dict, proj, time_lims_steps): - """ - Create a distribution object for the incoming spikes. - - Args: - pre_spikes_dict (dict): - A dictionary of spike times for each neuron of the pre population. - proj (Projection): - The projection object from the pre to the post population. - time_lims_steps (tuple[int, int]): - The time limits of the pre_spikes_dict in steps. 
- """ - ### TODO check if calculated incoming spikes distribution (calculated using pre spikes distribution + synapse distribution) is correct - ### obtain this distribution from the model - ### use the spike dictionary of the pre pop and then transform it into incoming spikee dict for the post pop and then create a distribution - ### to transform the spike dict into an incoming spikes dict, wee need to know for each post neuron, to which pre neurons it is connected - - incoming_spikes_dict = {} - ### loop over each post neuron - for post_neuron_idx, post_neuron in enumerate(proj): - incoming_spikes_dict[post_neuron_idx] = [] - if post_neuron is not None: - ### loop over each pre neuron connected to the post neuron - for pre_neuron in post_neuron.pre_ranks: - ### store the spikes emitted by the pre neuron as incoming spikes for the post neuron - incoming_spikes_dict[post_neuron_idx].extend( - pre_spikes_dict[pre_neuron] - ) - ### sort the incoming spikes - incoming_spikes_dict[post_neuron_idx].sort() - - ### bin the incoming spikes - self.incoming_spikes_binned = get_binned_spikes( - spikes_dict=incoming_spikes_dict, - time_lims_steps=time_lims_steps, - BIN_SIZE_STEPS=BIN_SIZE_STEPS, - ) - self._pdf_dict = {} - - def pdf(self, time_bin=0): - """ - Get the PDF of the incoming spike counts over the (post) population for the - given time bin. - - Args: - time_bin (int): - The time bin to get the PDF for. - - Returns: - x (np.ndarray): - The incoming spike count values of the PDF. - pdf (np.ndarray): - The PDF values for the corresponding incoming spike count values. 
- """ - ret = self._pdf_dict.get(time_bin) - if ret is not None: - return ret - - # if all values are the same return pdf with 1 - if np.std(self.incoming_spikes_binned[time_bin]) == 0: - return (np.array([self.incoming_spikes_binned[time_bin][0]]), np.array([1])) - else: - # create histogram from 0 to pdf_max with stepsize 1 - pdf_max = ( - int( - round( - np.max(self.incoming_spikes_binned[time_bin]) - + np.std(self.incoming_spikes_binned[time_bin]) - ) - ) - + 1 - ) - ret = np.histogram( - self.incoming_spikes_binned[time_bin], - range=(0, pdf_max), - bins=np.arange(pdf_max + 1), - density=True, - ) - pdf = ret[0] - spike_values = ret[1][:-1] - return (spike_values[pdf > 0], pdf[pdf > 0]) - - def show_dist(self, time_bin=0): - """ - Show the distribution of the spike counts over the population for the given time - bin. - - Args: - time_bin (int): - The time bin to show the distribution for. - """ - x, pdf = self.pdf(time_bin=time_bin) - plt.bar(x, pdf, alpha=0.5, width=1) - plt.xlabel("Spikes") - plt.ylabel("Density") - plt.title("Spikes Distribution") - plt.show() - - -class DistPostSpikesAndVoltage: - - def __init__(self, spikes_dict, time_lims_steps, voltage_array): - """ - Create a distribution object for the given spike dictionary and voltage array. - - Args: - spikes_dict (dict): - A dictionary of spike times for each neuron of the post population. - time_lims_steps (tuple[int, int]): - The time limits of the spike_dict in steps. - voltage_array (np.ndarray): - The voltage array of the post population. 
- """ - ### bin spikes and voltage over time - self.spikes_post_binned = get_binned_spikes( - spikes_dict=spikes_dict, - time_lims_steps=time_lims_steps, - BIN_SIZE_STEPS=BIN_SIZE_STEPS, - ) - self.voltage_binned = get_binned_variable( - voltage_array, BIN_SIZE_STEPS=BIN_SIZE_STEPS - ) - ### initial pdf dict - self._pdf_dict = {} - - def pdf(self, time_bin=0): - """ - Get the PDF of the spike counts and voltage values over the population for the - given time bin. - - Args: - time_bin (int): - The time bin to get the PDF for. - - Returns: - x (np.ndarray): - The spike and voltage values of the PDF. Array of shape (100**2, 2). - (:, 0) are the spike values and (:, 1) are the voltage values. - pdf (np.ndarray): - The PDF values for the corresponding spike and voltage value pairs. - """ - ret = self._pdf_dict.get(time_bin) - if ret is not None: - return ret - - # Create the KDE object - kde = KernelDensity(kernel="linear") - # scale data to have standard deviation of 1 - # if all values are the same, scale is 1 - if np.std(self.spikes_post_binned[time_bin]) == 0: - scale_spikes = 1 - else: - scale_spikes = 1 / np.std(self.spikes_post_binned[time_bin]) - if np.std(self.voltage_binned[time_bin]) == 0: - scale_voltage = 1 - else: - scale_voltage = 1 / np.std(self.voltage_binned[time_bin]) - # combine the spike and voltage data - train_data = np.concatenate( - [ - scale_spikes * self.spikes_post_binned[time_bin], - scale_voltage * self.voltage_binned[time_bin], - ], - axis=1, - ) - # Fit the data to the KDE - kde.fit(train_data) - # Create points for which to estimate the PDF, here, 2D for spike and voltage - # spike between 0 and pdf_spikes_max (depends on data), voltage between -100 and 0 - pdf_spikes_min = 0 - pdf_spikes_max = int( - round( - np.max(self.spikes_post_binned[time_bin]) - + np.std(self.spikes_post_binned[time_bin]) - ) - ) - pdf_spikes_max = max(pdf_spikes_max, 1) - x = np.mgrid[pdf_spikes_min:pdf_spikes_max:100j, -100:0:100j].reshape(2, -1).T - # Estimate 
the PDF for these points - x_estimate = np.copy(x) - x_estimate[:, 0] = scale_spikes * x_estimate[:, 0] - x_estimate[:, 1] = scale_voltage * x_estimate[:, 1] - log_density = kde.score_samples(x_estimate) - pdf = np.exp(log_density) - # spikes are discrete, sum values between 0 and 1, and between 1 and 2, etc. - pdf = pdf.reshape(100, 100) - x = x.reshape(100, 100, 2) - pdf_discrete_size = int(round(pdf_spikes_max - pdf_spikes_min) + 1) - pdf_discrete = np.zeros((pdf_discrete_size, 100)) - for i in range(pdf_discrete_size): - pdf_discrete[i] = np.sum( - pdf[(x[:, 0, 0] >= i) & (x[:, 0, 0] < i + 1)], axis=0 - ) - x_discrete_spikes = np.arange(pdf_spikes_min, pdf_spikes_max + 0.5).astype(int) - x_discrete_voltage = np.linspace(-100, 0, 100) - # x_discrete are all combinations of x_discrete_spikes and x_discrete_voltage - x_discrete = np.zeros((pdf_discrete_size * 100, 2)) - for i in range(pdf_discrete_size): - x_discrete[i * 100 : (i + 1) * 100, 0] = x_discrete_spikes[i] - x_discrete[i * 100 : (i + 1) * 100, 1] = x_discrete_voltage - x_discrete = x_discrete.reshape(pdf_discrete_size, 100, 2) - ### normalize so that sum*stepsize = 1 (stepsize of spikes is 1) - stepsize = 1 * (x_discrete[0, 1, 1] - x_discrete[0, 0, 1]) - pdf_discrete = pdf_discrete / np.sum(pdf_discrete) / stepsize - # get indices of rows and columns where sum of pdf is not 0 - row_indices = np.where(np.sum(pdf_discrete, axis=1) > 0)[0] - col_indices = np.where(np.sum(pdf_discrete, axis=0) > 0)[0] - # get the pdf values and corresponding x values for the non-zero values - pdf_discrete = pdf_discrete[row_indices][:, col_indices] - x_discrete = x_discrete[row_indices][:, col_indices] - # store the pdf - ret = (x_discrete[pdf_discrete > 0], pdf_discrete[pdf_discrete > 0]) - self._pdf_dict[time_bin] = ret - # return the pdf - return ret - - def show_dist(self, time_bin=0): - """ - Show the distribution of the spike counts and voltage values over the population - for the given time bin. 
- - Args: - time_bin (int): - The time bin to show the distribution for. - """ - x, pdf = self.pdf(time_bin=time_bin) - if x.shape[0] == 1: - plt.bar(x[0, :, 1], pdf[0, :]) - plt.xlabel("Voltage") - plt.ylabel("Density") - plt.title(f"Voltage Distribution, Spikes={x[0, 0, 0]}") - plt.show() - else: - plt.pcolormesh(x[:, :, 0], x[:, :, 1], pdf) - plt.xlabel("Spikes") - plt.ylabel("Voltage") - plt.title("Voltage-Spikes Distribution") - plt.show() - - -class DistPostConductance: - - def __init__(self, conductance_array): - """ - Create a distribution object for the given conductance array. - - Args: - conductance_array (np.ndarray): - The voltage array of the post population. - """ - ### bin conductance over time - self.g_binned = get_binned_variable( - conductance_array, BIN_SIZE_STEPS=BIN_SIZE_STEPS - ) - ### initial pdf dict - self._pdf_dict = {} - - def pdf(self, time_bin=0): - """ - Get the PDF of the conductance values over the population for the given time bin. - - Args: - time_bin (int): - The time bin to get the PDF for. - - Returns: - x (np.ndarray): - The conductance values of the PDF. Array of shape (100,). - pdf (np.ndarray): - The PDF values for the corresponding conductance values. 
- """ - ret = self._pdf_dict.get(time_bin) - if ret is not None: - return ret - - # Create the KDE object - kde = KernelDensity(kernel="linear") - # scale data to have standard deviation of 1 - # if all values are the same, return this value with pdf 1 - if np.std(self.g_binned[time_bin]) == 0: - return (np.array([self.g_binned[time_bin][0]]), np.array([1])) - else: - scale = 1 / np.std(self.g_binned[time_bin]) - # Fit the data to the KDE - kde.fit(scale * self.g_binned[time_bin].reshape(-1, 1)) - # Create points for which to estimate the PDF, values can only be between 0 and max - pdf_max = self.g_binned[time_bin].max() + np.std(self.g_binned[time_bin]) - x = np.linspace(0, pdf_max, 100).reshape(-1, 1) - # Estimate the PDF for these points - log_density = kde.score_samples(x * scale) - pdf = np.exp(log_density) - ### normalize so that sum*stepsize = 1 - stepsize = x[1, 0] - x[0, 0] - pdf = pdf / np.sum(pdf) / stepsize - conductance_values = x[:, 0] - ret = (conductance_values[pdf > 0], pdf[pdf > 0]) - - return ret - - def show_dist(self, time_bin=0): - """ - Show the distribution of the conductances over the population for the given time - bin. - - Args: - time_bin (int): - The time bin to show the distribution for. - """ - x, pdf = self.pdf(time_bin=time_bin) - plt.bar(x, pdf, alpha=0.5, width=x[1] - x[0]) - plt.xlabel("Conductance") - plt.ylabel("Density") - plt.title("Conductance Distribution") - plt.show() - - -class DistSynapses: - def __init__(self, pre_pop_size, connection_probability): - """ - Create a distribution object for the number of synapses of the post population - neurons. - - Args: - pre_pop_size (int): - The size of the pre population. - connection_probability (float): - The probability of connection between the pre and post populations. 
- """ - number_synapses = binom(pre_pop_size, connection_probability) - self._x = np.arange( - number_synapses.ppf(0.05), number_synapses.ppf(0.95) + 1 - ).astype(int) - self._pdf = number_synapses.pmf(self._x) - ### normalize the pdf to sum to 1 (since stepsize is 1) - self._pdf = self._pdf / np.sum(self._pdf) - - def pdf(self): - """ - Get the PDF of the number of synapses of the post population neurons. - - Returns: - x (np.ndarray): - The synapse count values of the PDF. - pdf (np.ndarray): - The PDF values for the corresponding synapse count values. - """ - return self._x[self._pdf > 0], self._pdf[self._pdf > 0] - - def show_dist(self): - """ - Show the distribution of the number of synapses of the post population neurons. - """ - x, pdf = self.pdf() - plt.bar(x, pdf, alpha=0.5, width=1) - plt.xlabel("Synapses") - plt.ylabel("Density") - plt.title("Synapses Distribution") - plt.show() - - -class DistIncomingSpikes: - - def __init__(self, dist_pre_spikes: DistPreSpikes, dist_synapses: DistSynapses): - """ - Create a distribution object for the incoming spike counts over the post - population for the given pre spike and synapse distributions. - - Args: - dist_pre_spikes (DistPreSpikes): - The distribution of the pre spike counts. - dist_synapses (DistSynapses): - The distribution of the number of synapses of the post population neurons. - """ - self.dist_pre_spikes = dist_pre_spikes - self.dist_synapses = dist_synapses - self._pdf_dict = {} - - def pdf(self, time_bin=0): - """ - Get the PDF of the incoming spike counts over the post population for the given - time bin. - - Args: - time_bin (int): - The time bin to get the PDF for. - - Returns: - x (np.ndarray): - The incoming spike count values of the PDF. - pdf (np.ndarray): - The PDF values for the corresponding incoming spike count values. 
- """ - ret = self._pdf_dict.get(time_bin) - if ret is not None: - return ret - - ### get pdfs of pre spikes and synapses - spike_count_arr, pdf_spike_count_arr = self.dist_pre_spikes.pdf( - time_bin=time_bin - ) - synapse_count_arr, pdf_synapse_count_arr = self.dist_synapses.pdf() - ### if using pdfs to combine them and calculate new pdfs --> only use i.e. the top 95% - spike_count_arr, pdf_spike_count_arr = get_top_n_percentile( - spike_count_arr, pdf_spike_count_arr, 95 - ) - synapse_count_arr, pdf_synapse_count_arr = get_top_n_percentile( - synapse_count_arr, pdf_synapse_count_arr, 95 - ) - ### for each possible number of synapses, calculate the pdf of the sum of - ### incoming spikes and weight it with the probability of the corresponding - ### number of synapses - incoming_spike_count_list = [] - pdf_incoming_spike_count_list = [] - for synapse_count, pdf_synapse_count in zip( - synapse_count_arr, pdf_synapse_count_arr - ): - incoming_spike_count, pdf_incoming_spike_count = self.pdf_of_sum( - spike_count_arr, pdf_spike_count_arr, n=synapse_count - ) - incoming_spike_count_list.extend(incoming_spike_count) - pdf_incoming_spike_count_list.extend( - pdf_incoming_spike_count * pdf_synapse_count - ) - ### combine the pdf of unique values in incoming_spike_count_list - incoming_spike_count_arr = np.array(incoming_spike_count_list) - pdf_incoming_spike_count_arr = np.array(pdf_incoming_spike_count_list) - sort_indices = np.argsort(incoming_spike_count_arr) - incoming_spike_count_arr = incoming_spike_count_arr[sort_indices] - pdf_incoming_spike_count_arr = pdf_incoming_spike_count_arr[sort_indices] - ### combine unique values of incoming spikes and sum the corresponding pdf values (slice - ### the pdf array at the unique indices and sum the values between the indices) - incoming_spike_count_arr, unique_indices = np.unique( - incoming_spike_count_arr, return_index=True - ) - pdf_incoming_spike_count_arr = np.add.reduceat( - pdf_incoming_spike_count_arr, unique_indices 
- ) - ### sort pdf and values based on pdf - sort_indices = np.argsort(pdf_incoming_spike_count_arr) - incoming_spike_count_arr = incoming_spike_count_arr[sort_indices] - pdf_incoming_spike_count_arr = pdf_incoming_spike_count_arr[sort_indices] - ### normalize the pdf to sum to 1 (since stepsize is 1) (it is already - ### normalized but to not accumulate errors, normalize it again) - pdf_incoming_spike_count_arr = pdf_incoming_spike_count_arr / np.sum( - pdf_incoming_spike_count_arr - ) - ### store the pdf - ret = ( - incoming_spike_count_arr[pdf_incoming_spike_count_arr > 0], - pdf_incoming_spike_count_arr[pdf_incoming_spike_count_arr > 0], - ) - self._pdf_dict[time_bin] = ret - ### return the pdf - return ret - - def show_dist(self, time_bin=0): - """ - Show the distribution of the incoming spike counts over the post population for - the given time bin. - """ - x, pdf = self.pdf(time_bin=time_bin) - plt.bar(x, pdf, alpha=0.5, width=1) - plt.xlabel("Incoming Spikes") - plt.ylabel("Density") - plt.title("Incoming Spikes Distribution") - plt.show() - - def pdf_of_sum(self, x_values, pdf_values, n): - """ - Calculate the PDF of the n-th sum of a random variable X, where X has the PDF given by - x_values and pdf_values. - - Args: - x_values(np.ndarray): - The values of the random variable X. - pdf_values(np.ndarray): - The PDF values for the corresponding values of the random variable X. 
- """ - result_x = x_values.copy() - result_pdf = pdf_values.copy() - for _ in range(n - 1): - result_x_new = [] - result_pdf_new = [] - # multiply combinations for pdf_values and add combinations for x_values - result_x_new = np.add.outer(result_x, x_values).flatten() - result_pdf_new = np.outer(result_pdf, pdf_values).flatten() - # sort based on x values - sort_indices = np.argsort(result_x_new) - result_x_new = result_x_new[sort_indices] - result_pdf_new = result_pdf_new[sort_indices] - # sum the pdf_values for each unique value in x_sum - result_x, unique_indices = np.unique(result_x_new, return_index=True) - result_pdf = np.add.reduceat(result_pdf_new, unique_indices) - - return result_x, result_pdf - - -class ConductanceCalc: - - def __init__(self, tau, w): - """ - Create a conductance calculator object with the given synaptic decay time - constant and weight. - - Args: - tau (float): - The synaptic decay time constant. - w (float): - The synaptic weight. - """ - self.tau = tau - self.w = w - - def g_mean(self, nbr_spikes: int | np.ndarray, g_init: np.ndarray): - """ - Calculate the mean conductance of the post population neurons for the given number - of incoming spikes and initial (prev) conductances. - - Args: - nbr_spikes (int | np.ndarray): - The number of incoming spikes. - g_init (np.ndarray): - The initial (prev) conductances of the post population neurons. - - Returns: - g_mean_arr (np.ndarray): - The mean conductance values for the given number of spikes and initial - (prev) conductances. First axis is the number of spikes and second axis - is the initial conductance values. - g_end_arr (np.ndarray): - The end conductance values for the given number of spikes and initial - (prev) conductances. First axis is the number of spikes and second axis - is the initial conductance values. 
- """ - - # initial (prev) conductance - self.g_init = g_init - # single number of spikes (check if nbr_spikes is iterable) - if not isinstance(nbr_spikes, Iterable): - # isi duration in ms if spikes are evenly distributed - self.d = BIN_SIZE_MS / (int(nbr_spikes) + 1) - # mean exp for calculating the mean conductance - self.mean_exp = np.mean(np.exp(-np.linspace(0, self.d, 100) / self.tau)) - # calculate the mean conductance - g_mean_arr = np.zeros((1, g_init.size)) - g_end_arr = np.zeros((1, g_init.size)) - g_mean_arr[0] = self.mean_exp * np.mean( - np.stack(self._g_mean_recursive(lvl=int(nbr_spikes))), axis=0 - ) - g_end_arr[0] = self._foo(lvl=int(nbr_spikes)) - self.w - return g_mean_arr, g_end_arr - # multiple number of spikes - else: - g_mean_arr = np.zeros((nbr_spikes.size, g_init.size)) - g_end_arr = np.zeros((nbr_spikes.size, g_init.size)) - for lvl_idx, lvl in enumerate(nbr_spikes): - # isi duration in ms if spikes are evenly distributed - self.d = BIN_SIZE_MS / (int(lvl) + 1) - # mean exp for calculating the mean conductance - self.mean_exp = np.mean(np.exp(-np.linspace(0, self.d, 100) / self.tau)) - # calculate the mean conductance - g_mean_arr[lvl_idx] = self.mean_exp * np.mean( - np.stack(self._g_mean_recursive(lvl=int(lvl))), axis=0 - ) - g_end_arr[lvl_idx] = self._foo(lvl=int(lvl)) - self.w - return g_mean_arr, g_end_arr - - def _foo(self, lvl): - if lvl == 0: - ret = self.g_init * np.exp(-self.d / self.tau) + self.w - return ret - return self._foo(lvl - 1) * np.exp(-self.d / self.tau) + self.w - - def _g_mean_recursive(self, lvl): - if lvl == 0: - return [self.g_init] - ret_rec = self._g_mean_recursive(lvl - 1) - ret_rec.append(self._foo(lvl - 1)) - return ret_rec - - def show_conductance(self, nbr_spikes: int, g_init: np.ndarray): - """ - Show the conductance of the post population neurons for the given number of spikes - and initial (prev) conductances. + break - Args: - nbr_spikes (int): - The number of incoming spikes. 
- g_init (np.ndarray): - The initial (prev) conductances of the post population neurons. - """ - # calculate g_mean and g_end and print values - g_mean, g_end = self.g_mean(nbr_spikes=nbr_spikes, g_init=g_init) - print(f"Mean Conductance: {g_mean}\nEnd Conductance: {g_end}") - # generate conductance over time - timestep = 0.0001 - # time over bin - t_arr = np.arange(0, BIN_SIZE_MS, timestep) - # when spikes occur - spike_idx = np.arange( - t_arr.size // (nbr_spikes + 1), - t_arr.size - (t_arr.size // (nbr_spikes + 1)) / 2, - t_arr.size // (nbr_spikes + 1), - ) - # loop over time and calculate conductance - g = g_init - g_list = [] - for t_idx, t in enumerate(t_arr): - g = g - (g / self.tau) * timestep - if t_idx in spike_idx: - g = g + self.w - g_list.append(g) - # plot the conductance - g_mean = np.mean(np.stack(g_list), axis=0) - g_end = g_list[-1] - for g_value in g_end: - plt.axhline(y=g_value, color="r", linestyle="--") - plt.title(f"Mean Conductance: {g_mean}\nEnd Conductance: {g_end}") - plt.plot(t_arr, g_list) - plt.xlabel("Time (ms)") - plt.ylabel("Conductance") - plt.tight_layout() - plt.show() - - -class DistCurrentConductance: - - def __init__(self, tau, w): - """ - Create a current conductance object with the given synaptic decay time constant - and weight. - - Args: - tau (float): - The synaptic decay time constant. - w (float): - The synaptic weight. - """ - self.w = w - self.conductance_calc = ConductanceCalc(tau=tau, w=w) - - def pdf( - self, - incoming_spikes_count_arr, - pdf_incoming_spikes_count_arr, - prev_g_arr=np.array([0, 10]), - pdf_prev_g_arr=np.array([0.8, 0.2]), - ): - """ - Get the PDF of the current conductances of the post population for the given - incoming spikes distribution and previous conductances distribution. - - Args: - incoming_spikes_count_arr (np.ndarray): - The incoming spike count values. - pdf_incoming_spikes_count_arr (np.ndarray): - The PDF values for the corresponding incoming spike count values. 
- prev_g_arr (np.ndarray): - The previous conductance values. - pdf_prev_g_arr (np.ndarray): - The PDF values for the corresponding previous conductance values. - - Returns: - x (np.ndarray): - The current conductance values of the PDF. - pdf (np.ndarray): - The PDF values for the corresponding current conductance values. - """ - ### if using pdfs to combine them and calculate new pdfs --> only use i.e. the top 95% - incoming_spikes_count_arr, pdf_incoming_spikes_count_arr = get_top_n_percentile( - incoming_spikes_count_arr, pdf_incoming_spikes_count_arr, 95 - ) - prev_g_arr, pdf_prev_g_arr = get_top_n_percentile( - prev_g_arr, pdf_prev_g_arr, 95 - ) - ### get the pdf by combining the pdfs of the incoming spikes and previous conductances - pdf_current_g_arr = np.outer(pdf_incoming_spikes_count_arr, pdf_prev_g_arr) - - current_g_arr = np.empty((incoming_spikes_count_arr.size, prev_g_arr.size)) - current_g_end_arr = np.empty((incoming_spikes_count_arr.size, prev_g_arr.size)) - for incoming_spikes_count_idx, incoming_spikes_count in enumerate( - incoming_spikes_count_arr - ): - ( - current_g_arr[incoming_spikes_count_idx], - current_g_end_arr[incoming_spikes_count_idx], - ) = conductance_calc.g_mean( - nbr_spikes=incoming_spikes_count, g_init=prev_g_arr - ) - - ### get distribution for g_mean and g_end - ret_g_mean = self._get_pdf_of_g_arr( - pdf_current_g_arr.flatten(), current_g_arr.flatten() - ) - ret_g_end = self._get_pdf_of_g_arr( - pdf_current_g_arr.flatten(), current_g_end_arr.flatten() - ) - - return ret_g_mean, ret_g_end - - def _get_pdf_of_g_arr(self, pdf_current_g_arr, current_g_arr): - - ### use the conductance and corresponding pdf samples to estimate the density - # scale data to have standard deviation of 1 - # if all values are the same return pdf with 1 - if np.std(current_g_arr) == 0: - return (np.array([current_g_arr[0]]), np.array([1])) - else: - # create histogram from 0 to pdf_max with stepsize self.w/100 - pdf_max = np.max(current_g_arr) - ret = 
np.histogram( - current_g_arr, - weights=pdf_current_g_arr, - range=(0, pdf_max), - bins=np.arange(0, pdf_max + (self.w / 100) / 2, self.w / 100), - density=True, - ) - pdf = ret[0] - g_values = ret[1][:-1] - return (g_values[pdf > 0], pdf[pdf > 0]) - - def show_dist( - self, - incoming_spikes_count_arr, - pdf_incoming_spikes_count_arr, - prev_g_arr, - pdf_prev_g_arr, - ): - """ - Show the distribution of the current conductances of the post population for the - given incoming spikes distribution and previous conductances distribution. - """ - - dist_mean, dist_end = self.pdf( - incoming_spikes_count_arr=incoming_spikes_count_arr, - pdf_incoming_spikes_count_arr=pdf_incoming_spikes_count_arr, - prev_g_arr=prev_g_arr, - pdf_prev_g_arr=pdf_prev_g_arr, - ) - mask = dist_mean[1] > 0 - x_mean = (dist_mean[0][mask], dist_mean[1][mask]) - mask = dist_end[1] > 0 - x_end = (dist_end[0][mask], dist_end[1][mask]) - plt.subplot(411) - plt.bar( - incoming_spikes_count_arr, pdf_incoming_spikes_count_arr, alpha=0.5, width=1 - ) - plt.xlabel("Incoming Spikes") - plt.ylabel("Density") - plt.title("Incoming Spikes Distribution") - - plt.subplot(412) - if len(prev_g_arr) > 1: - width = prev_g_arr[1] - prev_g_arr[0] - else: - width = 1 - plt.bar(prev_g_arr, pdf_prev_g_arr, alpha=0.5, width=width) - plt.xlabel("Conductance") - plt.ylabel("Density") - plt.title("Previous Conductance Distribution") - - plt.subplot(413) - plt.bar( - x_mean[0], - x_mean[1], - alpha=0.5, - width=np.min(np.diff(x_mean[0])), - ) - plt.xlim( - np.min(np.concatenate([x_mean[0], x_end[0]])) - - np.min(np.diff(x_mean[0])) / 2, - np.max(np.concatenate([x_mean[0], x_end[0]])) - + np.min(np.diff(x_mean[0])) / 2, - ) - plt.xlabel("Conductance Mean") - plt.ylabel("Density") - plt.title("Conductance Mean Distribution") - - plt.subplot(414) - plt.bar( - x_end[0], - x_end[1], - alpha=0.5, - width=np.min(np.diff(x_end[0])), - ) - plt.xlim( - np.min( - np.concatenate([x_mean[0], x_end[0]]) - np.min(np.diff(x_end[0])) / 2 - 
), - np.max( - np.concatenate([x_mean[0], x_end[0]]) + np.min(np.diff(x_end[0])) / 2 - ), - ) - plt.xlabel("Conductance End") - plt.ylabel("Density") - plt.title("Conductance End Distribution") - plt.tight_layout() - plt.show() - - -dist_pre_spikes = DistPreSpikes( - spikes_dict=recordings[0]["pre;spike"], time_lims_steps=(0, 2000) +### get histogram of incoming spikes to get the sum of incoming spikes for each timestep +incoming_spikes_sum, time_step_arr = np.histogram( + incoming_spikes_dict[0], bins=np.arange(0, 2000, 1) ) -dist_post = DistPostSpikesAndVoltage( - spikes_dict=recordings[0]["post;spike"], - time_lims_steps=(0, 2000), - voltage_array=recordings[0]["post;v"], +plt.figure() +plt.plot( + time_step_arr[:-1], incoming_spikes_sum, label="incoming spikes real", alpha=0.5 ) - -dist_synapses = DistSynapses( - pre_pop_size=pop_pre.size, connection_probability=CONNECTION_PROB -) - -dist_incoming_spikes = DistIncomingSpikes( - dist_pre_spikes=dist_pre_spikes, dist_synapses=dist_synapses -) - -conductance_calc = ConductanceCalc(tau=pop_post.tau_ampa, w=WEIGHTS) - -dist_current_conductance = DistCurrentConductance(tau=pop_post.tau_ampa, w=WEIGHTS) - -dist_post_conductance = DistPostConductance( - conductance_array=recordings[0]["post;g_ampa"] -) - -dist_incoming_spikes_from_model = DistIncomingSpikesFromModel( - pre_spikes_dict=recordings[0]["pre;spike"], - proj=proj, - time_lims_steps=(0, 2000), +plt.plot( + time_step_arr, + recordings[0]["aux2;r"][:, 0], + label="incoming spikes aux2", + alpha=0.5, ) - -# Plot the results -PLOT_EXAMPLES = False -if PLOT_EXAMPLES: - ### dist pre spikes (directly from recordings) - # dist_pre_spikes.show_dist(time_bin=10) - # dist_pre_spikes.show_dist(time_bin=-1) - ### dist post spikes and voltage (directly from recordings) - # dist_post.show_dist(time_bin=10) - # dist_post.show_dist(time_bin=-1) - ### dist synapses (calculated from connection probability) - # dist_synapses.show_dist() - ### dist incoming spikes (calculated 
from dist pre spikes and dist synapses) - dist_incoming_start = dist_incoming_spikes.pdf(time_bin=5) - dist_incoming_end = dist_incoming_spikes.pdf(time_bin=25) - dist_incoming_from_model_start = dist_incoming_spikes_from_model.pdf(time_bin=5) - dist_incoming_from_model_end = dist_incoming_spikes_from_model.pdf(time_bin=25) - plt.subplot(211) - plt.bar( - dist_incoming_start[0], - dist_incoming_start[1], - alpha=0.5, - width=1, - color="b", - label="calculated", - ) - plt.bar( - dist_incoming_from_model_start[0], - dist_incoming_from_model_start[1], - alpha=0.5, - width=1, - color="r", - label="model", - ) - plt.legend() - plt.xlabel("Incoming Spikes") - plt.ylabel("Density") - plt.title("Incoming Spikes Distribution Start") - plt.subplot(212) - plt.bar( - dist_incoming_end[0], - dist_incoming_end[1], - alpha=0.5, - width=1, - color="b", - label="calculated", - ) - plt.bar( - dist_incoming_from_model_end[0], - dist_incoming_from_model_end[1], - alpha=0.5, - width=1, - color="r", - label="model", - ) - plt.legend() - plt.xlabel("Incoming Spikes") - plt.ylabel("Density") - plt.title("Incoming Spikes Distribution End") - plt.tight_layout() - plt.show() - ### conductance calc (update conductance values based on incoming spikes) - # conductance_calc.show_conductance(nbr_spikes=5, g_init=np.array([0, 0.5, 1.0, 8.0])) - ### dist current conductance (calculated from dist incoming spikes and some example previous conductance) - # incoming_spikes_count_arr, pdf_incoming_spikes_count_arr = dist_incoming_spikes.pdf( - # time_bin=10 - # ) - # dist_current_conductance.show_dist( - # incoming_spikes_count_arr=incoming_spikes_count_arr, - # pdf_incoming_spikes_count_arr=pdf_incoming_spikes_count_arr, - # prev_g_arr=np.array([0, 180.0]), - # pdf_prev_g_arr=np.array([0.5, 0.5]), - # ) - # incoming_spikes_count_arr, pdf_incoming_spikes_count_arr = dist_incoming_spikes.pdf( - # time_bin=-1 - # ) - # dist_current_conductance.show_dist( - # 
incoming_spikes_count_arr=incoming_spikes_count_arr, - # pdf_incoming_spikes_count_arr=pdf_incoming_spikes_count_arr, - # prev_g_arr=np.array([0, 180.0]), - # pdf_prev_g_arr=np.array([0.5, 0.5]), - # ) - ### dist post conductance (directly from recordings) - # dist_post_conductance.show_dist(time_bin=10) - # dist_post_conductance.show_dist(time_bin=-1) - pass - -CONDUCTANCE_LOOP = True -if CONDUCTANCE_LOOP: - nbr_bins = dist_pre_spikes.spikes_binned.shape[0] - g_end_dist = (np.array([0.0]), np.array([1.0])) - g_arr_list = [] - pdf_g_arr_list = [] - g_model_arr_list = [] - pdf_g_model_arr_list = [] - incoming_spikes_count_arr_list, pdf_incoming_spikes_count_arr_list = [], [] - incoming_spikes_count_arr_model_list, pdf_incoming_spikes_count_arr_model_list = ( - [], - [], - ) - start = time.time() - for bin in tqdm(range(nbr_bins)): - ### calculate incoming spikes distribution from pre spikes and synapse distribution - incoming_spikes_count_arr, pdf_incoming_spikes_count_arr = ( - dist_incoming_spikes.pdf(time_bin=bin) - ) - ### get incoming spikes distribution directly from model - incoming_spikes_count_arr_model, pdf_incoming_spikes_count_arr_model = ( - dist_incoming_spikes_from_model.pdf(time_bin=bin) - ) - ### calculate current conductance distribution - g_mean_dist, g_end_dist = dist_current_conductance.pdf( - incoming_spikes_count_arr=incoming_spikes_count_arr, - pdf_incoming_spikes_count_arr=pdf_incoming_spikes_count_arr, - prev_g_arr=g_end_dist[0], - pdf_prev_g_arr=g_end_dist[1], - ) - ### get current conductance distribution from model - g_model_arr, pdf_g_model_arr = dist_post_conductance.pdf(time_bin=bin) - ### store the conductance distribution - g_arr_list.append(g_mean_dist[0]) - pdf_g_arr_list.append(g_mean_dist[1]) - g_model_arr_list.append(g_model_arr) - pdf_g_model_arr_list.append(pdf_g_model_arr) - ### store the incoming spikes distribution - incoming_spikes_count_arr_list.append(incoming_spikes_count_arr) - 
pdf_incoming_spikes_count_arr_list.append(pdf_incoming_spikes_count_arr) - incoming_spikes_count_arr_model_list.append(incoming_spikes_count_arr_model) - pdf_incoming_spikes_count_arr_model_list.append( - pdf_incoming_spikes_count_arr_model - ) - print("loop time:", time.time() - start) - - start = time.time() - # Plotting - plt.subplot(411) - for x, y, z in zip(range(len(g_arr_list)), g_arr_list, pdf_g_arr_list): - norm = plt.Normalize(z.min(), z.max()) - cmap = plt.get_cmap("viridis") - colors = cmap(norm(z)) - plt.hlines(y=y, xmin=x - norm(z) * 0.5, xmax=x + norm(z) * 0.5, color=colors) - plt.ylim(0, 200) - plt.xlabel("Time Bins") - plt.ylabel("Conductance") - plt.title("Conductance Distribution over Time Bins") - plt.subplot(412) - for x, y, z in zip( - range(len(g_model_arr_list)), g_model_arr_list, pdf_g_model_arr_list - ): - norm = plt.Normalize(z.min(), z.max()) - cmap = plt.get_cmap("viridis") - colors = cmap(norm(z)) - plt.hlines(y=y, xmin=x - norm(z) * 0.5, xmax=x + norm(z) * 0.5, color=colors) - plt.ylim(0, 200) - plt.xlabel("Time Bins") - plt.ylabel("Conductance") - plt.title("Model Conductance Distribution over Time Bins") - plt.subplot(413) - for x, y, z in zip( - range(len(incoming_spikes_count_arr_list)), - incoming_spikes_count_arr_list, - pdf_incoming_spikes_count_arr_list, - ): - norm = plt.Normalize(z.min(), z.max()) - cmap = plt.get_cmap("viridis") - colors = cmap(norm(z)) - plt.hlines(y=y, xmin=x - norm(z) * 0.5, xmax=x + norm(z) * 0.5, color=colors) - plt.xlabel("Time Bins") - plt.ylabel("Incoming Spikes") - plt.title("Incoming Spikes Distribution over Time Bins") - plt.subplot(414) - for x, y, z in zip( - range(len(incoming_spikes_count_arr_model_list)), - incoming_spikes_count_arr_model_list, - pdf_incoming_spikes_count_arr_model_list, - ): - norm = plt.Normalize(z.min(), z.max()) - cmap = plt.get_cmap("viridis") - colors = cmap(norm(z)) - plt.hlines(y=y, xmin=x - norm(z) * 0.5, xmax=x + norm(z) * 0.5, color=colors) - plt.xlabel("Time 
Bins") - plt.ylabel("Incoming Spikes") - plt.title("Model Incoming Spikes Distribution over Time Bins") - plt.tight_layout() - print("plot time:", time.time() - start) - plt.show() +plt.legend() +plt.tight_layout() +plt.show() diff --git a/src/CompNeuroPy/examples/model_configurator/test2.py b/src/CompNeuroPy/examples/model_configurator/test2.py new file mode 100644 index 0000000..7eddd1e --- /dev/null +++ b/src/CompNeuroPy/examples/model_configurator/test2.py @@ -0,0 +1,95 @@ +import numpy as np +import matplotlib.pyplot as plt +import scipy.stats as stats + +# Parameters +n = 15 # number of trials +p = 0.95 # probability of success +N = 10000 # number of samples + +# Generate data samples +binomial_sample = np.random.binomial(n, p, N) +mean = n * p +std_dev = np.sqrt(n * p * (1 - p)) +normal_sample = np.random.normal(mean, std_dev, N) + +### scale normal sample above mean and below mean +normal_sample_original = normal_sample.copy() +normal_sample[normal_sample_original >= mean] = ( + normal_sample_original[normal_sample_original >= mean] * 1.1 +) +normal_sample[normal_sample_original < mean] = ( + normal_sample_original[normal_sample_original < mean] * 0.9 +) + +### round and clip the normal sample +normal_sample = np.round(normal_sample) +normal_sample[normal_sample < 0] = 0 +normal_sample[normal_sample > n] = n + + +# Statistical comparison +# Calculate descriptive statistics +binomial_mean = np.mean(binomial_sample) +binomial_std = np.std(binomial_sample) +normal_mean = np.mean(normal_sample) +normal_std = np.std(normal_sample) + +print(f"Binomial Sample Mean: {binomial_mean}, Standard Deviation: {binomial_std}") +print(f"Normal Sample Mean: {normal_mean}, Standard Deviation: {normal_std}") + +# Perform a Kolmogorov-Smirnov test +ks_statistic, p_value = stats.ks_2samp(binomial_sample, normal_sample) +print(f"KS Statistic: {ks_statistic}, P-value: {p_value}") + +# Interpretation +if p_value > 0.05: + print("The two samples are similar (fail to reject H0).") 
+else: + print("The two samples are different (reject H0).") + + +# sort both samples and calculate the root mean squared difference +binomial_sample.sort() +normal_sample.sort() +rmsd = np.sqrt(np.mean((binomial_sample - normal_sample) ** 2)) +print(f"Root Mean Squared Difference: {rmsd}") + + +# Visual comparison +plt.figure(figsize=(12, 6)) + +# Histogram of binomial sample +plt.subplot(1, 2, 1) +plt.hist( + binomial_sample, + bins=n + 1, + range=(-0.5, n + 0.5), + density=True, + alpha=0.6, + color="b", + label="Binomial", +) +plt.xlim(-0.5, n + 0.5) +plt.title("Binomial Distribution") +plt.xlabel("Value") +plt.ylabel("Frequency") + +# Histogram of normal sample +plt.subplot(1, 2, 2) +plt.hist( + normal_sample, + bins=n + 1, + range=(-0.5, n + 0.5), + density=True, + alpha=0.6, + color="r", + label="Normal", +) +plt.xlim(-0.5, n + 0.5) +plt.title("Normal Distribution") +plt.xlabel("Value") +plt.ylabel("Frequency") + +plt.tight_layout() +plt.show() From 4f0eec77cc2a8c9c9420413ee8498990f5c949a3 Mon Sep 17 00:00:00 2001 From: olmai Date: Thu, 30 May 2024 14:17:59 +0200 Subject: [PATCH 24/39] model_configurator: found a way to reduce a model, next implement this in model_configurator --- src/CompNeuroPy/__init__.py | 1 + .../examples/model_configurator/test.py | 275 +++++++++++------- src/CompNeuroPy/model_functions.py | 12 +- 3 files changed, 180 insertions(+), 108 deletions(-) diff --git a/src/CompNeuroPy/__init__.py b/src/CompNeuroPy/__init__.py index 38c8980..22a7638 100644 --- a/src/CompNeuroPy/__init__.py +++ b/src/CompNeuroPy/__init__.py @@ -41,6 +41,7 @@ ) from CompNeuroPy.model_functions import ( compile_in_folder, + annarchy_compiled, get_full_model, cnp_clear, ) diff --git a/src/CompNeuroPy/examples/model_configurator/test.py b/src/CompNeuroPy/examples/model_configurator/test.py index 0d45902..c9c3f84 100644 --- a/src/CompNeuroPy/examples/model_configurator/test.py +++ b/src/CompNeuroPy/examples/model_configurator/test.py @@ -18,6 +18,7 @@ 
PlotRecordings, interactive_plot, timing_decorator, + annarchy_compiled, ) from CompNeuroPy.neuron_models import PoissonNeuron import numpy as np @@ -99,9 +100,10 @@ neuron_aux1 = Neuron( parameters=""" pre_size = 1 : population + tau= 1.0 : population """, equations=""" - r = g_ampa/pre_size + tau*dr/dt = g_ampa/pre_size - r g_ampa = 0 """, ) @@ -109,69 +111,93 @@ neuron_aux2 = Neuron( parameters=""" number_synapses = 0 + weights = 0.0 """, equations=""" - ### r = incoming spikes - r = Normal(0, 1) * sqrt(number_synapses * sum(spikeprob) * (1 - sum(spikeprob))) + number_synapses * sum(spikeprob) : min=0, max=number_synapses + incoming_spikes = number_synapses * sum(spikeprob) + Normal(0, 1)*sqrt(number_synapses * sum(spikeprob) * (1 - sum(spikeprob))) : min=0, max=number_synapses + r = incoming_spikes * weights """, ) CONNECTION_PROB = 0.1 -WEIGHTS = 1.0 -POP_PRE_SIZE = 1000 -POP_POST_SIZE = 100 +WEIGHTS = 0.1 +POP_PRE_SIZE = 50 +POP_POST_SIZE = 50 POP_REDUCED_SIZE = 100 +NORMAL_MODEL = True +REDUCED_MODEL = True -### create not reduced model -### pre -pop_pre = Population(POP_PRE_SIZE, neuron=PoissonNeuron(rates=10.0), name="pre") -### post -pop_post = Population(POP_POST_SIZE, neuron=neuron_izh, name="post") -### pre to post -proj = Projection(pre=pop_pre, post=pop_post, target="ampa", name="proj") -proj.connect_fixed_probability(weights=WEIGHTS, probability=CONNECTION_PROB) - -### create reduced model -### pre -pop_pre2 = Population(POP_REDUCED_SIZE, neuron=PoissonNeuron(rates=10.0), name="pre2") -### post -pop_post2 = Population(POP_REDUCED_SIZE, neuron=neuron_izh_aux, name="post2") -### aux -pop_aux1 = Population(1, neuron=neuron_aux1, name="aux1") -pop_aux1.pre_size = POP_REDUCED_SIZE -pop_aux2 = Population( - POP_REDUCED_SIZE, - neuron=neuron_aux2, - name="aux2", -) -pop_aux2.number_synapses = Binomial(n=POP_PRE_SIZE, p=CONNECTION_PROB).get_values( - POP_REDUCED_SIZE -) -### pre to aux -proj_pre__aux = Projection( - pre=pop_pre2, post=pop_aux1, 
target="ampa", name="proj_pre__aux" -) -proj_pre__aux.connect_all_to_all(weights=1) -### aux2 to aux2 -proj_aux__aux = Projection( - pre=pop_aux1, post=pop_aux2, target="spikeprob", name="proj_aux__aux" -) -proj_aux__aux.connect_all_to_all(weights=1) -### aux to post -proj_aux__pre = CurrentInjection(pop_aux2, pop_post2, "exc") -proj_aux__pre.connect_current() +if NORMAL_MODEL: + ### create not reduced model + ### pre + pop_pre = Population(POP_PRE_SIZE, neuron=PoissonNeuron(rates=10.0), name="pre") + ### post + pop_post = Population(POP_POST_SIZE, neuron=neuron_izh, name="post") + ### pre to post + proj = Projection(pre=pop_pre, post=pop_post, target="ampa", name="proj") + proj.connect_fixed_probability(weights=WEIGHTS, probability=CONNECTION_PROB) +if REDUCED_MODEL: + ### create reduced model + ### pre + pop_pre2 = Population( + min([POP_REDUCED_SIZE, POP_PRE_SIZE]), + neuron=PoissonNeuron(rates=10.0), + name="pre2", + ) + ### post + pop_post2 = Population( + min([POP_REDUCED_SIZE, POP_POST_SIZE]), neuron=neuron_izh_aux, name="post2" + ) + ### aux + pop_aux1 = Population(1, neuron=neuron_aux1, name="aux1") + pop_aux1.pre_size = pop_pre2.size + pop_aux2 = Population( + pop_post2.size, + neuron=neuron_aux2, + name="aux2", + ) + pop_aux2.number_synapses = Binomial(n=POP_PRE_SIZE, p=CONNECTION_PROB).get_values( + pop_post2.size + ) + pop_aux2.weights = WEIGHTS + ### pre to aux + proj_pre__aux = Projection( + pre=pop_pre2, post=pop_aux1, target="ampa", name="proj_pre__aux" + ) + proj_pre__aux.connect_all_to_all(weights=1) + ### aux2 to aux2 + proj_aux__aux = Projection( + pre=pop_aux1, post=pop_aux2, target="spikeprob", name="proj_aux__aux" + ) + proj_aux__aux.connect_all_to_all(weights=1) + ### aux to post + proj_aux__pre = CurrentInjection(pop_aux2, pop_post2, "exc") + proj_aux__pre.connect_current() -monitors = CompNeuroMonitors( - mon_dict={ +if NORMAL_MODEL and REDUCED_MODEL: + mon_dict = { + "pre": ["spike"], + "post": ["v", "spike", "I_ampa", "g_ampa"], + 
"pre2": ["spike"], + "post2": ["v", "spike", "I_ampa", "g_ampa", "g_exc"], + "aux1": ["r"], + "aux2": ["incoming_spikes"], + } +elif NORMAL_MODEL: + mon_dict = { "pre": ["spike"], "post": ["v", "spike", "I_ampa", "g_ampa"], + } +elif REDUCED_MODEL: + mon_dict = { "pre2": ["spike"], "post2": ["v", "spike", "I_ampa", "g_ampa", "g_exc"], "aux1": ["r"], - "aux2": ["r"], } +monitors = CompNeuroMonitors( + mon_dict=mon_dict, ) compile() @@ -180,71 +206,108 @@ start = time.time() simulate(100.0) -pop_pre.rates = 1000.0 -pop_pre2.rates = 1000.0 +if NORMAL_MODEL: + pop_pre.rates = 1000.0 +if REDUCED_MODEL: + pop_pre2.rates = 1000.0 simulate(100.0) print("simulate time:", time.time() - start) recordings = monitors.get_recordings() recording_times = monitors.get_recording_times() -PlotRecordings( - figname="test.png", - recordings=recordings, - recording_times=recording_times, - shape=(4, 2), - plan={ - "position": [1, 3, 5, 2, 4, 6, 8], - "compartment": ["pre", "post", "post", "pre2", "post2", "post2", "aux1"], - "variable": ["spike", "spike", "g_ampa", "spike", "spike", "g_ampa", "r"], - "format": [ - "hybrid", - "hybrid", - "line_mean", - "hybrid", - "hybrid", - "line_mean", - "line", - ], - }, -) - -### compare incoming spikes, i.e. 
the r of aux 2 and the incoming spikes of post -### idx: [neuron][nr_spikes] -pre_spikes = recordings[0]["pre;spike"] -incoming_spikes_dict = {} - -### loop over post neuron dendrites (only first post neuron) -for post_dendrite in proj: - incoming_spikes_dict[post_dendrite.post_rank] = [] - if post_dendrite is None: - continue - ### if post neuron has incoming synapses, loop over pre neurons - for pre_neuron in post_dendrite: - incoming_spikes_dict[post_dendrite.post_rank].extend( - pre_spikes[pre_neuron.rank] - ) - ### sort incoming spikes - incoming_spikes_dict[post_dendrite.post_rank] = np.sort( - incoming_spikes_dict[post_dendrite.post_rank] +if NORMAL_MODEL and REDUCED_MODEL: + PlotRecordings( + figname="test.png", + recordings=recordings, + recording_times=recording_times, + shape=(4, 2), + plan={ + "position": [1, 3, 5, 2, 4, 6, 8], + "compartment": ["pre", "post", "post", "pre2", "post2", "post2", "aux1"], + "variable": ["spike", "spike", "g_ampa", "spike", "spike", "g_ampa", "r"], + "format": [ + "hybrid", + "hybrid", + "line_mean", + "hybrid", + "hybrid", + "line_mean", + "line", + ], + }, + ) +elif NORMAL_MODEL: + PlotRecordings( + figname="test.png", + recordings=recordings, + recording_times=recording_times, + shape=(4, 2), + plan={ + "position": [1, 3, 5], + "compartment": ["pre", "post", "post"], + "variable": ["spike", "spike", "g_ampa"], + "format": ["hybrid", "hybrid", "line_mean"], + }, + ) +elif REDUCED_MODEL: + PlotRecordings( + figname="test.png", + recordings=recordings, + recording_times=recording_times, + shape=(4, 2), + plan={ + "position": [2, 4, 6, 8], + "compartment": ["pre2", "post2", "post2", "aux1"], + "variable": ["spike", "spike", "g_ampa", "r"], + "format": ["hybrid", "hybrid", "line_mean", "line"], + }, ) - break -### get histogram of incoming spikes to get the sum of incoming spikes for each timestep -incoming_spikes_sum, time_step_arr = np.histogram( - incoming_spikes_dict[0], bins=np.arange(0, 2000, 1) -) +if NORMAL_MODEL and 
REDUCED_MODEL: + ### compare incoming spikes, i.e. the r of aux 2 and the incoming spikes of post + ### idx: [neuron][nr_spikes] + pre_spikes = recordings[0]["pre;spike"] + incoming_spikes_dict = {} -plt.figure() -plt.plot( - time_step_arr[:-1], incoming_spikes_sum, label="incoming spikes real", alpha=0.5 -) -plt.plot( - time_step_arr, - recordings[0]["aux2;r"][:, 0], - label="incoming spikes aux2", - alpha=0.5, -) -plt.legend() -plt.tight_layout() -plt.show() + ### loop over post neuron dendrites (only first post neuron) + n = 0 + for post_dendrite in proj: + incoming_spikes_dict[post_dendrite.post_rank] = [] + if post_dendrite is None: + continue + ### if post neuron has incoming synapses, loop over pre neurons + for pre_neuron in post_dendrite: + incoming_spikes_dict[post_dendrite.post_rank].extend( + pre_spikes[pre_neuron.rank] + ) + ### sort incoming spikes + incoming_spikes_dict[post_dendrite.post_rank] = np.sort( + incoming_spikes_dict[post_dendrite.post_rank] + ) + n += 1 + if n == 5: + break + + plt.figure(figsize=(6.4, 4.8 * 5)) + for idx in range(5): + plt.subplot(5, 1, idx + 1) + ### get histogram of incoming spikes to get the sum of incoming spikes for each timestep + incoming_spikes_sum, time_step_arr = np.histogram( + incoming_spikes_dict[idx], bins=np.arange(0, 2000, 1) + ) + plt.plot( + time_step_arr[:-1], + incoming_spikes_sum, + label="incoming spikes real", + alpha=0.5, + ) + plt.plot( + time_step_arr, + recordings[0]["aux2;incoming_spikes"][:, idx], + label="incoming spikes aux2", + alpha=0.5, + ) + plt.legend() + plt.tight_layout() + plt.show() diff --git a/src/CompNeuroPy/model_functions.py b/src/CompNeuroPy/model_functions.py index 2b2cfd0..df2f3c3 100644 --- a/src/CompNeuroPy/model_functions.py +++ b/src/CompNeuroPy/model_functions.py @@ -4,10 +4,15 @@ projections, clear, ) +from ANNarchy import __version__ as ANNarchy_version import os from CompNeuroPy import system_functions as sf from CompNeuroPy.generate_model import CompNeuroModel 
-from ANNarchy.core import Global + +if ANNarchy_version >= "4.8": + from ANNarchy.intern.NetworkManager import NetworkManager +else: + from ANNarchy.core import Global def compile_in_folder(folder_name, net=None, clean=False, silent=False): @@ -43,7 +48,10 @@ def annarchy_compiled(net_id=0): net_id (int, optional): Network ID. Default: 0. """ - return Global._network[net_id]["compiled"] + if ANNarchy_version >= "4.8": + return NetworkManager().is_compiled(net_id) + else: + return Global._network[net_id]["compiled"] def get_full_model(): From 7e5f7cf797dfe8dc1d8c7de3600d9ee6b7980ee8 Mon Sep 17 00:00:00 2001 From: olmai Date: Thu, 30 May 2024 17:14:20 +0200 Subject: [PATCH 25/39] model_configurator: started implementing reduced model --- .../model_configurator_cnp.py | 41 +- .../model_configurator/reduce_model.py | 787 ++++++++++++++++++ .../examples/model_configurator/test.py | 51 ++ 3 files changed, 862 insertions(+), 17 deletions(-) create mode 100644 src/CompNeuroPy/examples/model_configurator/reduce_model.py diff --git a/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py b/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py index d46531c..8c73ad4 100644 --- a/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py +++ b/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py @@ -8,6 +8,7 @@ save_variables, load_variables, clear_dir, + CompNeuroModel, ) from CompNeuroPy.system_functions import _find_folder_with_prefix from CompNeuroPy.neuron_models import poisson_neuron @@ -46,12 +47,13 @@ from scipy.stats import poisson from ANNarchy.extensions.bold import BoldMonitor from sklearn.linear_model import LinearRegression +from CompNeuroPy.examples.model_configurator.reduce_model import ReduceModel class model_configurator: def __init__( self, - model, + model: CompNeuroModel, target_firing_rate_dict, interpolation_grid_points=10, max_psp=10, @@ -816,25 +818,30 @@ def get_base(self): I_base_dict, dict 
Dictionary with baseline curretns for all configured populations. """ + self._set_weights_of_model() + print("afferent_projection_dict") + print_df(self.afferent_projection_dict) + quit() - ### create many neuron network - net_many_dict = self.create_many_neuron_network() + self.model_reduced = ReduceModel(self.model_reduced).reduce() - ### use voltage clamp networks and many neuron networks to get baseline currents - I_base_dict = {} - target_firing_rate_changed = True - nr_max_iter = 1 - nr_iter = 0 - while target_firing_rate_changed and nr_iter < nr_max_iter: - ### get baseline current values, if target firing rates could not - ### be reached, try again with new target firing rates - ( - target_firing_rate_changed, - I_base_dict, - ) = self.find_base_current(net_many_dict) - nr_iter += 1 + def _set_weights_of_model(self): + """ + Set the weights of the original model to the current weights from the + afferent_projection_dict. + """ + ### clear ANNarchy + cnp_clear() - return I_base_dict + ### create the original model + self.model.create() + + for pop_name in self.pop_name_list: + for proj_name in self.afferent_projection_dict[pop_name][ + "projection_names" + ]: + proj_dict = self.get_proj_dict(proj_name) + get_projection(proj_name).w = proj_dict["proj_weight"] def find_base_current(self, net_many_dict): """ diff --git a/src/CompNeuroPy/examples/model_configurator/reduce_model.py b/src/CompNeuroPy/examples/model_configurator/reduce_model.py new file mode 100644 index 0000000..435d264 --- /dev/null +++ b/src/CompNeuroPy/examples/model_configurator/reduce_model.py @@ -0,0 +1,787 @@ +from ANNarchy import ( + Neuron, + Population, + dt, + add_function, + Projection, + get_population, + Constant, + Synapse, + projections, + populations, + get_projection, +) +from ANNarchy.core import ConnectorMethods +import numpy as np +from CompNeuroPy import model_functions as mf +from CompNeuroPy.generate_model import generate_model +from typingchecker import check_types +import 
inspect +from CompNeuroPy import CompNeuroModel + +_connector_methods_dict = { + "One-to-One": ConnectorMethods.connect_one_to_one, + "All-to-All": ConnectorMethods.connect_all_to_all, + "Gaussian": ConnectorMethods.connect_gaussian, + "Difference-of-Gaussian": ConnectorMethods.connect_dog, + "Random": ConnectorMethods.connect_fixed_probability, + "Random Convergent": ConnectorMethods.connect_fixed_number_pre, + "Random Divergent": ConnectorMethods.connect_fixed_number_post, + "User-defined": ConnectorMethods.connect_with_func, + "MatrixMarket": ConnectorMethods.connect_from_matrix_market, + "Connectivity matrix": ConnectorMethods.connect_from_matrix, + "Sparse connectivity matrix": ConnectorMethods.connect_from_sparse, + "From File": ConnectorMethods.connect_from_file, +} + + +class _CreateReducedModel: + """ + Class to create a reduced model from the original model. + """ + + def __init__(self, model: CompNeuroModel, reduced_size: int) -> None: + """ + Prepare model for DBS stimulation + + Args: + model (CompNeuroModel): + Model to be reduced + reduced_size (int): + Size of the reduced populations + """ + self.reduced_size = reduced_size + ### check if model is already created but not compiled, if not clear annarchy + ### and create it + if not model.created or model.compiled: + mf.cnp_clear(functions=False, neurons=True, synapses=True, constants=False) + model.create(do_compile=False) + + ### analyze model to be able to recreate it + self.analyze_model() + + ### clear model + mf.cnp_clear(functions=False, neurons=True, synapses=True, constants=False) + + ### recreate model with reduced populations and projections + self.recreate_model() + + def analyze_model( + self, + ): + """ + Analyze the model to be able to recreate it. + """ + ### get all population and projection names + ( + self.population_name_list, + self.projection_name_list, + ) = self.get_all_population_and_projection_names() + + ### get population info (eq, params etc.) 
+ ( + self.neuron_model_attr_dict, + self.neuron_model_init_parameter_dict, + self.pop_init_parameter_dict, + ) = self.analyze_populations() + + ### get projection info + ( + self.proj_init_parameter_dict, + self.synapse_init_parameter_dict, + self.synapse_model_attr_dict, + self.connector_function_dict, + self.connector_function_parameter_dict, + self.pre_post_pop_name_dict, + ) = self.analyze_projections() + + def get_all_population_and_projection_names(self): + """ + Get all population and projection names. + + Returns: + population_name_list (list): + List of all population names + projection_name_list (list): + List of all projection names + """ + population_name_list: list[str] = [pop.name for pop in populations()] + projection_name_list: list[str] = [proj.name for proj in projections()] + + return population_name_list, projection_name_list + + def analyze_populations(self): + """ + Get info of each population + """ + ### values of the paramters and variables of the population's neurons, keys are the names of paramters and variables + neuron_model_attr_dict: dict[str, dict] = {} + ### parameters of the __init__ function of the Neuron class + neuron_model_init_parameter_dict: dict[str, dict] = {} + ### parameters of the __init__ function of the Population class + pop_init_parameter_dict: dict[str, dict] = {} + + ### for loop over all populations + for pop_name in self.population_name_list: + pop: Population = get_population(pop_name) + ### get the neuron model attributes (parameters/variables) + neuron_model_attr_dict[pop.name] = pop.init + ### get a dict of all paramters of the __init__ function of the Neuron + init_params = inspect.signature(Neuron.__init__).parameters + neuron_model_init_parameter_dict[pop.name] = { + param: getattr(pop.neuron_type, param) + for param in init_params + if param != "self" + } + ### get a dict of all paramters of the __init__ function of the Population + init_params = inspect.signature(Population.__init__).parameters + 
pop_init_parameter_dict[pop.name] = { + param: getattr(pop, param) + for param in init_params + if param != "self" and param != "storage_order" and param != "copied" + } + + return ( + neuron_model_attr_dict, + neuron_model_init_parameter_dict, + pop_init_parameter_dict, + ) + + def analyze_projections(self): + """ + Get info of each projection + """ + ### parameters of the __init__ function of the Projection class + proj_init_parameter_dict: dict[str, dict] = {} + ### parameters of the __init__ function of the Synapse class + synapse_init_parameter_dict: dict[str, dict] = {} + ### values of the paramters and variables of the synapse, keys are the names of paramters and variables + synapse_model_attr_dict: dict[str, dict] = {} + ### connector functions of the projections + connector_function_dict: dict = {} + ### parameters of the connector functions of the projections + connector_function_parameter_dict: dict = {} + ### names of pre- and post-synaptic populations of the projections + pre_post_pop_name_dict: dict[str, tuple] = {} + + ### loop over all projections + for proj_name in self.projection_name_list: + proj: Projection = get_projection(proj_name) + ### get the synapse model attributes (parameters/variables) + synapse_model_attr_dict[proj.name] = proj.init + ### get a dict of all paramters of the __init__ function of the Synapse + init_params = inspect.signature(Synapse.__init__).parameters + synapse_init_parameter_dict[proj.name] = { + param: getattr(proj.synapse_type, param) + for param in init_params + if param != "self" + } + ### get a dict of all paramters of the __init__ function of the Projection + init_params = inspect.signature(Projection.__init__).parameters + proj_init_parameter_dict[proj_name] = { + param: getattr(proj, param) + for param in init_params + if param != "self" and param != "synapse" and param != "copied" + } + + ### get the connector function of the projection and its parameters + ### raise errors for not supported connector 
functions + if ( + proj.connector_name == "User-defined" + or proj.connector_name == "MatrixMarket" + or proj.connector_name == "From File" + ): + raise ValueError( + f"Connector function '{_connector_methods_dict[proj.connector_name].__name__}' not supported yet" + ) + + ### get the connector function + connector_function_dict[proj.name] = _connector_methods_dict[ + proj.connector_name + ] + + ### get the parameters of the connector function + connector_function_parameter_dict[proj.name] = ( + self.get_connector_parameters(proj) + ) + + ### get the names of the pre- and post-synaptic populations + pre_post_pop_name_dict[proj.name] = (proj.pre.name, proj.post.name) + + return ( + proj_init_parameter_dict, + synapse_init_parameter_dict, + synapse_model_attr_dict, + connector_function_dict, + connector_function_parameter_dict, + pre_post_pop_name_dict, + ) + + def get_connector_parameters(self, proj: Projection): + """ + Get the parameters of the given connector function. + + Args: + proj (Projection): + Projection for which the connector parameters are needed + + Returns: + connector_parameters_dict (dict): + Parameters of the given connector function + """ + + if proj.connector_name == "One-to-One": + return { + "weights": proj._connection_args[0], + "delays": proj._connection_args[1], + "force_multiple_weights": not (proj._single_constant_weight), + "storage_format": proj._storage_format, + "storage_order": proj._storage_order, + } + elif proj.connector_name == "All-to-All": + return { + "weights": proj._connection_args[0], + "delays": proj._connection_args[1], + "allow_self_connections": proj._connection_args[2], + "force_multiple_weights": not (proj._single_constant_weight), + "storage_format": proj._storage_format, + "storage_order": proj._storage_order, + } + elif proj.connector_name == "Gaussian": + return { + "amp": proj._connection_args[0], + "sigma": proj._connection_args[1], + "delays": proj._connection_args[2], + "limit": proj._connection_args[3], + 
"allow_self_connections": proj._connection_args[4], + "storage_format": proj._storage_format, + } + elif proj.connector_name == "Difference-of-Gaussian": + return { + "amp_pos": proj._connection_args[0], + "sigma_pos": proj._connection_args[1], + "amp_neg": proj._connection_args[2], + "sigma_neg": proj._connection_args[3], + "delays": proj._connection_args[4], + "limit": proj._connection_args[5], + "allow_self_connections": proj._connection_args[6], + "storage_format": proj._storage_format, + } + elif proj.connector_name == "Random": + return { + "probability": proj._connection_args[0], + "weights": proj._connection_args[1], + "delays": proj._connection_args[2], + "allow_self_connections": proj._connection_args[3], + "force_multiple_weights": not (proj._single_constant_weight), + "storage_format": proj._storage_format, + "storage_order": proj._storage_order, + } + elif proj.connector_name == "Random Convergent": + return { + "number": proj._connection_args[0], + "weights": proj._connection_args[1], + "delays": proj._connection_args[2], + "allow_self_connections": proj._connection_args[3], + "force_multiple_weights": not (proj._single_constant_weight), + "storage_format": proj._storage_format, + "storage_order": proj._storage_order, + } + elif proj.connector_name == "Random Divergent": + return { + "number": proj._connection_args[0], + "weights": proj._connection_args[1], + "delays": proj._connection_args[2], + "allow_self_connections": proj._connection_args[3], + "force_multiple_weights": not (proj._single_constant_weight), + "storage_format": proj._storage_format, + "storage_order": proj._storage_order, + } + elif proj.connector_name == "Connectivity matrix": + return { + "weights": proj._connection_args[0], + "delays": proj._connection_args[1], + "pre_post": proj._connection_args[2], + "storage_format": proj._storage_format, + "storage_order": proj._storage_order, + } + elif proj.connector_name == "Sparse connectivity matrix": + return { + "weights": 
proj._connection_args[0], + "delays": proj._connection_args[1], + "storage_format": proj._storage_format, + "storage_order": proj._storage_order, + } + + def recreate_model(self): + """ + Recreates the model with reduced populations and projections. + """ + ### recreate populations + for pop_name in self.population_name_list: + self.recreate_population(pop_name) + ### recreate projections + for proj_name in self.projection_name_list: + self.recreate_projection(proj_name) + + def how_pop_is_connected(self, pop_name): + """ + Check how a population is connected. If the population is a postsynaptic and/or + presynaptic population, check if it gets ampa and/or gaba input. + + Args: + pop_name (str): + Name of the population to check + + Returns: + is_presynaptic (bool): + True if the population is a presynaptic population, False otherwise + is_postsynaptic (bool): + True if the population is a postsynaptic population, False otherwise + ampa (bool): + True if the population gets ampa input, False otherwise + gaba (bool): + True if the population gets gaba input, False otherwise + """ + is_presynaptic = False + is_postsynaptic = False + ampa = False + gaba = False + ### loop over all projections + for proj_name in self.projection_name_list: + ### check if the population is a presynaptic population in any projection + if self.pre_post_pop_name_dict[proj_name][0] == pop_name: + is_presynaptic = True + ### check if the population is a postsynaptic population in any projection + if self.pre_post_pop_name_dict[proj_name][1] == pop_name: + is_postsynaptic = True + ### check if the projection target is ampa or gaba + if self.proj_init_parameter_dict[proj_name]["target"] == "ampa": + ampa = True + elif self.proj_init_parameter_dict[proj_name]["target"] == "gaba": + gaba = True + + return is_presynaptic, is_postsynaptic, ampa, gaba + + def recreate_population(self, pop_name): + """ + Recreate a population with the same neuron model and parameters. 
+ + Args: + pop_name (str): + Name of the population to recreate + """ + ### 1st check how the population is connected + is_presynaptic, is_postsynaptic, ampa, gaba = self.how_pop_is_connected( + pop_name + ) + + ### 2nd recreate neuron model + ### get the stored parameters of the __init__ function of the Neuron + neuron_model_init_parameter_dict = self.neuron_model_init_parameter_dict[ + pop_name + ] + ### if the population is a postsynaptic population adjust the synaptic + ### conductance equations + if is_postsynaptic: + neuron_model_init_parameter_dict = self.adjust_neuron_model( + neuron_model_init_parameter_dict, ampa=ampa, gaba=gaba + ) + ### create the new neuron model + neuron_model_new = Neuron(**neuron_model_init_parameter_dict) + + ### 3rd recreate the population + ### get the stored parameters of the __init__ function of the Population + pop_init_parameter_dict = self.pop_init_parameter_dict[pop_name] + ### replace the neuron model with the new neuron model + pop_init_parameter_dict["neuron"] = neuron_model_new + ### replace the size with the reduced size (if reduced size is smaller than the + ### original size) + ### TODO add model requirements somewhere, here requirements = geometry = int + pop_init_parameter_dict["geometry"] = min( + [pop_init_parameter_dict["geometry"], self.reduced_size] + ) + ### create the new population + pop_new = Population(**pop_init_parameter_dict) + + ### 4th set the parameters and variables of the population's neurons + ### get the stored parameters and variables + neuron_model_attr_dict = self.neuron_model_attr_dict[pop_name] + ### set the parameters and variables + for attr_name, attr_val in neuron_model_attr_dict.items(): + setattr(pop_new, attr_name, attr_val) + + ### 5th if the population is a presynaptic population create an auxiliary + ### population to calculate the spike probability + if is_presynaptic: + Population( + 1, + neuron=self.SpikeProbCalcNeuron( + pre_size=pop_init_parameter_dict["geometry"] + ), + 
name=f"{pop_name}_auxspikeprob", + ) + + ### 6th if the population is a postsynaptic population create an auxiliary + ### population to calculate the incoming auxillary population input + ### for the ampa and gaba conductance + if ampa: + Population( + pop_init_parameter_dict["geometry"], + neuron=self.SpikeProbCalcNeuron( + pre_size=pop_init_parameter_dict["geometry"] + ), + name=f"{pop_name}_auxinputexc", + ) + + def adjust_neuron_model( + self, neuron_model_init_parameter_dict, ampa=True, gaba=True + ): + """ + Add the new synaptic input coming from the auxillary population to the neuron + model. + + Args: + neuron_model_init_parameter_dict (dict): + Dictionary with the parameters of the __init__ function of the Neuron + ampa (bool): + True if the population gets ampa input and therefore the ampa conductance + needs to be adjusted, False otherwise + gaba (bool): + True if the population gets gaba input and therefore the gaba conductance + needs to be adjusted, False otherwise + + Returns: + neuron_model_init_parameter_dict (dict): + Dictionary with the parameters of the __init__ function of the Neuron + with DBS mechanisms added + """ + ### 1st adjust the conductance equations + ### get the equations of the neuron model as a list of strings + equations_line_split_list = str( + neuron_model_init_parameter_dict["equations"] + ).splitlines() + ### search for equation with dg_ampa/dt and dg_gaba/dt + for line_idx, line in enumerate(equations_line_split_list): + if self.get_line_is_dvardt(line, "g_ampa") and ampa: + ### add " + tau_ampa*g_incomingauxexc/dt" + ### TODO add model requirements somewhere, here requirements = tau_ampa * dg_ampa/dt = -g_ampa + equations_line_split_list[line_idx] = self.add_term_to_eq_line( + line=equations_line_split_list[line_idx], + term=" + tau_ampa*g_incomingauxexc/dt", + ) + if self.get_line_is_dvardt(line, "g_gaba") and gaba: + ### add " + tau_gaba*g_incomingauxinh/dt" + ### TODO add model requirements somewhere, here requirements = 
tau_gaba * dg_gaba/dt = -g_gaba + equations_line_split_list[line_idx] = self.add_term_to_eq_line( + line=equations_line_split_list[line_idx], + term=" + tau_gaba*g_incomingauxinh/dt", + ) + ### join list to a string + neuron_model_init_parameter_dict["equations"] = "\n".join( + equations_line_split_list + ) + + ### 3rd extend description + neuron_model_init_parameter_dict["description"] = ( + f"{neuron_model_init_parameter_dict['description']}\nWith incoming auxillary population input implemented." + ) + + return neuron_model_init_parameter_dict + + def add_term_to_eq_line(self, line: str, term: str): + """ + Add a term to an equation string. + + Args: + line (str): + Equation string + term (str): + Term to add + + Returns: + line_new (str): + Equation string with added term + """ + ### check if colon is in line + if ":" not in line: + ### add term + line_new = line + term + else: + ### split line at colon + line_split = line.split(":") + ### add term + line_split[0] = line_split[0] + term + ### join line again + line_new = ":".join(line_split) + ### return new line + return line_new + + def get_line_is_dvardt(self, line: str, var_name: str): + """ + Check if a equation string contains dvar/dt. 
+ + Args: + line (str): + Equation string + """ + if "var_name" not in line: + return False + + ### remove whitespaces + line = line.replace(" ", "") + + ### check if dvar/dt is in line and before "=" + if f"d{var_name}/dt" in line and line.find(f"d{var_name}/dt") < line.find("="): + return True + + return False + + def add_DBS_to_rate_coded_neuron_model(self, neuron_model_init_parameter_dict): + """ + Add DBS mechanisms to the rate-coded neuron model + + Args: + neuron_model_init_parameter_dict (dict): + Dictionary with the parameters of the __init__ function of the Neuron + + Returns: + neuron_model_init_parameter_dict (dict): + Dictionary with the parameters of the __init__ function of the Neuron + with DBS mechanisms added + """ + + ### 1st add new DBS parameters + ### get the parameters as a list of strings + parameters_line_split_list = str( + neuron_model_init_parameter_dict["parameters"] + ).splitlines() + ### append list with new parameters + parameters_line_split_list.append("dbs_depolarization = 0 : population") + parameters_line_split_list.append("dbs_on = 0") + parameters_line_split_list.append( + "axon_rate_amp = 1.0 : population # equivalent to prob_axon_spike in spiking model" + ) + ### join list to a string + neuron_model_init_parameter_dict["parameters"] = "\n".join( + parameters_line_split_list + ) + + ### 2nd add new equations + ### get the equations of the neuron model as a list of strings + equations_line_split_list = str( + neuron_model_init_parameter_dict["equations"] + ).splitlines() + ### append axon_rate + equations_line_split_list.append( + "axon_rate = axon_rate_amp*dbs_on # equivalent to axon_spike in spiking model" + ) + ### search for equation with dmp/dt + lines_with_mp_count = 0 + for line_idx, line in enumerate(equations_line_split_list): + if self.get_line_is_dmpdt(line): + ### add depolarization term + equations_line_split_list[line_idx] = self.add_term_to_eq_line( + line=equations_line_split_list[line_idx], + term=" + 
pulse(t)*dbs_on*dbs_depolarization*neg(-1 - mp)", + ) + lines_with_mp_count += 1 + if lines_with_mp_count == 0: + raise ValueError( + "No line with dmp/dt found, only rate-coded models with mp as 'membrane potential' supported yet" + ) + ### join list to a string + neuron_model_init_parameter_dict["equations"] = "\n".join( + equations_line_split_list + ) + + ### 3rd extend description + neuron_model_init_parameter_dict["description"] = ( + f"{neuron_model_init_parameter_dict['description']}\nWith DBS mechanisms implemented." + ) + + return neuron_model_init_parameter_dict + + def recreate_projection(self, proj_name): + """ + Recreate a projection with the same synapse model and parameters and connector + function. + + Args: + proj_name (str): + Name of the projection to recreate + """ + + ### 1st recreate synapse model + ### get the stored parameters of the __init__ function of the Synapse + synapse_init_parameter_dict = self.synapse_init_parameter_dict[proj_name] + ### get the stored parameters of the __init__ function of the Projection + proj_init_parameter_dict = self.proj_init_parameter_dict[proj_name] + ### adjust the equations and paramters of the synapse model to implement DBS + synapse_init_parameter_dict = self.add_DBS_to_synapse_model( + synapse_init_parameter_dict, + ) + ### create the new synapse model + synapse_new = Synapse(**synapse_init_parameter_dict) + + ### 2nd recreate projection + ### replace the synapse model with the new synapse model + proj_init_parameter_dict["synapse"] = synapse_new + ### replace pre and post to new populations + proj_init_parameter_dict["pre"] = get_population( + self.pre_post_pop_name_dict[proj_name][0] + ) + proj_init_parameter_dict["post"] = get_population( + self.pre_post_pop_name_dict[proj_name][1] + ) + ### create the new projection + proj_new = Projection(**proj_init_parameter_dict) + + ### 3rd connect the projection with the connector function + ### get the connector function + connector_function = 
self.connector_function_dict[proj_name] + ### get the parameters of the connector function + connector_function_parameter_dict = self.connector_function_parameter_dict[ + proj_name + ] + ### connect the projection + connector_function(proj_new, **connector_function_parameter_dict) + + ### 4th set the parameters and variables of the synapse + ### get the stored parameters and variables + synapse_model_attr_dict = self.synapse_model_attr_dict[proj_name] + ### set the parameters and variables + for attr_name, attr_val in synapse_model_attr_dict.items(): + setattr(proj_new, attr_name, attr_val) + + def add_DBS_to_synapse_model(self, synapse_init_parameter_dict): + """ + Add DBS mechanisms to the synapse model. + + Args: + synapse_init_parameter_dict (dict): + Dictionary with the parameters of the __init__ function of the Synapse + + Returns: + synapse_init_parameter_dict (dict): + Dictionary with the parameters of the __init__ function of the Synapse + with DBS mechanisms added + """ + + ### check if projection is spiking + spiking = not (isinstance(synapse_init_parameter_dict["pre_spike"], type(None))) + + ### add DBS mechanisms + if spiking: + return self.add_DBS_to_spiking_synapse_model(synapse_init_parameter_dict) + else: + return self.add_DBS_to_rate_coded_synapse_model(synapse_init_parameter_dict) + + def add_DBS_to_spiking_synapse_model(self, synapse_init_parameter_dict): + """ + Add DBS mechanisms to the spiking synapse model. 
+ + Args: + synapse_init_parameter_dict (dict): + Dictionary with the parameters of the __init__ function of the Synapse + + Returns: + synapse_init_parameter_dict (dict): + Dictionary with the parameters of the __init__ function of the Synapse + with DBS mechanisms added + """ + + ### 1st add new DBS parameters + ### get the parameters as a list of strings + parameters_line_split_list = str( + synapse_init_parameter_dict["parameters"] + ).splitlines() + ### append list with new parameters + parameters_line_split_list.append("p_axon_spike_trans=0 : projection") + ### join list to a string + synapse_init_parameter_dict["parameters"] = "\n".join( + parameters_line_split_list + ) + + ### 2nd add new equation for uniform variable + ### get the equations of the synapse model as a list of strings + equations_line_split_list = str( + synapse_init_parameter_dict["equations"] + ).splitlines() + ### prepend uniform variable + equations_line_split_list.insert(0, "unif_var_dbs = Uniform(0., 1.)") + ### join list to a string + synapse_init_parameter_dict["equations"] = "\n".join(equations_line_split_list) + + ### 3rd add pre_axon_spike + synapse_init_parameter_dict["pre_axon_spike"] = ( + "g_target+=ite(unif_var_dbs Date: Fri, 31 May 2024 15:20:09 +0200 Subject: [PATCH 26/39] reduce model continued --- .../examples/model_configurator/test.py | 427 ++++++++++-------- 1 file changed, 247 insertions(+), 180 deletions(-) diff --git a/src/CompNeuroPy/examples/model_configurator/test.py b/src/CompNeuroPy/examples/model_configurator/test.py index 794ccbb..2cc8b7e 100644 --- a/src/CompNeuroPy/examples/model_configurator/test.py +++ b/src/CompNeuroPy/examples/model_configurator/test.py @@ -12,6 +12,8 @@ get_population, CurrentInjection, simulate, + projections, + populations, ) from CompNeuroPy import ( CompNeuroMonitors, @@ -83,7 +85,7 @@ """, equations=""" ### synaptic current - tau_ampa * dg_ampa/dt = -g_ampa + tau_ampa*g_exc/dt + tau_ampa * dg_ampa/dt = -g_ampa + 
tau_ampa*g_ampaaux/dt I_ampa = -neg(g_ampa*(v - E_ampa)) ### Izhikevich spiking I_v = I_app + I_ampa @@ -97,77 +99,63 @@ """, ) -neuron_aux1 = Neuron( - parameters=""" - pre_size = 1 : population - tau= 1.0 : population - """, - equations=""" - tau*dr/dt = g_ampa/pre_size - r - g_ampa = 0 - """, -) - class SpikeProbCalcNeuron(Neuron): - def __init__(self, pre_size=1): + def __init__(self, reduced_size=1): parameters = f""" - pre_size = {pre_size} : population + reduced_size = {reduced_size} : population tau= 1.0 : population """ equations = """ - tau*dr/dt = g_ampa/pre_size - r + tau*dr/dt = g_ampa/reduced_size - r g_ampa = 0 """ super().__init__(parameters=parameters, equations=equations) -neuron_aux2 = Neuron( - parameters=""" - number_synapses = 0 - weights = 0.0 - """, - equations=""" - incoming_spikes = number_synapses * sum(spikeprob) + Normal(0, 1)*sqrt(number_synapses * sum(spikeprob) * (1 - sum(spikeprob))) : min=0, max=number_synapses - r = incoming_spikes * weights - """, -) - - class InputCalcNeuron(Neuron): - def __init__(self, projection_dict, size): + def __init__(self, projection_dict): """ + This neurons get the spike probabilities of the pre neurons and calculates the + incoming spikes for each projection. It accumulates the incoming spikes of all + projections (of the same target type) and calculates the conductance increase + for the post neuron. 
+ Args: projection_dict (dict): - keys: names of afferent projections - values: dict with keys "pre_size", "connection_prob", "weights" - size (int): - size of the population + keys: names of afferent projections (of the same target type) + values: dict with keys "weights", "pre_name" """ - ### calculate number of synapses for all projections - number_synapses = { - proj_name: Binomial( - n=vals["pre_size"], p=vals["connection_prob"] - ).get_values(size) - for proj_name, vals in projection_dict.items() - } ### create parameters parameters = [ f""" - number_synapses_{proj_name} = {number_synapses[proj_name]} - weights_{proj_name} = {vals["weights"]} + number_synapses_{proj_name} = 0 + weights_{proj_name} = {vals['weights']} """ for proj_name, vals in projection_dict.items() ] parameters = "\n".join(parameters) ### create equations - ### TODO sum spikes of different afferent projections, r is the increase in conductance - equations = """ - incoming_spikes = number_synapses * sum(spikeprob) + Normal(0, 1)*sqrt(number_synapses * sum(spikeprob) * (1 - sum(spikeprob))) : min=0, max=number_synapses - r = incoming_spikes * weights + equations = [ + f""" + incoming_spikes_{proj_name} = number_synapses_{proj_name} * sum(spikeprob_{vals['pre_name']}) + Normal(0, 1)*sqrt(number_synapses_{proj_name} * sum(spikeprob_{vals['pre_name']}) * (1 - sum(spikeprob_{vals['pre_name']}))) : min=0, max=number_synapses_{proj_name} """ + for proj_name, vals in projection_dict.items() + ] + equations = "\n".join(equations) + sum_of_conductance_increase = ( + "r = " + + "".join( + [ + f"incoming_spikes_{proj_name} * weights_{proj_name} + " + for proj_name in projection_dict.keys() + ] + )[:-3] + ) + equations = equations + "\n" + sum_of_conductance_increase + super().__init__(parameters=parameters, equations=equations) @@ -176,77 +164,168 @@ def __init__(self, projection_dict, size): POP_PRE_SIZE = 50 POP_POST_SIZE = 50 POP_REDUCED_SIZE = 100 -NORMAL_MODEL = True REDUCED_MODEL = True -if 
NORMAL_MODEL: - ### create not reduced model - ### pre - pop_pre = Population(POP_PRE_SIZE, neuron=PoissonNeuron(rates=10.0), name="pre") - ### post - pop_post = Population(POP_POST_SIZE, neuron=neuron_izh, name="post") - ### pre to post - proj = Projection(pre=pop_pre, post=pop_post, target="ampa", name="proj") - proj.connect_fixed_probability(weights=WEIGHTS, probability=CONNECTION_PROB) -if REDUCED_MODEL: - ### create reduced model - ### pre - pop_pre2 = Population( - min([POP_REDUCED_SIZE, POP_PRE_SIZE]), - neuron=PoissonNeuron(rates=10.0), - name="pre2", - ) - ### post - pop_post2 = Population( - min([POP_REDUCED_SIZE, POP_POST_SIZE]), neuron=neuron_izh_aux, name="post2" - ) - ### aux - pop_aux1 = Population(1, neuron=neuron_aux1, name="aux1") - pop_aux1.pre_size = pop_pre2.size - pop_aux2 = Population( - pop_post2.size, - neuron=neuron_aux2, - name="aux2", +### create not reduced model +### pre +pop_pre1 = Population(POP_PRE_SIZE, neuron=PoissonNeuron(rates=10.0), name="pop_pre1") +pop_pre2 = Population(POP_PRE_SIZE, neuron=PoissonNeuron(rates=10.0), name="pop_pre2") +### post +pop_post = Population(POP_POST_SIZE, neuron=neuron_izh, name="pop_post") +### pre to post +proj_pre1__post = Projection( + pre=pop_pre1, post=pop_post, target="ampa", name="proj_pre1__post" +) +proj_pre1__post.connect_fixed_probability(weights=WEIGHTS, probability=CONNECTION_PROB) +proj_pre2__post = Projection( + pre=pop_pre2, post=pop_post, target="ampa", name="proj_pre2__post" +) +proj_pre2__post.connect_fixed_probability(weights=WEIGHTS, probability=CONNECTION_PROB) + + +def create_reduced_pop(pop: Population): + ### TODO in ReduceModel class the initial arguments of the populaitons can be + ### obtained with the population names, here just use size, neuron and name + print(f"create_reduced_pop for {pop.name}") + if pop.name == "pop_post": + Population( + min([POP_REDUCED_SIZE, pop.size]), + neuron=neuron_izh_aux, + name=pop.name + "_reduced", + ) + else: + Population( + 
min([POP_REDUCED_SIZE, pop.size]), + neuron=pop.neuron_type, # TODO neuron type in reuced model has to be different + name=pop.name + "_reduced", + ) + + +def create_spike_collecting_aux_pop(pop: Population, projection_list: list[Projection]): + ### get all efferent projections + efferent_projection_list = [proj for proj in projection_list if proj.pre == pop] + ### check if pop has efferent projections + if len(efferent_projection_list) == 0: + return + print(f"create_spike_collecting_aux_pop for {pop.name}") + ### create the spike collecting population + pop_aux = Population( + 1, + neuron=SpikeProbCalcNeuron(reduced_size=min([POP_REDUCED_SIZE, pop.size])), + name=f"{pop.name}_spike_collecting_aux", ) - pop_aux2.number_synapses = Binomial(n=POP_PRE_SIZE, p=CONNECTION_PROB).get_values( - pop_post2.size + ### create the projection from reduced pop to spike collecting aux pop + proj = Projection( + pre=get_population(pop.name + "_reduced"), + post=pop_aux, + target="ampa", + name=f"proj_{pop.name}_spike_collecting_aux", ) - pop_aux2.weights = WEIGHTS - ### pre to aux - proj_pre__aux = Projection( - pre=pop_pre2, post=pop_aux1, target="ampa", name="proj_pre__aux" + proj.connect_all_to_all(weights=1) + + +def create_conductance_aux_pop( + pop: Population, projection_list: list[Projection], target: str +): + ### get all afferent projections + afferent_projection_list = [proj for proj in projection_list if proj.post == pop] + ### check if pop has afferent projections + if len(afferent_projection_list) == 0: + return + ### get all afferent projections with target type + afferent_target_projection_list = [ + proj for proj in afferent_projection_list if proj.target == target + ] + ### check if there are afferent projections with target type + if len(afferent_target_projection_list) == 0: + return + print(f"create_conductance_aux_pop for {pop.name} target {target}") + ### get projection informations TODO in ReduceModel class weights and probs not global constants + 
projection_dict = { + proj.name: { + "pre_size": proj.pre.size, + "connection_prob": CONNECTION_PROB, + "weights": WEIGHTS, + "pre_name": proj.pre.name, + } + for proj in afferent_target_projection_list + } + ### create the conductance calculating population + pop_aux = Population( + pop.size, + neuron=InputCalcNeuron(projection_dict=projection_dict), + name=f"{pop.name}_{target}_aux", ) - proj_pre__aux.connect_all_to_all(weights=1) - ### aux2 to aux2 - proj_aux__aux = Projection( - pre=pop_aux1, post=pop_aux2, target="spikeprob", name="proj_aux__aux" + ### set number of synapses parameter for each projection + for proj_name, vals in projection_dict.items(): + number_synapses = Binomial( + n=vals["pre_size"], p=vals["connection_prob"] + ).get_values(pop.size) + setattr(pop_aux, f"number_synapses_{proj_name}", number_synapses) + ### create the "current injection" projection from conductance calculating + ### population to the reduced post population + proj = CurrentInjection( + pre=pop_aux, + post=get_population(f"{pop.name}_reduced"), + target=f"{target}aux", + name=f"proj_{pop.name}_{target}_aux", ) - proj_aux__aux.connect_all_to_all(weights=1) - ### aux to post - proj_aux__pre = CurrentInjection(pop_aux2, pop_post2, "exc") - proj_aux__pre.connect_current() + proj.connect_current() + ### create projection from spike_prob calculating aux neurons of presynaptic + ### populations to conductance calculating aux population + for proj in afferent_target_projection_list: + pre_pop = proj.pre + pre_pop_spike_collecting_aux = get_population( + f"{pre_pop.name}_spike_collecting_aux" + ) + proj = Projection( + pre=pre_pop_spike_collecting_aux, + post=pop_aux, + target=f"spikeprob_{pre_pop.name}", + name=f"{proj.name}_spike_collecting_to_conductance", + ) + proj.connect_all_to_all(weights=1) -if NORMAL_MODEL and REDUCED_MODEL: - mon_dict = { - "pre": ["spike"], - "post": ["v", "spike", "I_ampa", "g_ampa"], - "pre2": ["spike"], - "post2": ["v", "spike", "I_ampa", "g_ampa", 
"g_exc"], - "aux1": ["r"], - "aux2": ["incoming_spikes"], - } -elif NORMAL_MODEL: + +if REDUCED_MODEL: + ### create reduced model + population_list = populations().copy() + projection_list = projections().copy() + ### for each population create a reduced population + for pop in population_list: + create_reduced_pop(pop) + ### for each population which is a presynaptic population, create a spikes collecting aux population + for pop in population_list: + create_spike_collecting_aux_pop(pop, projection_list) + ## for each population which has afferents create a population for incoming spikes for each target type + for pop in population_list: + create_conductance_aux_pop(pop, projection_list, target="ampa") + create_conductance_aux_pop(pop, projection_list, target="gaba") + +if REDUCED_MODEL: mon_dict = { - "pre": ["spike"], - "post": ["v", "spike", "I_ampa", "g_ampa"], + pop_pre1.name: ["spike"], + pop_pre2.name: ["spike"], + pop_post.name: ["spike", "g_ampa"], + f"{pop_pre1.name}_reduced": ["spike"], + f"{pop_pre2.name}_reduced": ["spike"], + f"{pop_post.name}_reduced": ["spike", "g_ampa"], + f"{pop_pre1.name}_spike_collecting_aux": ["r"], + f"{pop_pre2.name}_spike_collecting_aux": ["r"], + f"{pop_post.name}_ampa_aux": [ + "incoming_spikes_proj_pre1__post", + "incoming_spikes_proj_pre2__post", + "r", + ], } -elif REDUCED_MODEL: +else: mon_dict = { - "pre2": ["spike"], - "post2": ["v", "spike", "I_ampa", "g_ampa", "g_exc"], - "aux1": ["r"], + pop_pre1.name: ["spike"], + pop_pre2.name: ["spike"], + pop_post.name: ["spike", "g_ampa"], } + monitors = CompNeuroMonitors( mon_dict=mon_dict, ) @@ -256,109 +335,97 @@ def __init__(self, projection_dict, size): monitors.start() start = time.time() -simulate(100.0) -if NORMAL_MODEL: - pop_pre.rates = 1000.0 +simulate(50.0) +pop_pre1.rates = 1000.0 if REDUCED_MODEL: - pop_pre2.rates = 1000.0 + get_population(f"{pop_pre1.name}_reduced").rates = 1000.0 +simulate(50.0) +pop_pre2.rates = 1000.0 +if REDUCED_MODEL: + 
get_population(f"{pop_pre2.name}_reduced").rates = 1000.0 simulate(100.0) print("simulate time:", time.time() - start) recordings = monitors.get_recordings() recording_times = monitors.get_recording_times() -if NORMAL_MODEL and REDUCED_MODEL: +if REDUCED_MODEL: PlotRecordings( figname="test.png", recordings=recordings, recording_times=recording_times, - shape=(4, 2), + shape=(4, 3), plan={ - "position": [1, 3, 5, 2, 4, 6, 8], - "compartment": ["pre", "post", "post", "pre2", "post2", "post2", "aux1"], - "variable": ["spike", "spike", "g_ampa", "spike", "spike", "g_ampa", "r"], + "position": [1, 4, 7, 10, 2, 5, 8, 11, 3, 6, 9, 12], + "compartment": [ + pop_pre1.name, + pop_pre2.name, + pop_post.name, + pop_post.name, + f"{pop_pre1.name}_reduced", + f"{pop_pre2.name}_reduced", + f"{pop_post.name}_reduced", + f"{pop_post.name}_reduced", + f"{pop_pre1.name}_spike_collecting_aux", + f"{pop_pre2.name}_spike_collecting_aux", + f"{pop_post.name}_ampa_aux", + f"{pop_post.name}_ampa_aux", + ], + "variable": [ + "spike", + "spike", + "spike", + "g_ampa", + "spike", + "spike", + "spike", + "g_ampa", + "r", + "r", + "incoming_spikes_proj_pre1__post", + "incoming_spikes_proj_pre2__post", + ], "format": [ + "hybrid", "hybrid", "hybrid", "line_mean", "hybrid", "hybrid", + "hybrid", + "line_mean", + "line_mean", + "line_mean", + "line_mean", "line_mean", - "line", ], }, ) -elif NORMAL_MODEL: - PlotRecordings( - figname="test.png", - recordings=recordings, - recording_times=recording_times, - shape=(4, 2), - plan={ - "position": [1, 3, 5], - "compartment": ["pre", "post", "post"], - "variable": ["spike", "spike", "g_ampa"], - "format": ["hybrid", "hybrid", "line_mean"], - }, - ) -elif REDUCED_MODEL: +else: PlotRecordings( figname="test.png", recordings=recordings, recording_times=recording_times, shape=(4, 2), plan={ - "position": [2, 4, 6, 8], - "compartment": ["pre2", "post2", "post2", "aux1"], - "variable": ["spike", "spike", "g_ampa", "r"], - "format": ["hybrid", "hybrid", 
"line_mean", "line"], + "position": [1, 3, 5, 7], + "compartment": [ + pop_pre1.name, + pop_pre2.name, + pop_post.name, + pop_post.name, + ], + "variable": [ + "spike", + "spike", + "spike", + "g_ampa", + ], + "format": [ + "hybrid", + "hybrid", + "hybrid", + "line_mean", + ], }, ) - -if NORMAL_MODEL and REDUCED_MODEL: - ### compare incoming spikes, i.e. the r of aux 2 and the incoming spikes of post - ### idx: [neuron][nr_spikes] - pre_spikes = recordings[0]["pre;spike"] - incoming_spikes_dict = {} - - ### loop over post neuron dendrites (only first post neuron) - n = 0 - for post_dendrite in proj: - incoming_spikes_dict[post_dendrite.post_rank] = [] - if post_dendrite is None: - continue - ### if post neuron has incoming synapses, loop over pre neurons - for pre_neuron in post_dendrite: - incoming_spikes_dict[post_dendrite.post_rank].extend( - pre_spikes[pre_neuron.rank] - ) - ### sort incoming spikes - incoming_spikes_dict[post_dendrite.post_rank] = np.sort( - incoming_spikes_dict[post_dendrite.post_rank] - ) - n += 1 - if n == 5: - break - - plt.figure(figsize=(6.4, 4.8 * 5)) - for idx in range(5): - plt.subplot(5, 1, idx + 1) - ### get histogram of incoming spikes to get the sum of incoming spikes for each timestep - incoming_spikes_sum, time_step_arr = np.histogram( - incoming_spikes_dict[idx], bins=np.arange(0, 2000, 1) - ) - plt.plot( - time_step_arr[:-1], - incoming_spikes_sum, - label="incoming spikes real", - alpha=0.5, - ) - plt.plot( - time_step_arr, - recordings[0]["aux2;incoming_spikes"][:, idx], - label="incoming spikes aux2", - alpha=0.5, - ) - plt.legend() - plt.tight_layout() - plt.show() From 1f83fdf64bb05d4d3ce5cfffb8ae8e492041899e Mon Sep 17 00:00:00 2001 From: olmai Date: Mon, 3 Jun 2024 17:15:13 +0200 Subject: [PATCH 27/39] model_configurator: started using reduce model in model configurator (get_base) --- .../model_configurator_cnp.py | 120 +++- .../model_configurator_user.py | 42 +- .../model_configurator/reduce_model.py | 643 
++++++++---------- .../examples/model_configurator/test.py | 395 ++++------- 4 files changed, 534 insertions(+), 666 deletions(-) diff --git a/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py b/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py index 8c73ad4..68bcb27 100644 --- a/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py +++ b/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py @@ -9,6 +9,8 @@ load_variables, clear_dir, CompNeuroModel, + CompNeuroMonitors, + PlotRecordings, ) from CompNeuroPy.system_functions import _find_folder_with_prefix from CompNeuroPy.neuron_models import poisson_neuron @@ -26,8 +28,11 @@ simulate_until, Uniform, get_current_step, + projections, + populations, ) -from ANNarchy.core.Global import _network + +# from ANNarchy.core.Global import _network import numpy as np from scipy.interpolate import interp1d, interpn from scipy.signal import find_peaks, argrelmin @@ -47,7 +52,7 @@ from scipy.stats import poisson from ANNarchy.extensions.bold import BoldMonitor from sklearn.linear_model import LinearRegression -from CompNeuroPy.examples.model_configurator.reduce_model import ReduceModel +from CompNeuroPy.examples.model_configurator.reduce_model import _CreateReducedModel class model_configurator: @@ -142,6 +147,15 @@ def __init__( ### do things for which the model needs to be created (it will not be available later) self.analyze_model() + ### get reduced model + self.model_reduced = _CreateReducedModel( + model=self.model, + reduced_size=100, + do_create=False, + do_compile=False, + verbose=True, + ).model_reduced + ### print guide self._p_g(_p_g_1) @@ -334,6 +348,7 @@ def create_single_neuron_networks( txt = f"create network_single for {pop_name}" print(txt) self.log(txt) + ### the network with the standard neuron if single_net: self.net_single_dict[pop_name] = self.create_net_single( @@ -367,6 +382,7 @@ def create_single_neuron_networks( 
net_single_v_clamp_dummy.add( [pop_single_v_clamp_dummy, mon_single_v_clamp_dummy] ) + ### get v_rest and correspodning I_app_hold if prepare_psp: self.prepare_psp_dict[pop_name] = self.find_v_rest_for_psp( @@ -713,7 +729,7 @@ def get_new_v_rest_2000( """ use single_net to simulate 2000 ms and return v """ - net = self.net_single_dict[pop_name]["net"] + net: Network = self.net_single_dict[pop_name]["net"] pop = self.net_single_dict[pop_name]["population"] monitor = self.net_single_dict[pop_name]["monitor"] net.reset() @@ -818,30 +834,98 @@ def get_base(self): I_base_dict, dict Dictionary with baseline curretns for all configured populations. """ - self._set_weights_of_model() - print("afferent_projection_dict") - print_df(self.afferent_projection_dict) - quit() + ### TODO: current problem: model is without noise... but how large and for what is noise??? + ### neurons all behave equally (e.g. spike at same time), this changes due to different inputs ("noise" in input) + ### this could also be prevented by initializing all neurons differently (along there periodic u-v curve) + ### or by adding noise to conductances or baseline current + ### thenthe question is, how is the relation between added noise and the noise in the input + for pop_name in self.pop_name_list: + for proj_name in self.afferent_projection_dict[pop_name][ + "projection_names" + ]: + proj_dict = self.get_proj_dict(proj_name) + print(f"set weight of {proj_name} to {proj_dict['proj_weight']}") + + ### set the weights of the normal model + model = self._set_weights_of_model(mode=0) + + mon_dict = {pop_name: ["spike"] for pop_name in model.populations} + mon = CompNeuroMonitors(mon_dict=mon_dict) + mon.start() + simulate(1000) + recordings = mon.get_recordings() + recording_times = mon.get_recording_times() + plan = { + "position": list(range(1, len(model.populations) + 1)), + "compartment": model.populations, + "variable": ["spike"] * len(model.populations), + "format": ["hybrid"] * 
len(model.populations), + } + PlotRecordings( + figname="model_conf_normal_model.png", + recordings=recordings, + recording_times=recording_times, + shape=(len(plan["position"]), 1), + plan=plan, + ) - self.model_reduced = ReduceModel(self.model_reduced).reduce() + ### set the weights of the reduced model + model = self._set_weights_of_model(mode=1) + + mon_dict = {f"{pop_name}_reduced": ["spike"] for pop_name in mon_dict.keys()} + mon = CompNeuroMonitors(mon_dict=mon_dict) + mon.start() + simulate(1000) + recordings = mon.get_recordings() + recording_times = mon.get_recording_times() + plan["compartment"] = [ + f"{pop_name}_reduced" for pop_name in plan["compartment"] + ] + PlotRecordings( + figname="model_conf_reduced_model.png", + recordings=recordings, + recording_times=recording_times, + shape=(len(plan["position"]), 1), + plan=plan, + ) + ### TODO get base with reduced model + quit() - def _set_weights_of_model(self): + def _set_weights_of_model(self, mode=0): """ - Set the weights of the original model to the current weights from the + Set the weights of the model to the current weights from the afferent_projection_dict. 
""" ### clear ANNarchy cnp_clear() ### create the original model - self.model.create() + if mode == 0: + model = self.model + elif mode == 1: + model = self.model_reduced + model.create() for pop_name in self.pop_name_list: for proj_name in self.afferent_projection_dict[pop_name][ "projection_names" ]: - proj_dict = self.get_proj_dict(proj_name) - get_projection(proj_name).w = proj_dict["proj_weight"] + if mode == 0: + ### set weght of projection + proj_dict = self.get_proj_dict(proj_name) + get_projection(proj_name).w = proj_dict["proj_weight"] + elif mode == 1: + ### set weight of the projection in the conductance-calculating + ### input current population + proj_dict = self.get_proj_dict(proj_name) + proj_weight = proj_dict["proj_weight"] + proj_target_type = proj_dict["proj_target_type"] + setattr( + get_population(f"{pop_name}_{proj_target_type}_aux"), + f"weights_{proj_name}", + proj_weight, + ) + return model def find_base_current(self, net_many_dict): """ @@ -2047,6 +2131,16 @@ def set_syn_load(self, synaptic_load_dict, synaptic_contribution_dict=None): ### print guide self._p_g(_p_g_after_set_syn_load) + def set_weights(self, weights): + for pop_name in self.pop_name_list: + self.afferent_projection_dict[pop_name]["weights"] = [] + for proj_name in self.afferent_projection_dict[pop_name][ + "projection_names" + ]: + self.afferent_projection_dict[pop_name]["weights"].append( + weights[pop_name][proj_name] + ) + def get_syn_load(self, pop_name: str, target_type: str) -> float: """ Calculates the synaptic load of a population for a given target type for the given weights of the afferent_projection_dict diff --git a/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py b/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py index c1611e5..4368264 100644 --- a/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py +++ b/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py @@ -294,22 +294,38 @@ 
def BGM_part_function(params): ### obtain the maximum synaptic loads for the populations and the ### maximum weights of their afferent projections - model_conf.get_max_syn(clear=False) + model_conf.get_max_syn(cache=True) ### now either set weights directly - ### or define synaptic load of populations - synaptic_load_dict = { - "stn": [0.3, 0.3], - "gpe": [0.4], - "snr": [0.5, 0.3], - "thal": [0.1], + weights = { + "stn": { + "cor_exc__stn": 0.1420716334652917 * 0, + "cor_inh__stn": 0.3210113100293368 * 0, + }, + "gpe": {"stn__gpe": 0.14456939170522481 * 0}, + "snr": { + "stn__snr": 0.14456939170522481 * 0, + "gpe__snr": 0.3258095138891384 * 0, + "snr__snr": 0.3258095138891384 * 0, + }, + "thal": {"snr__thal": 0.33855115254020435 * 0}, } - ### and define the contributions of their afferent projections - synaptic_contribution_dict = {"snr": {"gaba": {"gpe__snr": 0.7, "snr__snr": 0.3}}} - synaptic_contribution_dict = model_conf.set_syn_load( - synaptic_load_dict, - synaptic_contribution_dict, - ) + + model_conf.set_weights(weights) + + ### or define synaptic load of populations + # synaptic_load_dict = { + # "stn": [0.3, 0.3], + # "gpe": [0.4], + # "snr": [0.5, 0.3], + # "thal": [0.1], + # } + # ### and define the contributions of their afferent projections + # synaptic_contribution_dict = {"snr": {"gaba": {"gpe__snr": 0.7, "snr__snr": 0.3}}} + # synaptic_contribution_dict = model_conf.set_syn_load( + # synaptic_load_dict, + # synaptic_contribution_dict, + # ) ### after setting the weights i.e. 
the synaptic load/contributions ### get the baseline currents diff --git a/src/CompNeuroPy/examples/model_configurator/reduce_model.py b/src/CompNeuroPy/examples/model_configurator/reduce_model.py index 435d264..ee0962d 100644 --- a/src/CompNeuroPy/examples/model_configurator/reduce_model.py +++ b/src/CompNeuroPy/examples/model_configurator/reduce_model.py @@ -10,6 +10,8 @@ projections, populations, get_projection, + Binomial, + CurrentInjection, ) from ANNarchy.core import ConnectorMethods import numpy as np @@ -18,6 +20,7 @@ from typingchecker import check_types import inspect from CompNeuroPy import CompNeuroModel +import sympy as sp _connector_methods_dict = { "One-to-One": ConnectorMethods.connect_one_to_one, @@ -37,12 +40,24 @@ class _CreateReducedModel: """ - Class to create a reduced model from the original model. + Class to create a reduced model from the original model. It is accessable via the + attribute model_reduced. + + Attributes: + model_reduced (CompNeuroModel): + Reduced model, created but not compiled """ - def __init__(self, model: CompNeuroModel, reduced_size: int) -> None: + def __init__( + self, + model: CompNeuroModel, + reduced_size: int, + do_create: bool = False, + do_compile: bool = False, + verbose: bool = False, + ) -> None: """ - Prepare model for DBS stimulation + Prepare model for reduction. 
Args: model (CompNeuroModel): @@ -51,6 +66,7 @@ def __init__(self, model: CompNeuroModel, reduced_size: int) -> None: Size of the reduced populations """ self.reduced_size = reduced_size + self.verbose = verbose ### check if model is already created but not compiled, if not clear annarchy ### and create it if not model.created or model.compiled: @@ -64,7 +80,14 @@ def __init__(self, model: CompNeuroModel, reduced_size: int) -> None: mf.cnp_clear(functions=False, neurons=True, synapses=True, constants=False) ### recreate model with reduced populations and projections - self.recreate_model() + self.model_reduced = CompNeuroModel( + model_creation_function=self.recreate_model, + name=f"{model.name}_reduced", + description=f"{model.description}\nWith reduced populations and projections.", + do_create=do_create, + do_compile=do_compile, + compile_folder_name=f"{model.compile_folder_name}_reduced", + ) def analyze_model( self, @@ -317,60 +340,21 @@ def recreate_model(self): """ Recreates the model with reduced populations and projections. """ - ### recreate populations + ### 1st for each population create a reduced population for pop_name in self.population_name_list: - self.recreate_population(pop_name) - ### recreate projections - for proj_name in self.projection_name_list: - self.recreate_projection(proj_name) - - def how_pop_is_connected(self, pop_name): - """ - Check how a population is connected. If the population is a postsynaptic and/or - presynaptic population, check if it gets ampa and/or gaba input. 
- - Args: - pop_name (str): - Name of the population to check - - Returns: - is_presynaptic (bool): - True if the population is a presynaptic population, False otherwise - is_postsynaptic (bool): - True if the population is a postsynaptic population, False otherwise - ampa (bool): - True if the population gets ampa input, False otherwise - gaba (bool): - True if the population gets gaba input, False otherwise - """ - is_presynaptic = False - is_postsynaptic = False - ampa = False - gaba = False - ### loop over all projections - for proj_name in self.projection_name_list: - ### check if the population is a presynaptic population in any projection - if self.pre_post_pop_name_dict[proj_name][0] == pop_name: - is_presynaptic = True - ### check if the population is a postsynaptic population in any projection - if self.pre_post_pop_name_dict[proj_name][1] == pop_name: - is_postsynaptic = True - ### check if the projection target is ampa or gaba - if self.proj_init_parameter_dict[proj_name]["target"] == "ampa": - ampa = True - elif self.proj_init_parameter_dict[proj_name]["target"] == "gaba": - gaba = True - - return is_presynaptic, is_postsynaptic, ampa, gaba - - def recreate_population(self, pop_name): - """ - Recreate a population with the same neuron model and parameters. 
+ self.create_reduced_pop(pop_name) + ### 2nd for each population which is a presynaptic population, create a spikes collecting aux population + for pop_name in self.population_name_list: + self.create_spike_collecting_aux_pop(pop_name) + ## 3rd for each population which has afferents create a population for incoming spikes for each target type + for pop_name in self.population_name_list: + self.create_conductance_aux_pop(pop_name, target="ampa") + self.create_conductance_aux_pop(pop_name, target="gaba") - Args: - pop_name (str): - Name of the population to recreate - """ + def create_reduced_pop(self, pop_name: str): + """ """ + if self.verbose: + print(f"create_reduced_pop for {pop_name}") ### 1st check how the population is connected is_presynaptic, is_postsynaptic, ampa, gaba = self.how_pop_is_connected( pop_name @@ -380,7 +364,7 @@ def recreate_population(self, pop_name): ### get the stored parameters of the __init__ function of the Neuron neuron_model_init_parameter_dict = self.neuron_model_init_parameter_dict[ pop_name - ] + ].copy() ### if the population is a postsynaptic population adjust the synaptic ### conductance equations if is_postsynaptic: @@ -392,15 +376,17 @@ def recreate_population(self, pop_name): ### 3rd recreate the population ### get the stored parameters of the __init__ function of the Population - pop_init_parameter_dict = self.pop_init_parameter_dict[pop_name] + pop_init_parameter_dict = self.pop_init_parameter_dict[pop_name].copy() ### replace the neuron model with the new neuron model pop_init_parameter_dict["neuron"] = neuron_model_new ### replace the size with the reduced size (if reduced size is smaller than the ### original size) ### TODO add model requirements somewhere, here requirements = geometry = int pop_init_parameter_dict["geometry"] = min( - [pop_init_parameter_dict["geometry"], self.reduced_size] + [pop_init_parameter_dict["geometry"][0], self.reduced_size] ) + ### append _reduce to the name + 
pop_init_parameter_dict["name"] = f"{pop_name}_reduced" ### create the new population pop_new = Population(**pop_init_parameter_dict) @@ -411,28 +397,152 @@ def recreate_population(self, pop_name): for attr_name, attr_val in neuron_model_attr_dict.items(): setattr(pop_new, attr_name, attr_val) - ### 5th if the population is a presynaptic population create an auxiliary - ### population to calculate the spike probability - if is_presynaptic: - Population( - 1, - neuron=self.SpikeProbCalcNeuron( - pre_size=pop_init_parameter_dict["geometry"] - ), - name=f"{pop_name}_auxspikeprob", - ) + def create_spike_collecting_aux_pop(self, pop_name: str): + """ """ + ### get all efferent projections + efferent_projection_list = [ + proj_name + for proj_name, pre_post_pop_name_dict in self.pre_post_pop_name_dict.items() + if pre_post_pop_name_dict[0] == pop_name + ] + ### check if pop has efferent projections + if len(efferent_projection_list) == 0: + return + if self.verbose: + print(f"create_spike_collecting_aux_pop for {pop_name}") + ### create the spike collecting population + pop_aux = Population( + 1, + neuron=self.SpikeProbCalcNeuron( + reduced_size=min( + [ + self.pop_init_parameter_dict[pop_name]["geometry"][0], + self.reduced_size, + ] + ) + ), + name=f"{pop_name}_spike_collecting_aux", + ) + ### create the projection from reduced pop to spike collecting aux pop + proj = Projection( + pre=get_population(pop_name + "_reduced"), + post=pop_aux, + target="ampa", + name=f"proj_{pop_name}_spike_collecting_aux", + ) + proj.connect_all_to_all(weights=1) - ### 6th if the population is a postsynaptic population create an auxiliary - ### population to calculate the incoming auxillary population input - ### for the ampa and gaba conductance - if ampa: - Population( - pop_init_parameter_dict["geometry"], - neuron=self.SpikeProbCalcNeuron( - pre_size=pop_init_parameter_dict["geometry"] - ), - name=f"{pop_name}_auxinputexc", + def create_conductance_aux_pop(self, pop_name: str, 
target: str): + """ """ + ### get all afferent projections + afferent_projection_list = [ + proj_name + for proj_name, pre_post_pop_name_dict in self.pre_post_pop_name_dict.items() + if pre_post_pop_name_dict[1] == pop_name + ] + ### check if pop has afferent projections + if len(afferent_projection_list) == 0: + return + ### get all afferent projections with target type + afferent_target_projection_list = [ + proj_name + for proj_name in afferent_projection_list + if self.proj_init_parameter_dict[proj_name]["target"] == target + ] + ### check if there are afferent projections with target type + if len(afferent_target_projection_list) == 0: + return + if self.verbose: + print(f"create_conductance_aux_pop for {pop_name} target {target}") + ### get projection informations TODO in ReduceModel class weights and probs not global constants + ### TODO somewhere add model requirements, here requirements = geometry = int and connection = fixed_probability i.e. random (with weights and probability) + projection_dict = { + proj_name: { + "pre_size": self.pop_init_parameter_dict[ + self.pre_post_pop_name_dict[proj_name][0] + ]["geometry"][0], + "connection_prob": self.connector_function_parameter_dict[proj_name][ + "probability" + ], + "weights": self.connector_function_parameter_dict[proj_name]["weights"], + "pre_name": self.pre_post_pop_name_dict[proj_name][0], + } + for proj_name in afferent_target_projection_list + } + ### create the conductance calculating population + pop_aux = Population( + self.pop_init_parameter_dict[pop_name]["geometry"][0], + neuron=self.InputCalcNeuron(projection_dict=projection_dict), + name=f"{pop_name}_{target}_aux", + ) + ### set number of synapses parameter for each projection + for proj_name, vals in projection_dict.items(): + number_synapses = Binomial( + n=vals["pre_size"], p=vals["connection_prob"] + ).get_values(self.pop_init_parameter_dict[pop_name]["geometry"][0]) + setattr(pop_aux, f"number_synapses_{proj_name}", number_synapses) + ### 
create the "current injection" projection from conductance calculating + ### population to the reduced post population + proj = CurrentInjection( + pre=pop_aux, + post=get_population(f"{pop_name}_reduced"), + target=f"incomingaux{target}", + name=f"proj_{pop_name}_{target}_aux", + ) + proj.connect_current() + ### create projection from spike_prob calculating aux neurons of presynaptic + ### populations to conductance calculating aux population + for proj_name, vals in projection_dict.items(): + pre_pop_name = vals["pre_name"] + pre_pop_spike_collecting_aux = get_population( + f"{pre_pop_name}_spike_collecting_aux" + ) + proj = Projection( + pre=pre_pop_spike_collecting_aux, + post=pop_aux, + target=f"spikeprob_{pre_pop_name}", + name=f"{proj_name}_spike_collecting_to_conductance", ) + proj.connect_all_to_all(weights=1) + + def how_pop_is_connected(self, pop_name): + """ + Check how a population is connected. If the population is a postsynaptic and/or + presynaptic population, check if it gets ampa and/or gaba input. 
+ + Args: + pop_name (str): + Name of the population to check + + Returns: + is_presynaptic (bool): + True if the population is a presynaptic population, False otherwise + is_postsynaptic (bool): + True if the population is a postsynaptic population, False otherwise + ampa (bool): + True if the population gets ampa input, False otherwise + gaba (bool): + True if the population gets gaba input, False otherwise + """ + is_presynaptic = False + is_postsynaptic = False + ampa = False + gaba = False + ### loop over all projections + for proj_name in self.projection_name_list: + ### check if the population is a presynaptic population in any projection + if self.pre_post_pop_name_dict[proj_name][0] == pop_name: + is_presynaptic = True + ### check if the population is a postsynaptic population in any projection + if self.pre_post_pop_name_dict[proj_name][1] == pop_name: + is_postsynaptic = True + ### check if the projection target is ampa or gaba + if self.proj_init_parameter_dict[proj_name]["target"] == "ampa": + ampa = True + elif self.proj_init_parameter_dict[proj_name]["target"] == "gaba": + gaba = True + + return is_presynaptic, is_postsynaptic, ampa, gaba def adjust_neuron_model( self, neuron_model_init_parameter_dict, ampa=True, gaba=True @@ -463,325 +573,138 @@ def adjust_neuron_model( ).splitlines() ### search for equation with dg_ampa/dt and dg_gaba/dt for line_idx, line in enumerate(equations_line_split_list): - if self.get_line_is_dvardt(line, "g_ampa") and ampa: - ### add " + tau_ampa*g_incomingauxexc/dt" + if ( + self.get_line_is_dvardt(line, var_name="g_ampa", tau_name="tau_ampa") + and ampa + ): + ### add " + tau_ampa*g_incomingauxampa/dt" ### TODO add model requirements somewhere, here requirements = tau_ampa * dg_ampa/dt = -g_ampa - equations_line_split_list[line_idx] = self.add_term_to_eq_line( - line=equations_line_split_list[line_idx], - term=" + tau_ampa*g_incomingauxexc/dt", + equations_line_split_list[line_idx] = ( + "tau_ampa*dg_ampa/dt = -g_ampa + 
tau_ampa*g_incomingauxampa/dt" ) - if self.get_line_is_dvardt(line, "g_gaba") and gaba: - ### add " + tau_gaba*g_incomingauxinh/dt" + if ( + self.get_line_is_dvardt(line, var_name="g_gaba", tau_name="tau_gaba") + and gaba + ): + ### add " + tau_gaba*g_incomingauxgaba/dt" ### TODO add model requirements somewhere, here requirements = tau_gaba * dg_gaba/dt = -g_gaba - equations_line_split_list[line_idx] = self.add_term_to_eq_line( - line=equations_line_split_list[line_idx], - term=" + tau_gaba*g_incomingauxinh/dt", + equations_line_split_list[line_idx] = ( + "tau_gaba*dg_gaba/dt = -g_gaba + tau_gaba*g_incomingauxgaba/dt" ) ### join list to a string neuron_model_init_parameter_dict["equations"] = "\n".join( equations_line_split_list ) - ### 3rd extend description + ### 2nd extend description neuron_model_init_parameter_dict["description"] = ( f"{neuron_model_init_parameter_dict['description']}\nWith incoming auxillary population input implemented." ) return neuron_model_init_parameter_dict - def add_term_to_eq_line(self, line: str, term: str): + def get_line_is_dvardt(self, line: str, var_name: str, tau_name: str): """ - Add a term to an equation string. + Check if a equation string has the form "tau*dvar/dt = -var". Args: line (str): Equation string - term (str): - Term to add + var_name (str): + Name of the variable + tau_name (str): + Name of the time constant Returns: - line_new (str): - Equation string with added term - """ - ### check if colon is in line - if ":" not in line: - ### add term - line_new = line + term - else: - ### split line at colon - line_split = line.split(":") - ### add term - line_split[0] = line_split[0] + term - ### join line again - line_new = ":".join(line_split) - ### return new line - return line_new - - def get_line_is_dvardt(self, line: str, var_name: str): + is_solution_correct (bool): + True if the equation is as expected, False otherwise """ - Check if a equation string contains dvar/dt. 
- - Args: - line (str): - Equation string - """ - if "var_name" not in line: + if var_name not in line: return False - ### remove whitespaces - line = line.replace(" ", "") - - ### check if dvar/dt is in line and before "=" - if f"d{var_name}/dt" in line and line.find(f"d{var_name}/dt") < line.find("="): - return True - - return False - - def add_DBS_to_rate_coded_neuron_model(self, neuron_model_init_parameter_dict): - """ - Add DBS mechanisms to the rate-coded neuron model - - Args: - neuron_model_init_parameter_dict (dict): - Dictionary with the parameters of the __init__ function of the Neuron - - Returns: - neuron_model_init_parameter_dict (dict): - Dictionary with the parameters of the __init__ function of the Neuron - with DBS mechanisms added - """ - - ### 1st add new DBS parameters - ### get the parameters as a list of strings - parameters_line_split_list = str( - neuron_model_init_parameter_dict["parameters"] - ).splitlines() - ### append list with new parameters - parameters_line_split_list.append("dbs_depolarization = 0 : population") - parameters_line_split_list.append("dbs_on = 0") - parameters_line_split_list.append( - "axon_rate_amp = 1.0 : population # equivalent to prob_axon_spike in spiking model" - ) - ### join list to a string - neuron_model_init_parameter_dict["parameters"] = "\n".join( - parameters_line_split_list - ) - - ### 2nd add new equations - ### get the equations of the neuron model as a list of strings - equations_line_split_list = str( - neuron_model_init_parameter_dict["equations"] - ).splitlines() - ### append axon_rate - equations_line_split_list.append( - "axon_rate = axon_rate_amp*dbs_on # equivalent to axon_spike in spiking model" - ) - ### search for equation with dmp/dt - lines_with_mp_count = 0 - for line_idx, line in enumerate(equations_line_split_list): - if self.get_line_is_dmpdt(line): - ### add depolarization term - equations_line_split_list[line_idx] = self.add_term_to_eq_line( - 
line=equations_line_split_list[line_idx], - term=" + pulse(t)*dbs_on*dbs_depolarization*neg(-1 - mp)", - ) - lines_with_mp_count += 1 - if lines_with_mp_count == 0: - raise ValueError( - "No line with dmp/dt found, only rate-coded models with mp as 'membrane potential' supported yet" - ) - ### join list to a string - neuron_model_init_parameter_dict["equations"] = "\n".join( - equations_line_split_list - ) - - ### 3rd extend description - neuron_model_init_parameter_dict["description"] = ( - f"{neuron_model_init_parameter_dict['description']}\nWith DBS mechanisms implemented." - ) - - return neuron_model_init_parameter_dict - - def recreate_projection(self, proj_name): - """ - Recreate a projection with the same synapse model and parameters and connector - function. - - Args: - proj_name (str): - Name of the projection to recreate - """ - - ### 1st recreate synapse model - ### get the stored parameters of the __init__ function of the Synapse - synapse_init_parameter_dict = self.synapse_init_parameter_dict[proj_name] - ### get the stored parameters of the __init__ function of the Projection - proj_init_parameter_dict = self.proj_init_parameter_dict[proj_name] - ### adjust the equations and paramters of the synapse model to implement DBS - synapse_init_parameter_dict = self.add_DBS_to_synapse_model( - synapse_init_parameter_dict, - ) - ### create the new synapse model - synapse_new = Synapse(**synapse_init_parameter_dict) - - ### 2nd recreate projection - ### replace the synapse model with the new synapse model - proj_init_parameter_dict["synapse"] = synapse_new - ### replace pre and post to new populations - proj_init_parameter_dict["pre"] = get_population( - self.pre_post_pop_name_dict[proj_name][0] - ) - proj_init_parameter_dict["post"] = get_population( - self.pre_post_pop_name_dict[proj_name][1] - ) - ### create the new projection - proj_new = Projection(**proj_init_parameter_dict) - - ### 3rd connect the projection with the connector function - ### get the 
connector function - connector_function = self.connector_function_dict[proj_name] - ### get the parameters of the connector function - connector_function_parameter_dict = self.connector_function_parameter_dict[ - proj_name - ] - ### connect the projection - connector_function(proj_new, **connector_function_parameter_dict) + # Define the variables + var, _, _, _ = sp.symbols(f"{var_name} d{var_name} dt {tau_name}") - ### 4th set the parameters and variables of the synapse - ### get the stored parameters and variables - synapse_model_attr_dict = self.synapse_model_attr_dict[proj_name] - ### set the parameters and variables - for attr_name, attr_val in synapse_model_attr_dict.items(): - setattr(proj_new, attr_name, attr_val) + # Given equation as a string + equation_str = line - def add_DBS_to_synapse_model(self, synapse_init_parameter_dict): - """ - Add DBS mechanisms to the synapse model. + # Parse the equation string + lhs, rhs = equation_str.split("=") + lhs = sp.sympify(lhs) + rhs = sp.sympify(rhs) - Args: - synapse_init_parameter_dict (dict): - Dictionary with the parameters of the __init__ function of the Synapse + # Form the equation + equation = sp.Eq(lhs, rhs) - Returns: - synapse_init_parameter_dict (dict): - Dictionary with the parameters of the __init__ function of the Synapse - with DBS mechanisms added - """ - - ### check if projection is spiking - spiking = not (isinstance(synapse_init_parameter_dict["pre_spike"], type(None))) - - ### add DBS mechanisms - if spiking: - return self.add_DBS_to_spiking_synapse_model(synapse_init_parameter_dict) - else: - return self.add_DBS_to_rate_coded_synapse_model(synapse_init_parameter_dict) - - def add_DBS_to_spiking_synapse_model(self, synapse_init_parameter_dict): - """ - Add DBS mechanisms to the spiking synapse model. 
- - Args: - synapse_init_parameter_dict (dict): - Dictionary with the parameters of the __init__ function of the Synapse - - Returns: - synapse_init_parameter_dict (dict): - Dictionary with the parameters of the __init__ function of the Synapse - with DBS mechanisms added - """ - - ### 1st add new DBS parameters - ### get the parameters as a list of strings - parameters_line_split_list = str( - synapse_init_parameter_dict["parameters"] - ).splitlines() - ### append list with new parameters - parameters_line_split_list.append("p_axon_spike_trans=0 : projection") - ### join list to a string - synapse_init_parameter_dict["parameters"] = "\n".join( - parameters_line_split_list - ) - - ### 2nd add new equation for uniform variable - ### get the equations of the synapse model as a list of strings - equations_line_split_list = str( - synapse_init_parameter_dict["equations"] - ).splitlines() - ### prepend uniform variable - equations_line_split_list.insert(0, "unif_var_dbs = Uniform(0., 1.)") - ### join list to a string - synapse_init_parameter_dict["equations"] = "\n".join(equations_line_split_list) - - ### 3rd add pre_axon_spike - synapse_init_parameter_dict["pre_axon_spike"] = ( - "g_target+=ite(unif_var_dbs Date: Tue, 4 Jun 2024 13:38:03 +0200 Subject: [PATCH 28/39] new neuron model with noise based on SNR --- .../model_configurator_cnp.py | 238 ++++++++++-------- .../model_configurator_user.py | 197 ++++++++++----- src/CompNeuroPy/neuron_models/__init__.py | 1 + .../final_models/izhikevich_2003_like_nm.py | 145 +++++++++++ 4 files changed, 414 insertions(+), 167 deletions(-) diff --git a/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py b/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py index 68bcb27..4d472a1 100644 --- a/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py +++ b/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py @@ -159,6 +159,113 @@ def __init__( ### print guide 
self._p_g(_p_g_1) + def analyze_model(self): + """ + prepares the creation of the single neuron and many neuron networks + """ + + ### clear ANNarchy and create the model + cnp_clear() + self.model.create(do_compile=False) + + ### get the neuron models from the model + for pop_name in self.pop_name_list: + self.neuron_model_dict[pop_name] = get_population(pop_name).neuron_type + self.neuron_model_parameters_dict[pop_name] = get_population( + pop_name + ).init.items() + self.neuron_model_attributes_dict[pop_name] = get_population( + pop_name + ).attributes + + ### do further things for which the model needs to be created + ### get the afferent projection dict for the populations (model needed!) + for pop_name in self.pop_name_list: + ### get afferent projection dict + self.log(f"get the afferent_projection_dict for {pop_name}") + self.afferent_projection_dict[pop_name] = self.get_afferent_projection_dict( + pop_name=pop_name + ) + + ### create dictionary with timeconstants of g_ampa and g_gaba of the populations + for pop_name in self.pop_name_list: + self.tau_dict[pop_name] = { + "ampa": get_population(pop_name).tau_ampa, + "gaba": get_population(pop_name).tau_gaba, + } + + ### get the post_pop_name_dict + self.post_pop_name_dict = {} + for proj_name in self.model.projections: + self.post_pop_name_dict[proj_name] = get_projection(proj_name).post.name + + ### get the pre_pop_name_dict + self.pre_pop_name_dict = {} + for proj_name in self.model.projections: + self.pre_pop_name_dict[proj_name] = get_projection(proj_name).pre.name + + ### get the pre_pop_size_dict + self.pre_pop_size_dict = {} + for proj_name in self.model.projections: + self.pre_pop_size_dict[proj_name] = get_projection(proj_name).pre.size + + ### clear ANNarchy --> the model is not available anymore + cnp_clear() + + def get_afferent_projection_dict(self, pop_name): + """ + creates a dictionary containing + projection_names + target firing rate + probability + size + target + for each afferent 
projection (=first level of keys) of the specified population + + Args: + pop_name: str + populaiton name + + return: dict of dicts + """ + ### check if model is available + if not self.model.created: + error_msg = "ERROR model_configurator get_afferent_projection_dict: the model has to be created!" + self.log(error_msg) + raise AssertionError(error_msg) + ### get projection names + afferent_projection_dict = {} + afferent_projection_dict["projection_names"] = [] + for projection in self.model.projections: + if get_projection(projection).post.name == pop_name: + afferent_projection_dict["projection_names"].append(projection) + + self.nr_afferent_proj_dict[pop_name] = len( + afferent_projection_dict["projection_names"] + ) + + ### get target firing rates resting-state for afferent projections + afferent_projection_dict["target firing rate"] = [] + afferent_projection_dict["probability"] = [] + afferent_projection_dict["size"] = [] + afferent_projection_dict["target"] = [] + for projection in afferent_projection_dict["projection_names"]: + pre_pop_name = get_projection(projection).pre.name + ### target firing rate + afferent_projection_dict["target firing rate"].append( + self.target_firing_rate_dict[pre_pop_name] + ) + ### probability, _connection_args only if connect_fixed_prob (i.e. 
connector_name==Random) + afferent_projection_dict["probability"].append( + get_projection(projection)._connection_args[0] + ) + ### size + afferent_projection_dict["size"].append(len(get_projection(projection).pre)) + ### target type + afferent_projection_dict["target"].append(get_projection(projection).target) + + return afferent_projection_dict + def get_max_syn(self, cache=True, clear=False): """ get the weight dictionary for all populations given in target_firing_rate_dict @@ -839,6 +946,10 @@ def get_base(self): ### this could also be prevented by initializing all neurons differently (along there periodic u-v curve) ### or by adding noise to conductances or baseline current ### thenthe question is, how is the relation between added noise and the noise in the input + ### TODO: I've decided for noise depending on the input current (scaled by specified SNR) + ### without input there is no noise, decorrelate neurons by random initial values + ### TODO: current idea is: to find max syn things the noise has to be deactivated and to find baseline currents the noise has to be activated + ### so single neuron networks should be without noise, an then here noise should be activated, maybe requirement for model conf will be a variable called noise to turn on and off noise for pop_name in self.pop_name_list: for proj_name in self.afferent_projection_dict[pop_name][ "projection_names" @@ -849,6 +960,15 @@ def get_base(self): ### set the weights of the normal model model = self._set_weights_of_model(mode=0) + ### set initial variables of populations (do not initialize all neurons the same) + for pop_name in self.pop_name_list: + population = get_population(pop_name) + variable_init_sampler = self.net_single_dict[pop_name][ + "variable_init_sampler" + ] + self.set_init_variables(population, variable_init_sampler) + + ### record and simulate mon_dict = {pop_name: ["spike"] for pop_name in model.populations} mon = CompNeuroMonitors(mon_dict=mon_dict) mon.start() @@ -872,6 
+992,15 @@ def get_base(self): ### set the weights of the reduced model model = self._set_weights_of_model(mode=1) + ### set initial variables of populations (do not initialize all neurons the same) + for pop_name in self.pop_name_list: + population = get_population(f"{pop_name}_reduced") + variable_init_sampler = self.net_single_dict[pop_name][ + "variable_init_sampler" + ] + self.set_init_variables(population, variable_init_sampler) + + ### record and simulate mon_dict = {f"{pop_name}_reduced": ["spike"] for pop_name in mon_dict.keys()} mon = CompNeuroMonitors(mon_dict=mon_dict) mon.start() @@ -888,6 +1017,8 @@ def get_base(self): shape=(len(plan["position"]), 1), plan=plan, ) + ### next check if populations which should not be tuned have the correct firing rates, if not warning that the populations are tuned but if the rate of the not tuned populations changes this might also change the tuned populations' rates + ### next activate noise and then performe search algorithm ith reduced model with input varaibles = I_app of populations and output variables = firing rates of populations ### TODO get base with reduced model quit() @@ -2223,59 +2354,6 @@ def divide_almost_equal(self, number, num_parts): return result - def analyze_model(self): - """ - prepares the creation of the single neuron and many neuron networks - """ - - ### clear ANNarchy and create the model - cnp_clear() - self.model.create(do_compile=False) - - ### get the neuron models from the model - for pop_name in self.pop_name_list: - self.neuron_model_dict[pop_name] = get_population(pop_name).neuron_type - self.neuron_model_parameters_dict[pop_name] = get_population( - pop_name - ).init.items() - self.neuron_model_attributes_dict[pop_name] = get_population( - pop_name - ).attributes - - ### do further things for which the model needs to be created - ### get the afferent projection dict for the populations (model needed!) 
- for pop_name in self.pop_name_list: - ### get afferent projection dict - self.log(f"get the afferent_projection_dict for {pop_name}") - self.afferent_projection_dict[pop_name] = self.get_afferent_projection_dict( - pop_name=pop_name - ) - - ### create dictionary with timeconstants of g_ampa and g_gaba of the populations - for pop_name in self.pop_name_list: - self.tau_dict[pop_name] = { - "ampa": get_population(pop_name).tau_ampa, - "gaba": get_population(pop_name).tau_gaba, - } - - ### get the post_pop_name_dict - self.post_pop_name_dict = {} - for proj_name in self.model.projections: - self.post_pop_name_dict[proj_name] = get_projection(proj_name).post.name - - ### get the pre_pop_name_dict - self.pre_pop_name_dict = {} - for proj_name in self.model.projections: - self.pre_pop_name_dict[proj_name] = get_projection(proj_name).pre.name - - ### get the pre_pop_size_dict - self.pre_pop_size_dict = {} - for proj_name in self.model.projections: - self.pre_pop_size_dict[proj_name] = get_projection(proj_name).pre.size - - ### clear ANNarchy --> the model is not available anymore - cnp_clear() - def compile_net_many_sequential(self): network_list = [ net_many_dict["net"] @@ -2323,60 +2401,6 @@ def compile_net_many_parallel(self): _network[net_idx]["compiled"] = True _network[net_idx]["directory"] = run_folder_name - def get_afferent_projection_dict(self, pop_name): - """ - creates a dictionary containing - projection_names - target firing rate - probability - size - target - for each afferent projection (=first level of keys) of the specified population - - Args: - pop_name: str - populaiton name - - return: dict of dicts - """ - ### check if model is available - if not self.model.created: - error_msg = "ERROR model_configurator get_afferent_projection_dict: the model has to be created!" 
- self.log(error_msg) - raise AssertionError(error_msg) - ### get projection names - afferent_projection_dict = {} - afferent_projection_dict["projection_names"] = [] - for projection in self.model.projections: - if get_projection(projection).post.name == pop_name: - afferent_projection_dict["projection_names"].append(projection) - - self.nr_afferent_proj_dict[pop_name] = len( - afferent_projection_dict["projection_names"] - ) - - ### get target firing rates resting-state for afferent projections - afferent_projection_dict["target firing rate"] = [] - afferent_projection_dict["probability"] = [] - afferent_projection_dict["size"] = [] - afferent_projection_dict["target"] = [] - for projection in afferent_projection_dict["projection_names"]: - pre_pop_name = get_projection(projection).pre.name - ### target firing rate - afferent_projection_dict["target firing rate"].append( - self.target_firing_rate_dict[pre_pop_name] - ) - ### probability, _connection_args only if connect_fixed_prob (i.e. connector_name==Random) - afferent_projection_dict["probability"].append( - get_projection(projection)._connection_args[0] - ) - ### size - afferent_projection_dict["size"].append(len(get_projection(projection).pre)) - ### target type - afferent_projection_dict["target"].append(get_projection(projection).target) - - return afferent_projection_dict - def get_max_syn_currents(self, pop_name: str) -> list: """ obtain I_app_max, g_ampa_max and g_gaba max. 
diff --git a/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py b/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py index 4368264..83a6a91 100644 --- a/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py +++ b/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py @@ -9,9 +9,16 @@ ) from CompNeuroPy.neuron_models import ( poisson_neuron_up_down, - Izhikevich2003_flexible_noisy_I, + Izhikevich2003NoisyBaseSNR, +) +from CompNeuroPy import ( + generate_model, + Monitors, + plot_recordings, + my_raster_plot, + CompNeuroMonitors, + PlotRecordings, ) -from CompNeuroPy import generate_model, Monitors, plot_recordings, my_raster_plot from CompNeuroPy.examples.model_configurator.model_configurator_cnp import ( model_configurator, ) @@ -39,68 +46,88 @@ def BGM_part_function(params): ### BG Populations stn = Population( params["stn.size"], - Izhikevich2003_flexible_noisy_I, + Izhikevich2003NoisyBaseSNR( + a=params["stn.a"], + b=params["stn.b"], + c=params["stn.c"], + d=params["stn.d"], + n2=params["stn.n2"], + n1=params["stn.n1"], + n0=params["stn.n0"], + tau_ampa=params["stn.tau_ampa"], + tau_gaba=params["stn.tau_gaba"], + E_ampa=params["stn.E_ampa"], + E_gaba=params["stn.E_gaba"], + noise=0, + tau_power=10, + snr_target=4, + rate_noise=100, + ), name="stn", ) - stn.a = params["stn.a"] - stn.b = params["stn.b"] - stn.c = params["stn.c"] - stn.d = params["stn.d"] - stn.n2 = params["stn.n2"] - stn.n1 = params["stn.n1"] - stn.n0 = params["stn.n0"] - stn.tau_ampa = params["stn.tau_ampa"] - stn.tau_gaba = params["stn.tau_gaba"] - stn.E_ampa = params["stn.E_ampa"] - stn.E_gaba = params["stn.E_gaba"] snr = Population( params["snr.size"], - Izhikevich2003_flexible_noisy_I, + Izhikevich2003NoisyBaseSNR( + a=params["snr.a"], + b=params["snr.b"], + c=params["snr.c"], + d=params["snr.d"], + n2=params["snr.n2"], + n1=params["snr.n1"], + n0=params["snr.n0"], + tau_ampa=params["snr.tau_ampa"], + 
tau_gaba=params["snr.tau_gaba"], + E_ampa=params["snr.E_ampa"], + E_gaba=params["snr.E_gaba"], + noise=0, + tau_power=10, + snr_target=4, + rate_noise=100, + ), name="snr", ) - snr.a = params["snr.a"] - snr.b = params["snr.b"] - snr.c = params["snr.c"] - snr.d = params["snr.d"] - snr.n2 = params["snr.n2"] - snr.n1 = params["snr.n1"] - snr.n0 = params["snr.n0"] - snr.tau_ampa = params["snr.tau_ampa"] - snr.tau_gaba = params["snr.tau_gaba"] - snr.E_ampa = params["snr.E_ampa"] - snr.E_gaba = params["snr.E_gaba"] gpe = Population( params["gpe.size"], - Izhikevich2003_flexible_noisy_I, + Izhikevich2003NoisyBaseSNR( + a=params["gpe.a"], + b=params["gpe.b"], + c=params["gpe.c"], + d=params["gpe.d"], + n2=params["gpe.n2"], + n1=params["gpe.n1"], + n0=params["gpe.n0"], + tau_ampa=params["gpe.tau_ampa"], + tau_gaba=params["gpe.tau_gaba"], + E_ampa=params["gpe.E_ampa"], + E_gaba=params["gpe.E_gaba"], + noise=0, + tau_power=10, + snr_target=4, + rate_noise=100, + ), name="gpe", ) - gpe.a = params["gpe.a"] - gpe.b = params["gpe.b"] - gpe.c = params["gpe.c"] - gpe.d = params["gpe.d"] - gpe.n2 = params["gpe.n2"] - gpe.n1 = params["gpe.n1"] - gpe.n0 = params["gpe.n0"] - gpe.tau_ampa = params["gpe.tau_ampa"] - gpe.tau_gaba = params["gpe.tau_gaba"] - gpe.E_ampa = params["gpe.E_ampa"] - gpe.E_gaba = params["gpe.E_gaba"] thal = Population( params["thal.size"], - Izhikevich2003_flexible_noisy_I, + Izhikevich2003NoisyBaseSNR( + a=params["thal.a"], + b=params["thal.b"], + c=params["thal.c"], + d=params["thal.d"], + n2=params["thal.n2"], + n1=params["thal.n1"], + n0=params["thal.n0"], + tau_ampa=params["thal.tau_ampa"], + tau_gaba=params["thal.tau_gaba"], + E_ampa=params["thal.E_ampa"], + E_gaba=params["thal.E_gaba"], + noise=0, + tau_power=10, + snr_target=4, + rate_noise=100, + ), name="thal", ) - thal.a = params["thal.a"] - thal.b = params["thal.b"] - thal.c = params["thal.c"] - thal.d = params["thal.d"] - thal.n2 = params["thal.n2"] - thal.n1 = params["thal.n1"] - thal.n0 = 
params["thal.n0"] - thal.tau_ampa = params["thal.tau_ampa"] - thal.tau_gaba = params["thal.tau_gaba"] - thal.E_ampa = params["thal.E_ampa"] - thal.E_gaba = params["thal.E_gaba"] ###### PROJECTIONS ###### ### cortex go output @@ -111,7 +138,7 @@ def BGM_part_function(params): name="cor_exc__stn", ) cor_exc__stn.connect_fixed_probability( - probability=params["cor_exc__stn.probability"], weights=1 + probability=params["cor_exc__stn.probability"], weights=0 ) cor_inh__stn = Projection( @@ -121,7 +148,7 @@ def BGM_part_function(params): name="cor_inh__stn", ) cor_inh__stn.connect_fixed_probability( - probability=params["cor_inh__stn.probability"], weights=1 + probability=params["cor_inh__stn.probability"], weights=0 ) ### stn output @@ -132,7 +159,7 @@ def BGM_part_function(params): name="stn__snr", ) stn__snr.connect_fixed_probability( - probability=params["stn__snr.probability"], weights=1 + probability=params["stn__snr.probability"], weights=0 ) stn__gpe = Projection( pre=stn, @@ -141,7 +168,7 @@ def BGM_part_function(params): name="stn__gpe", ) stn__gpe.connect_fixed_probability( - probability=params["stn__gpe.probability"], weights=1 + probability=params["stn__gpe.probability"], weights=0 ) ### gpe proto output if params["general.more_complex"]: @@ -152,7 +179,7 @@ def BGM_part_function(params): # name="gpe__stn", # ) # gpe__stn.connect_fixed_probability( - # probability=params["gpe__stn.probability"], weights=1 + # probability=params["gpe__stn.probability"], weights=0 # ) gpe__snr = Projection( pre=gpe, @@ -161,7 +188,7 @@ def BGM_part_function(params): name="gpe__snr", ) gpe__snr.connect_fixed_probability( - probability=params["gpe__snr.probability"], weights=1 + probability=params["gpe__snr.probability"], weights=0 ) ### snr output snr__thal = Projection( @@ -171,7 +198,7 @@ def BGM_part_function(params): name="snr__thal", ) snr__thal.connect_fixed_probability( - probability=params["snr__thal.probability"], weights=1 + 
probability=params["snr__thal.probability"], weights=0 ) if params["general.more_complex"]: snr__snr = Projection( @@ -181,7 +208,7 @@ def BGM_part_function(params): name="snr__snr", ) snr__snr.connect_fixed_probability( - probability=params["snr__snr.probability"], weights=1 + probability=params["snr__snr.probability"], weights=0 ) @@ -267,6 +294,56 @@ def BGM_part_function(params): do_create=False, ) + # model.create() + # mon = CompNeuroMonitors( + # { + # pop_name: [ + # "I_noise", + # "I_signal", + # "I", + # "power_I_signal", + # "spike", + # ] + # for pop_name in ["stn"] + # } + # ) + # mon.start() + + # simulate(500) + # get_population("stn").I_app = 10 + # simulate(500) + + # recordings = mon.get_recordings() + # recording_times = mon.get_recording_times() + + # PlotRecordings( + # recordings=recordings, + # recording_times=recording_times, + # chunk=0, + # shape=(5, 1), + # plan={ + # "position": list(range(1, 5 + 1)), + # "compartment": ["stn"] * 5, + # "variable": [ + # "I_noise", + # "I_signal", + # "I", + # "power_I_signal", + # "spike", + # ], + # "format": [ + # "line", + # "line", + # "line", + # "line", + # "hybrid", + # ], + # }, + # figname="model_recordings_noise.png", + # # time_lim=(495, 505), + # ) + # quit() + ### model configurator should get target resting-state firing rates for the ### model populations one wants to configure and their afferents as input target_firing_rate_dict = { @@ -294,7 +371,7 @@ def BGM_part_function(params): ### obtain the maximum synaptic loads for the populations and the ### maximum weights of their afferent projections - model_conf.get_max_syn(cache=True) + model_conf.get_max_syn(cache=True, clear=False) ### now either set weights directly weights = { @@ -305,7 +382,7 @@ def BGM_part_function(params): "gpe": {"stn__gpe": 0.14456939170522481 * 0}, "snr": { "stn__snr": 0.14456939170522481 * 0, - "gpe__snr": 0.3258095138891384 * 0, + "gpe__snr": 0.04, "snr__snr": 0.3258095138891384 * 0, }, "thal": {"snr__thal": 
0.33855115254020435 * 0}, diff --git a/src/CompNeuroPy/neuron_models/__init__.py b/src/CompNeuroPy/neuron_models/__init__.py index d371537..84aa27b 100644 --- a/src/CompNeuroPy/neuron_models/__init__.py +++ b/src/CompNeuroPy/neuron_models/__init__.py @@ -23,6 +23,7 @@ Izhikevich2003NoisyAmpaOscillating, Izhikevich2003NoisyBase, Izhikevich2003NoisyBaseNonlin, + Izhikevich2003NoisyBaseSNR, ) from .final_models.izhikevich_2007_like_nm import ( Izhikevich2007, diff --git a/src/CompNeuroPy/neuron_models/final_models/izhikevich_2003_like_nm.py b/src/CompNeuroPy/neuron_models/final_models/izhikevich_2003_like_nm.py index 8f80dd0..30096bf 100644 --- a/src/CompNeuroPy/neuron_models/final_models/izhikevich_2003_like_nm.py +++ b/src/CompNeuroPy/neuron_models/final_models/izhikevich_2003_like_nm.py @@ -458,6 +458,151 @@ def __init__( self._instantiated.append(True) +class Izhikevich2003NoisyBaseSNR(Neuron): + """ + TEMPLATE + + [Izhikevich (2003)](https://doi.org/10.1109/TNN.2003.820440)-like neuron model with + additional conductance based synapses for AMPA and GABA currents and a noisy baseline + current defined by the signal-to-noise ratio (SNR). + + Parameters: + a (float, optional): + Time constant of the recovery variable u. + b (float, optional): + Sensitivity of the recovery variable u to the membrane potential v. + c (float, optional): + After-spike reset value of the membrane potential v. + d (float, optional): + After-spike change of the recovery variable u. + n2 (float, optional): + Factor of the quadratic equation of the membrane potential v. + n1 (float, optional): + Factor of the quadratic equation of the membrane potential v. + n0 (float, optional): + Factor of the quadratic equation of the membrane potential v. + tau_ampa (float, optional): + Time constant of the AMPA conductance. + tau_gaba (float, optional): + Time constant of the GABA conductance. + E_ampa (float, optional): + Reversal potential of the AMPA conductance. 
+ E_gaba (float, optional): + Reversal potential of the GABA conductance. + I_app (float, optional): + External applied current. + I_base (float, optional): + Baseline current. + noise (float, optional): + Can be set to 0 to disable the noise and 1 to enable it. (For other values + the noise is scaled accordingly but the target snr is only reached for 1.) + tau_power (float, optional): + Time constant of the power calculation. + snr_target (float, optional): + Target signal-to-noise ratio (SNR). + rate_noise (float, optional): + Rate of the Poisson distributed noise in the baseline current, i.e. how + often the baseline current is changed randomly. + + Variables to record: + - g_ampa + - g_gaba + - power_I_signal + - I_noise + - I_signal + - I + - v + - u + - r + """ + + # For reporting + _instantiated = [] + + def __init__( + self, + a: float = 0, + b: float = 0, + c: float = 0, + d: float = 0, + n2: float = 0, + n1: float = 0, + n0: float = 0, + tau_ampa: float = 1, + tau_gaba: float = 1, + E_ampa: float = 0, + E_gaba: float = 0, + I_app: float = 0, + I_base: float = 0, + noise: float = 1, + tau_power: float = 1, + snr_target: float = 1, + rate_noise: float = 0, + ): + # Create the arguments + parameters = f""" + ### izhikevich parameters + a = {a} : population + b = {b} : population + c = {c} : population + d = {d} : population + n2 = {n2} : population + n1 = {n1} : population + n0 = {n0} : population + ### synaptic currents + tau_ampa = {tau_ampa} : population + tau_gaba = {tau_gaba} : population + E_ampa = {E_ampa} : population + E_gaba = {E_gaba} : population + ### external currents + I_app = {I_app} + I_base = {I_base} + ### noise + noise = {noise} + tau_power = {tau_power} + snr_target = {snr_target} + rate_noise = {rate_noise} + """ + + super().__init__( + parameters=parameters, + equations=""" + ### input current + I_noise = noise*ite(Uniform(0, 1) * 1000.0 / dt > rate_noise, I_noise, Normal(0, 1)) + I_signal = I_base - neg(g_ampa*(v - E_ampa)) - 
pos(g_gaba*(v - E_gaba)) + I_app + ### scale noise to reach target snr, scale factor is: + ### scaling_factor = sqrt((power_I_signal/power_I_noise)/snr_target) + ### since power of N(0,1) is 1, we can scale the noise by: + ### scaling_factor = sqrt(power_I_signal/snr_target) + I = I_signal + I_noise * sqrt(power_I_signal/snr_target) + ### synaptic conductances + tau_ampa * dg_ampa/dt = -g_ampa + tau_gaba * dg_gaba/dt = -g_gaba + ### power of signal + tau_power * dpower_I_signal/dt = I_signal**2 - power_I_signal + ### membrane potential and recovery variable + dv/dt = n2 * v * v + n1 * v + n0 - u + I + du/dt = a * (b * v - u) + """, + spike=""" + v >= 30 + """, + reset=""" + v = c + u = u + d + """, + name="Izhikevich2003_noisy_I_snr", + description=""" + Neuron model from Izhikevich (2003). With additional conductance based + synapses for AMPA and GABA currents and a noisy baseline current with + a specified signal-to-noise ratio (SNR). + """, + ) + + # For reporting + self._instantiated.append(True) + + class Izhikevich2003NoisyBase(Neuron): """ TEMPLATE From 3e324321b705cbd7e2bcb6dfa5f685e4d4393bb7 Mon Sep 17 00:00:00 2001 From: olimaol Date: Tue, 4 Jun 2024 14:52:13 +0200 Subject: [PATCH 29/39] model_configurator: started new implementation --- .../model_configurator_cnp.py | 4190 +---------------- .../model_configurator_cnp_old.py | 4020 ++++++++++++++++ .../model_configurator_user.py | 5 +- 3 files changed, 4277 insertions(+), 3938 deletions(-) create mode 100644 src/CompNeuroPy/examples/model_configurator/model_configurator_cnp_old.py diff --git a/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py b/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py index 4d472a1..df2b2a9 100644 --- a/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py +++ b/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py @@ -1,21 +1,10 @@ -from CompNeuroPy import ( - cnp_clear, - compile_in_folder, - data_obj, - 
evaluate_expression_with_dict, - timing_decorator, - print_df, - save_variables, - load_variables, - clear_dir, - CompNeuroModel, - CompNeuroMonitors, - PlotRecordings, -) -from CompNeuroPy.system_functions import _find_folder_with_prefix -from CompNeuroPy.neuron_models import poisson_neuron +from CompNeuroPy.generate_model import CompNeuroModel +from CompNeuroPy import model_functions as mf + from ANNarchy import ( Population, + Projection, + Synapse, get_population, Monitor, Network, @@ -32,6 +21,8 @@ populations, ) +from ANNarchy.core import ConnectorMethods + # from ANNarchy.core.Global import _network import numpy as np from scipy.interpolate import interp1d, interpn @@ -52,3969 +43,298 @@ from scipy.stats import poisson from ANNarchy.extensions.bold import BoldMonitor from sklearn.linear_model import LinearRegression -from CompNeuroPy.examples.model_configurator.reduce_model import _CreateReducedModel -class model_configurator: +class ModelConfigurator: def __init__( self, model: CompNeuroModel, - target_firing_rate_dict, - interpolation_grid_points=10, - max_psp=10, - do_not_config_list=[], - print_guide=False, - I_app_variable="I_app", - ) -> None: - """ - Args: - model: CompNeuroPy generate_model object - it's not important if the model is created or compiled but after running - the model_configurator only the given model will exist so do not create - something else in ANNarchy! 
- - target_firing_rate_dict: dict - keys = population names of model which should be configured, values = target firing rates in Hz - - interpolation_grid_points: int, optional, default=10 - how many points should be used for the interpolation of the f-I-g curve on a single axis - - max_psp: int, optional, default=10 - maximum post synaptic potential in mV - - do_not_config_list: list, optional, default=[] - list with strings containing population names of populations which should not be configured - - print_guide: bool, optional, default=False - if you want to get information about what you could do with model_configurator - - I_app_variable: str, optional, default="I_app" - the name of the varaible in the populations which represents the applied current - TODO: not implemented yet, default value is always used - - Functions: - get_max_syn: - returns a dictionary with weight ranges for all afferent projections of the configured populations - """ - self.model = model - self.target_firing_rate_dict = target_firing_rate_dict - self.pop_name_list = list(target_firing_rate_dict.keys()) - for do_not_pop_name in do_not_config_list: - self.pop_name_list.remove(do_not_pop_name) - self.I_app_max_dict = {pop_name: None for pop_name in self.pop_name_list} - self.g_max_dict = {pop_name: None for pop_name in self.pop_name_list} - self.tau_dict = {pop_name: None for pop_name in self.pop_name_list} - self.nr_afferent_proj_dict = {pop_name: None for pop_name in self.pop_name_list} - self.net_many_dict = {pop_name: None for pop_name in self.pop_name_list} - self.net_single_dict = {pop_name: None for pop_name in self.pop_name_list} - self.net_single_v_clamp_dict = { - pop_name: None for pop_name in self.pop_name_list - } - self.max_weight_dict = {pop_name: None for pop_name in self.pop_name_list} - self.variable_init_sampler_dict = { - pop_name: None for pop_name in self.pop_name_list - } - self.f_I_g_curve_dict = {pop_name: None for pop_name in self.pop_name_list} - 
self.I_f_g_curve_dict = {pop_name: None for pop_name in self.pop_name_list} - self.afferent_projection_dict = { - pop_name: None for pop_name in self.pop_name_list - } - self.neuron_model_dict = {pop_name: None for pop_name in self.pop_name_list} - self.neuron_model_parameters_dict = { - pop_name: None for pop_name in self.pop_name_list - } - self.neuron_model_attributes_dict = { - pop_name: None for pop_name in self.pop_name_list - } - self.max_psp_dict = {pop_name: None for pop_name in self.pop_name_list} - self.possible_rates_dict = {pop_name: None for pop_name in self.pop_name_list} - self.extreme_firing_rates_df_dict = { - pop_name: None for pop_name in self.pop_name_list - } - self.prepare_psp_dict = {pop_name: None for pop_name in self.pop_name_list} - ### set max psp for a single spike - self.max_psp_dict = {pop_name: max_psp for pop_name in self.pop_name_list} - ### print things - self.log_exist = False - self.caller_name = "" - self.log("model configurator log:") - self.print_guide = print_guide - ### simulation things - self.simulation_dur = 5000 - self.simulation_dur_estimate_time = 50 - self.nr_neurons_per_net = 100 - - ### do things for which the model needs to be created (it will not be available later) - self.analyze_model() - - ### get reduced model - self.model_reduced = _CreateReducedModel( - model=self.model, - reduced_size=100, - do_create=False, - do_compile=False, - verbose=True, - ).model_reduced + target_firing_rate_dict: dict, + max_psp: float = 10.0, + do_not_config_list: list[str] = [], + print_guide: bool = False, + I_app_variable: str = "I_app", + ): + self._analyze_model = AnalyzeModel(model=model) + self._single_neuron_networks = CreateSingleNeuronNetworks() + self._reduced_model = CreateReducedModel() + self._v_rest = GetVRest() + self._max_syn = GetMaxSyn() + self._weight_templates = GetWeightTemplates() - ### print guide - self._p_g(_p_g_1) - def analyze_model(self): - """ - prepares the creation of the single neuron and many 
neuron networks - """ +class AnalyzeModel: + """ + Class to analyze the given model to be able to reproduce it. + """ + _connector_methods_dict = { + "One-to-One": ConnectorMethods.connect_one_to_one, + "All-to-All": ConnectorMethods.connect_all_to_all, + "Gaussian": ConnectorMethods.connect_gaussian, + "Difference-of-Gaussian": ConnectorMethods.connect_dog, + "Random": ConnectorMethods.connect_fixed_probability, + "Random Convergent": ConnectorMethods.connect_fixed_number_pre, + "Random Divergent": ConnectorMethods.connect_fixed_number_post, + "User-defined": ConnectorMethods.connect_with_func, + "MatrixMarket": ConnectorMethods.connect_from_matrix_market, + "Connectivity matrix": ConnectorMethods.connect_from_matrix, + "Sparse connectivity matrix": ConnectorMethods.connect_from_sparse, + "From File": ConnectorMethods.connect_from_file, + } + + def __init__(self, model: CompNeuroModel): ### clear ANNarchy and create the model - cnp_clear() - self.model.create(do_compile=False) - - ### get the neuron models from the model - for pop_name in self.pop_name_list: - self.neuron_model_dict[pop_name] = get_population(pop_name).neuron_type - self.neuron_model_parameters_dict[pop_name] = get_population( - pop_name - ).init.items() - self.neuron_model_attributes_dict[pop_name] = get_population( - pop_name - ).attributes - - ### do further things for which the model needs to be created - ### get the afferent projection dict for the populations (model needed!) 
- for pop_name in self.pop_name_list: - ### get afferent projection dict - self.log(f"get the afferent_projection_dict for {pop_name}") - self.afferent_projection_dict[pop_name] = self.get_afferent_projection_dict( - pop_name=pop_name - ) - - ### create dictionary with timeconstants of g_ampa and g_gaba of the populations - for pop_name in self.pop_name_list: - self.tau_dict[pop_name] = { - "ampa": get_population(pop_name).tau_ampa, - "gaba": get_population(pop_name).tau_gaba, - } - - ### get the post_pop_name_dict - self.post_pop_name_dict = {} - for proj_name in self.model.projections: - self.post_pop_name_dict[proj_name] = get_projection(proj_name).post.name + self._clear_model(model=model) - ### get the pre_pop_name_dict - self.pre_pop_name_dict = {} - for proj_name in self.model.projections: - self.pre_pop_name_dict[proj_name] = get_projection(proj_name).pre.name + ### get population info (eq, params etc.) + self._analyze_populations(model=model) - ### get the pre_pop_size_dict - self.pre_pop_size_dict = {} - for proj_name in self.model.projections: - self.pre_pop_size_dict[proj_name] = get_projection(proj_name).pre.size + ### get projection info + self._analyze_projections(model=model) - ### clear ANNarchy --> the model is not available anymore - cnp_clear() + def _clear_model(self, model: CompNeuroModel): + mf.cnp_clear(functions=False, neurons=True, synapses=True, constants=False) + model.create(do_compile=False) - def get_afferent_projection_dict(self, pop_name): + def _analyze_populations(self, model: CompNeuroModel): """ - creates a dictionary containing - projection_names - target firing rate - probability - size - target - for each afferent projection (=first level of keys) of the specified population + Get info of each population Args: - pop_name: str - populaiton name - - return: dict of dicts - """ - ### check if model is available - if not self.model.created: - error_msg = "ERROR model_configurator get_afferent_projection_dict: the model has to be 
created!" - self.log(error_msg) - raise AssertionError(error_msg) - ### get projection names - afferent_projection_dict = {} - afferent_projection_dict["projection_names"] = [] - for projection in self.model.projections: - if get_projection(projection).post.name == pop_name: - afferent_projection_dict["projection_names"].append(projection) - - self.nr_afferent_proj_dict[pop_name] = len( - afferent_projection_dict["projection_names"] - ) - - ### get target firing rates resting-state for afferent projections - afferent_projection_dict["target firing rate"] = [] - afferent_projection_dict["probability"] = [] - afferent_projection_dict["size"] = [] - afferent_projection_dict["target"] = [] - for projection in afferent_projection_dict["projection_names"]: - pre_pop_name = get_projection(projection).pre.name - ### target firing rate - afferent_projection_dict["target firing rate"].append( - self.target_firing_rate_dict[pre_pop_name] - ) - ### probability, _connection_args only if connect_fixed_prob (i.e. 
connector_name==Random) - afferent_projection_dict["probability"].append( - get_projection(projection)._connection_args[0] - ) - ### size - afferent_projection_dict["size"].append(len(get_projection(projection).pre)) - ### target type - afferent_projection_dict["target"].append(get_projection(projection).target) - - return afferent_projection_dict - - def get_max_syn(self, cache=True, clear=False): - """ - get the weight dictionary for all populations given in target_firing_rate_dict - keys = population names, values = dict which contain values = afferent projection names, values = lists with w_min and w_max - """ - ### clear cache to create new cache - if cache and clear: - self.log("clear cache of get_max_syn") - clear_dir("./.model_configurator_cache/get_max_syn") - - ### check cache for get_max_syn - cache_worked = False - if cache: - try: - loaded_variables_dict = load_variables( - name_list=[ - "net_single_dict", - "prepare_psp_dict", - "I_app_max_dict", - "g_max_dict", - "syn_contr_dict", - "syn_load_dict", - ], - path="./.model_configurator_cache/get_max_syn", - ) - ( - self.net_single_dict, - self.prepare_psp_dict, - self.I_app_max_dict, - self.g_max_dict, - self.syn_contr_dict, - self.syn_load_dict, - ) = loaded_variables_dict.values() - ### create dummy network for single network and actually create network for single_v_clamp (single_v_clamp needed in get_base) - self.create_single_neuron_networks( - single_net=False, single_net_v_clamp=True, prepare_psp=False - ) - cache_worked = True - except: - cache_worked = False - - if not cache_worked: - ### create single neuron networks - self.create_single_neuron_networks() - - ### get max synaptic things with single neuron networks - for pop_name in self.pop_name_list: - self.log(pop_name) - ### get max I_app and max weights (i.e. 
g_ampa, g_gaba) - txt = f"get max I_app, g_ampa and g_gaba using network_single for {pop_name}" - print(txt) - self.log(txt) - I_app_max, g_ampa_max, g_gaba_max = self.get_max_syn_currents( - pop_name=pop_name, - ) - - self.I_app_max_dict[pop_name] = I_app_max - self.g_max_dict[pop_name] = { - "ampa": g_ampa_max, - "gaba": g_gaba_max, - } - - ### obtain the synaptic contributions assuming max weights - self.syn_contr_dict = {} - for pop_name in self.pop_name_list: - self.syn_contr_dict[pop_name] = {} - for target_type in ["ampa", "gaba"]: - self.log(f"get synaptic contributions for {pop_name} {target_type}") - self.syn_contr_dict[pop_name][target_type] = ( - self.get_syn_contr_dict( - pop_name=pop_name, - target_type=target_type, - use_max_weights=True, - normalize=True, - ) - ) - - ### create the synaptic load template dict - self.syn_load_dict = {} - for pop_name in self.pop_name_list: - self.syn_load_dict[pop_name] = [] - if "ampa" in self.afferent_projection_dict[pop_name]["target"]: - self.syn_load_dict[pop_name].append("ampa_load") - if "gaba" in self.afferent_projection_dict[pop_name]["target"]: - self.syn_load_dict[pop_name].append("gaba_load") - - ### save variables in cache - ### obtain variables which should be cached / are needed later - ### do not cache ANNarchy objects - net_single_dict_to_cache = {} - for key, val in self.net_single_dict.items(): - net_single_dict_to_cache[key] = { - "variable_init_sampler": val["variable_init_sampler"] - } - save_variables( - variable_list=[ - net_single_dict_to_cache, - self.prepare_psp_dict, - self.I_app_max_dict, - self.g_max_dict, - self.syn_contr_dict, - self.syn_load_dict, - ], - name_list=[ - "net_single_dict", - "prepare_psp_dict", - "I_app_max_dict", - "g_max_dict", - "syn_contr_dict", - "syn_load_dict", - ], - path="./.model_configurator_cache/get_max_syn", - ) - - ### only return synaptic contributions smaller 1 - template_synaptic_contribution_dict = ( - 
self.get_template_synaptic_contribution_dict(given_dict=self.syn_contr_dict) - ) - - self._p_g( - _p_g_after_get_weights( - template_weight_dict=self.g_max_dict, - template_synaptic_load_dict=self.syn_load_dict, - template_synaptic_contribution_dict=template_synaptic_contribution_dict, + model (CompNeuroModel): + Model to be analyzed + """ + ### values of the paramters and variables of the population's neurons, keys are + ### the names of paramters and variables + self.neuron_model_attr_dict: dict[str, dict] = {} + ### arguments of the __init__ function of the Neuron class + self.neuron_model_init_parameter_dict: dict[str, dict] = {} + ### arguments of the __init__ function of the Population class + self.pop_init_parameter_dict: dict[str, dict] = {} + + ### for loop over all populations + for pop_name in model.populations: + pop: Population = get_population(pop_name) + ### get the neuron model attributes (parameters/variables) + ### old: self.neuron_model_parameters_dict + ### old: self.neuron_model_attributes_dict = keys() + self.neuron_model_attr_dict[pop.name] = pop.init + ### get a dict of all arguments of the __init__ function of the Neuron + ### ignore self + ### old: self.neuron_model_dict[pop_name] + init_params = inspect.signature(Neuron.__init__).parameters + self.neuron_model_init_parameter_dict[pop.name] = { + param: getattr(pop.neuron_type, param) + for param in init_params + if param != "self" + } + ### get a dict of all arguments of the __init__ function of the Population + ### ignore self, storage_order and copied + init_params = inspect.signature(Population.__init__).parameters + self.pop_init_parameter_dict[pop.name] = { + param: getattr(pop, param) + for param in init_params + if param != "self" and param != "storage_order" and param != "copied" + } + ### get the afferent projections dict of the population TODO + self.afferent_projection_dict[pop_name] = ( + self._get_afferent_projection_dict(pop_name=pop_name) ) - ) - return self.max_weight_dict 
- - def get_syn_contr_dict( - self, pop_name: str, target_type: str, use_max_weights=False, normalize=False - ) -> dict: - """ - get the relative synaptic contribution list of a population for a given target type - weights are obtained from the afferent_projection_dict, if there are no weights --> use max weights - - Args: - pop_name: str - population name - - target_type: str - target type of the afferent projections of the population - - use_max_weights: bool, optional, default=False - if True the max weights are used, if False the weights from the afferent_projection_dict are used - - Returns: - rel_syn_contr_dict: dict - keys = projection names, values = relative synaptic contributions - """ - ### g_max have to be obtained already - assert not ( - isinstance(self.g_max_dict[pop_name][target_type], type(None)) - ), "ERROR, get_rel_syn_contr_list: g_max have to be obtained already" - ### get list of relative synaptic contributions - proj_name_list = [] - rel_syn_contr_list = [] - for proj_name in self.afferent_projection_dict[pop_name]["projection_names"]: - proj_dict = self.get_proj_dict(proj_name) - proj_target_type = proj_dict["proj_target_type"] - weight = proj_dict["proj_weight"] - if isinstance(weight, type(None)) or use_max_weights: - weight = self.g_max_dict[pop_name][target_type] - if proj_target_type == target_type: - rel_syn_contr_list.append(proj_dict["spike_frequency"] * weight) - proj_name_list.append(proj_name) - ### normalize the list - if normalize: - rel_syn_contr_arr = np.array(rel_syn_contr_list) - rel_syn_contr_arr = rel_syn_contr_arr / np.sum(rel_syn_contr_arr) - rel_syn_contr_list = rel_syn_contr_arr.tolist() - ### combine proj_name_list and rel_syn_contr_list to an dict - rel_syn_contr_dict = { - proj_name: rel_syn_contr - for proj_name, rel_syn_contr in zip(proj_name_list, rel_syn_contr_list) - } - - return rel_syn_contr_dict - - def create_single_neuron_networks( - self, single_net=True, single_net_v_clamp=True, prepare_psp=True - ): - 
### clear ANNarchy - cnp_clear() - - ### create the single neuron networks - for pop_name in self.pop_name_list: - txt = f"create network_single for {pop_name}" - print(txt) - self.log(txt) - ### the network with the standard neuron - if single_net: - self.net_single_dict[pop_name] = self.create_net_single( - pop_name=pop_name - ) - else: - ### dummy network for the pop - net_single_dummy = Network() - pop_single_dummy = Population( - 1, - neuron=Neuron(equations="r=1"), - name=f"dummy_single_{pop_name}", - ) - mon_single_dummy = Monitor(pop_single_dummy, ["r"]) - net_single_dummy.add([pop_single_dummy, mon_single_dummy]) - - ### the network with the voltage clamp version neuron - if single_net_v_clamp: - self.net_single_v_clamp_dict[pop_name] = ( - self.create_net_single_voltage_clamp(pop_name=pop_name) - ) - else: - ### dummy network for the pop - net_single_v_clamp_dummy = Network() - pop_single_v_clamp_dummy = Population( - 1, - neuron=Neuron(equations="r=1"), - name=f"dummy_single_v_clamp_{pop_name}", - ) - mon_single_v_clamp_dummy = Monitor(pop_single_v_clamp_dummy, ["r"]) - net_single_v_clamp_dummy.add( - [pop_single_v_clamp_dummy, mon_single_v_clamp_dummy] - ) - - ### get v_rest and correspodning I_app_hold - if prepare_psp: - self.prepare_psp_dict[pop_name] = self.find_v_rest_for_psp( - pop_name, do_plot=False - ) - - def create_net_single(self, pop_name): - """ - creates a network with the neuron type of the population given by pop_name - the number of neurons is 1 - - Args: - pop_name: str - population name - """ - - ### for stop condition for recording psp --> add v_before_psp and v_psp_thresh to equations/parameters - - ### get the initial arguments of the neuron - neuron_model = self.neuron_model_dict[pop_name] - ### names of arguments - init_arguments_name_list = list(Neuron.__init__.__code__.co_varnames) - init_arguments_name_list.remove("self") - init_arguments_name_list.remove("name") - init_arguments_name_list.remove("description") - ### 
arguments dict - init_arguments_dict = { - init_arguments_name: getattr(neuron_model, init_arguments_name) - for init_arguments_name in init_arguments_name_list - } - ### add v_before_psp=v at the beginning of the equations - equations_line_split_list = str(init_arguments_dict["equations"]).splitlines() - equations_line_split_list.insert(0, "v_before_psp = v") - init_arguments_dict["equations"] = "\n".join(equations_line_split_list) - ### add v_psp_thresh to the parameters - parameters_line_split_list = str(init_arguments_dict["parameters"]).splitlines() - parameters_line_split_list.append("v_psp_thresh = 0 : population") - init_arguments_dict["parameters"] = "\n".join(parameters_line_split_list) - - ### create neuron model with new equations - neuron_model_new = Neuron(**init_arguments_dict) - - ### create the single neuron population - single_neuron = Population( - 1, - neuron=neuron_model_new, - name=f"single_neuron_{pop_name}", - stop_condition=f"((abs(v-v_psp_thresh)<0.01) and (abs(v_before_psp-v_psp_thresh)>0.01)): any", - ) - ### set the attributes of the neuron - for attr_name, attr_val in self.neuron_model_parameters_dict[pop_name]: - setattr(single_neuron, attr_name, attr_val) - - ### create Monitor for single neuron - mon_single = Monitor(single_neuron, ["spike", "v"]) - - ### create network with single neuron - net_single = Network() - net_single.add([single_neuron, mon_single]) - compile_in_folder( - folder_name=f"single_net_{pop_name}", silent=True, net=net_single - ) - - ### get the values of the variables after 2000 ms simulation - variable_init_sampler = self.get_init_neuron_variables( - net_single, net_single.get(single_neuron) - ) - - ### network dict - net_single_dict = { - "net": net_single, - "population": net_single.get(single_neuron), - "monitor": net_single.get(mon_single), - "variable_init_sampler": variable_init_sampler, - } - - return net_single_dict - - def get_init_neuron_variables(self, net, pop): - """ - get the variables of the 
given population after simulating 2000 ms - - Args: - net: ANNarchy network - the network which contains the pop - - pop: ANNarchy population - the population whose variables are obtained - - """ - ### reset neuron and deactivate input - net.reset() - pop.I_app = 0 - - ### 10000 ms init duration - net.simulate(10000) - - ### simulate 2000 ms and check every dt the variables of the neuron - time_steps = int(2000 / dt()) - var_name_list = list(pop.variables) - var_arr = np.zeros((time_steps, len(var_name_list))) - for time_idx in range(time_steps): - net.simulate(dt()) - get_arr = np.array([getattr(pop, var_name) for var_name in pop.variables]) - var_arr[time_idx, :] = get_arr[:, 0] - net.reset() - - ### create a sampler with the data samples of from the 1000 ms simulation - sampler = self.var_arr_sampler(var_arr, var_name_list) - return sampler - - def create_net_single_voltage_clamp(self, pop_name): + def _analyze_projections(self, model: CompNeuroModel): """ - creates a network with the neuron type of the population given by pop_name - the number of neurons is 1 - - The equation wich defines the chagne of v is set to zero and teh change of v - is stored in the new variable v_clamp_rec + Get info of each projection Args: - pop_name: str - population name - """ - - ### get the initial arguments of the neuron - neuron_model = self.neuron_model_dict[pop_name] - ### names of arguments - init_arguments_name_list = list(Neuron.__init__.__code__.co_varnames) - init_arguments_name_list.remove("self") - init_arguments_name_list.remove("name") - init_arguments_name_list.remove("description") - ### arguments dict - init_arguments_dict = { - init_arguments_name: getattr(neuron_model, init_arguments_name) - for init_arguments_name in init_arguments_name_list - } - ### get new equations for voltage clamp - equations_new = self.get_voltage_clamp_equations(init_arguments_dict, pop_name) - init_arguments_dict["equations"] = equations_new - ### add v_clamp_rec_thresh to the 
parameters - parameters_line_split_list = str(init_arguments_dict["parameters"]).splitlines() - parameters_line_split_list.append("v_clamp_rec_thresh = 0 : population") - init_arguments_dict["parameters"] = "\n".join(parameters_line_split_list) - - ### for each afferent population create a binomial spike train equation string - ### add it to the equations - ### and add the related parameters to the parameters - - ### get the afferent populations - afferent_population_list = [] - proj_target_type_list = [] - for proj_name in self.afferent_projection_dict[pop_name]["projection_names"]: - proj_dict = self.get_proj_dict(proj_name) - pre_pop_name = proj_dict["pre_pop_name"] - afferent_population_list.append(pre_pop_name) - proj_target_type_list.append(proj_dict["proj_target_type"]) - - ### split the equations and parameters string - equations_line_split_list = str(init_arguments_dict["equations"]).splitlines() - - parameters_line_split_list = str(init_arguments_dict["parameters"]).splitlines() - - ### add the binomial spike train equations and parameters - ( - equations_line_split_list, - parameters_line_split_list, - ) = self.add_binomial_input( - equations_line_split_list, - parameters_line_split_list, - afferent_population_list, - proj_target_type_list, - ) - - ### combine string lines to multiline strings again - init_arguments_dict["parameters"] = "\n".join(parameters_line_split_list) - init_arguments_dict["equations"] = "\n".join(equations_line_split_list) - - ### create neuron model with new equations - neuron_model_new = Neuron(**init_arguments_dict) - - ### create the single neuron population - single_neuron_v_clamp = Population( - 1, - neuron=neuron_model_new, - name=f"single_neuron_v_clamp_{pop_name}", - ) - - ### set the attributes of the neuron - for attr_name, attr_val in self.neuron_model_parameters_dict[pop_name]: - setattr(single_neuron_v_clamp, attr_name, attr_val) - - ### create Monitor for single neuron - mon_single = Monitor(single_neuron_v_clamp, 
["v_clamp_rec_sign"]) - - ### create network with single neuron - net_single = Network() - net_single.add([single_neuron_v_clamp, mon_single]) - compile_in_folder( - folder_name=f"single_v_clamp_net_{pop_name}", silent=True, net=net_single - ) - - ### network dict - net_single_dict = { - "net": net_single, - "population": net_single.get(single_neuron_v_clamp), - "monitor": net_single.get(mon_single), - } - - return net_single_dict - - def find_v_rest_for_psp(self, pop_name, do_plot=False): - """ - using both single networks to find v_rest and I_app_hold - """ - - ### find v where dv/dt is minimal with voltage clamp network (best = 0, it can only be >= 0) - self.log("search v_rest with y(X) = delta_v_2000(v=X) using grid search") - v_arr = np.linspace(-90, -20, 200) - v_clamp_arr = np.array( - [ - self.get_v_clamp_2000( - v=X_val, - net=self.net_single_v_clamp_dict[pop_name]["net"], - population=self.net_single_v_clamp_dict[pop_name]["population"], - ) - for X_val in v_arr - ] - ) - v_rest = np.min(v_arr[argrelmin(v_clamp_arr)[0]]) - if do_plot: - plt.figure() - plt.plot(v_arr, v_clamp_arr) - plt.axvline(v_rest, color="k") - plt.axhline(0, color="k", ls="dashed") - plt.savefig(f"v_clamp_{pop_name}.png") - plt.close("all") - - ### do again the simulation with the obtained v_rest to get the stady state values - detla_v_rest = ( - self.get_v_clamp_2000( - v=v_rest, - net=self.net_single_v_clamp_dict[pop_name]["net"], - population=self.net_single_v_clamp_dict[pop_name]["population"], - ) - * dt() - ) - obtained_variables = { - var_name: getattr( - self.net_single_v_clamp_dict[pop_name]["population"], var_name - ) - for var_name in self.net_single_v_clamp_dict[pop_name][ - "population" - ].variables - } - self.log( - f"for {pop_name} found v_rest={v_rest} with delta_v_2000(v=v_rest)={detla_v_rest}" - ) - - ### check if the neuron stays at v_rest with normal neuron - ### if it stays --> use new value as v_rest (its even a bit finer as before) - ### if it not stays --> 
find I_app which holds the membrane potential constant - v_rest_arr = self.get_new_v_rest_2000(pop_name, obtained_variables) - v_rest_arr_is_const = ( - np.std(v_rest_arr, axis=0) - <= np.mean(np.absolute(v_rest_arr), axis=0) / 1000 - ) - if v_rest_arr_is_const: - ### v_rest found, no I_app_hold needed - v_rest = v_rest_arr[-1] - I_app_hold = 0 - self.log(f"final v_rest = {v_rest_arr[-1]}") - else: - ### there is no v_rest i.e. neuron is self-active --> find smallest negative I_app to silence neuron - self.log( - "neuron seems to be self-active --> find smallest I_app to silence the neuron" - ) - - ### negative current initially reduces v - ### then v climbs back up - ### check if the second half of v is constant if yes fine if not increase negative I_app - ### find I_app_hold with incremental_continuous_bound_search - self.log("search I_app_hold with y(X) = CHANGE_OF_V(I_app=X)") - I_app_hold = -self.incremental_continuous_bound_search( - y_X=lambda X_val: self.get_v_rest_arr_const( - pop_name=pop_name, - obtained_variables=obtained_variables, - I_app=-X_val, - ), - y_bound=0, - X_0=0, - y_0=self.get_v_rest_arr_const( - pop_name=pop_name, - obtained_variables=obtained_variables, - I_app=0, - ), - X_increase=detla_v_rest, - accept_non_dicontinuity=True, - bound_type="greater", - ) - ### again simulate the neuron with the obtained I_app_hold to get the new v_rest - v_rest_arr = self.get_new_v_rest_2000( - pop_name, obtained_variables, I_app=I_app_hold - ) - v_rest = v_rest_arr[-1] - self.log(f"I_app_hold = {I_app_hold}, resulting v_rest = {v_rest}") - - ### get the sampler for the initial variables - variable_init_sampler = self.get_init_neuron_variables_for_psp( - net=self.net_single_dict[pop_name]["net"], - pop=self.net_single_dict[pop_name]["population"], - v_rest=v_rest, - I_app_hold=I_app_hold, - ) - - return { - "v_rest": v_rest, - "I_app_hold": I_app_hold, - "variable_init_sampler": variable_init_sampler, - } - - def get_v_rest_arr_const( - self, pop_name, 
obtained_variables, I_app, return_bool=False - ): - """ - sets I_app and obtained varaibles in single neuron - simulates 2000 ms and returns how much the v changes - 0 = constant, negative = not constant - """ - v_rest_arr = self.get_new_v_rest_2000(pop_name, obtained_variables, I_app=I_app) - v_rest_arr = v_rest_arr[len(v_rest_arr) // 2 :] - - if return_bool: - return 0 <= np.mean(np.absolute(v_rest_arr), axis=0) / 1000 - np.std( - v_rest_arr, axis=0 - ) - else: - return np.mean(np.absolute(v_rest_arr), axis=0) / 1000 - np.std( - v_rest_arr, axis=0 - ) - - def get_new_v_rest_2000( - self, pop_name, obtained_variables, I_app=None, do_plot=True - ): - """ - use single_net to simulate 2000 ms and return v - """ - net: Network = self.net_single_dict[pop_name]["net"] - pop = self.net_single_dict[pop_name]["population"] - monitor = self.net_single_dict[pop_name]["monitor"] - net.reset() - ### set variables - for var_name, var_val in obtained_variables.items(): - if var_name in pop.variables: - setattr(pop, var_name, var_val) - if not isinstance(I_app, type(None)): - pop.I_app = I_app - ### simulate - net.simulate(2000) - v_arr = monitor.get("v")[:, 0] - - if do_plot: - plt.figure() - plt.title(f"{pop.I_app}") - plt.plot(v_arr) - plt.savefig(f"tmp_v_rest_{pop_name}.png") - plt.close("all") - - return v_arr - - def get_nr_spikes_from_v_rest_2000( - self, pop_name, obtained_variables, I_app=None, do_plot=True - ): - """ - use single_net to simulate 2000 ms and return number spikes - """ - net = self.net_single_dict[pop_name]["net"] - pop = self.net_single_dict[pop_name]["population"] - mon = self.net_single_dict[pop_name]["monitor"] - net.reset() - ### set variables - for var_name, var_val in obtained_variables.items(): - if var_name in pop.variables: - setattr(pop, var_name, var_val) - if not isinstance(I_app, type(None)): - pop.I_app = I_app - ### simulate - simulate(2000) - ### get spikes - spike_dict = mon.get("spike") - nr_spikes = len(spike_dict[0]) - return 
nr_spikes - - def log(self, txt): - caller_frame = inspect.currentframe().f_back - caller_name = caller_frame.f_code.co_name - - if caller_name == self.caller_name: - txt = f"{textwrap.indent(str(txt), ' ')}" - else: - txt = f"[{caller_name}]:\n{textwrap.indent(str(txt), ' ')}" - - self.caller_name = caller_name - - if self.log_exist: - with open("model_conf_log", "a") as f: - print(txt, file=f) - else: - with open("model_conf_log", "w") as f: - print(txt, file=f) - self.log_exist = True - - def _p_g(self, txt): - """ - prints guiding text - """ - print_width = min([os.get_terminal_size().columns, 80]) - - if self.print_guide: - print("\n[model_configurator guide]:") - for line in txt.splitlines(): - wrapped_text = textwrap.fill( - line, width=print_width - 5, replace_whitespace=False - ) - wrapped_text = textwrap.indent(wrapped_text, " |") - print(wrapped_text) - print("") - - def _p_w(self, txt): - """ - prints warning - """ - print_width = min([os.get_terminal_size().columns, 80]) - - print("\n[model_configurator WARNING]:") - for line in str(txt).splitlines(): - wrapped_text = textwrap.fill( - line, width=print_width - 5, replace_whitespace=False - ) - wrapped_text = textwrap.indent(wrapped_text, " |") - print(wrapped_text) - print("") - - def get_base(self): - """ - Obtain the baseline currents for the configured populations to obtian the target firing rates - with the currently set weights, set by .set_weights or .set_syn_load - - return: - I_base_dict, dict - Dictionary with baseline curretns for all configured populations. - """ - ### TODO: current problem: model is without noise... but how large and for what is noise??? - ### neurons all behave equally (e.g. 
spike at same time), this changes due to different inputs ("noise" in input) - ### this could also be prevented by initializing all neurons differently (along there periodic u-v curve) - ### or by adding noise to conductances or baseline current - ### thenthe question is, how is the relation between added noise and the noise in the input - ### TODO: I've decided for noise depending on the input current (scaled by specified SNR) - ### without input there is no noise, decorrelate neurons by random initial values - ### TODO: current idea is: to find max syn things the noise has to be deactivated and to find baseline currents the noise has to be activated - ### so single neuron networks should be without noise, an then here noise should be activated, maybe requirement for model conf will be a variable called noise to turn on and off noise - for pop_name in self.pop_name_list: - for proj_name in self.afferent_projection_dict[pop_name][ - "projection_names" - ]: - proj_dict = self.get_proj_dict(proj_name) - print(f"set weight of {proj_name} to {proj_dict['proj_weight']}") - - ### set the weights of the normal model - model = self._set_weights_of_model(mode=0) - - ### set initial variables of populations (do not initialize all neurons the same) - for pop_name in self.pop_name_list: - population = get_population(pop_name) - variable_init_sampler = self.net_single_dict[pop_name][ - "variable_init_sampler" - ] - self.set_init_variables(population, variable_init_sampler) - - ### record and simulate - mon_dict = {pop_name: ["spike"] for pop_name in model.populations} - mon = CompNeuroMonitors(mon_dict=mon_dict) - mon.start() - simulate(1000) - recordings = mon.get_recordings() - recording_times = mon.get_recording_times() - plan = { - "position": list(range(1, len(model.populations) + 1)), - "compartment": model.populations, - "variable": ["spike"] * len(model.populations), - "format": ["hybrid"] * len(model.populations), - } - PlotRecordings( - 
figname="model_conf_normal_model.png", - recordings=recordings, - recording_times=recording_times, - shape=(len(plan["position"]), 1), - plan=plan, - ) - - ### set the weights of the reduced model - model = self._set_weights_of_model(mode=1) - - ### set initial variables of populations (do not initialize all neurons the same) - for pop_name in self.pop_name_list: - population = get_population(f"{pop_name}_reduced") - variable_init_sampler = self.net_single_dict[pop_name][ - "variable_init_sampler" - ] - self.set_init_variables(population, variable_init_sampler) - - ### record and simulate - mon_dict = {f"{pop_name}_reduced": ["spike"] for pop_name in mon_dict.keys()} - mon = CompNeuroMonitors(mon_dict=mon_dict) - mon.start() - simulate(1000) - recordings = mon.get_recordings() - recording_times = mon.get_recording_times() - plan["compartment"] = [ - f"{pop_name}_reduced" for pop_name in plan["compartment"] - ] - PlotRecordings( - figname="model_conf_reduced_model.png", - recordings=recordings, - recording_times=recording_times, - shape=(len(plan["position"]), 1), - plan=plan, - ) - ### next check if populations which should not be tuned have the correct firing rates, if not warning that the populations are tuned but if the rate of the not tuned populations changes this might also change the tuned populations' rates - ### next activate noise and then performe search algorithm ith reduced model with input varaibles = I_app of populations and output variables = firing rates of populations - ### TODO get base with reduced model - quit() - - def _set_weights_of_model(self, mode=0): - """ - Set the weights of the model to the current weights from the - afferent_projection_dict. 
- """ - ### clear ANNarchy - cnp_clear() - - ### create the original model - if mode == 0: - model = self.model - elif mode == 1: - model = self.model_reduced - model.create() - - for pop_name in self.pop_name_list: - for proj_name in self.afferent_projection_dict[pop_name][ - "projection_names" - ]: - if mode == 0: - ### set weght of projection - proj_dict = self.get_proj_dict(proj_name) - get_projection(proj_name).w = proj_dict["proj_weight"] - elif mode == 1: - ### set weight of the projection in the conductance-calculating - ### input current population - proj_dict = self.get_proj_dict(proj_name) - proj_weight = proj_dict["proj_weight"] - proj_target_type = proj_dict["proj_target_type"] - setattr( - get_population(f"{pop_name}_{proj_target_type}_aux"), - f"weights_{proj_name}", - proj_weight, - ) - return model - - def find_base_current(self, net_many_dict): - """ - search through whole I_app space - for each population simulate a network with 10000 neurons, each neuron has a different I_app value - g_ampa and g_gaba values are internally created using - the weigths stored in the afferent_projection dict - and target firing rates stored in the target_firing_rate_dict - """ - - I_app_arr_list = [] - weight_list_list = [] - pre_pop_name_list_list = [] - rate_list_list = [] - eff_size_list_list = [] - ### get lists which define the current weights to the afferent populations - ### get lists which define the current rates of the afferent populations - ### get lists with the names of the afferent populations - ### the length of the lists has to be the number of networks i.e. 
the number of populations - for pop_name in self.pop_name_list: - ### get the weights, names, rates of the afferent populations - weight_list = self.afferent_projection_dict[pop_name]["weights"] - proj_name_list = self.afferent_projection_dict[pop_name]["projection_names"] - pre_pop_name_list = [ - self.get_proj_dict(proj_name)["pre_pop_name"] - for proj_name in proj_name_list - ] - rate_list = self.get_rate_list_for_pop(pop_name) - eff_size_list = self.get_eff_size_list_for_pop(pop_name) - ### get correct magnitude of I_app using the voltage clamp networks - I_app_magnitude = self.get_I_app_magnitude( - pop_name, - pre_pop_name_list=pre_pop_name_list, - eff_size_list=eff_size_list, - rate_list=rate_list, - weight_list=weight_list, - ) - ### get the I_app_arr - I_app_arr = np.linspace( - I_app_magnitude, - I_app_magnitude + self.I_app_max_dict[pop_name], - self.nr_neurons_per_net, - ) - ### append these lists to the list for all post populations i.e. networks - weight_list_list.append(weight_list) - pre_pop_name_list_list.append(pre_pop_name_list) - rate_list_list.append(rate_list) - eff_size_list_list.append(eff_size_list) - I_app_arr_list.append(I_app_arr) - - ### create list with variable_init_samplers of populations - variable_init_sampler_list = [ - self.net_single_dict[pop_name]["variable_init_sampler"] - for pop_name in self.pop_name_list - ] - - ### get firing rates obtained with all I_app values - ### rates depend on the current weights and the current target firing rates - nr_networks = len(self.pop_name_list) - possible_firing_rates_list_list = parallel_run( - method=get_rate_parallel, - networks=net_many_dict["network_list"], - **{ - "population": net_many_dict["population_list"], - "variable_init_sampler": variable_init_sampler_list, - "monitor": net_many_dict["monitor_list"], - "I_app_arr": I_app_arr_list, - "weight_list": weight_list_list, - "pre_pop_name_list": pre_pop_name_list_list, - "rate_list": rate_list_list, - "eff_size_list": 
eff_size_list_list, - "simulation_dur": [self.simulation_dur] * nr_networks, - }, - ) + model (CompNeuroModel): + Model to be analyzed + """ + ### parameters of the __init__ function of the Projection class + self.proj_init_parameter_dict: dict[str, dict] = {} + ### parameters of the __init__ function of the Synapse class + self.synapse_init_parameter_dict: dict[str, dict] = {} + ### values of the paramters and variables of the synapse, keys are the names of + ### paramters and variables + self.synapse_model_attr_dict: dict[str, dict] = {} + ### connector functions of the projections + self.connector_function_dict: dict = {} + ### parameters of the connector functions of the projections + self.connector_function_parameter_dict: dict = {} + ### names of pre- and post-synaptic populations of the projections + ### old: self.post_pop_name_dict and self.pre_pop_name_dict + self.pre_post_pop_name_dict: dict[str, tuple] = {} + ### sizes of pre- and post-synaptic populations of the projections + ### old: self.pre_pop_size_dict + self.pre_post_pop_size_dict: dict[str, tuple] = {} + + ### loop over all projections + for proj_name in model.projections: + proj: Projection = get_projection(proj_name) + ### get the synapse model attributes (parameters/variables) + self.synapse_model_attr_dict[proj.name] = proj.init + ### get a dict of all paramters of the __init__ function of the Synapse + init_params = inspect.signature(Synapse.__init__).parameters + self.synapse_init_parameter_dict[proj.name] = { + param: getattr(proj.synapse_type, param) + for param in init_params + if param != "self" + } + ### get a dict of all paramters of the __init__ function of the Projection + init_params = inspect.signature(Projection.__init__).parameters + self.proj_init_parameter_dict[proj_name] = { + param: getattr(proj, param) + for param in init_params + if param != "self" and param != "synapse" and param != "copied" + } - ### catch if target firing rate in any population cannot be reached - 
I_app_best_dict = {} - target_firing_rate_changed = False - for pop_idx, pop_name in enumerate(self.pop_name_list): - target_firing_rate = self.target_firing_rate_dict[pop_name] - possible_firing_rates_arr = np.array( - possible_firing_rates_list_list[pop_idx] - ) - I_app_arr = I_app_arr_list[pop_idx] - print(f"firing rates for pop {pop_name}") - print(f"{I_app_arr}") - print(f"{possible_firing_rates_arr}\n") - possible_f_min = possible_firing_rates_arr.min() - possible_f_max = possible_firing_rates_arr.max() - if not ( - target_firing_rate >= possible_f_min - and target_firing_rate <= possible_f_max + ### get the connector function of the projection and its parameters + ### raise errors for not supported connector functions + if ( + proj.connector_name == "User-defined" + or proj.connector_name == "MatrixMarket" + or proj.connector_name == "From File" ): - new_target_firing_rate = np.array([possible_f_min, possible_f_max])[ - np.argmin( - np.absolute( - np.array([possible_f_min, possible_f_max]) - - target_firing_rate - ) - ) - ] - ### if the possible firing rates are too small --> what (high) firing rate could be maximally reached with a hypothetical g_ampa_max and I_app_max - ### if the possible firing rates are too large --> waht (low) firing rate could be reached with g_gaba_max and -I_app_max - warning_txt = f"WARNING get_possible_rates: target firing rate of population {pop_name}({target_firing_rate}) cannot be reached.\nPossible range with current synaptic load: [{round(possible_f_min,1)},{round(possible_f_max,1)}].\nSet firing rate to {round(new_target_firing_rate,1)}." 
- self._p_w(warning_txt) - self.log(warning_txt) - self.target_firing_rate_dict[pop_name] = new_target_firing_rate - target_firing_rate = self.target_firing_rate_dict[pop_name] - target_firing_rate_changed = True - ### find best I_app for reaching target firing rate - best_idx = np.argmin( - np.absolute(possible_firing_rates_arr - target_firing_rate) - ) - ### take all possible firing rates in range target firing rate +-10 - lower_rate = max([0, target_firing_rate - 10]) - higher_rate = target_firing_rate + 10 - rate_range_idx_arr = ( - (possible_firing_rates_arr >= lower_rate).astype(int) - * (possible_firing_rates_arr <= higher_rate).astype(int) - ).astype(bool) - possible_firing_rates_arr = possible_firing_rates_arr[rate_range_idx_arr] - I_app_arr = I_app_arr[rate_range_idx_arr] - ### now do linear fit to find I_app for target firing rate - if len(I_app_arr) > 10: - reg = LinearRegression().fit( - X=possible_firing_rates_arr.reshape(-1, 1), y=I_app_arr + raise ValueError( + f"Connector function '{self._connector_methods_dict[proj.connector_name].__name__}' not supported yet" ) - I_app_best_dict[pop_name] = reg.predict( - np.array([[target_firing_rate]]) - )[0] - else: - I_app_best_dict[pop_name] = 0 - plt.figure(figsize=(6.4, 4.8 * 2)) - plt.subplot(211) - plt.plot(I_app_arr, possible_firing_rates_arr) - plt.axhline(target_firing_rate, color="k") - plt.axvline(I_app_best_dict[pop_name], color="r") - plt.subplot(212) - plt.plot( - I_app_arr, np.absolute(possible_firing_rates_arr - target_firing_rate) - ) - plt.tight_layout() - plt.savefig(f"possible_firing_rate_{pop_name}.png", dpi=300) - plt.close("all") - if target_firing_rate_changed and False: - print_df(pd.DataFrame(self.afferent_projection_dict)) - print_df(pd.DataFrame(self.g_max_dict)) - ### TODO cannot reach firing rates for example for thal because I_app_max is too small, this +100Hz method seems not to work well - ### maybe use the weights and a voltage clamp neuron to find I_app - ### like with 
I_app_hold - ### weights i.e. spike trains cause dv/dt to be e.g. extremely negative --> then find I_app to make dv/dt zero - ### this I_app should then be "near" the I_app needed to reach the target firing rate - quit() - - return [target_firing_rate_changed, I_app_best_dict] - - def get_I_app_magnitude( - self, - pop_name, - pre_pop_name_list=[], - eff_size_list=[], - rate_list=[], - weight_list=[], - ): - """ - Get the correct magnitude of I_app for the given population. - The correct magnitude is the magnitude which is to negate the synaptic currents caused by the afferent populations. - Use the curretn weights and rates from the afferent_projection_dict and target_firing_rate_dict. - """ - print(f"get v clamp of {pop_name}") - print(f"pre_pop_name_list: {pre_pop_name_list}") - print(f"eff_size_list: {eff_size_list}") - print(f"rate_list: {rate_list}") - print(f"weight_list: {weight_list}") - print(f"I_app_hold: {self.prepare_psp_dict[pop_name]['I_app_hold']}") - print(f"v_rest: {self.prepare_psp_dict[pop_name]['v_rest']}") + ### get the connector function + self.connector_function_dict[proj.name] = self._connector_methods_dict[ + proj.connector_name + ] - detla_v_rest_0 = ( - self.get_v_clamp_2000( - net=self.net_single_v_clamp_dict[pop_name]["net"], - population=self.net_single_v_clamp_dict[pop_name]["population"], - monitor=self.net_single_v_clamp_dict[pop_name]["monitor"], - v=None, - I_app=0, - variable_init_sampler=self.prepare_psp_dict[pop_name][ - "variable_init_sampler" - ], - pre_pop_name_list=pre_pop_name_list, - eff_size_list=eff_size_list, - rate_list=rate_list, - weight_list=weight_list, - return_1000=True, + ### get the parameters of the connector function + self.connector_function_parameter_dict[proj.name] = ( + self._get_connector_parameters(proj) ) - * dt() - ) - if detla_v_rest_0 > 0: - I_app_sign = -1 - else: - I_app_sign = 1 + ### get the names of the pre- and post-synaptic populations + self.pre_post_pop_name_dict[proj.name] = 
(proj.pre.name, proj.post.name) - self.log("search I_app_magnitude with y(X) = detla_v(I_app=X)") - I_app_magnitude = I_app_sign * self.incremental_continuous_bound_search( - y_X=lambda X_val: self.get_v_clamp_2000( - net=self.net_single_v_clamp_dict[pop_name]["net"], - population=self.net_single_v_clamp_dict[pop_name]["population"], - monitor=self.net_single_v_clamp_dict[pop_name]["monitor"], - v=None, - I_app=I_app_sign * X_val, - variable_init_sampler=self.prepare_psp_dict[pop_name][ - "variable_init_sampler" - ], - pre_pop_name_list=pre_pop_name_list, - eff_size_list=eff_size_list, - rate_list=rate_list, - weight_list=weight_list, - return_1000=True, + ### get the sizes of the pre- and post-synaptic populations + self.pre_post_pop_size_dict[proj.name] = ( + proj.pre.size, + proj.post.size, ) - * dt(), - y_bound=0, - X_0=0, - y_0=detla_v_rest_0, - alpha_abs=0.005, - ) - - print(f"I_app_magnitude: {I_app_magnitude}\n") - - return I_app_magnitude - - def get_rate_list_for_pop(self, pop_name): - """ - get the rate list for the afferent populations of the given population - """ - rate_list = [] - for proj_name in self.afferent_projection_dict[pop_name]["projection_names"]: - proj_dict = self.get_proj_dict(proj_name) - pre_pop_name = proj_dict["pre_pop_name"] - pre_rate = self.target_firing_rate_dict[pre_pop_name] - rate_list.append(pre_rate) - return rate_list - - def get_eff_size_list_for_pop(self, pop_name): - """ - get the effective size list for the afferent populations of the given population - """ - eff_size_list = [] - for proj_name in self.afferent_projection_dict[pop_name]["projection_names"]: - proj_dict = self.get_proj_dict(proj_name) - pre_pop_size = proj_dict["pre_pop_size"] - proj_prob = proj_dict["proj_prob"] - eff_size = int(round(pre_pop_size * proj_prob, 0)) - eff_size_list.append(eff_size) - return eff_size_list - - def set_base(self, I_base_dict=None, I_base_variable="base_mean"): - """ - Set baseline currents in model, compile model and set 
weights in model. - - Args: - I_base_dict: dict, optional, default=None - Dictionary with baseline currents for all populations, if None the baselines are obtained by .get_base - - I_base_variable: str, optional, default="mean_base" - Name of the variable which represents the baseline current in the configured populations. They all have to have the same variable. - """ - ### check I_base_dict - if isinstance(I_base_dict, type(None)): - I_base_dict = self.get_base() - - ### clear annarchy, create model and set baselines and weights - cnp_clear() - self.model.create(do_compile=False) - ### set initial variables of populations - for pop_name in self.pop_name_list: - population = get_population(pop_name) - variable_init_sampler = self.net_single_dict[pop_name][ - "variable_init_sampler" - ] - self.set_init_variables(population, variable_init_sampler) - ### set baselines - for pop_name in I_base_dict.keys(): - get_val = getattr(get_population(pop_name), I_base_variable) - try: - set_val = np.ones(len(get_val)) * I_base_dict[pop_name] - except: - set_val = I_base_dict[pop_name] - setattr(get_population(pop_name), I_base_variable, set_val) - ### compile - self.model.compile() - ### set weights - for pop_name in self.pop_name_list: - for proj_idx, proj_name in enumerate( - self.afferent_projection_dict[pop_name]["projection_names"] - ): - weight_val = self.afferent_projection_dict[pop_name]["weights"][ - proj_idx - ] - get_projection(proj_name).w = weight_val - return I_base_dict - - def set_init_variables(self, population, variable_init_sampler): - """ - Set the initial variables of the given population to the given values. 
+ def _get_connector_parameters(self, proj: Projection): """ - variable_init_arr = variable_init_sampler.sample(len(population), seed=0) - var_name_list = variable_init_sampler.var_name_list - for var_name in population.variables: - if var_name in var_name_list: - set_val = variable_init_arr[:, var_name_list.index(var_name)] - setattr(population, var_name, set_val) + Get the parameters of the given connector function. - def get_time_in_x_sec(self, x): - """ Args: - x: int - how many seconds add to the current time - - return: - formatted_future_time: str - string of the future time in HH:MM:SS - """ - # Get the current time - current_time = datetime.datetime.now() - - # Add 10 seconds to the current time - future_time = current_time + datetime.timedelta(seconds=x) - - # Format future_time as HH:MM:SS - formatted_future_time = future_time.strftime("%H:%M:%S") - - return formatted_future_time + proj (Projection): + Projection for which the connector parameters are needed - def get_interpolation(self): - """ - get the interpolations to - predict f with I_app, g_ampa and g_gaba - - sets the class variable self.f_I_g_curve_dict --> for each population a f_I_g_curve function - """ - - ### create model - net_many_dict = self.create_many_neuron_network() - - ### get interpolation data - txt = "get interpolation data..." 
- print(txt) - self.log(txt) - ### for each population get the input arrays for I_app, g_ampa and g_gaba - ### while getting inputs define which values should be used later - input_dict = self.get_input_for_many_neurons_net() - - ### create list with variable_init_samplers of populations - variable_init_sampler_list = [ - self.net_single_dict[pop_name]["variable_init_sampler"] - for pop_name in self.pop_name_list - ] - - ### run the run_parallel with a reduced simulation duration and obtain a time estimate for the full duration - ### TODO use directly measureing simulation time to get time estimate - start = time() - parallel_run( - method=get_rate_parallel, - number=self.nr_networks, - **{ - "pop_name_list": [self.pop_name_list] * self.nr_networks, - "population_list": [list(net_many_dict["population_dict"].values())] - * self.nr_networks, - "variable_init_sampler_list": [variable_init_sampler_list] - * self.nr_networks, - "monitor_list": [list(net_many_dict["monitor_dict"].values())] - * self.nr_networks, - "I_app_list": input_dict["I_app_list"], - "g_ampa_list": input_dict["g_ampa_list"], - "g_gaba_list": input_dict["g_gaba_list"], - "simulation_dur": [dt()] * self.nr_networks, - }, - ) - reset() - end = time() - offset_time = end - start - start = time() - parallel_run( - method=get_rate_parallel, - number=self.nr_networks, - **{ - "pop_name_list": [self.pop_name_list] * self.nr_networks, - "population_list": [list(net_many_dict["population_dict"].values())] - * self.nr_networks, - "variable_init_sampler_list": [variable_init_sampler_list] - * self.nr_networks, - "monitor_list": [list(net_many_dict["monitor_dict"].values())] - * self.nr_networks, - "I_app_list": input_dict["I_app_list"], - "g_ampa_list": input_dict["g_ampa_list"], - "g_gaba_list": input_dict["g_gaba_list"], - "simulation_dur": [self.simulation_dur_estimate_time] - * self.nr_networks, - }, - ) - reset() - end = time() - time_estimate = np.clip( - round( - (end - start - offset_time) - * 
(self.simulation_dur / self.simulation_dur_estimate_time), - 0, - ), - 0, - None, - ) - - txt = f"start parallel_run of many neurons network on {self.nr_networks} threads, will take approx. {time_estimate} s (end: {self.get_time_in_x_sec(x=time_estimate)})..." - print(txt) - self.log(txt) - ### simulate the many neurons network with the input arrays splitted into the network populations sizes - ### and get the data of all populations - ### run_parallel - start = time() - f_rec_arr_list_list = parallel_run( - method=get_rate_parallel, - number=self.nr_networks, - **{ - "pop_name_list": [self.pop_name_list] * self.nr_networks, - "population_list": [list(net_many_dict["population_dict"].values())] - * self.nr_networks, - "variable_init_sampler_list": [variable_init_sampler_list] - * self.nr_networks, - "monitor_list": [list(net_many_dict["monitor_dict"].values())] - * self.nr_networks, - "I_app_list": input_dict["I_app_list"], - "g_ampa_list": input_dict["g_ampa_list"], - "g_gaba_list": input_dict["g_gaba_list"], - "simulation_dur": [self.simulation_dur] * self.nr_networks, - }, - ) - end = time() - txt = f"took {end-start} s" - print(txt) - self.log(txt) - - ### combine the list of outputs from parallel_run to one output per population - output_of_populations_dict = self.get_output_of_populations( - f_rec_arr_list_list, input_dict - ) - - ### create interpolation for each population - ### it can be a 1D to 3D interpolation, default (if everything works fine) is - ### 3D interpolation with "x": "I_app", "y": "g_ampa", "z": "g_gaba" - for pop_name in self.pop_name_list: - ### get whole input arrays - I_app_value_array = None - g_ampa_value_array = None - g_gaba_value_array = None - if self.I_app_max_dict[pop_name] > 0: - I_app_value_array = input_dict["I_app_arr_dict"][pop_name] - if self.g_max_dict[pop_name]["ampa"] > 0: - g_ampa_value_array = input_dict["g_ampa_arr_dict"][pop_name] - if self.g_max_dict[pop_name]["gaba"] > 0: - g_gaba_value_array = 
input_dict["g_gaba_arr_dict"][pop_name] - - ### get the interpolation - self.f_I_g_curve_dict[pop_name] = self.get_interp_3p( - values=output_of_populations_dict[pop_name], - model_conf_obj=self, - var_name_dict={"x": "I_app", "y": "g_ampa", "z": "g_gaba"}, - x=I_app_value_array, - y=g_ampa_value_array, - z=g_gaba_value_array, - ) - - self.did_get_interpolation = True - - ### with interpolation get the firing rates for all extreme values of I_app, g_ampa, g_gaba - for pop_name in self.pop_name_list: - self.extreme_firing_rates_df_dict[pop_name] = ( - self.get_extreme_firing_rates_df(pop_name) - ) - - def get_extreme_firing_rates_df(self, pop_name): + Returns: + connector_parameters_dict (dict): + Parameters of the given connector function """ - get the firing rates for all extreme values of I_app, g_ampa, g_gaba - Args: - pop_name: str - popualtion name + if proj.connector_name == "One-to-One": + return { + "weights": proj._connection_args[0], + "delays": proj._connection_args[1], + "force_multiple_weights": not (proj._single_constant_weight), + "storage_format": proj._storage_format, + "storage_order": proj._storage_order, + } + elif proj.connector_name == "All-to-All": + return { + "weights": proj._connection_args[0], + "delays": proj._connection_args[1], + "allow_self_connections": proj._connection_args[2], + "force_multiple_weights": not (proj._single_constant_weight), + "storage_format": proj._storage_format, + "storage_order": proj._storage_order, + } + elif proj.connector_name == "Gaussian": + return { + "amp": proj._connection_args[0], + "sigma": proj._connection_args[1], + "delays": proj._connection_args[2], + "limit": proj._connection_args[3], + "allow_self_connections": proj._connection_args[4], + "storage_format": proj._storage_format, + } + elif proj.connector_name == "Difference-of-Gaussian": + return { + "amp_pos": proj._connection_args[0], + "sigma_pos": proj._connection_args[1], + "amp_neg": proj._connection_args[2], + "sigma_neg": 
proj._connection_args[3], + "delays": proj._connection_args[4], + "limit": proj._connection_args[5], + "allow_self_connections": proj._connection_args[6], + "storage_format": proj._storage_format, + } + elif proj.connector_name == "Random": + return { + "probability": proj._connection_args[0], + "weights": proj._connection_args[1], + "delays": proj._connection_args[2], + "allow_self_connections": proj._connection_args[3], + "force_multiple_weights": not (proj._single_constant_weight), + "storage_format": proj._storage_format, + "storage_order": proj._storage_order, + } + elif proj.connector_name == "Random Convergent": + return { + "number": proj._connection_args[0], + "weights": proj._connection_args[1], + "delays": proj._connection_args[2], + "allow_self_connections": proj._connection_args[3], + "force_multiple_weights": not (proj._single_constant_weight), + "storage_format": proj._storage_format, + "storage_order": proj._storage_order, + } + elif proj.connector_name == "Random Divergent": + return { + "number": proj._connection_args[0], + "weights": proj._connection_args[1], + "delays": proj._connection_args[2], + "allow_self_connections": proj._connection_args[3], + "force_multiple_weights": not (proj._single_constant_weight), + "storage_format": proj._storage_format, + "storage_order": proj._storage_order, + } + elif proj.connector_name == "Connectivity matrix": + return { + "weights": proj._connection_args[0], + "delays": proj._connection_args[1], + "pre_post": proj._connection_args[2], + "storage_format": proj._storage_format, + "storage_order": proj._storage_order, + } + elif proj.connector_name == "Sparse connectivity matrix": + return { + "weights": proj._connection_args[0], + "delays": proj._connection_args[1], + "storage_format": proj._storage_format, + "storage_order": proj._storage_order, + } - return: - table_df: pandas dataframe - containing the firing rates for all extreme values of I_app, g_ampa, g_gaba - """ - I_app_list = 
[-self.I_app_max_dict[pop_name], self.I_app_max_dict[pop_name]] - g_ampa_list = [0, self.g_max_dict[pop_name]["ampa"]] - g_gaba_list = [0, self.g_max_dict[pop_name]["gaba"]] - ### create all combiniations of I_app_list, g_ampa_list, g_gaba_list in a single list - comb_list = self.get_all_combinations_of_lists( - [I_app_list, g_ampa_list, g_gaba_list] - ) - ### get the firing rates for all combinations - f_list = [] - for I_app, g_ampa, g_gaba in comb_list: - f_list.append( - self.f_I_g_curve_dict[pop_name](x=I_app, y=g_ampa, z=g_gaba)[0] - ) +class CreateSingleNeuronNetworks: + def __init__(self): + pass - ### now get the same for names - I_app_name_list = ["min", "max"] - g_ampa_name_list = ["min", "max"] - g_gaba_name_list = ["min", "max"] - ### create all combiniations of I_app_name_list, g_ampa_name_list, g_gaba_name_list in a single list - comb_name_list = self.get_all_combinations_of_lists( - [I_app_name_list, g_ampa_name_list, g_gaba_name_list] - ) - ### create a dict as table with header I_app, g_ampa, g_gaba - table_dict = { - "I_app": np.array(comb_name_list)[:, 0].tolist(), - "g_ampa": np.array(comb_name_list)[:, 1].tolist(), - "g_gaba": np.array(comb_name_list)[:, 2].tolist(), - "f": f_list, - } +class CreateReducedModel: + def __init__(self): + pass - ### create a pandas dataframe from the table_dict - table_df = pd.DataFrame(table_dict) - return table_df +class GetVRest: + def __init__(self): + pass - def get_all_combinations_of_lists(self, list_of_lists): - """ - get all combinations of lists in a single list - example: [[1,2],[3,4],[5,6]] --> [[1,3,5],[1,3,6],[1,4,5],[1,4,6],[2,3,5],[2,3,6],[2,4,5],[2,4,6]] - """ - return list(itertools.product(*list_of_lists)) - def get_output_of_populations(self, f_rec_arr_list_list, input_dict): - """ - restructure the output of run_parallel so that for each population a single array with firing rates is obtained +class GetMaxSyn: + def __init__(self): + pass - Args: - f_rec_arr_list_list: list of lists of arrays 
- first lists contain different network runs, second level lists contain arrays for the different populations - return: - output_pop_dict: dict of arrays - for each population a single array with firing rates - """ - output_pop_dict = {} - for pop_name in self.pop_name_list: - output_pop_dict[pop_name] = [] - ### first loop selecting the network - for f_rec_arr_list in f_rec_arr_list_list: - ### second loop selecting the population - for pop_idx, pop_name in enumerate(self.pop_name_list): - ### append the recorded values to the array of the corresponding population - output_pop_dict[pop_name].append(f_rec_arr_list[pop_idx]) - - ### concatenate the arrays of the individual populations - for pop_name in self.pop_name_list: - output_pop_dict[pop_name] = np.concatenate(output_pop_dict[pop_name]) - - ### use the input dict to only use values which should be used - ### lis of lists, first list level = networks, second list level = populations then you get array with input values - ### so same format as f_rec_arr_list_list - use_I_app_arr_list_list = input_dict["use_I_app_list"] - use_g_ampa_arr_list_list = input_dict["use_g_ampa_list"] - use_g_gaba_arr_list_list = input_dict["use_g_gaba_list"] - - ### now get for each population an array which contains the info if the values should be used - use_output_pop_dict = {} - for pop_name in self.pop_name_list: - use_output_pop_dict[pop_name] = [] - ### first loop selecting the network - for net_idx in range(len(use_I_app_arr_list_list)): - use_I_app_arr_list = use_I_app_arr_list_list[net_idx] - use_g_ampa_arr_list = use_g_ampa_arr_list_list[net_idx] - use_g_gaba_arr_list = use_g_gaba_arr_list_list[net_idx] - ### second loop selecting the population - for pop_idx, pop_name in enumerate(self.pop_name_list): - ### only use values if for all input values use is True - use_I_app_arr = use_I_app_arr_list[pop_idx] - use_g_ampa_arr = use_g_ampa_arr_list[pop_idx] - use_g_gaba_arr = use_g_gaba_arr_list[pop_idx] - use_value_arr = 
np.logical_and(use_I_app_arr, use_g_ampa_arr) - use_value_arr = np.logical_and(use_value_arr, use_g_gaba_arr) - ### append the recorded values to the array of the corresponding population - use_output_pop_dict[pop_name].append(use_value_arr) - - ### concatenate the arrays of the individual populations - for pop_name in self.pop_name_list: - use_output_pop_dict[pop_name] = np.concatenate( - use_output_pop_dict[pop_name] - ) - - ### finaly only use values defined by ues_output... - for pop_name in self.pop_name_list: - output_pop_dict[pop_name] = output_pop_dict[pop_name][ - use_output_pop_dict[pop_name] - ] - - return output_pop_dict - - def get_input_for_many_neurons_net(self): - """ - get the inputs for the parallel many neurons network simulation - - need a list of dicts, keys=pop_name, lsit=number of networks - """ - - ### create dicts with lists for the populations - I_app_arr_list_dict = {} - g_ampa_arr_list_dict = {} - g_gaba_arr_list_dict = {} - use_I_app_arr_list_dict = {} - use_g_ampa_arr_list_dict = {} - use_g_gaba_arr_list_dict = {} - I_app_arr_dict = {} - g_ampa_arr_dict = {} - g_gaba_arr_dict = {} - for pop_name in self.pop_name_list: - ### prepare grid for I, g_ampa and g_gaba - ### bounds - g_ampa_max = self.g_max_dict[pop_name]["ampa"] - g_gaba_max = self.g_max_dict[pop_name]["gaba"] - I_max = self.I_app_max_dict[pop_name] - - ### create value_arrays - I_app_value_array = np.linspace( - -I_max, I_max, self.nr_vals_interpolation_grid - ) - g_ampa_value_array = np.linspace( - 0, g_ampa_max, self.nr_vals_interpolation_grid - ) - g_gaba_value_array = np.linspace( - 0, g_gaba_max, self.nr_vals_interpolation_grid - ) - - ### store these value arrays for each pop - I_app_arr_dict[pop_name] = I_app_value_array - g_ampa_arr_dict[pop_name] = g_ampa_value_array - g_gaba_arr_dict[pop_name] = g_gaba_value_array - - ### create use values arrays - use_I_app_array = np.array([I_max > 0] * self.nr_vals_interpolation_grid) - use_g_ampa_array = np.array( - [g_ampa_max 
> 0] * self.nr_vals_interpolation_grid - ) - use_g_gaba_array = np.array( - [g_gaba_max > 0] * self.nr_vals_interpolation_grid - ) - ### use at least a single value - use_I_app_array[0] = True - use_g_ampa_array[0] = True - use_g_gaba_array[0] = True - - ### get all combinations (grid) of value_arrays - I_g_arr = np.array( - list( - itertools.product( - *[I_app_value_array, g_ampa_value_array, g_gaba_value_array] - ) - ) - ) - - ### get all combinations (grid) of the use values arrays - use_I_g_arr = np.array( - list( - itertools.product( - *[use_I_app_array, use_g_ampa_array, use_g_gaba_array] - ) - ) - ) - - ### individual value arrays from combinations - I_app_arr = I_g_arr[:, 0] - g_ampa_arr = I_g_arr[:, 1] - g_gaba_arr = I_g_arr[:, 2] - - ### individual use values arrays from combinations - use_I_app_arr = use_I_g_arr[:, 0] - use_g_ampa_arr = use_I_g_arr[:, 1] - use_g_gaba_arr = use_I_g_arr[:, 2] - - ### split the arrays for the networks - networks_size_list = np.array( - [self.nr_neurons_of_pop_per_net] * self.nr_networks - ) - split_idx_arr = np.cumsum(networks_size_list)[:-1] - ### after this split the last array may be smaller than the others --> append zeros - ### value arrays - I_app_arr_list = np.split(I_app_arr, split_idx_arr) - g_ampa_arr_list = np.split(g_ampa_arr, split_idx_arr) - g_gaba_arr_list = np.split(g_gaba_arr, split_idx_arr) - ### use value arrays - use_I_app_arr_list = np.split(use_I_app_arr, split_idx_arr) - use_g_ampa_arr_list = np.split(use_g_ampa_arr, split_idx_arr) - use_g_gaba_arr_list = np.split(use_g_gaba_arr, split_idx_arr) - - ### check if last network is smaler - if self.nr_last_network < self.nr_neurons_of_pop_per_net: - ### if yes --> append zeros to value arrays - ### and append False to use values arrays - nr_of_zeros_append = round( - self.nr_neurons_of_pop_per_net - self.nr_last_network, 0 - ) - ### value arrays - I_app_arr_list[-1] = np.concatenate( - [I_app_arr_list[-1], np.zeros(nr_of_zeros_append)] - ) - 
g_ampa_arr_list[-1] = np.concatenate( - [g_ampa_arr_list[-1], np.zeros(nr_of_zeros_append)] - ) - g_gaba_arr_list[-1] = np.concatenate( - [g_gaba_arr_list[-1], np.zeros(nr_of_zeros_append)] - ) - ### use values arrays - use_I_app_arr_list[-1] = np.concatenate( - [use_I_app_arr_list[-1], np.array([False] * nr_of_zeros_append)] - ) - use_g_ampa_arr_list[-1] = np.concatenate( - [use_g_ampa_arr_list[-1], np.array([False] * nr_of_zeros_append)] - ) - use_g_gaba_arr_list[-1] = np.concatenate( - [use_g_gaba_arr_list[-1], np.array([False] * nr_of_zeros_append)] - ) - - ### store the array lists into the population dicts - ### value arrays - I_app_arr_list_dict[pop_name] = I_app_arr_list - g_ampa_arr_list_dict[pop_name] = g_ampa_arr_list - g_gaba_arr_list_dict[pop_name] = g_gaba_arr_list - ### use value arrays - use_I_app_arr_list_dict[pop_name] = use_I_app_arr_list - use_g_ampa_arr_list_dict[pop_name] = use_g_ampa_arr_list - use_g_gaba_arr_list_dict[pop_name] = use_g_gaba_arr_list - - ### restructure the dict of lists into a list for networks of list for populations - I_app_list = [] - g_ampa_list = [] - g_gaba_list = [] - use_I_app_list = [] - use_g_ampa_list = [] - use_g_gaba_list = [] - for net_idx in range(self.nr_networks): - ### value arrays - I_app_list.append( - [ - I_app_arr_list_dict[pop_name][net_idx] - for pop_name in self.pop_name_list - ] - ) - g_ampa_list.append( - [ - g_ampa_arr_list_dict[pop_name][net_idx] - for pop_name in self.pop_name_list - ] - ) - g_gaba_list.append( - [ - g_gaba_arr_list_dict[pop_name][net_idx] - for pop_name in self.pop_name_list - ] - ) - ### use values arrays - use_I_app_list.append( - [ - use_I_app_arr_list_dict[pop_name][net_idx] - for pop_name in self.pop_name_list - ] - ) - use_g_ampa_list.append( - [ - use_g_ampa_arr_list_dict[pop_name][net_idx] - for pop_name in self.pop_name_list - ] - ) - use_g_gaba_list.append( - [ - use_g_gaba_arr_list_dict[pop_name][net_idx] - for pop_name in self.pop_name_list - ] - ) - - return { - 
"I_app_list": I_app_list, - "g_ampa_list": g_ampa_list, - "g_gaba_list": g_gaba_list, - "use_I_app_list": use_I_app_list, - "use_g_ampa_list": use_g_ampa_list, - "use_g_gaba_list": use_g_gaba_list, - "I_app_arr_dict": I_app_arr_dict, - "g_ampa_arr_dict": g_ampa_arr_dict, - "g_gaba_arr_dict": g_gaba_arr_dict, - } - - for pop_name in self.pop_name_list: - ### prepare grid for I, g_ampa and g_gaba - ### bounds - g_ampa_max = self.g_max_dict[pop_name]["ampa"] - g_gaba_max = self.g_max_dict[pop_name]["gaba"] - I_max = self.I_app_max_dict[pop_name] - ### number of points for individual value arrays: I, g_ampa and g_gaba - number_of_points = np.round( - self.nr_neurons_net_many_total ** (1 / 3), 0 - ).astype(int) - ### create value_arrays - I_app_value_array = np.linspace(-I_max, I_max, number_of_points) - g_ampa_value_array = np.linspace(0, g_ampa_max, number_of_points) - g_gaba_value_array = np.linspace(0, g_gaba_max, number_of_points) - ### get all combinations (grid) of value_arrays - I_g_arr = np.array( - list( - itertools.product( - *[I_app_value_array, g_ampa_value_array, g_gaba_value_array] - ) - ) - ) - ### individual value arrays from combinations - I_app_arr = I_g_arr[:, 0] - g_ampa_arr = I_g_arr[:, 1] - g_gaba_arr = I_g_arr[:, 2] - - ### split the arrays into the sizes of the many-neuron networks - split_idx_arr = np.cumsum(self.nr_many_neurons_list[pop_name])[:-1] - - I_app_arr_list = np.split(I_app_arr, split_idx_arr) - g_ampa_arr_list = np.split(g_ampa_arr, split_idx_arr) - g_gaba_arr_list = np.split(g_gaba_arr, split_idx_arr) - - class get_interp_3p: - def __init__( - self, values, model_conf_obj, var_name_dict, x=None, y=None, z=None - ) -> None: - """ - x, y, and z are the increasing gid steps on the interpolation grid - set z=None to get 2D interpiolation - set y and z = None to get 1D interpolation - """ - self.x = x - self.y = y - self.z = z - self.values = values - self.model_conf_obj = model_conf_obj - self.var_name_dict = var_name_dict - - if ( - 
isinstance(self.x, type(None)) - and isinstance(self.y, type(None)) - and isinstance(self.z, type(None)) - ): - error_msg = ( - "ERROR get_interp_3p: at least one of x,y,z has to be an array" - ) - model_conf_obj.log(error_msg) - raise AssertionError(error_msg) - - def __call__(self, x=None, y=None, z=None): - ### check x - if isinstance(x, type(None)): - if not isinstance(self.x, type(None)): - error_msg = f"ERROR get_interp_3p: interpolation values for {self.var_name_dict['x']} were given but sample points are missing!" - self.model_conf_obj.log(error_msg) - raise AssertionError(error_msg) - tmp_x = 0 - else: - if isinstance(self.x, type(None)): - warning_txt = f"WARNING get_interp_3p: sample points for {self.var_name_dict['x']} are given but no interpolation values for {self.var_name_dict['x']} were given!" - self.model_conf_obj.log(warning_txt) - x = None - tmp_x = 0 - else: - tmp_x = x - - ### check y - if isinstance(y, type(None)): - if not isinstance(self.y, type(None)): - error_msg = f"ERROR get_interp_3p: interpolation values for {self.var_name_dict['y']} were given but sample points are missing!" - self.model_conf_obj.log(error_msg) - raise AssertionError(error_msg) - tmp_y = 0 - else: - if isinstance(self.y, type(None)): - warning_txt = f"WARNING get_interp_3p: sample points for {self.var_name_dict['y']} are given but no interpolation values for {self.var_name_dict['y']} were given!" - self.model_conf_obj.log(warning_txt) - y = None - tmp_y = 0 - else: - tmp_y = y - - ### check z - if isinstance(z, type(None)): - if not isinstance(self.y, type(None)): - error_msg = f"ERROR get_interp_3p: interpolation values for {self.var_name_dict['z']} were given but sample points are missing!" 
- self.model_conf_obj.log(error_msg) - raise AssertionError(error_msg) - tmp_z = 0 - else: - if isinstance(self.z, type(None)): - warning_txt = f"WARNING get_interp_3p: sample points for {self.var_name_dict['z']} are given but no interpolation values for {self.var_name_dict['z']} were given!" - self.model_conf_obj._p_w(warning_txt) - self.model_conf_obj.log(warning_txt) - z = None - tmp_z = 0 - else: - tmp_z = z - - ### get input arrays - input_arr_dict = { - "x": np.array(tmp_x).reshape(-1), - "y": np.array(tmp_y).reshape(-1), - "z": np.array(tmp_z).reshape(-1), - } - - ### check if the arrays with size larger 1 have same size - size_arr = np.array([val.size for val in input_arr_dict.values()]) - mask = size_arr > 1 - if True in mask: - input_size = size_arr[mask][0] - if not (input_size == size_arr[mask]).all(): - raise ValueError( - "ERROR model_configurator get_interp_3p: x,y,z sample points have to be either single values or arrays. All arrays have to have same size" - ) - - ### if there are inputs only consisting of a single value --> duplicate to increase size if there are also array inputs - for idx, larger_1 in enumerate(mask): - if not larger_1 and True in mask: - val = input_arr_dict[list(input_arr_dict.keys())[idx]][0] - input_arr_dict[list(input_arr_dict.keys())[idx]] = ( - np.ones(input_size) * val - ) - - ### get the sample points - use_variable_names_list = ["x", "y", "z"] - if isinstance(x, type(None)): - use_variable_names_list.remove("x") - if isinstance(y, type(None)): - use_variable_names_list.remove("y") - if isinstance(z, type(None)): - use_variable_names_list.remove("z") - point_arr = np.array( - [input_arr_dict[var_name] for var_name in use_variable_names_list] - ).T - - ### get the grid points, only use these which are not None - use_variable_names_list = ["x", "y", "z"] - if isinstance(self.x, type(None)): - use_variable_names_list.remove("x") - if isinstance(self.y, type(None)): - use_variable_names_list.remove("y") - if 
isinstance(self.z, type(None)): - use_variable_names_list.remove("z") - - interpolation_grid_arr_dict = { - "x": self.x, - "y": self.y, - "z": self.z, - } - points = tuple( - [ - interpolation_grid_arr_dict[var_name] - for var_name in use_variable_names_list - ] - ) - - ### get shape of values - values_shape = tuple( - [ - interpolation_grid_arr_dict[var_name].size - for var_name in use_variable_names_list - ] - ) - - return interpn( - points=points, - values=self.values.reshape(values_shape), - xi=point_arr, - ) - - def set_syn_load(self, synaptic_load_dict, synaptic_contribution_dict=None): - """ - Args: - synaptic_load_dict: dict or number - either a dictionary with keys = all population names the model_configurator should configure - or a single number between 0 and 1 - The dictionary values should be lists which contain either 2 values for ampa and gaba load, - only 1 value if the population has only ampa or gaba input. - For the strucutre of the dictionary check the print_guide - - synaptic_contribution_dict: dict, optional, default=None - by default the synaptic contributions of all afferent projections is equal - one can define other contributions in this dict - give for each affernt projection the contribution to the synaptic load of the target population - For the strucutre of the dictionary check the print_guide - """ - - ### set synaptic load - ### is dict --> replace internal dict values - if isinstance(synaptic_load_dict, dict): - ### check if correct number of population - if len(list(synaptic_load_dict.keys())) != len( - list(self.syn_load_dict.keys()) - ): - error_msg = f"ERROR set_syn_load: wrong number of populations given with 'synaptic_load_dict' given={len(list(synaptic_load_dict.keys()))}, expected={len(list(self.syn_load_dict.keys()))}" - self.log(error_msg) - raise ValueError(error_msg) - ### loop over all populations - for pop_name in synaptic_load_dict.keys(): - ### cehck pop name - if pop_name not in list(self.syn_load_dict.keys()): - 
                    error_msg = f"ERROR set_syn_load: the given population {pop_name} is not within the list of populations which should be configured {self.pop_name_list}"
                    self.log(error_msg)
                    raise ValueError(error_msg)
                value_list = synaptic_load_dict[pop_name]
                ### check value list: expected length and all values in [0, 1]
                if len(value_list) != len(self.syn_load_dict[pop_name]):
                    error_msg = f"ERROR set_syn_load: for population {pop_name}, {len(self.syn_load_dict[pop_name])} syn load values should be given but {len(value_list)} were given"
                    self.log(error_msg)
                    raise ValueError(error_msg)
                if not (
                    (np.array(value_list) <= 1).all()
                    and (np.array(value_list) >= 0).all()
                ):
                    error_msg = f"ERROR set_syn_load: the values for synaptic loads should be equal or smaller than 1, given for population {pop_name}: {value_list}"
                    self.log(error_msg)
                    raise ValueError(error_msg)
                ### replace internal values with given values
                self.syn_load_dict[pop_name] = value_list
        else:
            ### is not a dict --> check number (one value for all populations)
            try:
                synaptic_load = float(synaptic_load_dict)
            except:
                ### NOTE(review): bare except hides unrelated errors; consider
                ### except (TypeError, ValueError) and `raise ... from err`
                error_msg = "ERROR set_syn_load: if synaptic_load_dict is not a dictionary it should be a single number!"
                self.log(error_msg)
                raise ValueError(error_msg)
            if not (synaptic_load <= 1 and synaptic_load >= 0):
                ### NOTE(review): runtime message is missing a space
                ### ("synaptic_loadshould") — left unchanged here
                error_msg = "ERROR set_syn_load: value for synaptic_loadshould be equal or smaller than 1"
                self.log(error_msg)
                raise ValueError(error_msg)
            ### replace internal values with given value
            for pop_name in self.syn_load_dict.keys():
                for idx in range(len(self.syn_load_dict[pop_name])):
                    self.syn_load_dict[pop_name][idx] = synaptic_load
        ### transform syn load dict in correct form with projection target type keys
        ### (per-population list --> {"ampa": ..., "gaba": ...}; absent type --> 0)
        syn_load_dict = {}
        for pop_name in self.pop_name_list:
            syn_load_dict[pop_name] = {}
            if (
                "ampa" in self.afferent_projection_dict[pop_name]["target"]
                and "gaba" in self.afferent_projection_dict[pop_name]["target"]
            ):
                syn_load_dict[pop_name]["ampa"] = self.syn_load_dict[pop_name][0]
                syn_load_dict[pop_name]["gaba"] = self.syn_load_dict[pop_name][1]
            elif "ampa" in self.afferent_projection_dict[pop_name]["target"]:
                syn_load_dict[pop_name]["ampa"] = self.syn_load_dict[pop_name][0]
                syn_load_dict[pop_name]["gaba"] = 0
            elif "gaba" in self.afferent_projection_dict[pop_name]["target"]:
                syn_load_dict[pop_name]["ampa"] = 0
                syn_load_dict[pop_name]["gaba"] = self.syn_load_dict[pop_name][0]
        self.syn_load_dict = syn_load_dict

        ### set synaptic contribution
        if not isinstance(synaptic_contribution_dict, type(None)):
            ### loop over all given populations
            for pop_name in synaptic_contribution_dict.keys():
                ### check pop_name
                if pop_name not in list(self.syn_contr_dict.keys()):
                    error_msg = f"ERROR set_syn_load: the given population {pop_name} is not within the list of populations which should be configured {self.pop_name_list}"
                    self.log(error_msg)
                    raise ValueError(error_msg)
                ### loop over given projection target type (ampa,gaba)
                for given_proj_target_type in synaptic_contribution_dict[
                    pop_name
                ].keys():
                    ### check given target type
                    if not (
                        given_proj_target_type == "ampa"
                        or given_proj_target_type == "gaba"
                    ):
                        error_msg = f"ERROR set_syn_load: with the synaptic_contribution_dict for each given population a 'ampa' and/or 'gaba' dictionary contianing the corresponding afferent projections should be given, given key={given_proj_target_type}"
                        self.log(error_msg)
                        raise ValueError(error_msg)
                    ### check if for the projection target type the correct number of projections is given
                    given_proj_name_list = list(
                        synaptic_contribution_dict[pop_name][
                            given_proj_target_type
                        ].keys()
                    )
                    internal_proj_name_list = list(
                        self.syn_contr_dict[pop_name][given_proj_target_type].keys()
                    )
                    if len(given_proj_name_list) != len(internal_proj_name_list):
                        error_msg = f"ERROR set_syn_load: in synaptic_contribution_dict for population {pop_name} and target_type {given_proj_target_type} wrong number of projections is given\ngiven={given_proj_name_list}, expected={internal_proj_name_list}"
                        self.log(error_msg)
                        raise ValueError(error_msg)
                    ### check if given contributions for the target type sum up to 1
                    ### (rounded to 6 decimals to tolerate float noise)
                    given_contribution_arr = np.array(
                        list(
                            synaptic_contribution_dict[pop_name][
                                given_proj_target_type
                            ].values()
                        )
                    )
                    if round(given_contribution_arr.sum(), 6) != 1:
                        error_msg = f"ERROR set_syn_load: given synaptic contributions for population {pop_name} and target_type {given_proj_target_type} do not sum up to 1: given={given_contribution_arr}-->{round(given_contribution_arr.sum(),6)}"
                        self.log(error_msg)
                        raise ValueError(error_msg)
                    ### loop over given afferent projections
                    for proj_name in given_proj_name_list:
                        ### check if projection name exists
                        if proj_name not in internal_proj_name_list:
                            error_msg = f"ERROR set_syn_load: given projection {proj_name} given with synaptic_contribution_dict no possible projection, possible={internal_proj_name_list}"
                            self.log(error_msg)
                            raise ValueError(error_msg)
                        ### replace internal value of the projection with given value
                        self.syn_contr_dict[pop_name][given_proj_target_type][
                            proj_name
                        ] = synaptic_contribution_dict[pop_name][
                            given_proj_target_type
                        ][
                            proj_name
                        ]

        ### set the weights in the afferent_projection_dict based on the given synaptic contributions
        for pop_name in self.pop_name_list:
            weight_list = []
            for proj_name in self.afferent_projection_dict[pop_name][
                "projection_names"
            ]:
                ### get proj info
                proj_dict = self.get_proj_dict(proj_name)
                proj_target_type = proj_dict["proj_target_type"]

                ### obtain the weight using the given syn_contr_dict and the syn_contr_max_dict (assuming max weights)
                target_type_contr_dict = self.syn_contr_dict[pop_name][proj_target_type]
                target_type_contr_max_dict = self.get_syn_contr_dict(
                    pop_name=pop_name,
                    target_type=proj_target_type,
                    use_max_weights=True,
                    normalize=True,
                )
                ### convert the synaptic contribution dicts to arrays
                target_type_contr_arr = np.array(list(target_type_contr_dict.values()))
                target_type_contr_max_arr = np.array(
                    list(target_type_contr_max_dict.values())
                )
                ### get the transformation from synaptic contributions assuming max weights to given synaptic contributions
                contr_transform_arr = target_type_contr_max_arr / target_type_contr_arr
                ### normalize the transform_arr by the largest scaling --> obtain the weight factors
                contr_transform_arr /= contr_transform_arr.max()
                ### get the weight of the current projection
                weight = (
                    self.g_max_dict[pop_name][proj_target_type]
                    * contr_transform_arr[
                        list(target_type_contr_dict.keys()).index(proj_name)
                    ]
                )
                ### append weight to weight list
                weight_list.append(weight)
            ### replace the weights in the afferent_projection_dict
            self.afferent_projection_dict[pop_name]["weights"] = weight_list

        ### now scale the weights based on the synaptic load
        for pop_name in self.pop_name_list:
            for target_type in ["ampa", "gaba"]:
                ### get the synaptic load based on the weights
                syn_load = self.get_syn_load(pop_name=pop_name, target_type=target_type)
                ### if the obtained syn load with the weights is smaller than the given target syn load
                ### print warning because upscaling is not possible, syn load is smaller than the user wanted
                ### NOTE(review): debug print left in — consider self.log instead
                print(
                    f"syn_load={syn_load}, target={self.syn_load_dict[pop_name][target_type]}"
                )
                if syn_load < self.syn_load_dict[pop_name][target_type]:
                    ### the weights cannot be upscaled because syn_load was obtained with max weights
                    ### --> print a warning
                    warning_txt = f"WARNING set_syn_load: the synaptic load for population {pop_name} and target_type {target_type} cannot reach teh given synaptic load using the given synaptic contributions without scaling the weights over the maximum weights!\ngiven syn_load={self.syn_load_dict[pop_name][target_type]}, obtained syn_load={syn_load}"
                    self.log(warning_txt)
                    self._p_w(warning_txt)
                    ### update the syn_load_dict with the obtained syn_load
                    self.syn_load_dict[pop_name][target_type] = syn_load
                elif syn_load > 0:
                    ### get the weights
                    weight_arr = np.array(
                        self.afferent_projection_dict[pop_name]["weights"]
                    )
                    ### get the proj target type array
                    proj_target_type_arr = np.array(
                        self.afferent_projection_dict[pop_name]["target"]
                    )
                    ### select the weights for the target type
                    weight_arr = weight_arr[proj_target_type_arr == target_type]
                    ### scale the weights
                    weight_arr *= self.syn_load_dict[pop_name][target_type] / syn_load
                    ### update the weights in the afferent_projection_dict
                    weight_idx_arr = np.where(proj_target_type_arr == target_type)[0]
                    for weight_idx_new, weight_idx_original in enumerate(
                        weight_idx_arr
                    ):
                        self.afferent_projection_dict[pop_name]["weights"][
                            weight_idx_original
                        ] = weight_arr[weight_idx_new]

        ### print guide
        self._p_g(_p_g_after_set_syn_load)

    def set_weights(self, weights):
        ### overwrite the weights in the afferent_projection_dict with the given
        ### nested dict: weights[pop_name][proj_name]
        for pop_name in self.pop_name_list:
            self.afferent_projection_dict[pop_name]["weights"] = []
            for proj_name in self.afferent_projection_dict[pop_name][
                "projection_names"
            ]:
                self.afferent_projection_dict[pop_name]["weights"].append(
                    weights[pop_name][proj_name]
                )

    def get_syn_load(self, pop_name: str, target_type: str) -> float:
        """
        Calculates the synaptic load of a population for a given target type for the given weights of the afferent_projection_dict

        Args:
            pop_name: str
                name of the population

            target_type: str
                either 'ampa' or 'gaba'

        Returns:
            syn_load: float
                synaptic load of the population for the given target type
                (0 if the population has no afferents of this target type)
        """
        ### get the proj target type array
        proj_target_type_arr = np.array(
            self.afferent_projection_dict[pop_name]["target"]
        )
        if target_type in proj_target_type_arr:
            ### get the weights
            weight_arr = np.array(self.afferent_projection_dict[pop_name]["weights"])
            ### select the weights for the target type
            weight_arr = weight_arr[proj_target_type_arr == target_type]
            ### get the pre size
            size_arr = np.array(self.afferent_projection_dict[pop_name]["size"])
            ### select the pre size for the target type
            size_arr = size_arr[proj_target_type_arr == target_type]
            ### get the probability
            prob_arr = np.array(self.afferent_projection_dict[pop_name]["probability"])
            ### select the probability for the target type
            prob_arr = prob_arr[proj_target_type_arr == target_type]
            ### get the firing rate
            firing_rate_arr = np.array(
                self.afferent_projection_dict[pop_name]["target firing rate"]
            )
            ### select the firing rate for the target type
            firing_rate_arr = firing_rate_arr[proj_target_type_arr == target_type]

            ### get the synaptic load based on weights, sizes, probabilities and max weights
            ### i.e. weighted input drive normalized by the drive at maximum weight
            syn_load = np.sum(weight_arr * size_arr * prob_arr * firing_rate_arr) / (
                self.g_max_dict[pop_name][target_type]
                * np.sum(size_arr * prob_arr * firing_rate_arr)
            )
        else:
            syn_load = 0

        return syn_load

    def get_template_synaptic_contribution_dict(self, given_dict):
        """
        converts the full template dict with all keys for populations, target-types and projections into a
        reduced dict
        which only contains the keys which lead to values smaller 1
        """

        ret_dict = {}
        for key in given_dict.keys():
            if isinstance(given_dict[key], dict):
                ### NOTE(review): the recursion result rec_dict is computed here
                ### and then recomputed below — rec_dict could be reused directly
                rec_dict = self.get_template_synaptic_contribution_dict(given_dict[key])
                if len(rec_dict) > 0:
                    ret_dict[key] = self.get_template_synaptic_contribution_dict(
                        given_dict[key]
                    )
            else:
                if given_dict[key] < 1:
                    ret_dict[key] = given_dict[key]

        return ret_dict

    def divide_almost_equal(self, number, num_parts):
        ### split an integer into num_parts integers that differ by at most 1
        # Calculate the quotient and remainder
        quotient, remainder = divmod(number, num_parts)

        # Initialize a list to store the almost equal integers
        result = [quotient] * num_parts

        # Distribute the remainder evenly among the integers
        for i in range(remainder):
            result[i] += 1

        return result

    def compile_net_many_sequential(self):
        ### compile all many-neuron networks one after the other
        network_list = [
            net_many_dict["net"]
            for net_many_dict_list in self.net_many_dict.values()
            for net_many_dict in net_many_dict_list
        ]
        for net in network_list:
            self.compile_net_many(net=net)

    def compile_net_many_parallel(self):
        ### compile all many-neuron networks in parallel processes, then
        ### register the compiled modules in ANNarchy's network manager
        nr_available_workers = int(multiprocessing.cpu_count() / 2)
        network_list = [
            net_many_dict["net"]
            for net_many_dict_list in self.net_many_dict.values()
            for net_many_dict in net_many_dict_list
        ]
        with multiprocessing.Pool(nr_available_workers) as p:
            p.map(self.compile_net_many, network_list)

        ### for each network have network idx
        ### network 0 is base network
        ### network 1,2,3...N are the single neuron networks for the N populations
        ### start idx = N+1 (inclusive), end_idx = number many networks + N (inclusive)
        for net_idx in range(
            len(self.pop_name_list) + 1, len(network_list) + len(self.pop_name_list) + 1
        ):
            ### get the name of the run folder of the network
            ### search for a folder which starts with run_
            ### there should only be 1 --> get run_folder_name as str
            run_folder_name = _find_folder_with_prefix(
                base_path=f"annarchy_folders/many_net_{net_idx}", prefix="run_"
            )
            ### NOTE(review): hard-coded machine-specific absolute path
            ### overrides the relative folder found above — breaks on any other
            ### machine; TODO build the path from base_path instead
            run_folder_name = f"/scratch/olmai/Projects/PhD/CompNeuroPy/CompNeuroPy/examples/model_configurator/annarchy_folders/many_net_{net_idx}//{run_folder_name}"

            ### NOTE(review): debug print left in
            print(run_folder_name)
            ### import the ANNarchyCore.so module from this folder
            spec = importlib.util.spec_from_file_location(
                f"ANNarchyCore{net_idx}", f"{run_folder_name}/ANNarchyCore{net_idx}.so"
            )
            foo = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(foo)

            ### overwrite the entries in the network manager
            _network[net_idx]["instance"] = foo
            _network[net_idx]["compiled"] = True
            _network[net_idx]["directory"] = run_folder_name

    def get_max_syn_currents(self, pop_name: str) -> list:
        """
        obtain I_app_max, g_ampa_max and g_gaba max.
        f_max = f_0 + f_t + 100
        I_app_max causes f_max (increases f from f_0 to f_max)
        g_gaba_max causes max IPSP
        g_ampa_max cancels out g_gaba_max IPSP

        Args:
            pop_name: str
                population name from original model

        return:
            list containing [I_max, g_ampa_max, g_gaba_max]

        Abbreviations:
            f_max: max firing rate

            f_0: firing rate without syn currents

            f_t: target firing rate
        """

        ### TODO: problem for g_gaba: what if resting potential is <=-90...
        ### find g_gaba max using max IPSP
        self.log("search g_gaba_max with y(X) = PSP(g_ampa=0, g_gaba=X)")
        g_gaba_max = self.incremental_continuous_bound_search(
            y_X=lambda X_val: self.get_ipsp(
                net=self.net_single_dict[pop_name]["net"],
                population=self.net_single_dict[pop_name]["population"],
                variable_init_sampler=self.prepare_psp_dict[pop_name][
                    "variable_init_sampler"
                ],
                monitor=self.net_single_dict[pop_name]["monitor"],
                I_app_hold=self.prepare_psp_dict[pop_name]["I_app_hold"],
                g_gaba=X_val,
            ),
            y_bound=self.max_psp_dict[pop_name],
            X_0=0,
            y_0=0,
            alpha_abs=0.005,
            X_increase=0.1,
        )

        ### for g_ampa EPSPs can lead to spiking
        ### --> find g_ampa max by "overriding" IPSP of g_gaba max
        self.log(
            f"search g_ampa_max with y(X) = PSP(g_ampa=X, g_gaba=g_gaba_max={g_gaba_max})"
        )
        g_ampa_max = self.incremental_continuous_bound_search(
            y_X=lambda X_val: self.get_ipsp(
                net=self.net_single_dict[pop_name]["net"],
                population=self.net_single_dict[pop_name]["population"],
                variable_init_sampler=self.prepare_psp_dict[pop_name][
                    "variable_init_sampler"
                ],
                monitor=self.net_single_dict[pop_name]["monitor"],
                I_app_hold=self.prepare_psp_dict[pop_name]["I_app_hold"],
                g_ampa=X_val,
                g_gaba=g_gaba_max,
            ),
            y_bound=0,
            ### starting point: the IPSP caused by g_gaba_max alone (g_ampa=0)
            X_0=0,
            y_0=self.get_ipsp(
                net=self.net_single_dict[pop_name]["net"],
                population=self.net_single_dict[pop_name]["population"],
                variable_init_sampler=self.prepare_psp_dict[pop_name][
                    "variable_init_sampler"
                ],
                monitor=self.net_single_dict[pop_name]["monitor"],
                I_app_hold=self.prepare_psp_dict[pop_name]["I_app_hold"],
                g_ampa=0,
                g_gaba=g_gaba_max,
            ),
            alpha_abs=0.005,
            X_increase=g_gaba_max / 10,
        )

        ### get f_0 and f_max
        f_0 = self.get_rate(
            net=self.net_single_dict[pop_name]["net"],
            population=self.net_single_dict[pop_name]["population"],
            variable_init_sampler=self.net_single_dict[pop_name][
                "variable_init_sampler"
            ],
            monitor=self.net_single_dict[pop_name]["monitor"],
        )[0]
        f_max = f_0 + self.target_firing_rate_dict[pop_name] + 100

        ### find I_max with f_0, and f_max using incremental_continuous_bound_search
        self.log("search I_app_max with y(X) = f(I_app=X, g_ampa=0, g_gaba=0)")
        I_max = self.incremental_continuous_bound_search(
            y_X=lambda X_val: self.get_rate(
                net=self.net_single_dict[pop_name]["net"],
                population=self.net_single_dict[pop_name]["population"],
                variable_init_sampler=self.net_single_dict[pop_name][
                    "variable_init_sampler"
                ],
                monitor=self.net_single_dict[pop_name]["monitor"],
                I_app=X_val,
            )[0],
            y_bound=f_max,
            X_0=0,
            y_0=f_0,
            alpha_abs=1,
        )

        return [I_max, g_ampa_max, g_gaba_max]

    def incremental_continuous_bound_search(
        self,
        y_X,
        y_bound,
        X_0,
        y_0,
        alpha_rel=0.01,
        alpha_abs=None,
        n_it_max=100,
        X_increase=1,
        saturation_thresh=10,
        saturation_warning=True,
        accept_non_dicontinuity=False,
        bound_type="equal",
    ):
        """
        you have system X --> y
        you want X for y=y_bound (either upper or lower bound)
        if you increase X (from starting point) y gets closer to y_bound!
- - expectes a continuous funciton without from P_0(X_0,y_0) to P_bound(X_bound, y_bound) - if it finds a saturation or non-continuous "step" on the way to P_bound it will return - the X_bound for the end of the continuous part from P_0 to P_bound --> y_bound will not - be reached - - Args: - y_X: function - returns a single number given a single number, call like y = y_X(X) - increasing X should bring y closer to y_bound - - y_bound: number - the bound for y for which an X_bound should be found - - X_0: number - start value of X, from where the search should start - - y_0: number - start value of y which results from X_0 - - alpha_rel: number, optional, default=0.001 - allowed relative tolerance for deviations of y from y_bound - if alpha_abs is given it overrides alpha_rel - - alpha_abs: number, optional, default=None - allowed absolute tolerance for deviations of y from y_bound - if alpha_abs is given it overrides alpha_rel - - n_it_max: number, optional, default=100 - maximum of iterations to find X_bound - - X_increase: number, optional, default=1 - the first increase of X (starting from X_0) to obtain the first new y_val - i.e. 
first calculation is: y_val = y_X(X_0+X_increase) - - saturation_thresh: number, optional, default=5 - if y does not change while increasing X by X_increase the search will stop - after this number of trials - - saturation_warning: bool, optional, default=True - if you want to get a warning when the saturation is reached during search - - accept_non_dicontinuity: bool, optional, default=False - if you do not want to search only in the first continuous search space - - bound_type: str, optional, default="equal" - equal, greater or less - equal: result should be near bound within tolerance - greater: result should be at least larger bound within tolerance - less: result should be smaller bound within tolerance - - return: - X_bound: - X value which causes y=y_bound - """ - ### TODO catch difference to target goes up in both directions - ### then nothing new is predicted --> fails - - self.log( - f"find X_bound for: y_0(X_0={X_0})={y_0} --> y_bound(X_bound=??)={y_bound}" - ) - - ### get tolerance - tolerance = abs(y_bound - y_0) * alpha_rel - if not isinstance(alpha_abs, type(None)): - tolerance = alpha_abs - - ### define stop condition - if bound_type == "equal": - stop_condition = ( - lambda y_val, n_it: ( - ((y_bound - tolerance) <= y_val) - and (y_val <= (y_bound + tolerance)) - ) - or n_it >= n_it_max - ) - elif bound_type == "greater": - stop_condition = ( - lambda y_val, n_it: ( - ((y_bound - 0) <= y_val) and (y_val <= (y_bound + 2 * tolerance)) - ) - or n_it >= n_it_max - ) - elif bound_type == "less": - stop_condition = ( - lambda y_val, n_it: ( - ((y_bound - 2 * tolerance) <= y_val) and (y_val <= (y_bound + 0)) - ) - or n_it >= n_it_max - ) - - ### check if y(X) is increasing - is_increasing = y_bound > y_0 - - ### search for X_val - X_list_predict = [X_0] - y_list_predict = [y_0] - X_list_all = [X_0] - y_list_all = [y_0] - n_it_first_round = 0 - X_val = X_0 + X_increase - y_val = y_0 - y_not_changed_counter = 0 - X_change_predicted = X_increase - while not 
stop_condition(y_val, n_it_first_round): - ### get y_val for X - y_val_pre = y_val - y_val = y_X(X_val) - y_change = y_val_pre - y_val - - ### store search history - X_list_all.append(X_val) - y_list_all.append(y_val) - - ### get next X_val depending on if y_val changed or not - if abs(y_change) > 0: - ### append X_val and y_val to y_list/X_list - y_list_predict.append(y_val) - X_list_predict.append(X_val) - ### predict new X_val using y_bound as predictor - X_val_pre = X_val - X_val = self.predict_1d( - X=y_list_predict, y=X_list_predict, X_pred=y_bound - )[0] - X_change_predicted = X_val - X_val_pre - ### now actually update X_val - X_val = X_val_pre + X_change_predicted * (1 + y_not_changed_counter / 2) - else: - ### just increase X_val - X_val = X_val + X_change_predicted * (1 + y_not_changed_counter / 2) - - ### check saturation of y_val - if abs(y_change) < tolerance: - ### increase saturation counter - ### saturation counter also increases updates of X_val - y_not_changed_counter += 1 - else: - ### reset saturation counter - y_not_changed_counter = 0 - - ### break if y_val saturated - if y_not_changed_counter >= saturation_thresh: - break - - ### increase iterator - n_it_first_round += 1 - - ### catch the initial point already satisified stop condition - if len(X_list_all) == 1: - warning_txt = "WARNING incremental_continuous_bound_search: search did not start because initial point already satisfied stop condition!" 
- self._p_w(warning_txt) - self.log(warning_txt) - return X_0 - - ### warning if search saturated - if (y_not_changed_counter >= saturation_thresh) and saturation_warning: - warning_txt = f"WARNING incremental_continuous_bound_search: search saturated at y={y_list_predict[-1]} while searching for X_val for y_bound={y_bound}" - self._p_w(warning_txt) - self.log(warning_txt) - self.log("initial search lists:") - self.log("all:") - self.log(np.array([X_list_all, y_list_all]).T) - self.log("predict:") - self.log(np.array([X_list_predict, y_list_predict]).T) - - ### if search saturated right at the begining --> search failed (i.e. y did not change while increasing X) - if (y_not_changed_counter >= saturation_thresh) and len(X_list_predict) == 1: - error_msg = "ERROR incremental_continuous_bound_search: search failed because changing X_val did not change y_val" - self.log(error_msg) - raise AssertionError(error_msg) - - ### get best X value for which y is closest to y_bound - idx_best = np.argmin(np.absolute(np.array(y_list_predict) - y_bound)) - X_bound = X_list_predict[idx_best] - - ### sort y_list_predict and corresponding X_list_predict - ### get value pair which is before bound and value pair which is behind bound - ### if this does not work... 
use previous X_0 and X_bound - sort_idx_arr = np.argsort(y_list_predict) - X_arr_predict_sort = np.array(X_list_predict)[sort_idx_arr] - y_arr_predict_sort = np.array(y_list_predict)[sort_idx_arr] - over_y_bound_arr = y_arr_predict_sort > y_bound - over_y_bound_changed_idx = np.where(np.diff(over_y_bound_arr))[0] - if len(over_y_bound_changed_idx) == 1: - if over_y_bound_changed_idx[0] < len(y_arr_predict_sort): - X_aside_change_list = [ - X_arr_predict_sort[over_y_bound_changed_idx[0]], - X_arr_predict_sort[over_y_bound_changed_idx[0] + 1], - ] - y_aside_change_list = [ - y_arr_predict_sort[over_y_bound_changed_idx[0]], - y_arr_predict_sort[over_y_bound_changed_idx[0] + 1], - ] - X_0 = min(X_aside_change_list) - X_bound = max(X_aside_change_list) - y_0 = min(y_aside_change_list) - self.log("predict sorted:") - self.log(np.array([X_arr_predict_sort, y_arr_predict_sort, over_y_bound_arr]).T) - self.log(over_y_bound_changed_idx) - - ### if y cannot get larger or smaller than y_bound one has to check if you not "overshoot" with X_bound - ### --> fine tune result by investigating the space between X_0 and X_bound and predict a new X_bound - self.log(f"X_0: {X_0}, X_bound:{X_bound} for final predict list") - X_space_arr = np.linspace(X_0, X_bound, 100) - y_val = y_0 - [-1, 1][int(is_increasing)] - X_list_predict = [] - y_list_predict = [] - X_list_all = [] - y_list_all = [] - did_break = False - n_it_second_round = 0 - for X_val in X_space_arr: - y_val_pre = y_val - y_val = y_X(X_val) - X_list_all.append(X_val) - y_list_all.append(y_val) - if y_val != y_val_pre: - ### if y_val changed - ### append X_val and y_val to y_list/X_list - y_list_predict.append(y_val) - X_list_predict.append(X_val) - ### if already over y_bound -> stop - if y_val > y_bound and is_increasing: - did_break = True - break - if y_val < y_bound and not is_increasing: - did_break = True - break - n_it_second_round += 1 - ### if did break early --> use again finer bounds - if did_break and 
n_it_second_round < 90: - X_space_arr = np.linspace( - X_list_predict[-2], X_list_predict[-1], 100 - n_it_second_round - ) - y_val = y_list_predict[-2] - for X_val in X_space_arr: - y_val_pre = y_val - y_val = y_X(X_val) - X_list_all.append(X_val) - y_list_all.append(y_val) - if y_val != y_val_pre: - ### if y_val changed - ### append X_val and y_val to y_list/X_list - y_list_predict.append(y_val) - X_list_predict.append(X_val) - ### if already over y_bound -> stop - if y_val > y_bound and is_increasing: - break - if y_val < y_bound and not is_increasing: - break - ### sort value lists - sort_idx_all_arr = np.argsort(X_list_all) - X_list_all = (np.array(X_list_all)[sort_idx_all_arr]).tolist() - y_list_all = (np.array(y_list_all)[sort_idx_all_arr]).tolist() - sort_idx_predict_arr = np.argsort(X_list_predict) - X_list_predict = (np.array(X_list_predict)[sort_idx_predict_arr]).tolist() - y_list_predict = (np.array(y_list_predict)[sort_idx_predict_arr]).tolist() - - ### log - self.log("final predict lists:") - self.log("all:") - self.log(np.array([X_list_all, y_list_all]).T) - self.log("predict:") - self.log(np.array([X_list_predict, y_list_predict]).T) - - ### check if there is a discontinuity in y_all, starting with the first used value in y_predict - ### update all values with first predict value - first_y_used_in_predict = y_list_predict[0] - idx_first_y_in_all = y_list_all.index(first_y_used_in_predict) - y_list_all = y_list_all[idx_first_y_in_all:] - ### get discontinuity - discontinuity_idx_list = self.get_discontinuity_idx_list(y_list_all) - self.log("discontinuity_idx_list") - self.log(f"{discontinuity_idx_list}") - if len(discontinuity_idx_list) > 0 and not accept_non_dicontinuity: - ### there is a discontinuity - discontinuity_idx = discontinuity_idx_list[0] - ### only use values until discontinuity - y_bound_new = y_list_all[discontinuity_idx] - idx_best = y_list_predict.index(y_bound_new) - X_val_best = X_list_predict[idx_best] - y_val_best = 
y_list_predict[idx_best] - ### print warning - warning_txt = f"WARNING incremental_continuous_bound_search: found discontinuity, only reached y={y_bound_new} while searching for y_bound={y_bound}" - self._p_w(warning_txt) - ### log - self.log(warning_txt) - self.log( - f"discontinuities detected --> only use last values until first discontinuity: X={X_val_best}, y={y_val_best}" - ) - else: - ### there is no discontinuity - ### there can still be duplicates in the y_list --> remove them - ### get arrays - X_arr_predict = np.array(X_list_predict) - y_arr_predict = np.array(y_list_predict) - ### get unique indices - _, unique_indices = np.unique(y_arr_predict, return_index=True) - ### get arrays without duplicates in y_list - X_arr_predict = X_arr_predict[unique_indices] - y_arr_predict = y_arr_predict[unique_indices] - - ### now predict final X_val using y_arr - X_val = self.predict_1d( - X=y_arr_predict, y=X_arr_predict, X_pred=y_bound, linear=False - )[0] - y_val = y_X(X_val) - - ### append it to lists - X_list_predict.append(X_val) - y_list_predict.append(y_val) - - ### find best - idx_best = np.argmin(np.absolute(np.array(y_list_predict) - y_bound)) - X_val_best = X_list_predict[idx_best] - y_val_best = y_list_predict[idx_best] - - ### log - self.log(f"final values: X={X_val_best}, y={y_val_best}") - - ### warning for max iteration search - if not (n_it_first_round < n_it_max): - warning_txt = f"WARNING incremental_continuous_bound_search: reached max iterations to find X_bound to get y_bound={y_bound}, found X_bound causes y={y_val_best}" - self._p_w(warning_txt) - self.log(warning_txt) - - return X_val_best - - def get_discontinuity_idx_list(self, arr): - """ - Args: - arr: array-like - array for which its checked if there are discontinuities - """ - arr = np.array(arr) - range_data = arr.max() - arr.min() - diff_arr = np.diff(arr) - diff_rel_range_arr = diff_arr / range_data - diff_rel_range_abs_arr = np.absolute(diff_rel_range_arr) - peaks = find_peaks( - 
diff_rel_range_abs_arr, prominence=10 * np.mean(diff_rel_range_abs_arr) - ) - peaks_idx_list = peaks[0] - - return peaks_idx_list - - def predict_1d(self, X, y, X_pred, linear=True): - """ - Args: - X: array-like - X values - - y: array-like - y values, same size as X_values - - X_pred: array-like or number - X value(s) for which new y value(s) are predicted - - linear: bool, optional, default=True - if interpolation is linear - - return: - Y_pred_arr: array - predicted y values for X_pred - """ - if not linear: - if len(X) >= 4: - y_X = interp1d(x=X, y=y, fill_value="extrapolate", kind="cubic") - elif len(X) >= 3: - y_X = interp1d(x=X, y=y, fill_value="extrapolate", kind="quadratic") - else: - y_X = interp1d(x=X, y=y, fill_value="extrapolate", kind="linear") - y_pred_arr = y_X(X_pred) - return y_pred_arr.reshape(1) - - def get_rate_dict( - self, - net, - population_dict, - variable_init_sampler_dict, - monitor_dict, - I_app_dict, - g_ampa_dict, - g_gaba_dict, - ): - """ - function to obtain the firing rates of the populations of - the network given with 'idx' for given I_app, g_ampa and g_gaba values - - Args: - idx: int - network index given by the parallel_run function - - net: object - network object given by the parallel_run function - - net_many_dict: dict - dictionary containing a population_dict and a monitor_dict - which contain for each population name the - - ANNarchy Population object of the magic network - - ANNarchy Monitor object of the magic network - - I_app_arr_dict: dict of arrays - dictionary containing for each population the array with input values for I_app - - g_ampa_arr_dict: dict of arrays - dictionary containing for each population the array with input values for g_ampa - - g_gaba_arr_dict: dict of arrays - dictionary containing for each population the array with input values for g_gaba - - variable_init_sampler_dict: dict - dictionary containing for each population the initial variables sampler object - with the function.sample() to get 
initial values of the neurons - - self: object - the model_configurator object - - return: - f_rec_arr_dict: dict of arrays - dictionary containing for each population the array with the firing rates for the given inputs - """ - ### reset and set init values - net.reset() - for pop_name, varaible_init_sampler in variable_init_sampler_dict.items(): - population = net.get(population_dict[pop_name]) - variable_init_arr = varaible_init_sampler.sample(len(population), seed=0) - for var_idx, var_name in enumerate(population.variables): - set_val = variable_init_arr[:, var_idx] - setattr(population, var_name, set_val) - - ### slow down conductances (i.e. make them constant) - for pop_name in population_dict.keys(): - population = net.get(population_dict[pop_name]) - population.tau_ampa = 1e20 - population.tau_gaba = 1e20 - ### apply given variables - for pop_name in population_dict.keys(): - population = net.get(population_dict[pop_name]) - population.I_app = I_app_dict[pop_name] - population.g_ampa = g_ampa_dict[pop_name] - population.g_gaba = g_gaba_dict[pop_name] - ### simulate 500 ms initial duration + X ms - net.simulate(500 + self.simulation_dur) - ### get rate for the last X ms - f_arr_dict = {} - for pop_name in population_dict.keys(): - population = net.get(population_dict[pop_name]) - monitor = net.get(monitor_dict[pop_name]) - spike_dict = monitor.get("spike") - f_arr = np.zeros(len(population)) - for idx_n, n in enumerate(spike_dict.keys()): - time_list = np.array(spike_dict[n]) - nbr_spks = np.sum((time_list > (500 / dt())).astype(int)) - rate = nbr_spks / (self.simulation_dur / 1000) - f_arr[idx_n] = rate - f_arr_dict[pop_name] = f_arr - - return f_arr_dict - - def get_rate( - self, - net, - population, - variable_init_sampler, - monitor, - I_app=0, - g_ampa=0, - g_gaba=0, - ): - """ - simulates a population for X+500 ms and returns the firing rate of each neuron for the last X ms - X is defined with self.simulation_dur - - Args: - net: ANNarchy network - 
network which contains the population and monitor - - population: ANNarchy population - population which is recorded and stimulated - - variable_init_sampler: object - containing the initial values of the population neuron, use .sample() to get values - - monitor: ANNarchy monitor - to record spikes from population - - I_app: number or arr, optional, default = 0 - applied current to the population neurons, has to have the same size as the population - - g_ampa: number or arr, optional, default = 0 - applied ampa conductance to the population neurons, has to have the same size as the population - - g_gaba: number or arr, optional, default = 0 - applied gaba conductance to the population neurons, has to have the same size as the population - """ - ### reset and set init values - net.reset() - self.set_init_variables(population, variable_init_sampler) - ### slow down conductances (i.e. make them constant) - population.tau_ampa = 1e20 - population.tau_gaba = 1e20 - ### apply given variables - population.I_app = I_app - population.g_ampa = g_ampa - population.g_gaba = g_gaba - ### simulate 500 ms initial duration + X ms - net.simulate(500 + self.simulation_dur) - ### get rate for the last X ms - spike_dict = monitor.get("spike") - f_arr = np.zeros(len(population)) - for idx_n, n in enumerate(spike_dict.keys()): - time_list = np.array(spike_dict[n]) - nbr_spks = np.sum((time_list > (500 / dt())).astype(int)) - rate = nbr_spks / (self.simulation_dur / 1000) - f_arr[idx_n] = rate - - return f_arr - - def get_ipsp( - self, - net: Network, - population: Population, - variable_init_sampler, - monitor, - I_app_hold, - g_ampa=0, - g_gaba=0, - do_plot=False, - ): - """ - simulates a single spike at t=50 ms and records the change of v within a voltage_clamp neuron - - Args: - net: ANNarchy network - network which contains the population and monitor - - population: ANNarchy population - population which is recorded and stimulated - - variable_init_sampler: object - containing the 
initial values of the population neuron, use .sample() to get values - - monitor: ANNarchy monitor - to record v_clamp_rec from population - - g_ampa: number, optional, default = 0 - applied ampa conductance to the population neuron at t=50 ms - - g_gaba: number, optional, default = 0 - applied gaba conductance to the population neurons at t=50 ms - """ - ### reset network and set initial values - net.reset() - self.set_init_variables(population, variable_init_sampler) - ### apply input - population.I_app = I_app_hold - ### simulate 50 ms initial duration - net.simulate(50) - ### apply given conductances --> changes v - v_rec_rest = population.v[0] - population.v_psp_thresh = v_rec_rest - population.g_ampa = g_ampa - population.g_gaba = g_gaba - ### simulate until v is near v_rec_rest again - net.simulate_until(max_duration=self.simulation_dur, population=population) - ### get the psp = maximum of difference of v_rec and v_rec_rest - v_rec = monitor.get("v")[:, 0] - spike_dict = monitor.get("spike") - spike_timestep_list = spike_dict[0] + [net.get_current_step()] - end_timestep = int(round(min(spike_timestep_list), 0)) - psp = float( - np.absolute(np.clip(v_rec[:end_timestep] - v_rec_rest, None, 0)).max() - ) - - if do_plot: - plt.figure() - plt.title( - f"g_ampa={g_ampa}\ng_gaba={g_gaba}\nv_rec_rest={v_rec_rest}\npsp={psp}" - ) - plt.plot(v_rec) - plt.savefig( - f"tmp_psp_{population.name}_{int(g_ampa*1000)}_{int(g_gaba*1000)}.png" - ) - plt.close("all") - - return psp - - def compile_net_many(self, net): - compile_in_folder( - folder_name=f"many_net_{net.id}", net=net, clean=True, silent=True - ) - - def create_many_neuron_network(self): - """ - creates a ANNarchy magic network with all popualtions which should be configured the size - of the populations is equal and is obtianed by dividing the number of the - interpolation values by the number of networks which will be used during run_parallel - - return: - net_many_dict: dict - contains - - population_dict: for 
all population names the created population in the magic network - - monitor_dict: for all population names the created monitors in the magic network - """ - self.log("create many neurons network") - - ### for each population of the given model which should be configured - ### create a population with a given size - ### create a monitor recording spikes - ### create a network containing the population and the monitor - many_neuron_population_list = [] - many_neuron_monitor_list = [] - many_neuron_network_list = [] - for pop_name in self.pop_name_list: - ### create the neuron model with poisson spike trains - ### get the initial arguments of the neuron - neuron_model = self.neuron_model_dict[pop_name] - ### names of arguments - init_arguments_name_list = list(Neuron.__init__.__code__.co_varnames) - init_arguments_name_list.remove("self") - init_arguments_name_list.remove("name") - init_arguments_name_list.remove("description") - ### arguments dict - init_arguments_dict = { - init_arguments_name: getattr(neuron_model, init_arguments_name) - for init_arguments_name in init_arguments_name_list - } - ### get the afferent populations - afferent_population_list = [] - proj_target_type_list = [] - for proj_name in self.afferent_projection_dict[pop_name][ - "projection_names" - ]: - proj_dict = self.get_proj_dict(proj_name) - pre_pop_name = proj_dict["pre_pop_name"] - afferent_population_list.append(pre_pop_name) - proj_target_type_list.append(proj_dict["proj_target_type"]) - - ### for each afferent population create a binomial spike train equation string - ### add it to the equations - ### and add the related parameters to the parameters - - ### split the equations and parameters string - equations_line_split_list = str( - init_arguments_dict["equations"] - ).splitlines() - - parameters_line_split_list = str( - init_arguments_dict["parameters"] - ).splitlines() - - ### add the binomial spike train equations and parameters - ( - equations_line_split_list, - 
parameters_line_split_list, - ) = self.add_binomial_input( - equations_line_split_list, - parameters_line_split_list, - afferent_population_list, - proj_target_type_list, - ) - - ### combine string lines to multiline strings again - init_arguments_dict["parameters"] = "\n".join(parameters_line_split_list) - init_arguments_dict["equations"] = "\n".join(equations_line_split_list) - - ### create neuron model with new equations - neuron_model_new = Neuron(**init_arguments_dict) - - # print("new neuron model:") - # print(neuron_model_new) - - ### create the many neuron population - my_pop = Population( - geometry=self.nr_neurons_per_net, - neuron=neuron_model_new, - name=f"many_neuron_{pop_name}", - ) - - ### set the attributes of the neurons - for attr_name, attr_val in self.neuron_model_parameters_dict[pop_name]: - setattr(my_pop, attr_name, attr_val) - - ### create Monitor for many neuron - my_mon = Monitor(my_pop, ["spike"]) - - ### create the network with population and monitor - my_net = Network() - my_net.add(my_pop) - my_net.add(my_mon) - - ### compile network - compile_in_folder(folder_name=f"many_neuron_{pop_name}", net=my_net) - - ### append the lists - many_neuron_network_list.append(my_net) - many_neuron_population_list.append(my_net.get(my_pop)) - many_neuron_monitor_list.append(my_net.get(my_mon)) - - net_many_dict = { - "network_list": many_neuron_network_list, - "population_list": many_neuron_population_list, - "monitor_list": many_neuron_monitor_list, - } - return net_many_dict - - def add_binomial_input( - self, - equations_line_split_list, - parameters_line_split_list, - afferent_population_list, - proj_target_type_list, - ): - ### loop over afferent populations to add the new equation lines and parameters - for pre_pop_name in afferent_population_list: - ### define the spike train of a pre population as a binomial process with number of trials = number of pre neurons and success probability = spike probability (taken from Poisson neurons) - ### the 
obtained value is the number of spikes at a time step times the weight - poisson_equation_str = f"{pre_pop_name}_spike_train = Binomial({pre_pop_name}_size, {pre_pop_name}_spike_prob)" - ### add the equation line - equations_line_split_list.insert(1, poisson_equation_str) - ### add the parameters - parameters_line_split_list.append(f"{pre_pop_name}_size = 0 : population") - parameters_line_split_list.append( - f"{pre_pop_name}_spike_prob = 0 : population" - ) - parameters_line_split_list.append(f"{pre_pop_name}_weight = 0 : population") - - ### change the g_ampa and g_gaba line, they additionally are the sum of the spike trains - for equation_line_idx, equation_line in enumerate(equations_line_split_list): - ### remove whitespaces - line = equation_line.replace(" ", "") - ### check if line contains g_ampa - if "dg_ampa/dt" in line: - ### get the right side of the equation - line_right = line.split("=")[1] - line_left = line.split("=")[0] - ### remove and store tags_str - tags_str = "" - if len(line_right.split(":")) > 1: - line_right, tags_str = line_right.split(":") - ### get the populations whose spike train should be appended in g_ampa - afferent_population_to_append_list = [] - for pre_pop_idx, pre_pop_name in enumerate(afferent_population_list): - if proj_target_type_list[pre_pop_idx] == "ampa": - afferent_population_to_append_list.append(pre_pop_name) - if len(afferent_population_to_append_list) > 0: - ### change right side, add the sum of the spike trains - line_right = f"{line_right} + {'+'.join([f'({pre_pop_name}_spike_train*{pre_pop_name}_weight)/dt' for pre_pop_name in afferent_population_to_append_list])}" - ### add tags_str again - if tags_str != "": - line_right = f"{line_right}:{tags_str}" - ### combine line again and replace the list entry in equations_line_split_list - line = f"{line_left}={line_right}" - equations_line_split_list[equation_line_idx] = line - - ### check if line contains g_gaba - if "dg_gaba/dt" in line: - ### get the right side of 
the equation - line_right = line.split("=")[1] - line_left = line.split("=")[0] - ### remove and store tags_str - tags_str = "" - if len(line_right.split(":")) > 1: - line_right, tags_str = line_right.split(":") - ### get the populations whose spike train should be appended in g_ampa - afferent_population_to_append_list = [] - for pre_pop_idx, pre_pop_name in enumerate(afferent_population_list): - if proj_target_type_list[pre_pop_idx] == "gaba": - afferent_population_to_append_list.append(pre_pop_name) - if len(afferent_population_to_append_list) > 0: - ### change right side, add the sum of the spike trains - line_right = f"{line_right} + {'+'.join([f'({pre_pop_name}_spike_train*{pre_pop_name}_weight)/dt' for pre_pop_name in afferent_population_to_append_list])}" - ### add tags_str again - if tags_str != "": - line_right = f"{line_right}:{tags_str}" - ### combine line again and replace the list entry in equations_line_split_list - line = f"{line_left}={line_right}" - equations_line_split_list[equation_line_idx] = line - - return (equations_line_split_list, parameters_line_split_list) - - def get_v_clamp_2000( - self, - net: Network, - population, - monitor=None, - v=None, - I_app=None, - variable_init_sampler=None, - pre_pop_name_list=[], - eff_size_list=[], - rate_list=[], - weight_list=[], - return_1000=False, - ): - """ - the returned values is dv/dt - --> to get the hypothetical change of v for a single time step multiply with dt! 
- """ - ### reset network and set initial values - net.reset() - net.set_seed(0) - if not isinstance(variable_init_sampler, type(None)): - self.set_init_variables(population, variable_init_sampler) - ### set v and I_app - if not isinstance(v, type(None)): - population.v = v - if not isinstance(I_app, type(None)): - population.I_app = I_app - ### set the weights and rates of the binomial spike trains of the afferent populations - for pre_pop_idx, pre_pop_name in enumerate(pre_pop_name_list): - setattr(population, f"{pre_pop_name}_size", eff_size_list[pre_pop_idx]) - setattr( - population, - f"{pre_pop_name}_spike_prob", - (rate_list[pre_pop_idx] / 1000) * dt(), - ) - setattr(population, f"{pre_pop_name}_weight", weight_list[pre_pop_idx]) - ### simulate 2000 ms - net.simulate(2000) - - if return_1000: - v_clamp_rec_arr = monitor.get("v_clamp_rec_sign")[:, 0] - return np.mean(v_clamp_rec_arr[-int(round(1000 / dt(), 0)) :]) - return population.v_clamp_rec[0] - - def get_voltage_clamp_equations(self, init_arguments_dict, pop_name): - """ - works with - dv/dt = ... - v += ... - """ - ### get the dv/dt equation from equations - ### find the line with dv/dt= or v+= or v= - eq = str(init_arguments_dict["equations"]) - eq = eq.splitlines() - line_is_v_list = [False] * len(eq) - ### check in which lines v is defined - for line_idx, line in enumerate(eq): - line_is_v_list[line_idx] = self.get_line_is_v(line) - ### raise error if no v or multiple times v - if True not in line_is_v_list or sum(line_is_v_list) > 1: - raise ValueError( - f"ERROR model_configurator create_net_single_voltage_clamp: In the equations of the neurons has to be exactly a single line which defines dv/dt or v, not given for population {pop_name}" - ) - ### set the v equation - eq_v = eq[line_is_v_list.index(True)] - - ### if equation type is v += ... 
--> just take right side - if "+=" in eq_v: - ### create the new equations for the ANNarchy neuron - ### create two lines, the voltage clamp line v+=0 and the - ### right sight of v+=... separately - eq_new_0 = f"v_clamp_rec_sign = {eq_v.split('+=')[1]}" - eq_new_1 = f"v_clamp_rec = fabs({eq_v.split('+=')[1]})" - eq_new_2 = "v_clamp_rec_pre = v_clamp_rec" - eq_new_3 = "v+=0" - ### remove old v line and insert new lines - del eq[line_is_v_list.index(True)] - eq.insert(line_is_v_list.index(True), eq_new_0) - eq.insert(line_is_v_list.index(True), eq_new_1) - eq.insert(line_is_v_list.index(True), eq_new_2) - eq.insert(line_is_v_list.index(True), eq_new_3) - eq = "\n".join(eq) - ### return new neuron equations - return eq - - ### if equation type is dv/dt = ... --> get the right side of dv/dt=... - ### transform eq_v - ### remove whitespaces - ### remove tags and store them for later - ### TODO replace random distributions and mathematical expressions which may be on the left side - eq_v = eq_v.replace(" ", "") - eq_v = eq_v.replace("dv/dt", "delta_v") - eq_tags_list = eq_v.split(":") - eq_v = eq_tags_list[0] - if len(eq_tags_list) > 1: - tags = eq_tags_list[1] - else: - tags = None - - ### split the equation at "=" and move everything on one side (other side = 0) - eq_v_splitted = eq_v.split("=") - left_side = eq_v_splitted[0] - right_side = "right_side" - eq_v_one_side = f"{right_side}-({left_side})" - - ### prepare the sympy equation generation - attributes_name_list = self.neuron_model_attributes_dict[pop_name] - attributes_tuple = symbols(",".join(attributes_name_list)) - ### for each attribute of the neuron a sympy symbol - attributes_sympy_dict = { - key: attributes_tuple[attributes_name_list.index(key)] - for key in attributes_name_list - } - ### furhter create symbols for delta_v and right_side - attributes_sympy_dict["delta_v"] = Symbol("delta_v") - attributes_sympy_dict["right_side"] = Symbol("right_side") - - ### get the sympy equation expression by 
evaluating the string - eq_sympy = evaluate_expression_with_dict( - expression=eq_v_one_side, value_dict=attributes_sympy_dict - ) - - ### solve the equation to delta_v - result = solve(eq_sympy, attributes_sympy_dict["delta_v"], dict=True) - if len(result) != 1: - raise ValueError( - f"ERROR model_configurator create_net_single_voltage_clamp: Could not find solution for dv/dt for neuronmodel of population {pop_name}!" - ) - result = str(result[0][attributes_sympy_dict["delta_v"]]) - - ### replace right_side by the actual right side - result = result.replace("right_side", f"({eq_v_splitted[1]})") - - ### TODO replace mathematical expressions and random distributions back to previous - - ### now create the new equations for the ANNarchy neuron - ### create three lines, the voltage clamp line "dv/dt=0", - ### the obtained line which would be the right side of dv/dt, - ### and this right side sotred from the previous time step - ### v_clamp_rec should be an absolute value - eq_new_0 = f"v_clamp_rec_sign = {result}" - eq_new_1 = f"v_clamp_rec = fabs({result})" - eq_new_2 = "v_clamp_rec_pre = v_clamp_rec" - ### add stored tags to new dv/dt equation - if not isinstance(tags, type(None)): - eq_new_3 = f"dv/dt=0 : {tags}" - else: - eq_new_3 = "dv/dt=0" - ### remove old v line and insert new three lines - del eq[line_is_v_list.index(True)] - eq.insert(line_is_v_list.index(True), eq_new_0) - eq.insert(line_is_v_list.index(True), eq_new_1) - eq.insert(line_is_v_list.index(True), eq_new_2) - eq.insert(line_is_v_list.index(True), eq_new_3) - eq = "\n".join(eq) - ### return new neuron equations - return eq - - def get_line_is_v(self, line: str): - """ - check if a equation string contains dv/dt or v= or v+= - """ - if "v" not in line: - return False - - ### remove whitespaces - line = line.replace(" ", "") - - ### check for dv/dt - if "dv/dt" in line: - return True - - ### check for v update - if ("v=" in line or "v+=" in line) and line.startswith("v"): - return True - - return 
False - - def get_line_is_g_ampa(self, line: str): - """ - check if a equation string contains dg_ampa/dt - """ - - ### remove whitespaces - line = line.replace(" ", "") - - ### check for dv/dt - if "dv/dt" in line: - return True - - ### check for v update - if ("v=" in line or "v+=" in line) and line.startswith("v"): - return True - - return False - - def get_init_neuron_variables_for_psp(self, net, pop, v_rest, I_app_hold): - """ - get the variables of the given population after simulating 2000 ms - - Args: - net: ANNarchy network - the network which contains the pop - - pop: ANNarchy population - the population whose variables are obtained - - """ - ### reset neuron and deactivate input and set v_rest - net.reset() - pop.v = v_rest - pop.I_app = I_app_hold - - ### get the variables of the neuron after 5000 ms - net.simulate(5000) - var_name_list = list(pop.variables) - var_arr = np.zeros((1, len(var_name_list))) - get_arr = np.array([getattr(pop, var_name) for var_name in pop.variables]) - var_arr[0, :] = get_arr[:, 0] - - ### create a sampler with the one data sample - sampler = self.var_arr_sampler(var_arr, var_name_list) - return sampler - - class var_arr_sampler: - def __init__(self, var_arr, var_name_list) -> None: - self.var_arr_shape = var_arr.shape - self.is_const = ( - np.std(var_arr, axis=0) <= np.mean(np.absolute(var_arr), axis=0) / 1000 - ) - self.constant_arr = var_arr[0, self.is_const] - self.not_constant_val_arr = var_arr[:, np.logical_not(self.is_const)] - self.var_name_list = var_name_list - - def sample(self, n=1, seed=0): - """ - Args: - n: int, optional, default=1 - number of samples - - seed: int, optional, default=0 - seed for rng - """ - ### get random idx - rng = np.random.default_rng(seed=seed) - random_idx_arr = rng.integers(low=0, high=self.var_arr_shape[0], size=n) - ### sample with random idx - sample_arr = self.not_constant_val_arr[random_idx_arr] - ### create return array - ret_arr = np.zeros((n,) + self.var_arr_shape[1:]) - ### 
add samples to return array - ret_arr[:, np.logical_not(self.is_const)] = sample_arr - ### add constant values to return array - ret_arr[:, self.is_const] = self.constant_arr - - return ret_arr - - def get_nr_many_neurons(self, nr_neurons, nr_networks): - """ - Splits the number of neurons in almost equally sized parts. - - Args: - nr_neurons: int - number of neurons which should be splitted - - nr_networks: int - number of networks over which the neurons should be equally distributed - """ - return self.divide_almost_equal(number=nr_neurons, num_parts=nr_networks) - - def get_max_weight_dict_for_pop(self, pop_name): - """ - get the weight dict for a single population - - Args: - pop_name: str - population name - - return: dict - keys = afferent projection names, values = max weights - """ - - ### loop over afferent projections - max_w_list = [] - for proj_name in self.afferent_projection_dict[pop_name]["projection_names"]: - ### find max weight for projection - max_weight_of_proj = self.get_max_weight_of_proj(proj_name=proj_name) - max_w_list.append(max_weight_of_proj) - self.afferent_projection_dict[pop_name]["max_weight"] = max_w_list - - ### remove weight key from self.afferent_projection_dict[pop_name] which was added during the process - self.afferent_projection_dict[pop_name].pop("weights") - - ### now create the dictionary structure for return - # { - # "ampa": {"projection_name": "max_weight value"...}, - # "gaba": {"projection_name": "max_weight value"...}, - # } - max_weight_dict_for_pop = {"ampa": {}, "gaba": {}} - ### loop over all afferent projections - for proj_name in self.afferent_projection_dict[pop_name]["projection_names"]: - proj_dict = self.get_proj_dict(proj_name) - proj_target_type = proj_dict["proj_target_type"] - proj_max_weight = proj_dict["proj_max_weight"] - ### add max weight of projection to the corresponding target type in the return dict - max_weight_dict_for_pop[proj_target_type][proj_name] = proj_max_weight - - return 
max_weight_dict_for_pop - - def get_proj_dict(self, proj_name): - """ - get a dictionary for a specified projection which contains following information: - post_pop_name - proj_target_type - idx_proj - spike_frequency - proj_weight - g_max - - Args: - proj_name: str - projection name - - return: - proj_dict: dict - keys see above - """ - ### get pre_pop_name - pre_pop_name = self.pre_pop_name_dict[proj_name] - ### get pre_pop_name - pre_pop_size = self.pre_pop_size_dict[proj_name] - ### get post_pop_name - post_pop_name = self.post_pop_name_dict[proj_name] - ### get idx_proj and proj_target_type - idx_proj = self.afferent_projection_dict[post_pop_name][ - "projection_names" - ].index(proj_name) - proj_target_type = self.afferent_projection_dict[post_pop_name]["target"][ - idx_proj - ] - ### get spike frequency - f_t = self.afferent_projection_dict[post_pop_name]["target firing rate"][ - idx_proj - ] - p = self.afferent_projection_dict[post_pop_name]["probability"][idx_proj] - s = self.afferent_projection_dict[post_pop_name]["size"][idx_proj] - spike_frequency = f_t * p * s - ### get weight - try: - proj_weight = self.afferent_projection_dict[post_pop_name]["weights"][ - idx_proj - ] - except: - proj_weight = None - ### g_max - try: - g_max = self.g_max_dict[post_pop_name][proj_target_type] - except: - g_max = None - ### get max weight - try: - proj_max_weight = self.afferent_projection_dict[post_pop_name][ - "max_weight" - ][idx_proj] - except: - proj_max_weight = None - - return { - "pre_pop_name": pre_pop_name, - "pre_pop_size": pre_pop_size, - "post_pop_name": post_pop_name, - "proj_target_type": proj_target_type, - "idx_proj": idx_proj, - "spike_frequency": spike_frequency, - "proj_weight": proj_weight, - "g_max": g_max, - "proj_max_weight": proj_max_weight, - "proj_prob": p, - } - - def get_max_weight_of_proj(self, proj_name): - """ - find the max weight of a specified projection using incremental_continuous_bound_search - increasing weights of projection 
increases conductance g of projection --> increase - until g_max is found - - Args: - proj_name: str - projection name - - return: - w_max: number - """ - ### log task - self.log(f"get w_max for {proj_name}") - - ### g_max for projection - proj_dict = self.get_proj_dict(proj_name) - g_max = proj_dict["g_max"] - - ### find max weight with incremental_continuous_bound_search - ### increase weights until g_max is reached - self.log("search w_max with y(X) = g(w=X)") - w_max = self.incremental_continuous_bound_search( - y_X=lambda X_val: self.get_g_of_single_proj( - weight=X_val, - proj_name=proj_name, - ), - y_bound=g_max, - X_0=0, - y_0=0, - ) - - return w_max - - def get_g_of_single_proj(self, weight, proj_name): - """ - given a weight for a specified projection get the resulting conductance value g - in the target population - - Args: - weight: number - the weight of the projection - - proj_name: str - projection name - - return: - g_val: number - """ - ### get some projection infos - proj_dict = self.get_proj_dict(proj_name) - pop_name = proj_dict["post_pop_name"] - idx_proj = proj_dict["idx_proj"] - proj_target_type = proj_dict["proj_target_type"] - - ### set weights in the afferent_projection_dict - ### set all weights to zero except the weight of the current proj which is set to the given weight - weight_list = [0] * self.nr_afferent_proj_dict[pop_name] - weight_list[idx_proj] = weight - self.afferent_projection_dict[pop_name]["weights"] = weight_list - - ### get the g_ampa and g_gaba values based on the current afferent_projection_dict weights - mean_g = self.get_g_values_of_pop(pop_name) - - ### then return the conductance related to the specified projection - return mean_g[proj_target_type] - - def get_g_values_of_pop(self, pop_name): - """ - calculate the average g_ampa and g_gaba values of the specified population based on the weights - defined in the afferent_projection_dict - - Args: - pop_name: str - population name - """ - spike_times_dict = {"ampa": 
[np.array([])], "gaba": [np.array([])]} - spike_weights_dict = {"ampa": [np.array([])], "gaba": [np.array([])]} - ### loop over afferent projections - for proj_name in self.afferent_projection_dict[pop_name]["projection_names"]: - ### get projection infos - proj_dict = self.get_proj_dict(proj_name) - proj_weight = proj_dict["proj_weight"] - proj_target_type = proj_dict["proj_target_type"] - spike_frequency = proj_dict["spike_frequency"] - ### get spike times over the simulation duration for the spike frequency - if spike_frequency > 0: - spike_times_arr = self.get_spike_times_arr( - spike_frequency=spike_frequency - ) - else: - spike_times_arr = np.array([]) - ### get weights array - spike_weights_arr = np.ones(len(spike_times_arr)) * proj_weight - ### store spike times and weights for the target type of the projection - spike_times_dict[proj_target_type].append(spike_times_arr) - spike_weights_dict[proj_target_type].append(spike_weights_arr) - - mean_g = {} - for target_type in ["ampa", "gaba"]: - ### concatenate spike times and corresponding weights of different afferent projections - spike_times_arr = np.concatenate(spike_times_dict[target_type]) - spike_weights_arr = np.concatenate(spike_weights_dict[target_type]) - - ### sort the spike times and corresponding weights - sort_idx = np.argsort(spike_times_arr) - spike_times_arr = spike_times_arr[sort_idx] - spike_weights_arr = spike_weights_arr[sort_idx] - - ### calculate mean g values from the spike times and corresponding weights - mean_g[target_type] = self.get_mean_g( - spike_times_arr=spike_times_arr, - spike_weights_arr=spike_weights_arr, - tau=self.tau_dict[pop_name][target_type], - ) - - return mean_g - - def get_spike_times_arr(self, spike_frequency): - """ - get spike times for a given spike frequency - - Args: - spike_frequency: number - spike frequency in Hz - """ - expected_nr_spikes = int( - round((500 + self.simulation_dur) * (spike_frequency / 1000), 0) - ) - ### isi_arr in timesteps - isi_arr = 
poisson.rvs( - (1 / (spike_frequency * (dt() / 1000))), size=expected_nr_spikes - ) - ### convert to ms - isi_arr = isi_arr * dt() - - ### get spike times from isi_arr - spike_times_arr = np.cumsum(isi_arr) - - ### only use spikes which are in the simulation time - spike_times_arr = spike_times_arr[spike_times_arr < (self.simulation_dur + 500)] - - return spike_times_arr - - def get_mean_g(self, spike_times_arr, spike_weights_arr, tau): - """ - calculates the mean conductance g for given spike times, corresponding weights (increases of g) and time constant - - Args: - spike_times_arr: arr - 1d array containing spike times in ms - - spike_weights_arr: arr - 1d array containing the weights corresponding to the spike times - - tau: number - time constant of the exponential decay of the conductance g in ms - """ - ### TODO instead of calculating the mean, create a conductance trace for the simulation time - if np.sum(spike_weights_arr) > 0: - ### get inter spike interval array - isis_g_arr = np.diff(spike_times_arr) - ### calc mean g - mean_w = np.mean(spike_weights_arr) - mean_isi = np.mean(isis_g_arr) - mean_g = mean_w / ((1 / np.exp(-mean_isi / tau)) - 1) - else: - mean_g = 0 - - return mean_g - - -def get_rate_parallel( - idx, - net, - population: Population, - variable_init_sampler, - monitor: Monitor, - I_app_arr, - weight_list: list, - pre_pop_name_list: list, - rate_list: list, - eff_size_list: list, - simulation_dur: int, -): - """ - function to obtain the firing rates of the populations of - the network given with 'idx' for given I_app, g_ampa and g_gaba values - - Args: - idx: int - network index given by the parallel_run function - - net: object - network object given by the parallel_run function - - pop_name_list: list of str - list with population names of network - - population_list: list of ANNarchy Population object - list of population objets of magic network - - variable_init_sampler_list: list of sampler objects - for each population a sampler 
object with function .sample to get initial variable values - - monitor_list: list of ANNarchy Monitor objects - list of monitor objets of magic network recording spikes from the populations - - I_app_list: list of arrays - list containing for each population the array with input values for I_app - - g_ampa_list: list of arrays - list containing for each population the array with input values for g_ampa - - g_gaba_list: list of arrays - list containing for each population the array with input values for g_gaba - - simulation_dur: int - simulation duration - - return: - f_rec_arr_list: list of arrays - list containing for each population the array with the firing rates for the given inputs - """ - ### reset and set init values - net.reset() - ### sample init values, one could sample different values for multiple neurons - ### but here we sample a single sample and use it for all neurons - variable_init_arr = variable_init_sampler.sample(1, seed=0) - var_name_list = variable_init_sampler.var_name_list - variable_init_arr = np.array([variable_init_arr[0]] * len(population)) - for var_name in enumerate(population.variables): - if var_name in var_name_list: - set_val = variable_init_arr[:, var_name_list.index(var_name)] - setattr(population, var_name, set_val) - - ### set the weights and rates of the poisson spike traces of the afferent populations - for pre_pop_idx, pre_pop_name in enumerate(pre_pop_name_list): - setattr(population, f"{pre_pop_name}_size", eff_size_list[pre_pop_idx]) - setattr( - population, - f"{pre_pop_name}_spike_prob", - (rate_list[pre_pop_idx] / 1000) * dt(), - ) - setattr(population, f"{pre_pop_name}_weight", weight_list[pre_pop_idx]) - - ### set the I_app - population.I_app = I_app_arr - - ### simulate 500 ms initial duration + X ms - if "stn" in population.name and False: - net.simulate(500) - time_arr = np.arange(500, 500 + simulation_dur, dt()) - cor_spike_train_list = [] - gpe_spike_train_list = [] - g_ampa_list = [] - g_gaba_list = [] - 
I_app_list = [] - for time_ms in time_arr: - net.simulate(dt()) - if "cor_spike_train" in population.attributes: - cor_spike_train_list.append(population.cor_spike_train[0]) - else: - cor_spike_train_list.append(0) - if "gpe_spike_train" in population.attributes: - gpe_spike_train_list.append(population.gpe_spike_train[0]) - else: - gpe_spike_train_list.append(0) - g_ampa_list.append(population.g_ampa[0]) - g_gaba_list.append(population.g_gaba[0]) - I_app_list.append(population.I_app[0]) - plt.figure(figsize=(6.4, 4.8 * 2)) - plt.subplot(211) - plt.ylabel("g_ampa") - plt.plot(time_arr, g_ampa_list, "k.") - plt.subplot(212) - plt.ylabel("g_gaba") - plt.plot(time_arr, g_gaba_list, "k.") - plt.tight_layout() - plt.savefig("stn_input_configurator.png", dpi=300) - plt.close("all") - else: - net.simulate(500 + simulation_dur) - - ### get rate for the last X ms - spike_dict = monitor.get("spike") - f_arr = np.zeros(len(population)) - for idx_n, n in enumerate(spike_dict.keys()): - time_list = np.array(spike_dict[n]) - nbr_spks = np.sum((time_list > (500 / dt())).astype(int)) - rate = nbr_spks / (simulation_dur / 1000) - f_arr[idx_n] = rate - return f_arr - - -_p_g_1 = """First call get_max_syn. -This determines max synaptic conductances and weights of all afferent projections of the model populations and returns a dictionary with max weights.""" - -_p_g_after_get_weights = ( - lambda template_weight_dict, template_synaptic_load_dict, template_synaptic_contribution_dict: f"""Now either set the weights of all projections directly or first set the synaptic load of the populations and the synaptic contributions of the afferent projections. -You can set the weights using the function .set_weights() which requires a weight_dict as argument. -Use this template for the weight_dict: - -{template_weight_dict} - -The values within the template are the maximum weight values. 
- - -You can set the synaptic load and contribution using the function .set_syn_load() which requires a synaptic_load_dict or a single number between 0 and 1 for the synaptic load of the populations and a synaptic_contribution_dict for the synaptic contributions to the synaptic load of the afferent projections. -Use this template for the synaptic_load_dict: - -{template_synaptic_load_dict} - -'ampa_load' and 'gaba_load' are placeholders, replace them with values between 0 and 1. - -Use this template for the synaptic_contribution_dict: - -{template_synaptic_contribution_dict} - -The shown contributions of the afferent projections are based on the assumption that the maximum weights are used. The contributions of all afferent projections of a single population have to sum up to 1! -""" -) -_p_g_after_set_syn_load = """Synaptic loads and contributions, i.e. weights set. Now call .get_base to obtain the baseline currents for the model populations. With .set_base you can directly set these baselines and the current weights in the model and compile the model. 
-""" +class GetWeightTemplates: + def __init__(self): + pass diff --git a/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp_old.py b/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp_old.py new file mode 100644 index 0000000..4d472a1 --- /dev/null +++ b/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp_old.py @@ -0,0 +1,4020 @@ +from CompNeuroPy import ( + cnp_clear, + compile_in_folder, + data_obj, + evaluate_expression_with_dict, + timing_decorator, + print_df, + save_variables, + load_variables, + clear_dir, + CompNeuroModel, + CompNeuroMonitors, + PlotRecordings, +) +from CompNeuroPy.system_functions import _find_folder_with_prefix +from CompNeuroPy.neuron_models import poisson_neuron +from ANNarchy import ( + Population, + get_population, + Monitor, + Network, + get_projection, + dt, + parallel_run, + simulate, + reset, + Neuron, + simulate_until, + Uniform, + get_current_step, + projections, + populations, +) + +# from ANNarchy.core.Global import _network +import numpy as np +from scipy.interpolate import interp1d, interpn +from scipy.signal import find_peaks, argrelmin +import matplotlib.pyplot as plt +import inspect +import textwrap +import os +import itertools +from tqdm import tqdm +import multiprocessing +import importlib.util +from time import time, strftime +import datetime +from sympy import symbols, Symbol, sympify, solve +from hyperopt import fmin, tpe, hp +import pandas as pd +from scipy.stats import poisson +from ANNarchy.extensions.bold import BoldMonitor +from sklearn.linear_model import LinearRegression +from CompNeuroPy.examples.model_configurator.reduce_model import _CreateReducedModel + + +class model_configurator: + def __init__( + self, + model: CompNeuroModel, + target_firing_rate_dict, + interpolation_grid_points=10, + max_psp=10, + do_not_config_list=[], + print_guide=False, + I_app_variable="I_app", + ) -> None: + """ + Args: + model: CompNeuroPy generate_model object + it's not 
important if the model is created or compiled but after running + the model_configurator only the given model will exist so do not create + something else in ANNarchy! + + target_firing_rate_dict: dict + keys = population names of model which should be configured, values = target firing rates in Hz + + interpolation_grid_points: int, optional, default=10 + how many points should be used for the interpolation of the f-I-g curve on a single axis + + max_psp: int, optional, default=10 + maximum post synaptic potential in mV + + do_not_config_list: list, optional, default=[] + list with strings containing population names of populations which should not be configured + + print_guide: bool, optional, default=False + if you want to get information about what you could do with model_configurator + + I_app_variable: str, optional, default="I_app" + the name of the varaible in the populations which represents the applied current + TODO: not implemented yet, default value is always used + + Functions: + get_max_syn: + returns a dictionary with weight ranges for all afferent projections of the configured populations + """ + self.model = model + self.target_firing_rate_dict = target_firing_rate_dict + self.pop_name_list = list(target_firing_rate_dict.keys()) + for do_not_pop_name in do_not_config_list: + self.pop_name_list.remove(do_not_pop_name) + self.I_app_max_dict = {pop_name: None for pop_name in self.pop_name_list} + self.g_max_dict = {pop_name: None for pop_name in self.pop_name_list} + self.tau_dict = {pop_name: None for pop_name in self.pop_name_list} + self.nr_afferent_proj_dict = {pop_name: None for pop_name in self.pop_name_list} + self.net_many_dict = {pop_name: None for pop_name in self.pop_name_list} + self.net_single_dict = {pop_name: None for pop_name in self.pop_name_list} + self.net_single_v_clamp_dict = { + pop_name: None for pop_name in self.pop_name_list + } + self.max_weight_dict = {pop_name: None for pop_name in self.pop_name_list} + 
self.variable_init_sampler_dict = { + pop_name: None for pop_name in self.pop_name_list + } + self.f_I_g_curve_dict = {pop_name: None for pop_name in self.pop_name_list} + self.I_f_g_curve_dict = {pop_name: None for pop_name in self.pop_name_list} + self.afferent_projection_dict = { + pop_name: None for pop_name in self.pop_name_list + } + self.neuron_model_dict = {pop_name: None for pop_name in self.pop_name_list} + self.neuron_model_parameters_dict = { + pop_name: None for pop_name in self.pop_name_list + } + self.neuron_model_attributes_dict = { + pop_name: None for pop_name in self.pop_name_list + } + self.max_psp_dict = {pop_name: None for pop_name in self.pop_name_list} + self.possible_rates_dict = {pop_name: None for pop_name in self.pop_name_list} + self.extreme_firing_rates_df_dict = { + pop_name: None for pop_name in self.pop_name_list + } + self.prepare_psp_dict = {pop_name: None for pop_name in self.pop_name_list} + ### set max psp for a single spike + self.max_psp_dict = {pop_name: max_psp for pop_name in self.pop_name_list} + ### print things + self.log_exist = False + self.caller_name = "" + self.log("model configurator log:") + self.print_guide = print_guide + ### simulation things + self.simulation_dur = 5000 + self.simulation_dur_estimate_time = 50 + self.nr_neurons_per_net = 100 + + ### do things for which the model needs to be created (it will not be available later) + self.analyze_model() + + ### get reduced model + self.model_reduced = _CreateReducedModel( + model=self.model, + reduced_size=100, + do_create=False, + do_compile=False, + verbose=True, + ).model_reduced + + ### print guide + self._p_g(_p_g_1) + + def analyze_model(self): + """ + prepares the creation of the single neuron and many neuron networks + """ + + ### clear ANNarchy and create the model + cnp_clear() + self.model.create(do_compile=False) + + ### get the neuron models from the model + for pop_name in self.pop_name_list: + self.neuron_model_dict[pop_name] = 
get_population(pop_name).neuron_type + self.neuron_model_parameters_dict[pop_name] = get_population( + pop_name + ).init.items() + self.neuron_model_attributes_dict[pop_name] = get_population( + pop_name + ).attributes + + ### do further things for which the model needs to be created + ### get the afferent projection dict for the populations (model needed!) + for pop_name in self.pop_name_list: + ### get afferent projection dict + self.log(f"get the afferent_projection_dict for {pop_name}") + self.afferent_projection_dict[pop_name] = self.get_afferent_projection_dict( + pop_name=pop_name + ) + + ### create dictionary with timeconstants of g_ampa and g_gaba of the populations + for pop_name in self.pop_name_list: + self.tau_dict[pop_name] = { + "ampa": get_population(pop_name).tau_ampa, + "gaba": get_population(pop_name).tau_gaba, + } + + ### get the post_pop_name_dict + self.post_pop_name_dict = {} + for proj_name in self.model.projections: + self.post_pop_name_dict[proj_name] = get_projection(proj_name).post.name + + ### get the pre_pop_name_dict + self.pre_pop_name_dict = {} + for proj_name in self.model.projections: + self.pre_pop_name_dict[proj_name] = get_projection(proj_name).pre.name + + ### get the pre_pop_size_dict + self.pre_pop_size_dict = {} + for proj_name in self.model.projections: + self.pre_pop_size_dict[proj_name] = get_projection(proj_name).pre.size + + ### clear ANNarchy --> the model is not available anymore + cnp_clear() + + def get_afferent_projection_dict(self, pop_name): + """ + creates a dictionary containing + projection_names + target firing rate + probability + size + target + for each afferent projection (=first level of keys) of the specified population + + Args: + pop_name: str + populaiton name + + return: dict of dicts + """ + ### check if model is available + if not self.model.created: + error_msg = "ERROR model_configurator get_afferent_projection_dict: the model has to be created!" 
+ self.log(error_msg) + raise AssertionError(error_msg) + ### get projection names + afferent_projection_dict = {} + afferent_projection_dict["projection_names"] = [] + for projection in self.model.projections: + if get_projection(projection).post.name == pop_name: + afferent_projection_dict["projection_names"].append(projection) + + self.nr_afferent_proj_dict[pop_name] = len( + afferent_projection_dict["projection_names"] + ) + + ### get target firing rates resting-state for afferent projections + afferent_projection_dict["target firing rate"] = [] + afferent_projection_dict["probability"] = [] + afferent_projection_dict["size"] = [] + afferent_projection_dict["target"] = [] + for projection in afferent_projection_dict["projection_names"]: + pre_pop_name = get_projection(projection).pre.name + ### target firing rate + afferent_projection_dict["target firing rate"].append( + self.target_firing_rate_dict[pre_pop_name] + ) + ### probability, _connection_args only if connect_fixed_prob (i.e. 
connector_name==Random) + afferent_projection_dict["probability"].append( + get_projection(projection)._connection_args[0] + ) + ### size + afferent_projection_dict["size"].append(len(get_projection(projection).pre)) + ### target type + afferent_projection_dict["target"].append(get_projection(projection).target) + + return afferent_projection_dict + + def get_max_syn(self, cache=True, clear=False): + """ + get the weight dictionary for all populations given in target_firing_rate_dict + keys = population names, values = dict which contain values = afferent projection names, values = lists with w_min and w_max + """ + ### clear cache to create new cache + if cache and clear: + self.log("clear cache of get_max_syn") + clear_dir("./.model_configurator_cache/get_max_syn") + + ### check cache for get_max_syn + cache_worked = False + if cache: + try: + loaded_variables_dict = load_variables( + name_list=[ + "net_single_dict", + "prepare_psp_dict", + "I_app_max_dict", + "g_max_dict", + "syn_contr_dict", + "syn_load_dict", + ], + path="./.model_configurator_cache/get_max_syn", + ) + ( + self.net_single_dict, + self.prepare_psp_dict, + self.I_app_max_dict, + self.g_max_dict, + self.syn_contr_dict, + self.syn_load_dict, + ) = loaded_variables_dict.values() + ### create dummy network for single network and actually create network for single_v_clamp (single_v_clamp needed in get_base) + self.create_single_neuron_networks( + single_net=False, single_net_v_clamp=True, prepare_psp=False + ) + cache_worked = True + except: + cache_worked = False + + if not cache_worked: + ### create single neuron networks + self.create_single_neuron_networks() + + ### get max synaptic things with single neuron networks + for pop_name in self.pop_name_list: + self.log(pop_name) + ### get max I_app and max weights (i.e. 
g_ampa, g_gaba) + txt = f"get max I_app, g_ampa and g_gaba using network_single for {pop_name}" + print(txt) + self.log(txt) + I_app_max, g_ampa_max, g_gaba_max = self.get_max_syn_currents( + pop_name=pop_name, + ) + + self.I_app_max_dict[pop_name] = I_app_max + self.g_max_dict[pop_name] = { + "ampa": g_ampa_max, + "gaba": g_gaba_max, + } + + ### obtain the synaptic contributions assuming max weights + self.syn_contr_dict = {} + for pop_name in self.pop_name_list: + self.syn_contr_dict[pop_name] = {} + for target_type in ["ampa", "gaba"]: + self.log(f"get synaptic contributions for {pop_name} {target_type}") + self.syn_contr_dict[pop_name][target_type] = ( + self.get_syn_contr_dict( + pop_name=pop_name, + target_type=target_type, + use_max_weights=True, + normalize=True, + ) + ) + + ### create the synaptic load template dict + self.syn_load_dict = {} + for pop_name in self.pop_name_list: + self.syn_load_dict[pop_name] = [] + if "ampa" in self.afferent_projection_dict[pop_name]["target"]: + self.syn_load_dict[pop_name].append("ampa_load") + if "gaba" in self.afferent_projection_dict[pop_name]["target"]: + self.syn_load_dict[pop_name].append("gaba_load") + + ### save variables in cache + ### obtain variables which should be cached / are needed later + ### do not cache ANNarchy objects + net_single_dict_to_cache = {} + for key, val in self.net_single_dict.items(): + net_single_dict_to_cache[key] = { + "variable_init_sampler": val["variable_init_sampler"] + } + save_variables( + variable_list=[ + net_single_dict_to_cache, + self.prepare_psp_dict, + self.I_app_max_dict, + self.g_max_dict, + self.syn_contr_dict, + self.syn_load_dict, + ], + name_list=[ + "net_single_dict", + "prepare_psp_dict", + "I_app_max_dict", + "g_max_dict", + "syn_contr_dict", + "syn_load_dict", + ], + path="./.model_configurator_cache/get_max_syn", + ) + + ### only return synaptic contributions smaller 1 + template_synaptic_contribution_dict = ( + 
self.get_template_synaptic_contribution_dict(given_dict=self.syn_contr_dict) + ) + + self._p_g( + _p_g_after_get_weights( + template_weight_dict=self.g_max_dict, + template_synaptic_load_dict=self.syn_load_dict, + template_synaptic_contribution_dict=template_synaptic_contribution_dict, + ) + ) + return self.max_weight_dict + + def get_syn_contr_dict( + self, pop_name: str, target_type: str, use_max_weights=False, normalize=False + ) -> dict: + """ + get the relative synaptic contribution list of a population for a given target type + weights are obtained from the afferent_projection_dict, if there are no weights --> use max weights + + Args: + pop_name: str + population name + + target_type: str + target type of the afferent projections of the population + + use_max_weights: bool, optional, default=False + if True the max weights are used, if False the weights from the afferent_projection_dict are used + + Returns: + rel_syn_contr_dict: dict + keys = projection names, values = relative synaptic contributions + """ + ### g_max have to be obtained already + assert not ( + isinstance(self.g_max_dict[pop_name][target_type], type(None)) + ), "ERROR, get_rel_syn_contr_list: g_max have to be obtained already" + ### get list of relative synaptic contributions + proj_name_list = [] + rel_syn_contr_list = [] + for proj_name in self.afferent_projection_dict[pop_name]["projection_names"]: + proj_dict = self.get_proj_dict(proj_name) + proj_target_type = proj_dict["proj_target_type"] + weight = proj_dict["proj_weight"] + if isinstance(weight, type(None)) or use_max_weights: + weight = self.g_max_dict[pop_name][target_type] + if proj_target_type == target_type: + rel_syn_contr_list.append(proj_dict["spike_frequency"] * weight) + proj_name_list.append(proj_name) + ### normalize the list + if normalize: + rel_syn_contr_arr = np.array(rel_syn_contr_list) + rel_syn_contr_arr = rel_syn_contr_arr / np.sum(rel_syn_contr_arr) + rel_syn_contr_list = rel_syn_contr_arr.tolist() + ### 
combine proj_name_list and rel_syn_contr_list to an dict + rel_syn_contr_dict = { + proj_name: rel_syn_contr + for proj_name, rel_syn_contr in zip(proj_name_list, rel_syn_contr_list) + } + + return rel_syn_contr_dict + + def create_single_neuron_networks( + self, single_net=True, single_net_v_clamp=True, prepare_psp=True + ): + ### clear ANNarchy + cnp_clear() + + ### create the single neuron networks + for pop_name in self.pop_name_list: + txt = f"create network_single for {pop_name}" + print(txt) + self.log(txt) + + ### the network with the standard neuron + if single_net: + self.net_single_dict[pop_name] = self.create_net_single( + pop_name=pop_name + ) + else: + ### dummy network for the pop + net_single_dummy = Network() + pop_single_dummy = Population( + 1, + neuron=Neuron(equations="r=1"), + name=f"dummy_single_{pop_name}", + ) + mon_single_dummy = Monitor(pop_single_dummy, ["r"]) + net_single_dummy.add([pop_single_dummy, mon_single_dummy]) + + ### the network with the voltage clamp version neuron + if single_net_v_clamp: + self.net_single_v_clamp_dict[pop_name] = ( + self.create_net_single_voltage_clamp(pop_name=pop_name) + ) + else: + ### dummy network for the pop + net_single_v_clamp_dummy = Network() + pop_single_v_clamp_dummy = Population( + 1, + neuron=Neuron(equations="r=1"), + name=f"dummy_single_v_clamp_{pop_name}", + ) + mon_single_v_clamp_dummy = Monitor(pop_single_v_clamp_dummy, ["r"]) + net_single_v_clamp_dummy.add( + [pop_single_v_clamp_dummy, mon_single_v_clamp_dummy] + ) + + ### get v_rest and correspodning I_app_hold + if prepare_psp: + self.prepare_psp_dict[pop_name] = self.find_v_rest_for_psp( + pop_name, do_plot=False + ) + + def create_net_single(self, pop_name): + """ + creates a network with the neuron type of the population given by pop_name + the number of neurons is 1 + + Args: + pop_name: str + population name + """ + + ### for stop condition for recording psp --> add v_before_psp and v_psp_thresh to equations/parameters + + ### 
get the initial arguments of the neuron + neuron_model = self.neuron_model_dict[pop_name] + ### names of arguments + init_arguments_name_list = list(Neuron.__init__.__code__.co_varnames) + init_arguments_name_list.remove("self") + init_arguments_name_list.remove("name") + init_arguments_name_list.remove("description") + ### arguments dict + init_arguments_dict = { + init_arguments_name: getattr(neuron_model, init_arguments_name) + for init_arguments_name in init_arguments_name_list + } + ### add v_before_psp=v at the beginning of the equations + equations_line_split_list = str(init_arguments_dict["equations"]).splitlines() + equations_line_split_list.insert(0, "v_before_psp = v") + init_arguments_dict["equations"] = "\n".join(equations_line_split_list) + ### add v_psp_thresh to the parameters + parameters_line_split_list = str(init_arguments_dict["parameters"]).splitlines() + parameters_line_split_list.append("v_psp_thresh = 0 : population") + init_arguments_dict["parameters"] = "\n".join(parameters_line_split_list) + + ### create neuron model with new equations + neuron_model_new = Neuron(**init_arguments_dict) + + ### create the single neuron population + single_neuron = Population( + 1, + neuron=neuron_model_new, + name=f"single_neuron_{pop_name}", + stop_condition=f"((abs(v-v_psp_thresh)<0.01) and (abs(v_before_psp-v_psp_thresh)>0.01)): any", + ) + ### set the attributes of the neuron + for attr_name, attr_val in self.neuron_model_parameters_dict[pop_name]: + setattr(single_neuron, attr_name, attr_val) + + ### create Monitor for single neuron + mon_single = Monitor(single_neuron, ["spike", "v"]) + + ### create network with single neuron + net_single = Network() + net_single.add([single_neuron, mon_single]) + compile_in_folder( + folder_name=f"single_net_{pop_name}", silent=True, net=net_single + ) + + ### get the values of the variables after 2000 ms simulation + variable_init_sampler = self.get_init_neuron_variables( + net_single, net_single.get(single_neuron) 
+ ) + + ### network dict + net_single_dict = { + "net": net_single, + "population": net_single.get(single_neuron), + "monitor": net_single.get(mon_single), + "variable_init_sampler": variable_init_sampler, + } + + return net_single_dict + + def get_init_neuron_variables(self, net, pop): + """ + get the variables of the given population after simulating 2000 ms + + Args: + net: ANNarchy network + the network which contains the pop + + pop: ANNarchy population + the population whose variables are obtained + + """ + ### reset neuron and deactivate input + net.reset() + pop.I_app = 0 + + ### 10000 ms init duration + net.simulate(10000) + + ### simulate 2000 ms and check every dt the variables of the neuron + time_steps = int(2000 / dt()) + var_name_list = list(pop.variables) + var_arr = np.zeros((time_steps, len(var_name_list))) + for time_idx in range(time_steps): + net.simulate(dt()) + get_arr = np.array([getattr(pop, var_name) for var_name in pop.variables]) + var_arr[time_idx, :] = get_arr[:, 0] + net.reset() + + ### create a sampler with the data samples of from the 1000 ms simulation + sampler = self.var_arr_sampler(var_arr, var_name_list) + return sampler + + def create_net_single_voltage_clamp(self, pop_name): + """ + creates a network with the neuron type of the population given by pop_name + the number of neurons is 1 + + The equation wich defines the chagne of v is set to zero and teh change of v + is stored in the new variable v_clamp_rec + + Args: + pop_name: str + population name + """ + + ### get the initial arguments of the neuron + neuron_model = self.neuron_model_dict[pop_name] + ### names of arguments + init_arguments_name_list = list(Neuron.__init__.__code__.co_varnames) + init_arguments_name_list.remove("self") + init_arguments_name_list.remove("name") + init_arguments_name_list.remove("description") + ### arguments dict + init_arguments_dict = { + init_arguments_name: getattr(neuron_model, init_arguments_name) + for init_arguments_name in 
init_arguments_name_list + } + ### get new equations for voltage clamp + equations_new = self.get_voltage_clamp_equations(init_arguments_dict, pop_name) + init_arguments_dict["equations"] = equations_new + ### add v_clamp_rec_thresh to the parameters + parameters_line_split_list = str(init_arguments_dict["parameters"]).splitlines() + parameters_line_split_list.append("v_clamp_rec_thresh = 0 : population") + init_arguments_dict["parameters"] = "\n".join(parameters_line_split_list) + + ### for each afferent population create a binomial spike train equation string + ### add it to the equations + ### and add the related parameters to the parameters + + ### get the afferent populations + afferent_population_list = [] + proj_target_type_list = [] + for proj_name in self.afferent_projection_dict[pop_name]["projection_names"]: + proj_dict = self.get_proj_dict(proj_name) + pre_pop_name = proj_dict["pre_pop_name"] + afferent_population_list.append(pre_pop_name) + proj_target_type_list.append(proj_dict["proj_target_type"]) + + ### split the equations and parameters string + equations_line_split_list = str(init_arguments_dict["equations"]).splitlines() + + parameters_line_split_list = str(init_arguments_dict["parameters"]).splitlines() + + ### add the binomial spike train equations and parameters + ( + equations_line_split_list, + parameters_line_split_list, + ) = self.add_binomial_input( + equations_line_split_list, + parameters_line_split_list, + afferent_population_list, + proj_target_type_list, + ) + + ### combine string lines to multiline strings again + init_arguments_dict["parameters"] = "\n".join(parameters_line_split_list) + init_arguments_dict["equations"] = "\n".join(equations_line_split_list) + + ### create neuron model with new equations + neuron_model_new = Neuron(**init_arguments_dict) + + ### create the single neuron population + single_neuron_v_clamp = Population( + 1, + neuron=neuron_model_new, + name=f"single_neuron_v_clamp_{pop_name}", + ) + + ### set the 
attributes of the neuron + for attr_name, attr_val in self.neuron_model_parameters_dict[pop_name]: + setattr(single_neuron_v_clamp, attr_name, attr_val) + + ### create Monitor for single neuron + mon_single = Monitor(single_neuron_v_clamp, ["v_clamp_rec_sign"]) + + ### create network with single neuron + net_single = Network() + net_single.add([single_neuron_v_clamp, mon_single]) + compile_in_folder( + folder_name=f"single_v_clamp_net_{pop_name}", silent=True, net=net_single + ) + + ### network dict + net_single_dict = { + "net": net_single, + "population": net_single.get(single_neuron_v_clamp), + "monitor": net_single.get(mon_single), + } + + return net_single_dict + + def find_v_rest_for_psp(self, pop_name, do_plot=False): + """ + using both single networks to find v_rest and I_app_hold + """ + + ### find v where dv/dt is minimal with voltage clamp network (best = 0, it can only be >= 0) + self.log("search v_rest with y(X) = delta_v_2000(v=X) using grid search") + v_arr = np.linspace(-90, -20, 200) + v_clamp_arr = np.array( + [ + self.get_v_clamp_2000( + v=X_val, + net=self.net_single_v_clamp_dict[pop_name]["net"], + population=self.net_single_v_clamp_dict[pop_name]["population"], + ) + for X_val in v_arr + ] + ) + v_rest = np.min(v_arr[argrelmin(v_clamp_arr)[0]]) + if do_plot: + plt.figure() + plt.plot(v_arr, v_clamp_arr) + plt.axvline(v_rest, color="k") + plt.axhline(0, color="k", ls="dashed") + plt.savefig(f"v_clamp_{pop_name}.png") + plt.close("all") + + ### do again the simulation with the obtained v_rest to get the stady state values + detla_v_rest = ( + self.get_v_clamp_2000( + v=v_rest, + net=self.net_single_v_clamp_dict[pop_name]["net"], + population=self.net_single_v_clamp_dict[pop_name]["population"], + ) + * dt() + ) + obtained_variables = { + var_name: getattr( + self.net_single_v_clamp_dict[pop_name]["population"], var_name + ) + for var_name in self.net_single_v_clamp_dict[pop_name][ + "population" + ].variables + } + self.log( + f"for {pop_name} 
found v_rest={v_rest} with delta_v_2000(v=v_rest)={detla_v_rest}" + ) + + ### check if the neuron stays at v_rest with normal neuron + ### if it stays --> use new value as v_rest (its even a bit finer as before) + ### if it not stays --> find I_app which holds the membrane potential constant + v_rest_arr = self.get_new_v_rest_2000(pop_name, obtained_variables) + v_rest_arr_is_const = ( + np.std(v_rest_arr, axis=0) + <= np.mean(np.absolute(v_rest_arr), axis=0) / 1000 + ) + if v_rest_arr_is_const: + ### v_rest found, no I_app_hold needed + v_rest = v_rest_arr[-1] + I_app_hold = 0 + self.log(f"final v_rest = {v_rest_arr[-1]}") + else: + ### there is no v_rest i.e. neuron is self-active --> find smallest negative I_app to silence neuron + self.log( + "neuron seems to be self-active --> find smallest I_app to silence the neuron" + ) + + ### negative current initially reduces v + ### then v climbs back up + ### check if the second half of v is constant if yes fine if not increase negative I_app + ### find I_app_hold with incremental_continuous_bound_search + self.log("search I_app_hold with y(X) = CHANGE_OF_V(I_app=X)") + I_app_hold = -self.incremental_continuous_bound_search( + y_X=lambda X_val: self.get_v_rest_arr_const( + pop_name=pop_name, + obtained_variables=obtained_variables, + I_app=-X_val, + ), + y_bound=0, + X_0=0, + y_0=self.get_v_rest_arr_const( + pop_name=pop_name, + obtained_variables=obtained_variables, + I_app=0, + ), + X_increase=detla_v_rest, + accept_non_dicontinuity=True, + bound_type="greater", + ) + ### again simulate the neuron with the obtained I_app_hold to get the new v_rest + v_rest_arr = self.get_new_v_rest_2000( + pop_name, obtained_variables, I_app=I_app_hold + ) + v_rest = v_rest_arr[-1] + self.log(f"I_app_hold = {I_app_hold}, resulting v_rest = {v_rest}") + + ### get the sampler for the initial variables + variable_init_sampler = self.get_init_neuron_variables_for_psp( + net=self.net_single_dict[pop_name]["net"], + 
pop=self.net_single_dict[pop_name]["population"], + v_rest=v_rest, + I_app_hold=I_app_hold, + ) + + return { + "v_rest": v_rest, + "I_app_hold": I_app_hold, + "variable_init_sampler": variable_init_sampler, + } + + def get_v_rest_arr_const( + self, pop_name, obtained_variables, I_app, return_bool=False + ): + """ + sets I_app and obtained varaibles in single neuron + simulates 2000 ms and returns how much the v changes + 0 = constant, negative = not constant + """ + v_rest_arr = self.get_new_v_rest_2000(pop_name, obtained_variables, I_app=I_app) + v_rest_arr = v_rest_arr[len(v_rest_arr) // 2 :] + + if return_bool: + return 0 <= np.mean(np.absolute(v_rest_arr), axis=0) / 1000 - np.std( + v_rest_arr, axis=0 + ) + else: + return np.mean(np.absolute(v_rest_arr), axis=0) / 1000 - np.std( + v_rest_arr, axis=0 + ) + + def get_new_v_rest_2000( + self, pop_name, obtained_variables, I_app=None, do_plot=True + ): + """ + use single_net to simulate 2000 ms and return v + """ + net: Network = self.net_single_dict[pop_name]["net"] + pop = self.net_single_dict[pop_name]["population"] + monitor = self.net_single_dict[pop_name]["monitor"] + net.reset() + ### set variables + for var_name, var_val in obtained_variables.items(): + if var_name in pop.variables: + setattr(pop, var_name, var_val) + if not isinstance(I_app, type(None)): + pop.I_app = I_app + ### simulate + net.simulate(2000) + v_arr = monitor.get("v")[:, 0] + + if do_plot: + plt.figure() + plt.title(f"{pop.I_app}") + plt.plot(v_arr) + plt.savefig(f"tmp_v_rest_{pop_name}.png") + plt.close("all") + + return v_arr + + def get_nr_spikes_from_v_rest_2000( + self, pop_name, obtained_variables, I_app=None, do_plot=True + ): + """ + use single_net to simulate 2000 ms and return number spikes + """ + net = self.net_single_dict[pop_name]["net"] + pop = self.net_single_dict[pop_name]["population"] + mon = self.net_single_dict[pop_name]["monitor"] + net.reset() + ### set variables + for var_name, var_val in 
obtained_variables.items(): + if var_name in pop.variables: + setattr(pop, var_name, var_val) + if not isinstance(I_app, type(None)): + pop.I_app = I_app + ### simulate + simulate(2000) + ### get spikes + spike_dict = mon.get("spike") + nr_spikes = len(spike_dict[0]) + return nr_spikes + + def log(self, txt): + caller_frame = inspect.currentframe().f_back + caller_name = caller_frame.f_code.co_name + + if caller_name == self.caller_name: + txt = f"{textwrap.indent(str(txt), ' ')}" + else: + txt = f"[{caller_name}]:\n{textwrap.indent(str(txt), ' ')}" + + self.caller_name = caller_name + + if self.log_exist: + with open("model_conf_log", "a") as f: + print(txt, file=f) + else: + with open("model_conf_log", "w") as f: + print(txt, file=f) + self.log_exist = True + + def _p_g(self, txt): + """ + prints guiding text + """ + print_width = min([os.get_terminal_size().columns, 80]) + + if self.print_guide: + print("\n[model_configurator guide]:") + for line in txt.splitlines(): + wrapped_text = textwrap.fill( + line, width=print_width - 5, replace_whitespace=False + ) + wrapped_text = textwrap.indent(wrapped_text, " |") + print(wrapped_text) + print("") + + def _p_w(self, txt): + """ + prints warning + """ + print_width = min([os.get_terminal_size().columns, 80]) + + print("\n[model_configurator WARNING]:") + for line in str(txt).splitlines(): + wrapped_text = textwrap.fill( + line, width=print_width - 5, replace_whitespace=False + ) + wrapped_text = textwrap.indent(wrapped_text, " |") + print(wrapped_text) + print("") + + def get_base(self): + """ + Obtain the baseline currents for the configured populations to obtian the target firing rates + with the currently set weights, set by .set_weights or .set_syn_load + + return: + I_base_dict, dict + Dictionary with baseline curretns for all configured populations. + """ + ### TODO: current problem: model is without noise... but how large and for what is noise??? + ### neurons all behave equally (e.g. 
spike at same time), this changes due to different inputs ("noise" in input) + ### this could also be prevented by initializing all neurons differently (along there periodic u-v curve) + ### or by adding noise to conductances or baseline current + ### thenthe question is, how is the relation between added noise and the noise in the input + ### TODO: I've decided for noise depending on the input current (scaled by specified SNR) + ### without input there is no noise, decorrelate neurons by random initial values + ### TODO: current idea is: to find max syn things the noise has to be deactivated and to find baseline currents the noise has to be activated + ### so single neuron networks should be without noise, an then here noise should be activated, maybe requirement for model conf will be a variable called noise to turn on and off noise + for pop_name in self.pop_name_list: + for proj_name in self.afferent_projection_dict[pop_name][ + "projection_names" + ]: + proj_dict = self.get_proj_dict(proj_name) + print(f"set weight of {proj_name} to {proj_dict['proj_weight']}") + + ### set the weights of the normal model + model = self._set_weights_of_model(mode=0) + + ### set initial variables of populations (do not initialize all neurons the same) + for pop_name in self.pop_name_list: + population = get_population(pop_name) + variable_init_sampler = self.net_single_dict[pop_name][ + "variable_init_sampler" + ] + self.set_init_variables(population, variable_init_sampler) + + ### record and simulate + mon_dict = {pop_name: ["spike"] for pop_name in model.populations} + mon = CompNeuroMonitors(mon_dict=mon_dict) + mon.start() + simulate(1000) + recordings = mon.get_recordings() + recording_times = mon.get_recording_times() + plan = { + "position": list(range(1, len(model.populations) + 1)), + "compartment": model.populations, + "variable": ["spike"] * len(model.populations), + "format": ["hybrid"] * len(model.populations), + } + PlotRecordings( + 
figname="model_conf_normal_model.png", + recordings=recordings, + recording_times=recording_times, + shape=(len(plan["position"]), 1), + plan=plan, + ) + + ### set the weights of the reduced model + model = self._set_weights_of_model(mode=1) + + ### set initial variables of populations (do not initialize all neurons the same) + for pop_name in self.pop_name_list: + population = get_population(f"{pop_name}_reduced") + variable_init_sampler = self.net_single_dict[pop_name][ + "variable_init_sampler" + ] + self.set_init_variables(population, variable_init_sampler) + + ### record and simulate + mon_dict = {f"{pop_name}_reduced": ["spike"] for pop_name in mon_dict.keys()} + mon = CompNeuroMonitors(mon_dict=mon_dict) + mon.start() + simulate(1000) + recordings = mon.get_recordings() + recording_times = mon.get_recording_times() + plan["compartment"] = [ + f"{pop_name}_reduced" for pop_name in plan["compartment"] + ] + PlotRecordings( + figname="model_conf_reduced_model.png", + recordings=recordings, + recording_times=recording_times, + shape=(len(plan["position"]), 1), + plan=plan, + ) + ### next check if populations which should not be tuned have the correct firing rates, if not warning that the populations are tuned but if the rate of the not tuned populations changes this might also change the tuned populations' rates + ### next activate noise and then performe search algorithm ith reduced model with input varaibles = I_app of populations and output variables = firing rates of populations + ### TODO get base with reduced model + quit() + + def _set_weights_of_model(self, mode=0): + """ + Set the weights of the model to the current weights from the + afferent_projection_dict. 
+ """ + ### clear ANNarchy + cnp_clear() + + ### create the original model + if mode == 0: + model = self.model + elif mode == 1: + model = self.model_reduced + model.create() + + for pop_name in self.pop_name_list: + for proj_name in self.afferent_projection_dict[pop_name][ + "projection_names" + ]: + if mode == 0: + ### set weght of projection + proj_dict = self.get_proj_dict(proj_name) + get_projection(proj_name).w = proj_dict["proj_weight"] + elif mode == 1: + ### set weight of the projection in the conductance-calculating + ### input current population + proj_dict = self.get_proj_dict(proj_name) + proj_weight = proj_dict["proj_weight"] + proj_target_type = proj_dict["proj_target_type"] + setattr( + get_population(f"{pop_name}_{proj_target_type}_aux"), + f"weights_{proj_name}", + proj_weight, + ) + return model + + def find_base_current(self, net_many_dict): + """ + search through whole I_app space + for each population simulate a network with 10000 neurons, each neuron has a different I_app value + g_ampa and g_gaba values are internally created using + the weigths stored in the afferent_projection dict + and target firing rates stored in the target_firing_rate_dict + """ + + I_app_arr_list = [] + weight_list_list = [] + pre_pop_name_list_list = [] + rate_list_list = [] + eff_size_list_list = [] + ### get lists which define the current weights to the afferent populations + ### get lists which define the current rates of the afferent populations + ### get lists with the names of the afferent populations + ### the length of the lists has to be the number of networks i.e. 
the number of populations + for pop_name in self.pop_name_list: + ### get the weights, names, rates of the afferent populations + weight_list = self.afferent_projection_dict[pop_name]["weights"] + proj_name_list = self.afferent_projection_dict[pop_name]["projection_names"] + pre_pop_name_list = [ + self.get_proj_dict(proj_name)["pre_pop_name"] + for proj_name in proj_name_list + ] + rate_list = self.get_rate_list_for_pop(pop_name) + eff_size_list = self.get_eff_size_list_for_pop(pop_name) + ### get correct magnitude of I_app using the voltage clamp networks + I_app_magnitude = self.get_I_app_magnitude( + pop_name, + pre_pop_name_list=pre_pop_name_list, + eff_size_list=eff_size_list, + rate_list=rate_list, + weight_list=weight_list, + ) + ### get the I_app_arr + I_app_arr = np.linspace( + I_app_magnitude, + I_app_magnitude + self.I_app_max_dict[pop_name], + self.nr_neurons_per_net, + ) + ### append these lists to the list for all post populations i.e. networks + weight_list_list.append(weight_list) + pre_pop_name_list_list.append(pre_pop_name_list) + rate_list_list.append(rate_list) + eff_size_list_list.append(eff_size_list) + I_app_arr_list.append(I_app_arr) + + ### create list with variable_init_samplers of populations + variable_init_sampler_list = [ + self.net_single_dict[pop_name]["variable_init_sampler"] + for pop_name in self.pop_name_list + ] + + ### get firing rates obtained with all I_app values + ### rates depend on the current weights and the current target firing rates + nr_networks = len(self.pop_name_list) + possible_firing_rates_list_list = parallel_run( + method=get_rate_parallel, + networks=net_many_dict["network_list"], + **{ + "population": net_many_dict["population_list"], + "variable_init_sampler": variable_init_sampler_list, + "monitor": net_many_dict["monitor_list"], + "I_app_arr": I_app_arr_list, + "weight_list": weight_list_list, + "pre_pop_name_list": pre_pop_name_list_list, + "rate_list": rate_list_list, + "eff_size_list": 
eff_size_list_list, + "simulation_dur": [self.simulation_dur] * nr_networks, + }, + ) + + ### catch if target firing rate in any population cannot be reached + I_app_best_dict = {} + target_firing_rate_changed = False + for pop_idx, pop_name in enumerate(self.pop_name_list): + target_firing_rate = self.target_firing_rate_dict[pop_name] + possible_firing_rates_arr = np.array( + possible_firing_rates_list_list[pop_idx] + ) + I_app_arr = I_app_arr_list[pop_idx] + print(f"firing rates for pop {pop_name}") + print(f"{I_app_arr}") + print(f"{possible_firing_rates_arr}\n") + possible_f_min = possible_firing_rates_arr.min() + possible_f_max = possible_firing_rates_arr.max() + if not ( + target_firing_rate >= possible_f_min + and target_firing_rate <= possible_f_max + ): + new_target_firing_rate = np.array([possible_f_min, possible_f_max])[ + np.argmin( + np.absolute( + np.array([possible_f_min, possible_f_max]) + - target_firing_rate + ) + ) + ] + ### if the possible firing rates are too small --> what (high) firing rate could be maximally reached with a hypothetical g_ampa_max and I_app_max + ### if the possible firing rates are too large --> waht (low) firing rate could be reached with g_gaba_max and -I_app_max + warning_txt = f"WARNING get_possible_rates: target firing rate of population {pop_name}({target_firing_rate}) cannot be reached.\nPossible range with current synaptic load: [{round(possible_f_min,1)},{round(possible_f_max,1)}].\nSet firing rate to {round(new_target_firing_rate,1)}." 
+ self._p_w(warning_txt) + self.log(warning_txt) + self.target_firing_rate_dict[pop_name] = new_target_firing_rate + target_firing_rate = self.target_firing_rate_dict[pop_name] + target_firing_rate_changed = True + ### find best I_app for reaching target firing rate + best_idx = np.argmin( + np.absolute(possible_firing_rates_arr - target_firing_rate) + ) + ### take all possible firing rates in range target firing rate +-10 + lower_rate = max([0, target_firing_rate - 10]) + higher_rate = target_firing_rate + 10 + rate_range_idx_arr = ( + (possible_firing_rates_arr >= lower_rate).astype(int) + * (possible_firing_rates_arr <= higher_rate).astype(int) + ).astype(bool) + possible_firing_rates_arr = possible_firing_rates_arr[rate_range_idx_arr] + I_app_arr = I_app_arr[rate_range_idx_arr] + ### now do linear fit to find I_app for target firing rate + if len(I_app_arr) > 10: + reg = LinearRegression().fit( + X=possible_firing_rates_arr.reshape(-1, 1), y=I_app_arr + ) + I_app_best_dict[pop_name] = reg.predict( + np.array([[target_firing_rate]]) + )[0] + else: + I_app_best_dict[pop_name] = 0 + plt.figure(figsize=(6.4, 4.8 * 2)) + plt.subplot(211) + plt.plot(I_app_arr, possible_firing_rates_arr) + plt.axhline(target_firing_rate, color="k") + plt.axvline(I_app_best_dict[pop_name], color="r") + plt.subplot(212) + plt.plot( + I_app_arr, np.absolute(possible_firing_rates_arr - target_firing_rate) + ) + plt.tight_layout() + plt.savefig(f"possible_firing_rate_{pop_name}.png", dpi=300) + plt.close("all") + + if target_firing_rate_changed and False: + print_df(pd.DataFrame(self.afferent_projection_dict)) + print_df(pd.DataFrame(self.g_max_dict)) + ### TODO cannot reach firing rates for example for thal because I_app_max is too small, this +100Hz method seems not to work well + ### maybe use the weights and a voltage clamp neuron to find I_app + ### like with I_app_hold + ### weights i.e. spike trains cause dv/dt to be e.g. 
extremely negative --> then find I_app to make dv/dt zero + ### this I_app should then be "near" the I_app needed to reach the target firing rate + quit() + + return [target_firing_rate_changed, I_app_best_dict] + + def get_I_app_magnitude( + self, + pop_name, + pre_pop_name_list=[], + eff_size_list=[], + rate_list=[], + weight_list=[], + ): + """ + Get the correct magnitude of I_app for the given population. + The correct magnitude is the magnitude which is to negate the synaptic currents caused by the afferent populations. + Use the curretn weights and rates from the afferent_projection_dict and target_firing_rate_dict. + """ + print(f"get v clamp of {pop_name}") + print(f"pre_pop_name_list: {pre_pop_name_list}") + print(f"eff_size_list: {eff_size_list}") + print(f"rate_list: {rate_list}") + print(f"weight_list: {weight_list}") + print(f"I_app_hold: {self.prepare_psp_dict[pop_name]['I_app_hold']}") + print(f"v_rest: {self.prepare_psp_dict[pop_name]['v_rest']}") + + detla_v_rest_0 = ( + self.get_v_clamp_2000( + net=self.net_single_v_clamp_dict[pop_name]["net"], + population=self.net_single_v_clamp_dict[pop_name]["population"], + monitor=self.net_single_v_clamp_dict[pop_name]["monitor"], + v=None, + I_app=0, + variable_init_sampler=self.prepare_psp_dict[pop_name][ + "variable_init_sampler" + ], + pre_pop_name_list=pre_pop_name_list, + eff_size_list=eff_size_list, + rate_list=rate_list, + weight_list=weight_list, + return_1000=True, + ) + * dt() + ) + + if detla_v_rest_0 > 0: + I_app_sign = -1 + else: + I_app_sign = 1 + + self.log("search I_app_magnitude with y(X) = detla_v(I_app=X)") + I_app_magnitude = I_app_sign * self.incremental_continuous_bound_search( + y_X=lambda X_val: self.get_v_clamp_2000( + net=self.net_single_v_clamp_dict[pop_name]["net"], + population=self.net_single_v_clamp_dict[pop_name]["population"], + monitor=self.net_single_v_clamp_dict[pop_name]["monitor"], + v=None, + I_app=I_app_sign * X_val, + 
variable_init_sampler=self.prepare_psp_dict[pop_name][ + "variable_init_sampler" + ], + pre_pop_name_list=pre_pop_name_list, + eff_size_list=eff_size_list, + rate_list=rate_list, + weight_list=weight_list, + return_1000=True, + ) + * dt(), + y_bound=0, + X_0=0, + y_0=detla_v_rest_0, + alpha_abs=0.005, + ) + + print(f"I_app_magnitude: {I_app_magnitude}\n") + + return I_app_magnitude + + def get_rate_list_for_pop(self, pop_name): + """ + get the rate list for the afferent populations of the given population + """ + rate_list = [] + for proj_name in self.afferent_projection_dict[pop_name]["projection_names"]: + proj_dict = self.get_proj_dict(proj_name) + pre_pop_name = proj_dict["pre_pop_name"] + pre_rate = self.target_firing_rate_dict[pre_pop_name] + rate_list.append(pre_rate) + return rate_list + + def get_eff_size_list_for_pop(self, pop_name): + """ + get the effective size list for the afferent populations of the given population + """ + eff_size_list = [] + for proj_name in self.afferent_projection_dict[pop_name]["projection_names"]: + proj_dict = self.get_proj_dict(proj_name) + pre_pop_size = proj_dict["pre_pop_size"] + proj_prob = proj_dict["proj_prob"] + eff_size = int(round(pre_pop_size * proj_prob, 0)) + eff_size_list.append(eff_size) + return eff_size_list + + def set_base(self, I_base_dict=None, I_base_variable="base_mean"): + """ + Set baseline currents in model, compile model and set weights in model. + + Args: + I_base_dict: dict, optional, default=None + Dictionary with baseline currents for all populations, if None the baselines are obtained by .get_base + + I_base_variable: str, optional, default="mean_base" + Name of the variable which represents the baseline current in the configured populations. They all have to have the same variable. 
+ """ + ### check I_base_dict + if isinstance(I_base_dict, type(None)): + I_base_dict = self.get_base() + + ### clear annarchy, create model and set baselines and weights + cnp_clear() + self.model.create(do_compile=False) + ### set initial variables of populations + for pop_name in self.pop_name_list: + population = get_population(pop_name) + variable_init_sampler = self.net_single_dict[pop_name][ + "variable_init_sampler" + ] + self.set_init_variables(population, variable_init_sampler) + ### set baselines + for pop_name in I_base_dict.keys(): + get_val = getattr(get_population(pop_name), I_base_variable) + try: + set_val = np.ones(len(get_val)) * I_base_dict[pop_name] + except: + set_val = I_base_dict[pop_name] + setattr(get_population(pop_name), I_base_variable, set_val) + ### compile + self.model.compile() + ### set weights + for pop_name in self.pop_name_list: + for proj_idx, proj_name in enumerate( + self.afferent_projection_dict[pop_name]["projection_names"] + ): + weight_val = self.afferent_projection_dict[pop_name]["weights"][ + proj_idx + ] + get_projection(proj_name).w = weight_val + + return I_base_dict + + def set_init_variables(self, population, variable_init_sampler): + """ + Set the initial variables of the given population to the given values. 
+ """ + variable_init_arr = variable_init_sampler.sample(len(population), seed=0) + var_name_list = variable_init_sampler.var_name_list + for var_name in population.variables: + if var_name in var_name_list: + set_val = variable_init_arr[:, var_name_list.index(var_name)] + setattr(population, var_name, set_val) + + def get_time_in_x_sec(self, x): + """ + Args: + x: int + how many seconds add to the current time + + return: + formatted_future_time: str + string of the future time in HH:MM:SS + """ + # Get the current time + current_time = datetime.datetime.now() + + # Add 10 seconds to the current time + future_time = current_time + datetime.timedelta(seconds=x) + + # Format future_time as HH:MM:SS + formatted_future_time = future_time.strftime("%H:%M:%S") + + return formatted_future_time + + def get_interpolation(self): + """ + get the interpolations to + predict f with I_app, g_ampa and g_gaba + + sets the class variable self.f_I_g_curve_dict --> for each population a f_I_g_curve function + """ + + ### create model + net_many_dict = self.create_many_neuron_network() + + ### get interpolation data + txt = "get interpolation data..." 
+ print(txt) + self.log(txt) + ### for each population get the input arrays for I_app, g_ampa and g_gaba + ### while getting inputs define which values should be used later + input_dict = self.get_input_for_many_neurons_net() + + ### create list with variable_init_samplers of populations + variable_init_sampler_list = [ + self.net_single_dict[pop_name]["variable_init_sampler"] + for pop_name in self.pop_name_list + ] + + ### run the run_parallel with a reduced simulation duration and obtain a time estimate for the full duration + ### TODO use directly measureing simulation time to get time estimate + start = time() + parallel_run( + method=get_rate_parallel, + number=self.nr_networks, + **{ + "pop_name_list": [self.pop_name_list] * self.nr_networks, + "population_list": [list(net_many_dict["population_dict"].values())] + * self.nr_networks, + "variable_init_sampler_list": [variable_init_sampler_list] + * self.nr_networks, + "monitor_list": [list(net_many_dict["monitor_dict"].values())] + * self.nr_networks, + "I_app_list": input_dict["I_app_list"], + "g_ampa_list": input_dict["g_ampa_list"], + "g_gaba_list": input_dict["g_gaba_list"], + "simulation_dur": [dt()] * self.nr_networks, + }, + ) + reset() + end = time() + offset_time = end - start + start = time() + parallel_run( + method=get_rate_parallel, + number=self.nr_networks, + **{ + "pop_name_list": [self.pop_name_list] * self.nr_networks, + "population_list": [list(net_many_dict["population_dict"].values())] + * self.nr_networks, + "variable_init_sampler_list": [variable_init_sampler_list] + * self.nr_networks, + "monitor_list": [list(net_many_dict["monitor_dict"].values())] + * self.nr_networks, + "I_app_list": input_dict["I_app_list"], + "g_ampa_list": input_dict["g_ampa_list"], + "g_gaba_list": input_dict["g_gaba_list"], + "simulation_dur": [self.simulation_dur_estimate_time] + * self.nr_networks, + }, + ) + reset() + end = time() + time_estimate = np.clip( + round( + (end - start - offset_time) + * 
(self.simulation_dur / self.simulation_dur_estimate_time), + 0, + ), + 0, + None, + ) + + txt = f"start parallel_run of many neurons network on {self.nr_networks} threads, will take approx. {time_estimate} s (end: {self.get_time_in_x_sec(x=time_estimate)})..." + print(txt) + self.log(txt) + ### simulate the many neurons network with the input arrays splitted into the network populations sizes + ### and get the data of all populations + ### run_parallel + start = time() + f_rec_arr_list_list = parallel_run( + method=get_rate_parallel, + number=self.nr_networks, + **{ + "pop_name_list": [self.pop_name_list] * self.nr_networks, + "population_list": [list(net_many_dict["population_dict"].values())] + * self.nr_networks, + "variable_init_sampler_list": [variable_init_sampler_list] + * self.nr_networks, + "monitor_list": [list(net_many_dict["monitor_dict"].values())] + * self.nr_networks, + "I_app_list": input_dict["I_app_list"], + "g_ampa_list": input_dict["g_ampa_list"], + "g_gaba_list": input_dict["g_gaba_list"], + "simulation_dur": [self.simulation_dur] * self.nr_networks, + }, + ) + end = time() + txt = f"took {end-start} s" + print(txt) + self.log(txt) + + ### combine the list of outputs from parallel_run to one output per population + output_of_populations_dict = self.get_output_of_populations( + f_rec_arr_list_list, input_dict + ) + + ### create interpolation for each population + ### it can be a 1D to 3D interpolation, default (if everything works fine) is + ### 3D interpolation with "x": "I_app", "y": "g_ampa", "z": "g_gaba" + for pop_name in self.pop_name_list: + ### get whole input arrays + I_app_value_array = None + g_ampa_value_array = None + g_gaba_value_array = None + if self.I_app_max_dict[pop_name] > 0: + I_app_value_array = input_dict["I_app_arr_dict"][pop_name] + if self.g_max_dict[pop_name]["ampa"] > 0: + g_ampa_value_array = input_dict["g_ampa_arr_dict"][pop_name] + if self.g_max_dict[pop_name]["gaba"] > 0: + g_gaba_value_array = 
input_dict["g_gaba_arr_dict"][pop_name] + + ### get the interpolation + self.f_I_g_curve_dict[pop_name] = self.get_interp_3p( + values=output_of_populations_dict[pop_name], + model_conf_obj=self, + var_name_dict={"x": "I_app", "y": "g_ampa", "z": "g_gaba"}, + x=I_app_value_array, + y=g_ampa_value_array, + z=g_gaba_value_array, + ) + + self.did_get_interpolation = True + + ### with interpolation get the firing rates for all extreme values of I_app, g_ampa, g_gaba + for pop_name in self.pop_name_list: + self.extreme_firing_rates_df_dict[pop_name] = ( + self.get_extreme_firing_rates_df(pop_name) + ) + + def get_extreme_firing_rates_df(self, pop_name): + """ + get the firing rates for all extreme values of I_app, g_ampa, g_gaba + + Args: + pop_name: str + popualtion name + + return: + table_df: pandas dataframe + containing the firing rates for all extreme values of I_app, g_ampa, g_gaba + """ + I_app_list = [-self.I_app_max_dict[pop_name], self.I_app_max_dict[pop_name]] + g_ampa_list = [0, self.g_max_dict[pop_name]["ampa"]] + g_gaba_list = [0, self.g_max_dict[pop_name]["gaba"]] + ### create all combiniations of I_app_list, g_ampa_list, g_gaba_list in a single list + comb_list = self.get_all_combinations_of_lists( + [I_app_list, g_ampa_list, g_gaba_list] + ) + + ### get the firing rates for all combinations + f_list = [] + for I_app, g_ampa, g_gaba in comb_list: + f_list.append( + self.f_I_g_curve_dict[pop_name](x=I_app, y=g_ampa, z=g_gaba)[0] + ) + + ### now get the same for names + I_app_name_list = ["min", "max"] + g_ampa_name_list = ["min", "max"] + g_gaba_name_list = ["min", "max"] + ### create all combiniations of I_app_name_list, g_ampa_name_list, g_gaba_name_list in a single list + comb_name_list = self.get_all_combinations_of_lists( + [I_app_name_list, g_ampa_name_list, g_gaba_name_list] + ) + + ### create a dict as table with header I_app, g_ampa, g_gaba + table_dict = { + "I_app": np.array(comb_name_list)[:, 0].tolist(), + "g_ampa": 
np.array(comb_name_list)[:, 1].tolist(), + "g_gaba": np.array(comb_name_list)[:, 2].tolist(), + "f": f_list, + } + + ### create a pandas dataframe from the table_dict + table_df = pd.DataFrame(table_dict) + + return table_df + + def get_all_combinations_of_lists(self, list_of_lists): + """ + get all combinations of lists in a single list + example: [[1,2],[3,4],[5,6]] --> [[1,3,5],[1,3,6],[1,4,5],[1,4,6],[2,3,5],[2,3,6],[2,4,5],[2,4,6]] + """ + return list(itertools.product(*list_of_lists)) + + def get_output_of_populations(self, f_rec_arr_list_list, input_dict): + """ + restructure the output of run_parallel so that for each population a single array with firing rates is obtained + + Args: + f_rec_arr_list_list: list of lists of arrays + first lists contain different network runs, second level lists contain arrays for the different populations + return: + output_pop_dict: dict of arrays + for each population a single array with firing rates + """ + output_pop_dict = {} + for pop_name in self.pop_name_list: + output_pop_dict[pop_name] = [] + ### first loop selecting the network + for f_rec_arr_list in f_rec_arr_list_list: + ### second loop selecting the population + for pop_idx, pop_name in enumerate(self.pop_name_list): + ### append the recorded values to the array of the corresponding population + output_pop_dict[pop_name].append(f_rec_arr_list[pop_idx]) + + ### concatenate the arrays of the individual populations + for pop_name in self.pop_name_list: + output_pop_dict[pop_name] = np.concatenate(output_pop_dict[pop_name]) + + ### use the input dict to only use values which should be used + ### lis of lists, first list level = networks, second list level = populations then you get array with input values + ### so same format as f_rec_arr_list_list + use_I_app_arr_list_list = input_dict["use_I_app_list"] + use_g_ampa_arr_list_list = input_dict["use_g_ampa_list"] + use_g_gaba_arr_list_list = input_dict["use_g_gaba_list"] + + ### now get for each population an array 
which contains the info if the values should be used + use_output_pop_dict = {} + for pop_name in self.pop_name_list: + use_output_pop_dict[pop_name] = [] + ### first loop selecting the network + for net_idx in range(len(use_I_app_arr_list_list)): + use_I_app_arr_list = use_I_app_arr_list_list[net_idx] + use_g_ampa_arr_list = use_g_ampa_arr_list_list[net_idx] + use_g_gaba_arr_list = use_g_gaba_arr_list_list[net_idx] + ### second loop selecting the population + for pop_idx, pop_name in enumerate(self.pop_name_list): + ### only use values if for all input values use is True + use_I_app_arr = use_I_app_arr_list[pop_idx] + use_g_ampa_arr = use_g_ampa_arr_list[pop_idx] + use_g_gaba_arr = use_g_gaba_arr_list[pop_idx] + use_value_arr = np.logical_and(use_I_app_arr, use_g_ampa_arr) + use_value_arr = np.logical_and(use_value_arr, use_g_gaba_arr) + ### append the recorded values to the array of the corresponding population + use_output_pop_dict[pop_name].append(use_value_arr) + + ### concatenate the arrays of the individual populations + for pop_name in self.pop_name_list: + use_output_pop_dict[pop_name] = np.concatenate( + use_output_pop_dict[pop_name] + ) + + ### finaly only use values defined by ues_output... 
+ for pop_name in self.pop_name_list: + output_pop_dict[pop_name] = output_pop_dict[pop_name][ + use_output_pop_dict[pop_name] + ] + + return output_pop_dict + + def get_input_for_many_neurons_net(self): + """ + get the inputs for the parallel many neurons network simulation + + need a list of dicts, keys=pop_name, lsit=number of networks + """ + + ### create dicts with lists for the populations + I_app_arr_list_dict = {} + g_ampa_arr_list_dict = {} + g_gaba_arr_list_dict = {} + use_I_app_arr_list_dict = {} + use_g_ampa_arr_list_dict = {} + use_g_gaba_arr_list_dict = {} + I_app_arr_dict = {} + g_ampa_arr_dict = {} + g_gaba_arr_dict = {} + for pop_name in self.pop_name_list: + ### prepare grid for I, g_ampa and g_gaba + ### bounds + g_ampa_max = self.g_max_dict[pop_name]["ampa"] + g_gaba_max = self.g_max_dict[pop_name]["gaba"] + I_max = self.I_app_max_dict[pop_name] + + ### create value_arrays + I_app_value_array = np.linspace( + -I_max, I_max, self.nr_vals_interpolation_grid + ) + g_ampa_value_array = np.linspace( + 0, g_ampa_max, self.nr_vals_interpolation_grid + ) + g_gaba_value_array = np.linspace( + 0, g_gaba_max, self.nr_vals_interpolation_grid + ) + + ### store these value arrays for each pop + I_app_arr_dict[pop_name] = I_app_value_array + g_ampa_arr_dict[pop_name] = g_ampa_value_array + g_gaba_arr_dict[pop_name] = g_gaba_value_array + + ### create use values arrays + use_I_app_array = np.array([I_max > 0] * self.nr_vals_interpolation_grid) + use_g_ampa_array = np.array( + [g_ampa_max > 0] * self.nr_vals_interpolation_grid + ) + use_g_gaba_array = np.array( + [g_gaba_max > 0] * self.nr_vals_interpolation_grid + ) + ### use at least a single value + use_I_app_array[0] = True + use_g_ampa_array[0] = True + use_g_gaba_array[0] = True + + ### get all combinations (grid) of value_arrays + I_g_arr = np.array( + list( + itertools.product( + *[I_app_value_array, g_ampa_value_array, g_gaba_value_array] + ) + ) + ) + + ### get all combinations (grid) of the use values 
arrays + use_I_g_arr = np.array( + list( + itertools.product( + *[use_I_app_array, use_g_ampa_array, use_g_gaba_array] + ) + ) + ) + + ### individual value arrays from combinations + I_app_arr = I_g_arr[:, 0] + g_ampa_arr = I_g_arr[:, 1] + g_gaba_arr = I_g_arr[:, 2] + + ### individual use values arrays from combinations + use_I_app_arr = use_I_g_arr[:, 0] + use_g_ampa_arr = use_I_g_arr[:, 1] + use_g_gaba_arr = use_I_g_arr[:, 2] + + ### split the arrays for the networks + networks_size_list = np.array( + [self.nr_neurons_of_pop_per_net] * self.nr_networks + ) + split_idx_arr = np.cumsum(networks_size_list)[:-1] + ### after this split the last array may be smaller than the others --> append zeros + ### value arrays + I_app_arr_list = np.split(I_app_arr, split_idx_arr) + g_ampa_arr_list = np.split(g_ampa_arr, split_idx_arr) + g_gaba_arr_list = np.split(g_gaba_arr, split_idx_arr) + ### use value arrays + use_I_app_arr_list = np.split(use_I_app_arr, split_idx_arr) + use_g_ampa_arr_list = np.split(use_g_ampa_arr, split_idx_arr) + use_g_gaba_arr_list = np.split(use_g_gaba_arr, split_idx_arr) + + ### check if last network is smaler + if self.nr_last_network < self.nr_neurons_of_pop_per_net: + ### if yes --> append zeros to value arrays + ### and append False to use values arrays + nr_of_zeros_append = round( + self.nr_neurons_of_pop_per_net - self.nr_last_network, 0 + ) + ### value arrays + I_app_arr_list[-1] = np.concatenate( + [I_app_arr_list[-1], np.zeros(nr_of_zeros_append)] + ) + g_ampa_arr_list[-1] = np.concatenate( + [g_ampa_arr_list[-1], np.zeros(nr_of_zeros_append)] + ) + g_gaba_arr_list[-1] = np.concatenate( + [g_gaba_arr_list[-1], np.zeros(nr_of_zeros_append)] + ) + ### use values arrays + use_I_app_arr_list[-1] = np.concatenate( + [use_I_app_arr_list[-1], np.array([False] * nr_of_zeros_append)] + ) + use_g_ampa_arr_list[-1] = np.concatenate( + [use_g_ampa_arr_list[-1], np.array([False] * nr_of_zeros_append)] + ) + use_g_gaba_arr_list[-1] = np.concatenate( + 
[use_g_gaba_arr_list[-1], np.array([False] * nr_of_zeros_append)] + ) + + ### store the array lists into the population dicts + ### value arrays + I_app_arr_list_dict[pop_name] = I_app_arr_list + g_ampa_arr_list_dict[pop_name] = g_ampa_arr_list + g_gaba_arr_list_dict[pop_name] = g_gaba_arr_list + ### use value arrays + use_I_app_arr_list_dict[pop_name] = use_I_app_arr_list + use_g_ampa_arr_list_dict[pop_name] = use_g_ampa_arr_list + use_g_gaba_arr_list_dict[pop_name] = use_g_gaba_arr_list + + ### restructure the dict of lists into a list for networks of list for populations + I_app_list = [] + g_ampa_list = [] + g_gaba_list = [] + use_I_app_list = [] + use_g_ampa_list = [] + use_g_gaba_list = [] + for net_idx in range(self.nr_networks): + ### value arrays + I_app_list.append( + [ + I_app_arr_list_dict[pop_name][net_idx] + for pop_name in self.pop_name_list + ] + ) + g_ampa_list.append( + [ + g_ampa_arr_list_dict[pop_name][net_idx] + for pop_name in self.pop_name_list + ] + ) + g_gaba_list.append( + [ + g_gaba_arr_list_dict[pop_name][net_idx] + for pop_name in self.pop_name_list + ] + ) + ### use values arrays + use_I_app_list.append( + [ + use_I_app_arr_list_dict[pop_name][net_idx] + for pop_name in self.pop_name_list + ] + ) + use_g_ampa_list.append( + [ + use_g_ampa_arr_list_dict[pop_name][net_idx] + for pop_name in self.pop_name_list + ] + ) + use_g_gaba_list.append( + [ + use_g_gaba_arr_list_dict[pop_name][net_idx] + for pop_name in self.pop_name_list + ] + ) + + return { + "I_app_list": I_app_list, + "g_ampa_list": g_ampa_list, + "g_gaba_list": g_gaba_list, + "use_I_app_list": use_I_app_list, + "use_g_ampa_list": use_g_ampa_list, + "use_g_gaba_list": use_g_gaba_list, + "I_app_arr_dict": I_app_arr_dict, + "g_ampa_arr_dict": g_ampa_arr_dict, + "g_gaba_arr_dict": g_gaba_arr_dict, + } + + for pop_name in self.pop_name_list: + ### prepare grid for I, g_ampa and g_gaba + ### bounds + g_ampa_max = self.g_max_dict[pop_name]["ampa"] + g_gaba_max = 
self.g_max_dict[pop_name]["gaba"] + I_max = self.I_app_max_dict[pop_name] + ### number of points for individual value arrays: I, g_ampa and g_gaba + number_of_points = np.round( + self.nr_neurons_net_many_total ** (1 / 3), 0 + ).astype(int) + ### create value_arrays + I_app_value_array = np.linspace(-I_max, I_max, number_of_points) + g_ampa_value_array = np.linspace(0, g_ampa_max, number_of_points) + g_gaba_value_array = np.linspace(0, g_gaba_max, number_of_points) + ### get all combinations (grid) of value_arrays + I_g_arr = np.array( + list( + itertools.product( + *[I_app_value_array, g_ampa_value_array, g_gaba_value_array] + ) + ) + ) + ### individual value arrays from combinations + I_app_arr = I_g_arr[:, 0] + g_ampa_arr = I_g_arr[:, 1] + g_gaba_arr = I_g_arr[:, 2] + + ### split the arrays into the sizes of the many-neuron networks + split_idx_arr = np.cumsum(self.nr_many_neurons_list[pop_name])[:-1] + + I_app_arr_list = np.split(I_app_arr, split_idx_arr) + g_ampa_arr_list = np.split(g_ampa_arr, split_idx_arr) + g_gaba_arr_list = np.split(g_gaba_arr, split_idx_arr) + + class get_interp_3p: + def __init__( + self, values, model_conf_obj, var_name_dict, x=None, y=None, z=None + ) -> None: + """ + x, y, and z are the increasing gid steps on the interpolation grid + set z=None to get 2D interpiolation + set y and z = None to get 1D interpolation + """ + self.x = x + self.y = y + self.z = z + self.values = values + self.model_conf_obj = model_conf_obj + self.var_name_dict = var_name_dict + + if ( + isinstance(self.x, type(None)) + and isinstance(self.y, type(None)) + and isinstance(self.z, type(None)) + ): + error_msg = ( + "ERROR get_interp_3p: at least one of x,y,z has to be an array" + ) + model_conf_obj.log(error_msg) + raise AssertionError(error_msg) + + def __call__(self, x=None, y=None, z=None): + ### check x + if isinstance(x, type(None)): + if not isinstance(self.x, type(None)): + error_msg = f"ERROR get_interp_3p: interpolation values for 
{self.var_name_dict['x']} were given but sample points are missing!" + self.model_conf_obj.log(error_msg) + raise AssertionError(error_msg) + tmp_x = 0 + else: + if isinstance(self.x, type(None)): + warning_txt = f"WARNING get_interp_3p: sample points for {self.var_name_dict['x']} are given but no interpolation values for {self.var_name_dict['x']} were given!" + self.model_conf_obj.log(warning_txt) + x = None + tmp_x = 0 + else: + tmp_x = x + + ### check y + if isinstance(y, type(None)): + if not isinstance(self.y, type(None)): + error_msg = f"ERROR get_interp_3p: interpolation values for {self.var_name_dict['y']} were given but sample points are missing!" + self.model_conf_obj.log(error_msg) + raise AssertionError(error_msg) + tmp_y = 0 + else: + if isinstance(self.y, type(None)): + warning_txt = f"WARNING get_interp_3p: sample points for {self.var_name_dict['y']} are given but no interpolation values for {self.var_name_dict['y']} were given!" + self.model_conf_obj.log(warning_txt) + y = None + tmp_y = 0 + else: + tmp_y = y + + ### check z + if isinstance(z, type(None)): + if not isinstance(self.y, type(None)): + error_msg = f"ERROR get_interp_3p: interpolation values for {self.var_name_dict['z']} were given but sample points are missing!" + self.model_conf_obj.log(error_msg) + raise AssertionError(error_msg) + tmp_z = 0 + else: + if isinstance(self.z, type(None)): + warning_txt = f"WARNING get_interp_3p: sample points for {self.var_name_dict['z']} are given but no interpolation values for {self.var_name_dict['z']} were given!" 
+ self.model_conf_obj._p_w(warning_txt) + self.model_conf_obj.log(warning_txt) + z = None + tmp_z = 0 + else: + tmp_z = z + + ### get input arrays + input_arr_dict = { + "x": np.array(tmp_x).reshape(-1), + "y": np.array(tmp_y).reshape(-1), + "z": np.array(tmp_z).reshape(-1), + } + + ### check if the arrays with size larger 1 have same size + size_arr = np.array([val.size for val in input_arr_dict.values()]) + mask = size_arr > 1 + if True in mask: + input_size = size_arr[mask][0] + if not (input_size == size_arr[mask]).all(): + raise ValueError( + "ERROR model_configurator get_interp_3p: x,y,z sample points have to be either single values or arrays. All arrays have to have same size" + ) + + ### if there are inputs only consisting of a single value --> duplicate to increase size if there are also array inputs + for idx, larger_1 in enumerate(mask): + if not larger_1 and True in mask: + val = input_arr_dict[list(input_arr_dict.keys())[idx]][0] + input_arr_dict[list(input_arr_dict.keys())[idx]] = ( + np.ones(input_size) * val + ) + + ### get the sample points + use_variable_names_list = ["x", "y", "z"] + if isinstance(x, type(None)): + use_variable_names_list.remove("x") + if isinstance(y, type(None)): + use_variable_names_list.remove("y") + if isinstance(z, type(None)): + use_variable_names_list.remove("z") + point_arr = np.array( + [input_arr_dict[var_name] for var_name in use_variable_names_list] + ).T + + ### get the grid points, only use these which are not None + use_variable_names_list = ["x", "y", "z"] + if isinstance(self.x, type(None)): + use_variable_names_list.remove("x") + if isinstance(self.y, type(None)): + use_variable_names_list.remove("y") + if isinstance(self.z, type(None)): + use_variable_names_list.remove("z") + + interpolation_grid_arr_dict = { + "x": self.x, + "y": self.y, + "z": self.z, + } + points = tuple( + [ + interpolation_grid_arr_dict[var_name] + for var_name in use_variable_names_list + ] + ) + + ### get shape of values + values_shape 
= tuple( + [ + interpolation_grid_arr_dict[var_name].size + for var_name in use_variable_names_list + ] + ) + + return interpn( + points=points, + values=self.values.reshape(values_shape), + xi=point_arr, + ) + + def set_syn_load(self, synaptic_load_dict, synaptic_contribution_dict=None): + """ + Args: + synaptic_load_dict: dict or number + either a dictionary with keys = all population names the model_configurator should configure + or a single number between 0 and 1 + The dictionary values should be lists which contain either 2 values for ampa and gaba load, + only 1 value if the population has only ampa or gaba input. + For the strucutre of the dictionary check the print_guide + + synaptic_contribution_dict: dict, optional, default=None + by default the synaptic contributions of all afferent projections is equal + one can define other contributions in this dict + give for each affernt projection the contribution to the synaptic load of the target population + For the strucutre of the dictionary check the print_guide + """ + + ### set synaptic load + ### is dict --> replace internal dict values + if isinstance(synaptic_load_dict, dict): + ### check if correct number of population + if len(list(synaptic_load_dict.keys())) != len( + list(self.syn_load_dict.keys()) + ): + error_msg = f"ERROR set_syn_load: wrong number of populations given with 'synaptic_load_dict' given={len(list(synaptic_load_dict.keys()))}, expected={len(list(self.syn_load_dict.keys()))}" + self.log(error_msg) + raise ValueError(error_msg) + ### loop over all populations + for pop_name in synaptic_load_dict.keys(): + ### cehck pop name + if pop_name not in list(self.syn_load_dict.keys()): + error_msg = f"ERROR set_syn_load: the given population {pop_name} is not within the list of populations which should be configured {self.pop_name_list}" + self.log(error_msg) + raise ValueError(error_msg) + value_list = synaptic_load_dict[pop_name] + ### check value list + if len(value_list) != 
len(self.syn_load_dict[pop_name]): + error_msg = f"ERROR set_syn_load: for population {pop_name}, {len(self.syn_load_dict[pop_name])} syn load values should be given but {len(value_list)} were given" + self.log(error_msg) + raise ValueError(error_msg) + if not ( + (np.array(value_list) <= 1).all() + and (np.array(value_list) >= 0).all() + ): + error_msg = f"ERROR set_syn_load: the values for synaptic loads should be equal or smaller than 1, given for population {pop_name}: {value_list}" + self.log(error_msg) + raise ValueError(error_msg) + ### replace internal values with given values + self.syn_load_dict[pop_name] = value_list + else: + ### is not a dict --> check number + try: + synaptic_load = float(synaptic_load_dict) + except: + error_msg = "ERROR set_syn_load: if synaptic_load_dict is not a dictionary it should be a single number!" + self.log(error_msg) + raise ValueError(error_msg) + if not (synaptic_load <= 1 and synaptic_load >= 0): + error_msg = "ERROR set_syn_load: value for synaptic_loadshould be equal or smaller than 1" + self.log(error_msg) + raise ValueError(error_msg) + ### replace internal values with given value + for pop_name in self.syn_load_dict.keys(): + for idx in range(len(self.syn_load_dict[pop_name])): + self.syn_load_dict[pop_name][idx] = synaptic_load + ### transform syn load dict in correct form with projection target type keys + syn_load_dict = {} + for pop_name in self.pop_name_list: + syn_load_dict[pop_name] = {} + if ( + "ampa" in self.afferent_projection_dict[pop_name]["target"] + and "gaba" in self.afferent_projection_dict[pop_name]["target"] + ): + syn_load_dict[pop_name]["ampa"] = self.syn_load_dict[pop_name][0] + syn_load_dict[pop_name]["gaba"] = self.syn_load_dict[pop_name][1] + elif "ampa" in self.afferent_projection_dict[pop_name]["target"]: + syn_load_dict[pop_name]["ampa"] = self.syn_load_dict[pop_name][0] + syn_load_dict[pop_name]["gaba"] = 0 + elif "gaba" in self.afferent_projection_dict[pop_name]["target"]: + 
syn_load_dict[pop_name]["ampa"] = 0 + syn_load_dict[pop_name]["gaba"] = self.syn_load_dict[pop_name][0] + self.syn_load_dict = syn_load_dict + + ### set synaptic contribution + if not isinstance(synaptic_contribution_dict, type(None)): + ### loop over all given populations + for pop_name in synaptic_contribution_dict.keys(): + ### check pop_name + if pop_name not in list(self.syn_contr_dict.keys()): + error_msg = f"ERROR set_syn_load: the given population {pop_name} is not within the list of populations which should be configured {self.pop_name_list}" + self.log(error_msg) + raise ValueError(error_msg) + ### loop over given projection target type (ampa,gaba) + for given_proj_target_type in synaptic_contribution_dict[ + pop_name + ].keys(): + ### check given target type + if not ( + given_proj_target_type == "ampa" + or given_proj_target_type == "gaba" + ): + error_msg = f"ERROR set_syn_load: with the synaptic_contribution_dict for each given population a 'ampa' and/or 'gaba' dictionary contianing the corresponding afferent projections should be given, given key={given_proj_target_type}" + self.log(error_msg) + raise ValueError(error_msg) + ### check if for the projection target type the correct number of projections is given + given_proj_name_list = list( + synaptic_contribution_dict[pop_name][ + given_proj_target_type + ].keys() + ) + internal_proj_name_list = list( + self.syn_contr_dict[pop_name][given_proj_target_type].keys() + ) + if len(given_proj_name_list) != len(internal_proj_name_list): + error_msg = f"ERROR set_syn_load: in synaptic_contribution_dict for population {pop_name} and target_type {given_proj_target_type} wrong number of projections is given\ngiven={given_proj_name_list}, expected={internal_proj_name_list}" + self.log(error_msg) + raise ValueError(error_msg) + ### check if given contributions for the target type sum up to 1 + given_contribution_arr = np.array( + list( + synaptic_contribution_dict[pop_name][ + given_proj_target_type + ].values() 
+ ) + ) + if round(given_contribution_arr.sum(), 6) != 1: + error_msg = f"ERROR set_syn_load: given synaptic contributions for population {pop_name} and target_type {given_proj_target_type} do not sum up to 1: given={given_contribution_arr}-->{round(given_contribution_arr.sum(),6)}" + self.log(error_msg) + raise ValueError(error_msg) + ### loop over given afferent projections + for proj_name in given_proj_name_list: + ### check if projection name exists + if proj_name not in internal_proj_name_list: + error_msg = f"ERROR set_syn_load: given projection {proj_name} given with synaptic_contribution_dict no possible projection, possible={internal_proj_name_list}" + self.log(error_msg) + raise ValueError(error_msg) + ### replace internal value of the projection with given value + self.syn_contr_dict[pop_name][given_proj_target_type][ + proj_name + ] = synaptic_contribution_dict[pop_name][ + given_proj_target_type + ][ + proj_name + ] + + ### set the weights in the afferent_projection_dict based on the given synaptic contributions + for pop_name in self.pop_name_list: + weight_list = [] + for proj_name in self.afferent_projection_dict[pop_name][ + "projection_names" + ]: + ### get proj info + proj_dict = self.get_proj_dict(proj_name) + proj_target_type = proj_dict["proj_target_type"] + + ### obtain the weight using the given syn_contr_dict and the syn_contr_max_dict (assuming max weights) + target_type_contr_dict = self.syn_contr_dict[pop_name][proj_target_type] + target_type_contr_max_dict = self.get_syn_contr_dict( + pop_name=pop_name, + target_type=proj_target_type, + use_max_weights=True, + normalize=True, + ) + ### convert the synaptic contribution dicts to arrays + target_type_contr_arr = np.array(list(target_type_contr_dict.values())) + target_type_contr_max_arr = np.array( + list(target_type_contr_max_dict.values()) + ) + ### get the transformation from synaptic contributions assuming max weights to given synaptic contributions + contr_transform_arr = 
target_type_contr_max_arr / target_type_contr_arr + ### normalize the transform_arr by the largest scaling --> obtain the weight factors + contr_transform_arr /= contr_transform_arr.max() + ### get the weight of the current projection + weight = ( + self.g_max_dict[pop_name][proj_target_type] + * contr_transform_arr[ + list(target_type_contr_dict.keys()).index(proj_name) + ] + ) + ### append weight to weight list + weight_list.append(weight) + ### replace the weights in the afferent_projection_dict + self.afferent_projection_dict[pop_name]["weights"] = weight_list + + ### now scale the weights based on the synaptic load + for pop_name in self.pop_name_list: + for target_type in ["ampa", "gaba"]: + ### get the synaptic load based on the weights + syn_load = self.get_syn_load(pop_name=pop_name, target_type=target_type) + ### if the obtained syn load with the weights is smaller than the given target syn load + ### print warning because upscaling is not possible, syn load is smaller than the user wanted + print( + f"syn_load={syn_load}, target={self.syn_load_dict[pop_name][target_type]}" + ) + if syn_load < self.syn_load_dict[pop_name][target_type]: + ### the weights cannot be upscaled because syn_load was obtained with max weights + ### --> print a warning + warning_txt = f"WARNING set_syn_load: the synaptic load for population {pop_name} and target_type {target_type} cannot reach teh given synaptic load using the given synaptic contributions without scaling the weights over the maximum weights!\ngiven syn_load={self.syn_load_dict[pop_name][target_type]}, obtained syn_load={syn_load}" + self.log(warning_txt) + self._p_w(warning_txt) + ### update the syn_load_dict with the obtained syn_load + self.syn_load_dict[pop_name][target_type] = syn_load + elif syn_load > 0: + ### get the weights + weight_arr = np.array( + self.afferent_projection_dict[pop_name]["weights"] + ) + ### get the proj target type array + proj_target_type_arr = np.array( + 
self.afferent_projection_dict[pop_name]["target"] + ) + ### select the weights for the target type + weight_arr = weight_arr[proj_target_type_arr == target_type] + ### scale the weights + weight_arr *= self.syn_load_dict[pop_name][target_type] / syn_load + ### update the weights in the afferent_projection_dict + weight_idx_arr = np.where(proj_target_type_arr == target_type)[0] + for weight_idx_new, weight_idx_original in enumerate( + weight_idx_arr + ): + self.afferent_projection_dict[pop_name]["weights"][ + weight_idx_original + ] = weight_arr[weight_idx_new] + + ### print guide + self._p_g(_p_g_after_set_syn_load) + + def set_weights(self, weights): + for pop_name in self.pop_name_list: + self.afferent_projection_dict[pop_name]["weights"] = [] + for proj_name in self.afferent_projection_dict[pop_name][ + "projection_names" + ]: + self.afferent_projection_dict[pop_name]["weights"].append( + weights[pop_name][proj_name] + ) + + def get_syn_load(self, pop_name: str, target_type: str) -> float: + """ + Calculates the synaptic load of a population for a given target type for the given weights of the afferent_projection_dict + + Args: + pop_name: str + name of the population + + target_type: str + either 'ampa' or 'gaba' + + Returns: + syn_load: float + synaptic load of the population for the given target type + """ + ### get the proj target type array + proj_target_type_arr = np.array( + self.afferent_projection_dict[pop_name]["target"] + ) + if target_type in proj_target_type_arr: + ### get the weights + weight_arr = np.array(self.afferent_projection_dict[pop_name]["weights"]) + ### select the weights for the target type + weight_arr = weight_arr[proj_target_type_arr == target_type] + ### get the pre size + size_arr = np.array(self.afferent_projection_dict[pop_name]["size"]) + ### select the pre size for the target type + size_arr = size_arr[proj_target_type_arr == target_type] + ### get the probaility + prob_arr = 
np.array(self.afferent_projection_dict[pop_name]["probability"]) + ### select the probability for the target type + prob_arr = prob_arr[proj_target_type_arr == target_type] + ### get the firing rate + firing_rate_arr = np.array( + self.afferent_projection_dict[pop_name]["target firing rate"] + ) + ### select the firing rate for the target type + firing_rate_arr = firing_rate_arr[proj_target_type_arr == target_type] + + ### get the synaptic load based on weights, sizes, probabilities and max weights + syn_load = np.sum(weight_arr * size_arr * prob_arr * firing_rate_arr) / ( + self.g_max_dict[pop_name][target_type] + * np.sum(size_arr * prob_arr * firing_rate_arr) + ) + else: + syn_load = 0 + + return syn_load + + def get_template_synaptic_contribution_dict(self, given_dict): + """ + converts the full template dict with all keys for populations, target-types and projections into a reduced dict + which only contains the keys which lead to values smaller 1 + """ + + ret_dict = {} + for key in given_dict.keys(): + if isinstance(given_dict[key], dict): + rec_dict = self.get_template_synaptic_contribution_dict(given_dict[key]) + if len(rec_dict) > 0: + ret_dict[key] = self.get_template_synaptic_contribution_dict( + given_dict[key] + ) + else: + if given_dict[key] < 1: + ret_dict[key] = given_dict[key] + + return ret_dict + + def divide_almost_equal(self, number, num_parts): + # Calculate the quotient and remainder + quotient, remainder = divmod(number, num_parts) + + # Initialize a list to store the almost equal integers + result = [quotient] * num_parts + + # Distribute the remainder evenly among the integers + for i in range(remainder): + result[i] += 1 + + return result + + def compile_net_many_sequential(self): + network_list = [ + net_many_dict["net"] + for net_many_dict_list in self.net_many_dict.values() + for net_many_dict in net_many_dict_list + ] + for net in network_list: + self.compile_net_many(net=net) + + def compile_net_many_parallel(self): + 
nr_available_workers = int(multiprocessing.cpu_count() / 2) + network_list = [ + net_many_dict["net"] + for net_many_dict_list in self.net_many_dict.values() + for net_many_dict in net_many_dict_list + ] + with multiprocessing.Pool(nr_available_workers) as p: + p.map(self.compile_net_many, network_list) + + ### for each network have network idx + ### network 0 is base network + ### netork 1,2,3...N are the single neuron networks for the N populations + ### start idx = N+1 (inclusive), end_idx = number many networks + N (inclusive) + for net_idx in range( + len(self.pop_name_list) + 1, len(network_list) + len(self.pop_name_list) + 1 + ): + ### get the name of the run folder of the network + ### search for a folder which starts with run_ + ### there should only be 1 --> get run_folder_name as str + run_folder_name = _find_folder_with_prefix( + base_path=f"annarchy_folders/many_net_{net_idx}", prefix="run_" + ) + run_folder_name = f"/scratch/olmai/Projects/PhD/CompNeuroPy/CompNeuroPy/examples/model_configurator/annarchy_folders/many_net_{net_idx}//{run_folder_name}" + + print(run_folder_name) + ### import the ANNarchyCore.so module from this folder + spec = importlib.util.spec_from_file_location( + f"ANNarchyCore{net_idx}", f"{run_folder_name}/ANNarchyCore{net_idx}.so" + ) + foo = importlib.util.module_from_spec(spec) + spec.loader.exec_module(foo) + + ### overwrite the entries in the network manager + _network[net_idx]["instance"] = foo + _network[net_idx]["compiled"] = True + _network[net_idx]["directory"] = run_folder_name + + def get_max_syn_currents(self, pop_name: str) -> list: + """ + obtain I_app_max, g_ampa_max and g_gaba max. 
+ f_max = f_0 + f_t + 100 + I_app_max causes f_max (increases f from f_0 to f_max) + g_gaba_max causes max IPSP + g_ampa_max cancels out g_gaba_max IPSP + + Args: + pop_name: str + population name from original model + + return: + list containing [I_max, g_ampa_max, g_gaba_max] + + Abbreviations: + f_max: max firing rate + + f_0: firing rate without syn currents + + f_t: target firing rate + """ + + ### TODO: problem for g_gaba: what if resting potential is <=-90... + ### find g_gaba max using max IPSP + self.log("search g_gaba_max with y(X) = PSP(g_ampa=0, g_gaba=X)") + g_gaba_max = self.incremental_continuous_bound_search( + y_X=lambda X_val: self.get_ipsp( + net=self.net_single_dict[pop_name]["net"], + population=self.net_single_dict[pop_name]["population"], + variable_init_sampler=self.prepare_psp_dict[pop_name][ + "variable_init_sampler" + ], + monitor=self.net_single_dict[pop_name]["monitor"], + I_app_hold=self.prepare_psp_dict[pop_name]["I_app_hold"], + g_gaba=X_val, + ), + y_bound=self.max_psp_dict[pop_name], + X_0=0, + y_0=0, + alpha_abs=0.005, + X_increase=0.1, + ) + + ### for g_ampa EPSPs can lead to spiking + ### --> find g_ampa max by "overriding" IPSP of g_gaba max + self.log( + f"search g_ampa_max with y(X) = PSP(g_ampa=X, g_gaba=g_gaba_max={g_gaba_max})" + ) + g_ampa_max = self.incremental_continuous_bound_search( + y_X=lambda X_val: self.get_ipsp( + net=self.net_single_dict[pop_name]["net"], + population=self.net_single_dict[pop_name]["population"], + variable_init_sampler=self.prepare_psp_dict[pop_name][ + "variable_init_sampler" + ], + monitor=self.net_single_dict[pop_name]["monitor"], + I_app_hold=self.prepare_psp_dict[pop_name]["I_app_hold"], + g_ampa=X_val, + g_gaba=g_gaba_max, + ), + y_bound=0, + X_0=0, + y_0=self.get_ipsp( + net=self.net_single_dict[pop_name]["net"], + population=self.net_single_dict[pop_name]["population"], + variable_init_sampler=self.prepare_psp_dict[pop_name][ + "variable_init_sampler" + ], + 
monitor=self.net_single_dict[pop_name]["monitor"], + I_app_hold=self.prepare_psp_dict[pop_name]["I_app_hold"], + g_ampa=0, + g_gaba=g_gaba_max, + ), + alpha_abs=0.005, + X_increase=g_gaba_max / 10, + ) + + ### get f_0 and f_max + f_0 = self.get_rate( + net=self.net_single_dict[pop_name]["net"], + population=self.net_single_dict[pop_name]["population"], + variable_init_sampler=self.net_single_dict[pop_name][ + "variable_init_sampler" + ], + monitor=self.net_single_dict[pop_name]["monitor"], + )[0] + f_max = f_0 + self.target_firing_rate_dict[pop_name] + 100 + + ### find I_max with f_0, and f_max using incremental_continuous_bound_search + self.log("search I_app_max with y(X) = f(I_app=X, g_ampa=0, g_gaba=0)") + I_max = self.incremental_continuous_bound_search( + y_X=lambda X_val: self.get_rate( + net=self.net_single_dict[pop_name]["net"], + population=self.net_single_dict[pop_name]["population"], + variable_init_sampler=self.net_single_dict[pop_name][ + "variable_init_sampler" + ], + monitor=self.net_single_dict[pop_name]["monitor"], + I_app=X_val, + )[0], + y_bound=f_max, + X_0=0, + y_0=f_0, + alpha_abs=1, + ) + + return [I_max, g_ampa_max, g_gaba_max] + + def incremental_continuous_bound_search( + self, + y_X, + y_bound, + X_0, + y_0, + alpha_rel=0.01, + alpha_abs=None, + n_it_max=100, + X_increase=1, + saturation_thresh=10, + saturation_warning=True, + accept_non_dicontinuity=False, + bound_type="equal", + ): + """ + you have system X --> y + you want X for y=y_bound (either upper or lower bound) + if you increase X (from starting point) y gets closer to y_bound! 
+ + expectes a continuous funciton without from P_0(X_0,y_0) to P_bound(X_bound, y_bound) + if it finds a saturation or non-continuous "step" on the way to P_bound it will return + the X_bound for the end of the continuous part from P_0 to P_bound --> y_bound will not + be reached + + Args: + y_X: function + returns a single number given a single number, call like y = y_X(X) + increasing X should bring y closer to y_bound + + y_bound: number + the bound for y for which an X_bound should be found + + X_0: number + start value of X, from where the search should start + + y_0: number + start value of y which results from X_0 + + alpha_rel: number, optional, default=0.001 + allowed relative tolerance for deviations of y from y_bound + if alpha_abs is given it overrides alpha_rel + + alpha_abs: number, optional, default=None + allowed absolute tolerance for deviations of y from y_bound + if alpha_abs is given it overrides alpha_rel + + n_it_max: number, optional, default=100 + maximum of iterations to find X_bound + + X_increase: number, optional, default=1 + the first increase of X (starting from X_0) to obtain the first new y_val + i.e. 
first calculation is: y_val = y_X(X_0+X_increase) + + saturation_thresh: number, optional, default=5 + if y does not change while increasing X by X_increase the search will stop + after this number of trials + + saturation_warning: bool, optional, default=True + if you want to get a warning when the saturation is reached during search + + accept_non_dicontinuity: bool, optional, default=False + if you do not want to search only in the first continuous search space + + bound_type: str, optional, default="equal" + equal, greater or less + equal: result should be near bound within tolerance + greater: result should be at least larger bound within tolerance + less: result should be smaller bound within tolerance + + return: + X_bound: + X value which causes y=y_bound + """ + ### TODO catch difference to target goes up in both directions + ### then nothing new is predicted --> fails + + self.log( + f"find X_bound for: y_0(X_0={X_0})={y_0} --> y_bound(X_bound=??)={y_bound}" + ) + + ### get tolerance + tolerance = abs(y_bound - y_0) * alpha_rel + if not isinstance(alpha_abs, type(None)): + tolerance = alpha_abs + + ### define stop condition + if bound_type == "equal": + stop_condition = ( + lambda y_val, n_it: ( + ((y_bound - tolerance) <= y_val) + and (y_val <= (y_bound + tolerance)) + ) + or n_it >= n_it_max + ) + elif bound_type == "greater": + stop_condition = ( + lambda y_val, n_it: ( + ((y_bound - 0) <= y_val) and (y_val <= (y_bound + 2 * tolerance)) + ) + or n_it >= n_it_max + ) + elif bound_type == "less": + stop_condition = ( + lambda y_val, n_it: ( + ((y_bound - 2 * tolerance) <= y_val) and (y_val <= (y_bound + 0)) + ) + or n_it >= n_it_max + ) + + ### check if y(X) is increasing + is_increasing = y_bound > y_0 + + ### search for X_val + X_list_predict = [X_0] + y_list_predict = [y_0] + X_list_all = [X_0] + y_list_all = [y_0] + n_it_first_round = 0 + X_val = X_0 + X_increase + y_val = y_0 + y_not_changed_counter = 0 + X_change_predicted = X_increase + while not 
stop_condition(y_val, n_it_first_round): + ### get y_val for X + y_val_pre = y_val + y_val = y_X(X_val) + y_change = y_val_pre - y_val + + ### store search history + X_list_all.append(X_val) + y_list_all.append(y_val) + + ### get next X_val depending on if y_val changed or not + if abs(y_change) > 0: + ### append X_val and y_val to y_list/X_list + y_list_predict.append(y_val) + X_list_predict.append(X_val) + ### predict new X_val using y_bound as predictor + X_val_pre = X_val + X_val = self.predict_1d( + X=y_list_predict, y=X_list_predict, X_pred=y_bound + )[0] + X_change_predicted = X_val - X_val_pre + ### now actually update X_val + X_val = X_val_pre + X_change_predicted * (1 + y_not_changed_counter / 2) + else: + ### just increase X_val + X_val = X_val + X_change_predicted * (1 + y_not_changed_counter / 2) + + ### check saturation of y_val + if abs(y_change) < tolerance: + ### increase saturation counter + ### saturation counter also increases updates of X_val + y_not_changed_counter += 1 + else: + ### reset saturation counter + y_not_changed_counter = 0 + + ### break if y_val saturated + if y_not_changed_counter >= saturation_thresh: + break + + ### increase iterator + n_it_first_round += 1 + + ### catch the initial point already satisified stop condition + if len(X_list_all) == 1: + warning_txt = "WARNING incremental_continuous_bound_search: search did not start because initial point already satisfied stop condition!" 
+ self._p_w(warning_txt) + self.log(warning_txt) + return X_0 + + ### warning if search saturated + if (y_not_changed_counter >= saturation_thresh) and saturation_warning: + warning_txt = f"WARNING incremental_continuous_bound_search: search saturated at y={y_list_predict[-1]} while searching for X_val for y_bound={y_bound}" + self._p_w(warning_txt) + self.log(warning_txt) + self.log("initial search lists:") + self.log("all:") + self.log(np.array([X_list_all, y_list_all]).T) + self.log("predict:") + self.log(np.array([X_list_predict, y_list_predict]).T) + + ### if search saturated right at the begining --> search failed (i.e. y did not change while increasing X) + if (y_not_changed_counter >= saturation_thresh) and len(X_list_predict) == 1: + error_msg = "ERROR incremental_continuous_bound_search: search failed because changing X_val did not change y_val" + self.log(error_msg) + raise AssertionError(error_msg) + + ### get best X value for which y is closest to y_bound + idx_best = np.argmin(np.absolute(np.array(y_list_predict) - y_bound)) + X_bound = X_list_predict[idx_best] + + ### sort y_list_predict and corresponding X_list_predict + ### get value pair which is before bound and value pair which is behind bound + ### if this does not work... 
use previous X_0 and X_bound + sort_idx_arr = np.argsort(y_list_predict) + X_arr_predict_sort = np.array(X_list_predict)[sort_idx_arr] + y_arr_predict_sort = np.array(y_list_predict)[sort_idx_arr] + over_y_bound_arr = y_arr_predict_sort > y_bound + over_y_bound_changed_idx = np.where(np.diff(over_y_bound_arr))[0] + if len(over_y_bound_changed_idx) == 1: + if over_y_bound_changed_idx[0] < len(y_arr_predict_sort): + X_aside_change_list = [ + X_arr_predict_sort[over_y_bound_changed_idx[0]], + X_arr_predict_sort[over_y_bound_changed_idx[0] + 1], + ] + y_aside_change_list = [ + y_arr_predict_sort[over_y_bound_changed_idx[0]], + y_arr_predict_sort[over_y_bound_changed_idx[0] + 1], + ] + X_0 = min(X_aside_change_list) + X_bound = max(X_aside_change_list) + y_0 = min(y_aside_change_list) + self.log("predict sorted:") + self.log(np.array([X_arr_predict_sort, y_arr_predict_sort, over_y_bound_arr]).T) + self.log(over_y_bound_changed_idx) + + ### if y cannot get larger or smaller than y_bound one has to check if you not "overshoot" with X_bound + ### --> fine tune result by investigating the space between X_0 and X_bound and predict a new X_bound + self.log(f"X_0: {X_0}, X_bound:{X_bound} for final predict list") + X_space_arr = np.linspace(X_0, X_bound, 100) + y_val = y_0 - [-1, 1][int(is_increasing)] + X_list_predict = [] + y_list_predict = [] + X_list_all = [] + y_list_all = [] + did_break = False + n_it_second_round = 0 + for X_val in X_space_arr: + y_val_pre = y_val + y_val = y_X(X_val) + X_list_all.append(X_val) + y_list_all.append(y_val) + if y_val != y_val_pre: + ### if y_val changed + ### append X_val and y_val to y_list/X_list + y_list_predict.append(y_val) + X_list_predict.append(X_val) + ### if already over y_bound -> stop + if y_val > y_bound and is_increasing: + did_break = True + break + if y_val < y_bound and not is_increasing: + did_break = True + break + n_it_second_round += 1 + ### if did break early --> use again finer bounds + if did_break and 
n_it_second_round < 90: + X_space_arr = np.linspace( + X_list_predict[-2], X_list_predict[-1], 100 - n_it_second_round + ) + y_val = y_list_predict[-2] + for X_val in X_space_arr: + y_val_pre = y_val + y_val = y_X(X_val) + X_list_all.append(X_val) + y_list_all.append(y_val) + if y_val != y_val_pre: + ### if y_val changed + ### append X_val and y_val to y_list/X_list + y_list_predict.append(y_val) + X_list_predict.append(X_val) + ### if already over y_bound -> stop + if y_val > y_bound and is_increasing: + break + if y_val < y_bound and not is_increasing: + break + ### sort value lists + sort_idx_all_arr = np.argsort(X_list_all) + X_list_all = (np.array(X_list_all)[sort_idx_all_arr]).tolist() + y_list_all = (np.array(y_list_all)[sort_idx_all_arr]).tolist() + sort_idx_predict_arr = np.argsort(X_list_predict) + X_list_predict = (np.array(X_list_predict)[sort_idx_predict_arr]).tolist() + y_list_predict = (np.array(y_list_predict)[sort_idx_predict_arr]).tolist() + + ### log + self.log("final predict lists:") + self.log("all:") + self.log(np.array([X_list_all, y_list_all]).T) + self.log("predict:") + self.log(np.array([X_list_predict, y_list_predict]).T) + + ### check if there is a discontinuity in y_all, starting with the first used value in y_predict + ### update all values with first predict value + first_y_used_in_predict = y_list_predict[0] + idx_first_y_in_all = y_list_all.index(first_y_used_in_predict) + y_list_all = y_list_all[idx_first_y_in_all:] + ### get discontinuity + discontinuity_idx_list = self.get_discontinuity_idx_list(y_list_all) + self.log("discontinuity_idx_list") + self.log(f"{discontinuity_idx_list}") + if len(discontinuity_idx_list) > 0 and not accept_non_dicontinuity: + ### there is a discontinuity + discontinuity_idx = discontinuity_idx_list[0] + ### only use values until discontinuity + y_bound_new = y_list_all[discontinuity_idx] + idx_best = y_list_predict.index(y_bound_new) + X_val_best = X_list_predict[idx_best] + y_val_best = 
y_list_predict[idx_best] + ### print warning + warning_txt = f"WARNING incremental_continuous_bound_search: found discontinuity, only reached y={y_bound_new} while searching for y_bound={y_bound}" + self._p_w(warning_txt) + ### log + self.log(warning_txt) + self.log( + f"discontinuities detected --> only use last values until first discontinuity: X={X_val_best}, y={y_val_best}" + ) + else: + ### there is no discontinuity + ### there can still be duplicates in the y_list --> remove them + ### get arrays + X_arr_predict = np.array(X_list_predict) + y_arr_predict = np.array(y_list_predict) + ### get unique indices + _, unique_indices = np.unique(y_arr_predict, return_index=True) + ### get arrays without duplicates in y_list + X_arr_predict = X_arr_predict[unique_indices] + y_arr_predict = y_arr_predict[unique_indices] + + ### now predict final X_val using y_arr + X_val = self.predict_1d( + X=y_arr_predict, y=X_arr_predict, X_pred=y_bound, linear=False + )[0] + y_val = y_X(X_val) + + ### append it to lists + X_list_predict.append(X_val) + y_list_predict.append(y_val) + + ### find best + idx_best = np.argmin(np.absolute(np.array(y_list_predict) - y_bound)) + X_val_best = X_list_predict[idx_best] + y_val_best = y_list_predict[idx_best] + + ### log + self.log(f"final values: X={X_val_best}, y={y_val_best}") + + ### warning for max iteration search + if not (n_it_first_round < n_it_max): + warning_txt = f"WARNING incremental_continuous_bound_search: reached max iterations to find X_bound to get y_bound={y_bound}, found X_bound causes y={y_val_best}" + self._p_w(warning_txt) + self.log(warning_txt) + + return X_val_best + + def get_discontinuity_idx_list(self, arr): + """ + Args: + arr: array-like + array for which its checked if there are discontinuities + """ + arr = np.array(arr) + range_data = arr.max() - arr.min() + diff_arr = np.diff(arr) + diff_rel_range_arr = diff_arr / range_data + diff_rel_range_abs_arr = np.absolute(diff_rel_range_arr) + peaks = find_peaks( + 
diff_rel_range_abs_arr, prominence=10 * np.mean(diff_rel_range_abs_arr) + ) + peaks_idx_list = peaks[0] + + return peaks_idx_list + + def predict_1d(self, X, y, X_pred, linear=True): + """ + Args: + X: array-like + X values + + y: array-like + y values, same size as X_values + + X_pred: array-like or number + X value(s) for which new y value(s) are predicted + + linear: bool, optional, default=True + if interpolation is linear + + return: + Y_pred_arr: array + predicted y values for X_pred + """ + if not linear: + if len(X) >= 4: + y_X = interp1d(x=X, y=y, fill_value="extrapolate", kind="cubic") + elif len(X) >= 3: + y_X = interp1d(x=X, y=y, fill_value="extrapolate", kind="quadratic") + else: + y_X = interp1d(x=X, y=y, fill_value="extrapolate", kind="linear") + y_pred_arr = y_X(X_pred) + return y_pred_arr.reshape(1) + + def get_rate_dict( + self, + net, + population_dict, + variable_init_sampler_dict, + monitor_dict, + I_app_dict, + g_ampa_dict, + g_gaba_dict, + ): + """ + function to obtain the firing rates of the populations of + the network given with 'idx' for given I_app, g_ampa and g_gaba values + + Args: + idx: int + network index given by the parallel_run function + + net: object + network object given by the parallel_run function + + net_many_dict: dict + dictionary containing a population_dict and a monitor_dict + which contain for each population name the + - ANNarchy Population object of the magic network + - ANNarchy Monitor object of the magic network + + I_app_arr_dict: dict of arrays + dictionary containing for each population the array with input values for I_app + + g_ampa_arr_dict: dict of arrays + dictionary containing for each population the array with input values for g_ampa + + g_gaba_arr_dict: dict of arrays + dictionary containing for each population the array with input values for g_gaba + + variable_init_sampler_dict: dict + dictionary containing for each population the initial variables sampler object + with the function.sample() to get 
initial values of the neurons + + self: object + the model_configurator object + + return: + f_rec_arr_dict: dict of arrays + dictionary containing for each population the array with the firing rates for the given inputs + """ + ### reset and set init values + net.reset() + for pop_name, varaible_init_sampler in variable_init_sampler_dict.items(): + population = net.get(population_dict[pop_name]) + variable_init_arr = varaible_init_sampler.sample(len(population), seed=0) + for var_idx, var_name in enumerate(population.variables): + set_val = variable_init_arr[:, var_idx] + setattr(population, var_name, set_val) + + ### slow down conductances (i.e. make them constant) + for pop_name in population_dict.keys(): + population = net.get(population_dict[pop_name]) + population.tau_ampa = 1e20 + population.tau_gaba = 1e20 + ### apply given variables + for pop_name in population_dict.keys(): + population = net.get(population_dict[pop_name]) + population.I_app = I_app_dict[pop_name] + population.g_ampa = g_ampa_dict[pop_name] + population.g_gaba = g_gaba_dict[pop_name] + ### simulate 500 ms initial duration + X ms + net.simulate(500 + self.simulation_dur) + ### get rate for the last X ms + f_arr_dict = {} + for pop_name in population_dict.keys(): + population = net.get(population_dict[pop_name]) + monitor = net.get(monitor_dict[pop_name]) + spike_dict = monitor.get("spike") + f_arr = np.zeros(len(population)) + for idx_n, n in enumerate(spike_dict.keys()): + time_list = np.array(spike_dict[n]) + nbr_spks = np.sum((time_list > (500 / dt())).astype(int)) + rate = nbr_spks / (self.simulation_dur / 1000) + f_arr[idx_n] = rate + f_arr_dict[pop_name] = f_arr + + return f_arr_dict + + def get_rate( + self, + net, + population, + variable_init_sampler, + monitor, + I_app=0, + g_ampa=0, + g_gaba=0, + ): + """ + simulates a population for X+500 ms and returns the firing rate of each neuron for the last X ms + X is defined with self.simulation_dur + + Args: + net: ANNarchy network + 
network which contains the population and monitor + + population: ANNarchy population + population which is recorded and stimulated + + variable_init_sampler: object + containing the initial values of the population neuron, use .sample() to get values + + monitor: ANNarchy monitor + to record spikes from population + + I_app: number or arr, optional, default = 0 + applied current to the population neurons, has to have the same size as the population + + g_ampa: number or arr, optional, default = 0 + applied ampa conductance to the population neurons, has to have the same size as the population + + g_gaba: number or arr, optional, default = 0 + applied gaba conductance to the population neurons, has to have the same size as the population + """ + ### reset and set init values + net.reset() + self.set_init_variables(population, variable_init_sampler) + ### slow down conductances (i.e. make them constant) + population.tau_ampa = 1e20 + population.tau_gaba = 1e20 + ### apply given variables + population.I_app = I_app + population.g_ampa = g_ampa + population.g_gaba = g_gaba + ### simulate 500 ms initial duration + X ms + net.simulate(500 + self.simulation_dur) + ### get rate for the last X ms + spike_dict = monitor.get("spike") + f_arr = np.zeros(len(population)) + for idx_n, n in enumerate(spike_dict.keys()): + time_list = np.array(spike_dict[n]) + nbr_spks = np.sum((time_list > (500 / dt())).astype(int)) + rate = nbr_spks / (self.simulation_dur / 1000) + f_arr[idx_n] = rate + + return f_arr + + def get_ipsp( + self, + net: Network, + population: Population, + variable_init_sampler, + monitor, + I_app_hold, + g_ampa=0, + g_gaba=0, + do_plot=False, + ): + """ + simulates a single spike at t=50 ms and records the change of v within a voltage_clamp neuron + + Args: + net: ANNarchy network + network which contains the population and monitor + + population: ANNarchy population + population which is recorded and stimulated + + variable_init_sampler: object + containing the 
initial values of the population neuron, use .sample() to get values + + monitor: ANNarchy monitor + to record v_clamp_rec from population + + g_ampa: number, optional, default = 0 + applied ampa conductance to the population neuron at t=50 ms + + g_gaba: number, optional, default = 0 + applied gaba conductance to the population neurons at t=50 ms + """ + ### reset network and set initial values + net.reset() + self.set_init_variables(population, variable_init_sampler) + ### apply input + population.I_app = I_app_hold + ### simulate 50 ms initial duration + net.simulate(50) + ### apply given conductances --> changes v + v_rec_rest = population.v[0] + population.v_psp_thresh = v_rec_rest + population.g_ampa = g_ampa + population.g_gaba = g_gaba + ### simulate until v is near v_rec_rest again + net.simulate_until(max_duration=self.simulation_dur, population=population) + ### get the psp = maximum of difference of v_rec and v_rec_rest + v_rec = monitor.get("v")[:, 0] + spike_dict = monitor.get("spike") + spike_timestep_list = spike_dict[0] + [net.get_current_step()] + end_timestep = int(round(min(spike_timestep_list), 0)) + psp = float( + np.absolute(np.clip(v_rec[:end_timestep] - v_rec_rest, None, 0)).max() + ) + + if do_plot: + plt.figure() + plt.title( + f"g_ampa={g_ampa}\ng_gaba={g_gaba}\nv_rec_rest={v_rec_rest}\npsp={psp}" + ) + plt.plot(v_rec) + plt.savefig( + f"tmp_psp_{population.name}_{int(g_ampa*1000)}_{int(g_gaba*1000)}.png" + ) + plt.close("all") + + return psp + + def compile_net_many(self, net): + compile_in_folder( + folder_name=f"many_net_{net.id}", net=net, clean=True, silent=True + ) + + def create_many_neuron_network(self): + """ + creates a ANNarchy magic network with all popualtions which should be configured the size + of the populations is equal and is obtianed by dividing the number of the + interpolation values by the number of networks which will be used during run_parallel + + return: + net_many_dict: dict + contains + - population_dict: for 
all population names the created population in the magic network + - monitor_dict: for all population names the created monitors in the magic network + """ + self.log("create many neurons network") + + ### for each population of the given model which should be configured + ### create a population with a given size + ### create a monitor recording spikes + ### create a network containing the population and the monitor + many_neuron_population_list = [] + many_neuron_monitor_list = [] + many_neuron_network_list = [] + for pop_name in self.pop_name_list: + ### create the neuron model with poisson spike trains + ### get the initial arguments of the neuron + neuron_model = self.neuron_model_dict[pop_name] + ### names of arguments + init_arguments_name_list = list(Neuron.__init__.__code__.co_varnames) + init_arguments_name_list.remove("self") + init_arguments_name_list.remove("name") + init_arguments_name_list.remove("description") + ### arguments dict + init_arguments_dict = { + init_arguments_name: getattr(neuron_model, init_arguments_name) + for init_arguments_name in init_arguments_name_list + } + ### get the afferent populations + afferent_population_list = [] + proj_target_type_list = [] + for proj_name in self.afferent_projection_dict[pop_name][ + "projection_names" + ]: + proj_dict = self.get_proj_dict(proj_name) + pre_pop_name = proj_dict["pre_pop_name"] + afferent_population_list.append(pre_pop_name) + proj_target_type_list.append(proj_dict["proj_target_type"]) + + ### for each afferent population create a binomial spike train equation string + ### add it to the equations + ### and add the related parameters to the parameters + + ### split the equations and parameters string + equations_line_split_list = str( + init_arguments_dict["equations"] + ).splitlines() + + parameters_line_split_list = str( + init_arguments_dict["parameters"] + ).splitlines() + + ### add the binomial spike train equations and parameters + ( + equations_line_split_list, + 
parameters_line_split_list, + ) = self.add_binomial_input( + equations_line_split_list, + parameters_line_split_list, + afferent_population_list, + proj_target_type_list, + ) + + ### combine string lines to multiline strings again + init_arguments_dict["parameters"] = "\n".join(parameters_line_split_list) + init_arguments_dict["equations"] = "\n".join(equations_line_split_list) + + ### create neuron model with new equations + neuron_model_new = Neuron(**init_arguments_dict) + + # print("new neuron model:") + # print(neuron_model_new) + + ### create the many neuron population + my_pop = Population( + geometry=self.nr_neurons_per_net, + neuron=neuron_model_new, + name=f"many_neuron_{pop_name}", + ) + + ### set the attributes of the neurons + for attr_name, attr_val in self.neuron_model_parameters_dict[pop_name]: + setattr(my_pop, attr_name, attr_val) + + ### create Monitor for many neuron + my_mon = Monitor(my_pop, ["spike"]) + + ### create the network with population and monitor + my_net = Network() + my_net.add(my_pop) + my_net.add(my_mon) + + ### compile network + compile_in_folder(folder_name=f"many_neuron_{pop_name}", net=my_net) + + ### append the lists + many_neuron_network_list.append(my_net) + many_neuron_population_list.append(my_net.get(my_pop)) + many_neuron_monitor_list.append(my_net.get(my_mon)) + + net_many_dict = { + "network_list": many_neuron_network_list, + "population_list": many_neuron_population_list, + "monitor_list": many_neuron_monitor_list, + } + return net_many_dict + + def add_binomial_input( + self, + equations_line_split_list, + parameters_line_split_list, + afferent_population_list, + proj_target_type_list, + ): + ### loop over afferent populations to add the new equation lines and parameters + for pre_pop_name in afferent_population_list: + ### define the spike train of a pre population as a binomial process with number of trials = number of pre neurons and success probability = spike probability (taken from Poisson neurons) + ### the 
obtained value is the number of spikes at a time step times the weight + poisson_equation_str = f"{pre_pop_name}_spike_train = Binomial({pre_pop_name}_size, {pre_pop_name}_spike_prob)" + ### add the equation line + equations_line_split_list.insert(1, poisson_equation_str) + ### add the parameters + parameters_line_split_list.append(f"{pre_pop_name}_size = 0 : population") + parameters_line_split_list.append( + f"{pre_pop_name}_spike_prob = 0 : population" + ) + parameters_line_split_list.append(f"{pre_pop_name}_weight = 0 : population") + + ### change the g_ampa and g_gaba line, they additionally are the sum of the spike trains + for equation_line_idx, equation_line in enumerate(equations_line_split_list): + ### remove whitespaces + line = equation_line.replace(" ", "") + ### check if line contains g_ampa + if "dg_ampa/dt" in line: + ### get the right side of the equation + line_right = line.split("=")[1] + line_left = line.split("=")[0] + ### remove and store tags_str + tags_str = "" + if len(line_right.split(":")) > 1: + line_right, tags_str = line_right.split(":") + ### get the populations whose spike train should be appended in g_ampa + afferent_population_to_append_list = [] + for pre_pop_idx, pre_pop_name in enumerate(afferent_population_list): + if proj_target_type_list[pre_pop_idx] == "ampa": + afferent_population_to_append_list.append(pre_pop_name) + if len(afferent_population_to_append_list) > 0: + ### change right side, add the sum of the spike trains + line_right = f"{line_right} + {'+'.join([f'({pre_pop_name}_spike_train*{pre_pop_name}_weight)/dt' for pre_pop_name in afferent_population_to_append_list])}" + ### add tags_str again + if tags_str != "": + line_right = f"{line_right}:{tags_str}" + ### combine line again and replace the list entry in equations_line_split_list + line = f"{line_left}={line_right}" + equations_line_split_list[equation_line_idx] = line + + ### check if line contains g_gaba + if "dg_gaba/dt" in line: + ### get the right side of 
the equation + line_right = line.split("=")[1] + line_left = line.split("=")[0] + ### remove and store tags_str + tags_str = "" + if len(line_right.split(":")) > 1: + line_right, tags_str = line_right.split(":") + ### get the populations whose spike train should be appended in g_ampa + afferent_population_to_append_list = [] + for pre_pop_idx, pre_pop_name in enumerate(afferent_population_list): + if proj_target_type_list[pre_pop_idx] == "gaba": + afferent_population_to_append_list.append(pre_pop_name) + if len(afferent_population_to_append_list) > 0: + ### change right side, add the sum of the spike trains + line_right = f"{line_right} + {'+'.join([f'({pre_pop_name}_spike_train*{pre_pop_name}_weight)/dt' for pre_pop_name in afferent_population_to_append_list])}" + ### add tags_str again + if tags_str != "": + line_right = f"{line_right}:{tags_str}" + ### combine line again and replace the list entry in equations_line_split_list + line = f"{line_left}={line_right}" + equations_line_split_list[equation_line_idx] = line + + return (equations_line_split_list, parameters_line_split_list) + + def get_v_clamp_2000( + self, + net: Network, + population, + monitor=None, + v=None, + I_app=None, + variable_init_sampler=None, + pre_pop_name_list=[], + eff_size_list=[], + rate_list=[], + weight_list=[], + return_1000=False, + ): + """ + the returned values is dv/dt + --> to get the hypothetical change of v for a single time step multiply with dt! 
+ """ + ### reset network and set initial values + net.reset() + net.set_seed(0) + if not isinstance(variable_init_sampler, type(None)): + self.set_init_variables(population, variable_init_sampler) + ### set v and I_app + if not isinstance(v, type(None)): + population.v = v + if not isinstance(I_app, type(None)): + population.I_app = I_app + ### set the weights and rates of the binomial spike trains of the afferent populations + for pre_pop_idx, pre_pop_name in enumerate(pre_pop_name_list): + setattr(population, f"{pre_pop_name}_size", eff_size_list[pre_pop_idx]) + setattr( + population, + f"{pre_pop_name}_spike_prob", + (rate_list[pre_pop_idx] / 1000) * dt(), + ) + setattr(population, f"{pre_pop_name}_weight", weight_list[pre_pop_idx]) + ### simulate 2000 ms + net.simulate(2000) + + if return_1000: + v_clamp_rec_arr = monitor.get("v_clamp_rec_sign")[:, 0] + return np.mean(v_clamp_rec_arr[-int(round(1000 / dt(), 0)) :]) + return population.v_clamp_rec[0] + + def get_voltage_clamp_equations(self, init_arguments_dict, pop_name): + """ + works with + dv/dt = ... + v += ... + """ + ### get the dv/dt equation from equations + ### find the line with dv/dt= or v+= or v= + eq = str(init_arguments_dict["equations"]) + eq = eq.splitlines() + line_is_v_list = [False] * len(eq) + ### check in which lines v is defined + for line_idx, line in enumerate(eq): + line_is_v_list[line_idx] = self.get_line_is_v(line) + ### raise error if no v or multiple times v + if True not in line_is_v_list or sum(line_is_v_list) > 1: + raise ValueError( + f"ERROR model_configurator create_net_single_voltage_clamp: In the equations of the neurons has to be exactly a single line which defines dv/dt or v, not given for population {pop_name}" + ) + ### set the v equation + eq_v = eq[line_is_v_list.index(True)] + + ### if equation type is v += ... 
--> just take right side + if "+=" in eq_v: + ### create the new equations for the ANNarchy neuron + ### create two lines, the voltage clamp line v+=0 and the + ### right sight of v+=... separately + eq_new_0 = f"v_clamp_rec_sign = {eq_v.split('+=')[1]}" + eq_new_1 = f"v_clamp_rec = fabs({eq_v.split('+=')[1]})" + eq_new_2 = "v_clamp_rec_pre = v_clamp_rec" + eq_new_3 = "v+=0" + ### remove old v line and insert new lines + del eq[line_is_v_list.index(True)] + eq.insert(line_is_v_list.index(True), eq_new_0) + eq.insert(line_is_v_list.index(True), eq_new_1) + eq.insert(line_is_v_list.index(True), eq_new_2) + eq.insert(line_is_v_list.index(True), eq_new_3) + eq = "\n".join(eq) + ### return new neuron equations + return eq + + ### if equation type is dv/dt = ... --> get the right side of dv/dt=... + ### transform eq_v + ### remove whitespaces + ### remove tags and store them for later + ### TODO replace random distributions and mathematical expressions which may be on the left side + eq_v = eq_v.replace(" ", "") + eq_v = eq_v.replace("dv/dt", "delta_v") + eq_tags_list = eq_v.split(":") + eq_v = eq_tags_list[0] + if len(eq_tags_list) > 1: + tags = eq_tags_list[1] + else: + tags = None + + ### split the equation at "=" and move everything on one side (other side = 0) + eq_v_splitted = eq_v.split("=") + left_side = eq_v_splitted[0] + right_side = "right_side" + eq_v_one_side = f"{right_side}-({left_side})" + + ### prepare the sympy equation generation + attributes_name_list = self.neuron_model_attributes_dict[pop_name] + attributes_tuple = symbols(",".join(attributes_name_list)) + ### for each attribute of the neuron a sympy symbol + attributes_sympy_dict = { + key: attributes_tuple[attributes_name_list.index(key)] + for key in attributes_name_list + } + ### furhter create symbols for delta_v and right_side + attributes_sympy_dict["delta_v"] = Symbol("delta_v") + attributes_sympy_dict["right_side"] = Symbol("right_side") + + ### get the sympy equation expression by 
evaluating the string + eq_sympy = evaluate_expression_with_dict( + expression=eq_v_one_side, value_dict=attributes_sympy_dict + ) + + ### solve the equation to delta_v + result = solve(eq_sympy, attributes_sympy_dict["delta_v"], dict=True) + if len(result) != 1: + raise ValueError( + f"ERROR model_configurator create_net_single_voltage_clamp: Could not find solution for dv/dt for neuronmodel of population {pop_name}!" + ) + result = str(result[0][attributes_sympy_dict["delta_v"]]) + + ### replace right_side by the actual right side + result = result.replace("right_side", f"({eq_v_splitted[1]})") + + ### TODO replace mathematical expressions and random distributions back to previous + + ### now create the new equations for the ANNarchy neuron + ### create three lines, the voltage clamp line "dv/dt=0", + ### the obtained line which would be the right side of dv/dt, + ### and this right side sotred from the previous time step + ### v_clamp_rec should be an absolute value + eq_new_0 = f"v_clamp_rec_sign = {result}" + eq_new_1 = f"v_clamp_rec = fabs({result})" + eq_new_2 = "v_clamp_rec_pre = v_clamp_rec" + ### add stored tags to new dv/dt equation + if not isinstance(tags, type(None)): + eq_new_3 = f"dv/dt=0 : {tags}" + else: + eq_new_3 = "dv/dt=0" + ### remove old v line and insert new three lines + del eq[line_is_v_list.index(True)] + eq.insert(line_is_v_list.index(True), eq_new_0) + eq.insert(line_is_v_list.index(True), eq_new_1) + eq.insert(line_is_v_list.index(True), eq_new_2) + eq.insert(line_is_v_list.index(True), eq_new_3) + eq = "\n".join(eq) + ### return new neuron equations + return eq + + def get_line_is_v(self, line: str): + """ + check if a equation string contains dv/dt or v= or v+= + """ + if "v" not in line: + return False + + ### remove whitespaces + line = line.replace(" ", "") + + ### check for dv/dt + if "dv/dt" in line: + return True + + ### check for v update + if ("v=" in line or "v+=" in line) and line.startswith("v"): + return True + + return 
False + + def get_line_is_g_ampa(self, line: str): + """ + check if a equation string contains dg_ampa/dt + """ + + ### remove whitespaces + line = line.replace(" ", "") + + ### check for dv/dt + if "dv/dt" in line: + return True + + ### check for v update + if ("v=" in line or "v+=" in line) and line.startswith("v"): + return True + + return False + + def get_init_neuron_variables_for_psp(self, net, pop, v_rest, I_app_hold): + """ + get the variables of the given population after simulating 2000 ms + + Args: + net: ANNarchy network + the network which contains the pop + + pop: ANNarchy population + the population whose variables are obtained + + """ + ### reset neuron and deactivate input and set v_rest + net.reset() + pop.v = v_rest + pop.I_app = I_app_hold + + ### get the variables of the neuron after 5000 ms + net.simulate(5000) + var_name_list = list(pop.variables) + var_arr = np.zeros((1, len(var_name_list))) + get_arr = np.array([getattr(pop, var_name) for var_name in pop.variables]) + var_arr[0, :] = get_arr[:, 0] + + ### create a sampler with the one data sample + sampler = self.var_arr_sampler(var_arr, var_name_list) + return sampler + + class var_arr_sampler: + def __init__(self, var_arr, var_name_list) -> None: + self.var_arr_shape = var_arr.shape + self.is_const = ( + np.std(var_arr, axis=0) <= np.mean(np.absolute(var_arr), axis=0) / 1000 + ) + self.constant_arr = var_arr[0, self.is_const] + self.not_constant_val_arr = var_arr[:, np.logical_not(self.is_const)] + self.var_name_list = var_name_list + + def sample(self, n=1, seed=0): + """ + Args: + n: int, optional, default=1 + number of samples + + seed: int, optional, default=0 + seed for rng + """ + ### get random idx + rng = np.random.default_rng(seed=seed) + random_idx_arr = rng.integers(low=0, high=self.var_arr_shape[0], size=n) + ### sample with random idx + sample_arr = self.not_constant_val_arr[random_idx_arr] + ### create return array + ret_arr = np.zeros((n,) + self.var_arr_shape[1:]) + ### 
add samples to return array + ret_arr[:, np.logical_not(self.is_const)] = sample_arr + ### add constant values to return array + ret_arr[:, self.is_const] = self.constant_arr + + return ret_arr + + def get_nr_many_neurons(self, nr_neurons, nr_networks): + """ + Splits the number of neurons in almost equally sized parts. + + Args: + nr_neurons: int + number of neurons which should be splitted + + nr_networks: int + number of networks over which the neurons should be equally distributed + """ + return self.divide_almost_equal(number=nr_neurons, num_parts=nr_networks) + + def get_max_weight_dict_for_pop(self, pop_name): + """ + get the weight dict for a single population + + Args: + pop_name: str + population name + + return: dict + keys = afferent projection names, values = max weights + """ + + ### loop over afferent projections + max_w_list = [] + for proj_name in self.afferent_projection_dict[pop_name]["projection_names"]: + ### find max weight for projection + max_weight_of_proj = self.get_max_weight_of_proj(proj_name=proj_name) + max_w_list.append(max_weight_of_proj) + self.afferent_projection_dict[pop_name]["max_weight"] = max_w_list + + ### remove weight key from self.afferent_projection_dict[pop_name] which was added during the process + self.afferent_projection_dict[pop_name].pop("weights") + + ### now create the dictionary structure for return + # { + # "ampa": {"projection_name": "max_weight value"...}, + # "gaba": {"projection_name": "max_weight value"...}, + # } + max_weight_dict_for_pop = {"ampa": {}, "gaba": {}} + ### loop over all afferent projections + for proj_name in self.afferent_projection_dict[pop_name]["projection_names"]: + proj_dict = self.get_proj_dict(proj_name) + proj_target_type = proj_dict["proj_target_type"] + proj_max_weight = proj_dict["proj_max_weight"] + ### add max weight of projection to the corresponding target type in the return dict + max_weight_dict_for_pop[proj_target_type][proj_name] = proj_max_weight + + return 
max_weight_dict_for_pop + + def get_proj_dict(self, proj_name): + """ + get a dictionary for a specified projection which contains following information: + post_pop_name + proj_target_type + idx_proj + spike_frequency + proj_weight + g_max + + Args: + proj_name: str + projection name + + return: + proj_dict: dict + keys see above + """ + ### get pre_pop_name + pre_pop_name = self.pre_pop_name_dict[proj_name] + ### get pre_pop_name + pre_pop_size = self.pre_pop_size_dict[proj_name] + ### get post_pop_name + post_pop_name = self.post_pop_name_dict[proj_name] + ### get idx_proj and proj_target_type + idx_proj = self.afferent_projection_dict[post_pop_name][ + "projection_names" + ].index(proj_name) + proj_target_type = self.afferent_projection_dict[post_pop_name]["target"][ + idx_proj + ] + ### get spike frequency + f_t = self.afferent_projection_dict[post_pop_name]["target firing rate"][ + idx_proj + ] + p = self.afferent_projection_dict[post_pop_name]["probability"][idx_proj] + s = self.afferent_projection_dict[post_pop_name]["size"][idx_proj] + spike_frequency = f_t * p * s + ### get weight + try: + proj_weight = self.afferent_projection_dict[post_pop_name]["weights"][ + idx_proj + ] + except: + proj_weight = None + ### g_max + try: + g_max = self.g_max_dict[post_pop_name][proj_target_type] + except: + g_max = None + ### get max weight + try: + proj_max_weight = self.afferent_projection_dict[post_pop_name][ + "max_weight" + ][idx_proj] + except: + proj_max_weight = None + + return { + "pre_pop_name": pre_pop_name, + "pre_pop_size": pre_pop_size, + "post_pop_name": post_pop_name, + "proj_target_type": proj_target_type, + "idx_proj": idx_proj, + "spike_frequency": spike_frequency, + "proj_weight": proj_weight, + "g_max": g_max, + "proj_max_weight": proj_max_weight, + "proj_prob": p, + } + + def get_max_weight_of_proj(self, proj_name): + """ + find the max weight of a specified projection using incremental_continuous_bound_search + increasing weights of projection 
increases conductance g of projection --> increase + until g_max is found + + Args: + proj_name: str + projection name + + return: + w_max: number + """ + ### log task + self.log(f"get w_max for {proj_name}") + + ### g_max for projection + proj_dict = self.get_proj_dict(proj_name) + g_max = proj_dict["g_max"] + + ### find max weight with incremental_continuous_bound_search + ### increase weights until g_max is reached + self.log("search w_max with y(X) = g(w=X)") + w_max = self.incremental_continuous_bound_search( + y_X=lambda X_val: self.get_g_of_single_proj( + weight=X_val, + proj_name=proj_name, + ), + y_bound=g_max, + X_0=0, + y_0=0, + ) + + return w_max + + def get_g_of_single_proj(self, weight, proj_name): + """ + given a weight for a specified projection get the resulting conductance value g + in the target population + + Args: + weight: number + the weight of the projection + + proj_name: str + projection name + + return: + g_val: number + """ + ### get some projection infos + proj_dict = self.get_proj_dict(proj_name) + pop_name = proj_dict["post_pop_name"] + idx_proj = proj_dict["idx_proj"] + proj_target_type = proj_dict["proj_target_type"] + + ### set weights in the afferent_projection_dict + ### set all weights to zero except the weight of the current proj which is set to the given weight + weight_list = [0] * self.nr_afferent_proj_dict[pop_name] + weight_list[idx_proj] = weight + self.afferent_projection_dict[pop_name]["weights"] = weight_list + + ### get the g_ampa and g_gaba values based on the current afferent_projection_dict weights + mean_g = self.get_g_values_of_pop(pop_name) + + ### then return the conductance related to the specified projection + return mean_g[proj_target_type] + + def get_g_values_of_pop(self, pop_name): + """ + calculate the average g_ampa and g_gaba values of the specified population based on the weights + defined in the afferent_projection_dict + + Args: + pop_name: str + population name + """ + spike_times_dict = {"ampa": 
[np.array([])], "gaba": [np.array([])]} + spike_weights_dict = {"ampa": [np.array([])], "gaba": [np.array([])]} + ### loop over afferent projections + for proj_name in self.afferent_projection_dict[pop_name]["projection_names"]: + ### get projection infos + proj_dict = self.get_proj_dict(proj_name) + proj_weight = proj_dict["proj_weight"] + proj_target_type = proj_dict["proj_target_type"] + spike_frequency = proj_dict["spike_frequency"] + ### get spike times over the simulation duration for the spike frequency + if spike_frequency > 0: + spike_times_arr = self.get_spike_times_arr( + spike_frequency=spike_frequency + ) + else: + spike_times_arr = np.array([]) + ### get weights array + spike_weights_arr = np.ones(len(spike_times_arr)) * proj_weight + ### store spike times and weights for the target type of the projection + spike_times_dict[proj_target_type].append(spike_times_arr) + spike_weights_dict[proj_target_type].append(spike_weights_arr) + + mean_g = {} + for target_type in ["ampa", "gaba"]: + ### concatenate spike times and corresponding weights of different afferent projections + spike_times_arr = np.concatenate(spike_times_dict[target_type]) + spike_weights_arr = np.concatenate(spike_weights_dict[target_type]) + + ### sort the spike times and corresponding weights + sort_idx = np.argsort(spike_times_arr) + spike_times_arr = spike_times_arr[sort_idx] + spike_weights_arr = spike_weights_arr[sort_idx] + + ### calculate mean g values from the spike times and corresponding weights + mean_g[target_type] = self.get_mean_g( + spike_times_arr=spike_times_arr, + spike_weights_arr=spike_weights_arr, + tau=self.tau_dict[pop_name][target_type], + ) + + return mean_g + + def get_spike_times_arr(self, spike_frequency): + """ + get spike times for a given spike frequency + + Args: + spike_frequency: number + spike frequency in Hz + """ + expected_nr_spikes = int( + round((500 + self.simulation_dur) * (spike_frequency / 1000), 0) + ) + ### isi_arr in timesteps + isi_arr = 
def get_rate_parallel(
    idx,
    net,
    population: Population,
    variable_init_sampler,
    monitor: Monitor,
    I_app_arr,
    weight_list: list,
    pre_pop_name_list: list,
    rate_list: list,
    eff_size_list: list,
    simulation_dur: int,
):
    """
    Obtain the firing rates of the neurons of the given population for the
    given applied currents and afferent poisson spike-train settings.

    Args:
        idx (int):
            network index given by the parallel_run function
        net:
            network object given by the parallel_run function
        population (Population):
            population of the network whose firing rates are obtained
        variable_init_sampler:
            sampler object with function .sample to get initial variable values
        monitor (Monitor):
            monitor recording the spikes of the population
        I_app_arr (array):
            input current value for each neuron of the population
        weight_list (list):
            weight for each afferent (poisson spike train) population
        pre_pop_name_list (list):
            names of the afferent populations
        rate_list (list):
            firing rate (Hz) for each afferent population
        eff_size_list (list):
            effective size for each afferent population
        simulation_dur (int):
            simulation duration in ms (excluding the 500 ms warm-up)

    Returns:
        f_arr (np.array):
            firing rate (Hz) of each neuron, shape: (len(population),)
    """
    ### reset and set init values
    net.reset()
    ### sample init values, one could sample different values for multiple
    ### neurons but here we sample a single sample and use it for all neurons
    variable_init_arr = variable_init_sampler.sample(1, seed=0)
    var_name_list = variable_init_sampler.var_name_list
    variable_init_arr = np.array([variable_init_arr[0]] * len(population))
    ### BUGFIX: iterate over the variable names directly; previously
    ### enumerate() yielded (idx, name) tuples, so "var_name in var_name_list"
    ### was always False and the initial values were never applied
    for var_name in population.variables:
        if var_name in var_name_list:
            set_val = variable_init_arr[:, var_name_list.index(var_name)]
            setattr(population, var_name, set_val)

    ### set the weights and rates of the poisson spike traces of the afferent
    ### populations
    for pre_pop_idx, pre_pop_name in enumerate(pre_pop_name_list):
        setattr(population, f"{pre_pop_name}_size", eff_size_list[pre_pop_idx])
        setattr(
            population,
            f"{pre_pop_name}_spike_prob",
            (rate_list[pre_pop_idx] / 1000) * dt(),
        )
        setattr(population, f"{pre_pop_name}_weight", weight_list[pre_pop_idx])

    ### set the I_app
    population.I_app = I_app_arr

    ### simulate 500 ms warm-up + simulation_dur ms
    ### (removed an unreachable debug-plotting branch for "stn" populations
    ### that was permanently disabled by an "and False" guard)
    net.simulate(500 + simulation_dur)

    ### get rate for the last simulation_dur ms: count only spikes that occur
    ### after the 500 ms warm-up (spike times are in time steps)
    spike_dict = monitor.get("spike")
    f_arr = np.zeros(len(population))
    for idx_n, n in enumerate(spike_dict.keys()):
        time_list = np.array(spike_dict[n])
        nbr_spks = np.sum((time_list > (500 / dt())).astype(int))
        rate = nbr_spks / (simulation_dur / 1000)
        f_arr[idx_n] = rate
    return f_arr
+ + +You can set the synaptic load and contribution using the function .set_syn_load() which requires a synaptic_load_dict or a single number between 0 and 1 for the synaptic load of the populations and a synaptic_contribution_dict for the synaptic contributions to the synaptic load of the afferent projections. +Use this template for the synaptic_load_dict: + +{template_synaptic_load_dict} + +'ampa_load' and 'gaba_load' are placeholders, replace them with values between 0 and 1. + +Use this template for the synaptic_contribution_dict: + +{template_synaptic_contribution_dict} + +The shown contributions of the afferent projections are based on the assumption that the maximum weights are used. The contributions of all afferent projections of a single population have to sum up to 1! +""" +) + +_p_g_after_set_syn_load = """Synaptic loads and contributions, i.e. weights set. Now call .get_base to obtain the baseline currents for the model populations. With .set_base you can directly set these baselines and the current weights in the model and compile the model. 
+""" diff --git a/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py b/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py index 83a6a91..299f2c2 100644 --- a/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py +++ b/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py @@ -20,7 +20,7 @@ PlotRecordings, ) from CompNeuroPy.examples.model_configurator.model_configurator_cnp import ( - model_configurator, + ModelConfigurator, ) import matplotlib.pyplot as plt import numpy as np @@ -360,13 +360,12 @@ def BGM_part_function(params): ### given rates, if not, maybe print warning ### initialize model_configurator - model_conf = model_configurator( + model_conf = ModelConfigurator( model, target_firing_rate_dict, do_not_config_list=do_not_config_list, print_guide=True, I_app_variable="I_app", - interpolation_grid_points=36, ) ### obtain the maximum synaptic loads for the populations and the From 5b6b27f975d4b7fbb3589501b323132bde514c70 Mon Sep 17 00:00:00 2001 From: olmai Date: Wed, 5 Jun 2024 16:18:36 +0200 Subject: [PATCH 30/39] model configurator: continued restructuring --- .../model_configurator_cnp.py | 1420 ++++++++++++++++- 1 file changed, 1403 insertions(+), 17 deletions(-) diff --git a/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py b/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py index df2b2a9..38842c2 100644 --- a/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py +++ b/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py @@ -1,5 +1,6 @@ from CompNeuroPy.generate_model import CompNeuroModel from CompNeuroPy import model_functions as mf +from CompNeuroPy import extra_functions as ef from ANNarchy import ( Population, @@ -19,6 +20,8 @@ get_current_step, projections, populations, + Binomial, + CurrentInjection, ) from ANNarchy.core import ConnectorMethods @@ -43,6 +46,29 @@ from scipy.stats import poisson from 
class Logger:
    """
    Simple append-only file logger that tags messages with the name of the
    calling function. Consecutive messages from the same caller are only
    indented; the caller tag is printed once per caller change.
    """

    def __init__(self, log_file: str):
        """
        Create/overwrite the log file and write the header line.

        Args:
            log_file (str):
                path of the log file
        """
        self.log_file = log_file
        ### caller of the most recent log() call, used to suppress repeated tags
        self.caller_name = ""
        with open(log_file, "w") as f:
            print("Logger file:", file=f)

    def log(self, txt):
        """
        Append txt to the log file, tagged with the calling function's name.

        Args:
            txt:
                message to log, converted to str
        """
        ### name of the function that called log()
        caller = inspect.currentframe().f_back.f_code.co_name
        indented = textwrap.indent(str(txt), "    ")
        ### only print the caller tag when the caller changed
        if caller == self.caller_name:
            entry = indented
        else:
            entry = f"[{caller}]:\n{indented}"
        self.caller_name = caller
        with open(self.log_file, "a") as f:
            print(entry, file=f)
do_not_config_list=do_not_config_list, + ) + ### define the simulator with all simulations with the single neuron networks + self._simulator = Simulator(single_nets=self._single_nets) + ### get the resting potential and corresponding I_hold for each population using + ### the voltage clamp networks + self._v_rest = PreparePSP( + simulator=self._simulator, + model=model, + single_nets=self._single_nets, + do_not_config_list=do_not_config_list, + logger=self._logger, + ) self._max_syn = GetMaxSyn() self._weight_templates = GetWeightTemplates() @@ -85,7 +145,7 @@ class AnalyzeModel: def __init__(self, model: CompNeuroModel): ### clear ANNarchy and create the model - self._clear_model(model=model) + self._clear_model(model=model, do_create=True) ### get population info (eq, params etc.) self._analyze_populations(model=model) @@ -93,9 +153,12 @@ def __init__(self, model: CompNeuroModel): ### get projection info self._analyze_projections(model=model) - def _clear_model(self, model: CompNeuroModel): + ### clear ANNarchy + self._clear_model(model=model, do_create=False) + + def _clear_model(self, model: CompNeuroModel, do_create: bool = True): mf.cnp_clear(functions=False, neurons=True, synapses=True, constants=False) - model.create(do_compile=False) + model.create(do_create=do_create, do_compile=False) def _analyze_populations(self, model: CompNeuroModel): """ @@ -137,10 +200,10 @@ def _analyze_populations(self, model: CompNeuroModel): for param in init_params if param != "self" and param != "storage_order" and param != "copied" } - ### get the afferent projections dict of the population TODO - self.afferent_projection_dict[pop_name] = ( - self._get_afferent_projection_dict(pop_name=pop_name) - ) + ### get the afferent projections dict of the population TODO do we still need this? 
+ # self.afferent_projection_dict[pop_name] = ( + # self._get_afferent_projection_dict(pop_name=pop_name) + # ) def _analyze_projections(self, model: CompNeuroModel): """ @@ -316,18 +379,1081 @@ def _get_connector_parameters(self, proj: Projection): class CreateSingleNeuronNetworks: - def __init__(self): - pass + """ + Class to create single neuron networks for normal and voltage clamp mode. + + Attributes: + single_net_dict (dict): + Nested dict containing the single neuron networks for normal and voltage + clamp mode + keys: mode (str) + normal or v_clamp + values: dict + keys: pop_name (str) + population name + values: dict + keys: net, population, monitor, init_sampler + values: Network, Population, Monitor, ArrSampler + """ + + def __init__( + self, + model: CompNeuroModel, + analyze_model: AnalyzeModel, + do_not_config_list: list[str], + ): + """ + Args: + model (CompNeuroModel): + Model to be analyzed + analyze_model (AnalyzeModel): + Analyzed model + do_not_config_list (list[str]): + List of population names which should not be configured + """ + self._single_net_dict = {} + ### create the single neuron networks for normal and voltage clamp mode + for mode in ["normal", "v_clamp"]: + self._single_net_dict[mode] = {} + self.create_single_neuron_networks( + model=model, + analyze_model=analyze_model, + do_not_config_list=do_not_config_list, + mode=mode, + ) + + def single_net_dict(self, pop_name: str, mode: str): + """ + Get the single neuron network dict for the given population and mode. 
+ + Args: + pop_name (str): + Name of the population + mode (str): + Mode for which the network should be returned + + Returns: + net_single_dict (dict): + Dict containing the Network, Population, Monitor and ArrSampler objects + """ + return self.ReturnSingleNeuronNetworks(self._single_net_dict[mode][pop_name]) + + class ReturnSingleNeuronNetworks: + def __init__(self, single_net_dict): + self.net: Network = single_net_dict["net"] + self.population: Population = single_net_dict["population"] + self.monitor: Monitor = single_net_dict["monitor"] + self.init_sampler: ArrSampler = single_net_dict["init_sampler"] + + def create_single_neuron_networks( + self, + model: CompNeuroModel, + analyze_model: AnalyzeModel, + do_not_config_list: list[str], + mode: str, + ): + """ + Create the single neuron networks for the given mode. Sets the single_net_dict. + + Args: + model (CompNeuroModel): + Model to be analyzed + analyze_model (AnalyzeModel): + Analyzed model + do_not_config_list (list[str]): + List of population names which should not be configured + mode (str): + Mode for which the single neuron networks should be created + """ + + ### loop over populations which should be configured + for pop_name in model.populations: + ### skip populations which should not be configured + if pop_name in do_not_config_list: + continue + ### store the dict containing the network etc + self._single_net_dict[mode][pop_name] = self.create_net_single( + pop_name=pop_name, analyze_model=analyze_model, mode=mode + ) + + def create_net_single(self, pop_name: str, analyze_model: AnalyzeModel, mode: str): + """ + Creates a network with the neuron type of the population given by pop_name for + the given mode. The population size is set to 1. 
+ + Args: + pop_name (str): + Name of the population + analyze_model (AnalyzeModel): + Analyzed model + mode (str): + Mode for which the network should be created + + Returns: + net_single_dict (dict): + Dict containing the Network, Population, Monitor and ArrSampler objects + """ + ### create the adjusted neuron model for the stop condition + neuron_model_new = self.get_single_neuron_neuron_model( + pop_name=pop_name, analyze_model=analyze_model, mode=mode + ) + + ### create the single neuron population + pop_single_neuron = self.get_single_neuron_population( + pop_name=pop_name, + neuron_model_new=neuron_model_new, + analyze_model=analyze_model, + mode=mode, + ) + + ### create Monitor for single neuron + if mode == "normal": + mon_single = Monitor(pop_single_neuron, ["spike", "v"]) + elif mode == "v_clamp": + mon_single = Monitor(pop_single_neuron, ["v_clamp_rec_sign"]) + + ### create network with single neuron and compile it + net_single = Network() + net_single.add([pop_single_neuron, mon_single]) + mf.compile_in_folder( + folder_name=f"single_net_{mode}_{pop_name}", silent=True, net=net_single + ) + + ### network dict + net_single_dict = { + "net": net_single, + "population": net_single.get(pop_single_neuron), + "monitor": net_single.get(mon_single), + "init_sampler": None, + } + + ### for v_clamp we are done here + if mode == "v_clamp": + return net_single_dict + + ### for normal neuron get the init sampler for the variables of the neuron model + ### (to initialize a population of the neuron model) + init_sampler = self.get_neuron_model_init_sampler( + net=net_single, pop=net_single.get(pop_single_neuron) + ) + net_single_dict["init_sampler"] = init_sampler + + return net_single_dict + + def get_single_neuron_neuron_model( + self, pop_name: str, analyze_model: AnalyzeModel, mode=str + ): + """ + Create the adjusted neuron model for the given mode. 
+ + Args: + pop_name (str): + Name of the population + analyze_model (AnalyzeModel): + Analyzed model + mode (str): + Mode for which the neuron model should be created + + Returns: + neuron_model_new (Neuron): + Adjusted neuron model + """ + ### get the stored parameters of the __init__ function of the Neuron + neuron_model_init_parameter_dict = ( + analyze_model.neuron_model_init_parameter_dict[pop_name].copy() + ) + ### Define the attributes of the neuron model as sympy symbols + neuron_model_attributes_name_list = list( + analyze_model.neuron_model_attr_dict[pop_name].keys() + ) + ### add v_before_psp and v_psp_thresh to equations/parameters, for the stop + ### condition below + self.adjust_neuron_model( + neuron_model_init_parameter_dict, + neuron_model_attributes_name_list, + mode=mode, + ) + ### create the adjusted neuron model + neuron_model_new = Neuron(**neuron_model_init_parameter_dict) + return neuron_model_new + + def get_single_neuron_population( + self, + pop_name: str, + neuron_model_new: Neuron, + analyze_model: AnalyzeModel, + mode: str, + ): + """ + Create the single neuron population for the given mode. 
+ + Args: + pop_name (str): + Name of the population + neuron_model_new (Neuron): + Adjusted neuron model + analyze_model (AnalyzeModel): + Analyzed model + mode (str): + Mode for which the population should be created + + Returns: + pop_single_neuron (Population): + Single neuron population + """ + if mode == "normal": + pop_single_neuron = Population( + 1, + neuron=neuron_model_new, + name=f"single_neuron_{pop_name}", + stop_condition="((abs(v-v_psp_thresh)<0.01) and (abs(v_before_psp-v_psp_thresh)>0.01)): any", + ) + elif mode == "v_clamp": + ### create the single neuron population + pop_single_neuron = Population( + 1, + neuron=neuron_model_new, + name=f"single_neuron_v_clamp_{pop_name}", + ) + + ### get the stored parameters and variables + neuron_model_attr_dict = analyze_model.neuron_model_attr_dict[pop_name] + ### set the parameters and variables + for attr_name, attr_val in neuron_model_attr_dict.items(): + setattr(pop_single_neuron, attr_name, attr_val) + return pop_single_neuron + + def get_neuron_model_init_sampler(self, net: Network, pop: Population): + """ + Create a sampler for the initial values of the variables of the neuron model by + simulating the neuron for 10000 ms and afterwards simulating 2000 ms and + sampling the variables every dt. 
+ + Args: + net (Network): + Network with the single neuron population + pop (Population): + Single neuron population + + Returns: + sampler (ArrSampler): + Sampler for the initial values of the variables of the neuron model + """ + + ### reset network and deactivate input + net.reset() + pop.I_app = 0 + + ### 10000 ms init duration + net.simulate(10000) + + ### simulate 2000 ms and check every dt the variables of the neuron + time_steps = int(2000 / dt()) + var_name_list = list(pop.variables) + var_arr = np.zeros((time_steps, len(var_name_list))) + for time_idx in range(time_steps): + net.simulate(dt()) + get_arr = np.array([getattr(pop, var_name) for var_name in pop.variables]) + var_arr[time_idx, :] = get_arr[:, 0] + + ### reset network after simulation + net.reset() + + ### create a sampler with the data samples of from the 1000 ms simulation + sampler = ArrSampler(arr=var_arr, var_name_list=var_name_list) + return sampler + + def adjust_neuron_model( + self, + neuron_model_init_parameter_dict: dict, + neuron_model_attributes_name_list: list[str], + mode: str, + ): + """ + Adjust the parameters and equations of the neuron model for the given mode. 
    def get_v_clamp_2000(
        self,
        pop_name: str,
        v: float | None = None,
        I_app: float | None = None,
    ) -> float:
        """
        Simulate the v_clamp single neuron network of the given pop_name for
        2000 ms and return the v_clamp_rec value of the single neuron after
        2000 ms. The returned value is "dv/dt"; to get the hypothetical change
        of v for a single time step multiply it with dt!

        Args:
            pop_name (str):
                Name of the population
            v (float):
                Membrane potential (does not change over time due to voltage
                clamp); if None, the population's current value is kept
            I_app (float):
                Applied current; if None, the population's current value is kept

        Returns:
            v_clamp_rec (float):
                v_clamp_rec value of the single neuron after 2000 ms
        """
        ### get the network, population, init_sampler of the v_clamp variant
        net = self.single_nets.single_net_dict(pop_name=pop_name, mode="v_clamp").net
        population = self.single_nets.single_net_dict(
            pop_name=pop_name, mode="v_clamp"
        ).population
        init_sampler = self.single_nets.single_net_dict(
            pop_name=pop_name, mode="v_clamp"
        ).init_sampler
        ### reset network; fixed seed makes the run deterministic
        net.reset()
        net.set_seed(0)
        ### set the initial variables of the neuron model
        ### NOTE(review): create_net_single stores init_sampler=None for
        ### v_clamp networks, so this branch only fires if a sampler was
        ### attached later — confirm intended
        if init_sampler is not None:
            init_sampler.set_init_variables(population)
        ### set v and I_app (only if explicitly given)
        if v is not None:
            population.v = v
        if I_app is not None:
            population.I_app = I_app
        ### simulate 2000 ms
        net.simulate(2000)
        ### return the v_clamp_rec value of the single neuron after 2000 ms
        return population.v_clamp_rec[0]
+ + Args: + pop_name (str): + Name of the population + initial_variables (dict): + Initial variables of the neuron model + I_app (float): + Applied current + do_plot (bool): + If True, plot the membrane potential + + Returns: + v_arr (np.array): + Membrane potential for the 2000 ms simulation with shape: (time_steps,) + """ + ### get the network, population, monitor + net = self.single_nets.single_net_dict(pop_name=pop_name, mode="normal").net + population = self.single_nets.single_net_dict( + pop_name=pop_name, mode="normal" + ).population + monitor = self.single_nets.single_net_dict( + pop_name=pop_name, mode="normal" + ).monitor + ### reset network + net.reset() + net.set_seed(0) + ### set the initial variables of the neuron model + for var_name, var_val in initial_variables.items(): + if var_name in population.variables: + setattr(population, var_name, var_val) + ### set I_app + if I_app is not None: + population.I_app = I_app + ### simulate + net.simulate(2000) + v_arr = monitor.get("v")[:, 0] + + if do_plot: + plt.figure() + plt.title(f"{population.I_app}") + plt.plot(v_arr) + plt.savefig(f"tmp_v_rest_{pop_name}.png") + plt.close("all") + + return v_arr class CreateReducedModel: - def __init__(self): - pass + """ + Class to create a reduced model from the original model. It is accessable via the + attribute model_reduced. + Attributes: + model_reduced (CompNeuroModel): + Reduced model, created but not compiled + """ -class GetVRest: - def __init__(self): - pass + def __init__( + self, + model: CompNeuroModel, + analyze_model: AnalyzeModel, + reduced_size: int, + do_create: bool = False, + do_compile: bool = False, + verbose: bool = False, + ) -> None: + """ + Prepare model for reduction. 
+ + Args: + model (CompNeuroModel): + Model to be reduced + reduced_size (int): + Size of the reduced populations + """ + ### set the attributes + self._model = model + self._analyze_model = analyze_model + self._reduced_size = reduced_size + self._verbose = verbose + ### recreate model with reduced populations and projections + self.model_reduced = CompNeuroModel( + model_creation_function=self.recreate_model, + name=f"{model.name}_reduced", + description=f"{model.description}\nWith reduced populations and projections.", + do_create=do_create, + do_compile=do_compile, + compile_folder_name=f"{model.compile_folder_name}_reduced", + ) + + def recreate_model(self): + """ + Recreates the model with reduced populations and projections. + """ + ### 1st for each population create a reduced population + for pop_name in self._model.populations: + self.create_reduced_pop(pop_name) + ### 2nd for each population which is a presynaptic population, create a spikes collecting aux population + for pop_name in self._model.populations: + self.create_spike_collecting_aux_pop(pop_name) + ## 3rd for each population which has afferents create a population for incoming spikes for each target type + for pop_name in self._model.populations: + self.create_conductance_aux_pop(pop_name, target="ampa") + self.create_conductance_aux_pop(pop_name, target="gaba") + + def create_reduced_pop(self, pop_name: str): + """ + Create a reduced population from the given population. 
+ + Args: + pop_name (str): + Name of the population to be reduced + """ + if self._verbose: + print(f"create_reduced_pop for {pop_name}") + ### 1st check how the population is connected + _, is_postsynaptic, ampa, gaba = self.how_pop_is_connected(pop_name) + + ### 2nd recreate neuron model + ### get the stored parameters of the __init__ function of the Neuron + neuron_model_init_parameter_dict = ( + self._analyze_model.neuron_model_init_parameter_dict[pop_name].copy() + ) + ### if the population is a postsynaptic population adjust the synaptic + ### conductance equations + if is_postsynaptic: + neuron_model_init_parameter_dict = self.adjust_neuron_model( + neuron_model_init_parameter_dict, ampa=ampa, gaba=gaba + ) + ### create the new neuron model + neuron_model_new = Neuron(**neuron_model_init_parameter_dict) + + ### 3rd recreate the population + ### get the stored parameters of the __init__ function of the Population + pop_init_parameter_dict = self._analyze_model.pop_init_parameter_dict[ + pop_name + ].copy() + ### replace the neuron model with the new neuron model + pop_init_parameter_dict["neuron"] = neuron_model_new + ### replace the size with the reduced size (if reduced size is smaller than the + ### original size) + ### TODO add model requirements somewhere, here requirements = geometry = int + pop_init_parameter_dict["geometry"] = min( + [pop_init_parameter_dict["geometry"][0], self._reduced_size] + ) + ### append _reduce to the name + pop_init_parameter_dict["name"] = f"{pop_name}_reduced" + ### create the new population + pop_new = Population(**pop_init_parameter_dict) + + ### 4th set the parameters and variables of the population's neurons + ### get the stored parameters and variables + neuron_model_attr_dict = self._analyze_model.neuron_model_attr_dict[pop_name] + ### set the parameters and variables + for attr_name, attr_val in neuron_model_attr_dict.items(): + setattr(pop_new, attr_name, attr_val) + + def create_spike_collecting_aux_pop(self, 
pop_name: str): + """ + Create a spike collecting population for the given population. + + Args: + pop_name (str): + Name of the population for which the spike collecting population should be created + """ + ### get all efferent projections + efferent_projection_list = [ + proj_name + for proj_name, pre_post_pop_name_dict in self._analyze_model.pre_post_pop_name_dict.items() + if pre_post_pop_name_dict[0] == pop_name + ] + ### check if pop has efferent projections + if len(efferent_projection_list) == 0: + return + if self._verbose: + print(f"create_spike_collecting_aux_pop for {pop_name}") + ### create the spike collecting population + pop_aux = Population( + 1, + neuron=self.SpikeProbCalcNeuron( + reduced_size=min( + [ + self._analyze_model.pop_init_parameter_dict[pop_name][ + "geometry" + ][0], + self._reduced_size, + ] + ) + ), + name=f"{pop_name}_spike_collecting_aux", + ) + ### create the projection from reduced pop to spike collecting aux pop + proj = Projection( + pre=get_population(pop_name + "_reduced"), + post=pop_aux, + target="ampa", + name=f"proj_{pop_name}_spike_collecting_aux", + ) + proj.connect_all_to_all(weights=1) + + def create_conductance_aux_pop(self, pop_name: str, target: str): + """ + Create a conductance calculating population for the given population and target. 
+ + Args: + pop_name (str): + Name of the population for which the conductance calculating population should be created + target (str): + Target type of the conductance calculating population + """ + ### get all afferent projections + afferent_projection_list = [ + proj_name + for proj_name, pre_post_pop_name_dict in self._analyze_model.pre_post_pop_name_dict.items() + if pre_post_pop_name_dict[1] == pop_name + ] + ### check if pop has afferent projections + if len(afferent_projection_list) == 0: + return + ### get all afferent projections with target type + afferent_target_projection_list = [ + proj_name + for proj_name in afferent_projection_list + if self._analyze_model.proj_init_parameter_dict[proj_name]["target"] + == target + ] + ### check if there are afferent projections with target type + if len(afferent_target_projection_list) == 0: + return + if self._verbose: + print(f"create_conductance_aux_pop for {pop_name} target {target}") + ### get projection informations + ### TODO somewhere add model requirements, here requirements = geometry = int and connection = fixed_probability i.e. 
random (with weights and probability) + projection_dict = { + proj_name: { + "pre_size": self._analyze_model.pop_init_parameter_dict[ + self._analyze_model.pre_post_pop_name_dict[proj_name][0] + ]["geometry"][0], + "connection_prob": self._analyze_model.connector_function_parameter_dict[ + proj_name + ][ + "probability" + ], + "weights": self._analyze_model.connector_function_parameter_dict[ + proj_name + ]["weights"], + "pre_name": self._analyze_model.pre_post_pop_name_dict[proj_name][0], + } + for proj_name in afferent_target_projection_list + } + ### create the conductance calculating population + pop_aux = Population( + self._analyze_model.pop_init_parameter_dict[pop_name]["geometry"][0], + neuron=self.InputCalcNeuron(projection_dict=projection_dict), + name=f"{pop_name}_{target}_aux", + ) + ### set number of synapses parameter for each projection + for proj_name, vals in projection_dict.items(): + number_synapses = Binomial( + n=vals["pre_size"], p=vals["connection_prob"] + ).get_values( + self._analyze_model.pop_init_parameter_dict[pop_name]["geometry"][0] + ) + setattr(pop_aux, f"number_synapses_{proj_name}", number_synapses) + ### create the "current injection" projection from conductance calculating + ### population to the reduced post population + proj = CurrentInjection( + pre=pop_aux, + post=get_population(f"{pop_name}_reduced"), + target=f"incomingaux{target}", + name=f"proj_{pop_name}_{target}_aux", + ) + proj.connect_current() + ### create projection from spike_prob calculating aux neurons of presynaptic + ### populations to conductance calculating aux population + for proj_name, vals in projection_dict.items(): + pre_pop_name = vals["pre_name"] + pre_pop_spike_collecting_aux = get_population( + f"{pre_pop_name}_spike_collecting_aux" + ) + proj = Projection( + pre=pre_pop_spike_collecting_aux, + post=pop_aux, + target=f"spikeprob_{pre_pop_name}", + name=f"{proj_name}_spike_collecting_to_conductance", + ) + proj.connect_all_to_all(weights=1) + + def 
how_pop_is_connected(self, pop_name): + """ + Check how a population is connected. If the population is a postsynaptic and/or + presynaptic population, check if it gets ampa and/or gaba input. + + Args: + pop_name (str): + Name of the population to check + + Returns: + is_presynaptic (bool): + True if the population is a presynaptic population, False otherwise + is_postsynaptic (bool): + True if the population is a postsynaptic population, False otherwise + ampa (bool): + True if the population gets ampa input, False otherwise + gaba (bool): + True if the population gets gaba input, False otherwise + """ + is_presynaptic = False + is_postsynaptic = False + ampa = False + gaba = False + ### loop over all projections + for proj_name in self._model.projections: + ### check if the population is a presynaptic population in any projection + if self._analyze_model.pre_post_pop_name_dict[proj_name][0] == pop_name: + is_presynaptic = True + ### check if the population is a postsynaptic population in any projection + if self._analyze_model.pre_post_pop_name_dict[proj_name][1] == pop_name: + is_postsynaptic = True + ### check if the projection target is ampa or gaba + if ( + self._analyze_model.proj_init_parameter_dict[proj_name]["target"] + == "ampa" + ): + ampa = True + elif ( + self._analyze_model.proj_init_parameter_dict[proj_name]["target"] + == "gaba" + ): + gaba = True + + return is_presynaptic, is_postsynaptic, ampa, gaba + + def adjust_neuron_model( + self, neuron_model_init_parameter_dict, ampa=True, gaba=True + ): + """ + Add the new synaptic input coming from the auxillary population to the neuron + model. 
+ + Args: + neuron_model_init_parameter_dict (dict): + Dictionary with the parameters of the __init__ function of the Neuron + ampa (bool): + True if the population gets ampa input and therefore the ampa conductance + needs to be adjusted, False otherwise + gaba (bool): + True if the population gets gaba input and therefore the gaba conductance + needs to be adjusted, False otherwise + + Returns: + neuron_model_init_parameter_dict (dict): + Dictionary with the parameters of the __init__ function of the Neuron + with DBS mechanisms added + """ + ### 1st adjust the conductance equations + ### get the equations of the neuron model as a list of strings + equations_line_split_list = str( + neuron_model_init_parameter_dict["equations"] + ).splitlines() + ### search for equation with dg_ampa/dt and dg_gaba/dt + for line_idx, line in enumerate(equations_line_split_list): + if ( + self.get_line_is_dvardt(line, var_name="g_ampa", tau_name="tau_ampa") + and ampa + ): + ### add " + tau_ampa*g_incomingauxampa/dt" + ### TODO add model requirements somewhere, here requirements = tau_ampa * dg_ampa/dt = -g_ampa + equations_line_split_list[line_idx] = ( + "tau_ampa*dg_ampa/dt = -g_ampa + tau_ampa*g_incomingauxampa/dt" + ) + if ( + self.get_line_is_dvardt(line, var_name="g_gaba", tau_name="tau_gaba") + and gaba + ): + ### add " + tau_gaba*g_incomingauxgaba/dt" + ### TODO add model requirements somewhere, here requirements = tau_gaba * dg_gaba/dt = -g_gaba + equations_line_split_list[line_idx] = ( + "tau_gaba*dg_gaba/dt = -g_gaba + tau_gaba*g_incomingauxgaba/dt" + ) + ### join list to a string + neuron_model_init_parameter_dict["equations"] = "\n".join( + equations_line_split_list + ) + + ### 2nd extend description + neuron_model_init_parameter_dict["description"] = ( + f"{neuron_model_init_parameter_dict['description']}\nWith incoming auxillary population input implemented." 
+ ) + + return neuron_model_init_parameter_dict + + def get_line_is_dvardt(self, line: str, var_name: str, tau_name: str): + """ + Check if a equation string has the form "tau*dvar/dt = -var". + + Args: + line (str): + Equation string + var_name (str): + Name of the variable + tau_name (str): + Name of the time constant + + Returns: + is_solution_correct (bool): + True if the equation is as expected, False otherwise + """ + if var_name not in line: + return False + + # Define the variables + var, _, _, _ = sp.symbols(f"{var_name} d{var_name} dt {tau_name}") + + # Given equation as a string + equation_str = line + + # Parse the equation string + lhs, rhs = equation_str.split("=") + lhs = sp.sympify(lhs) + rhs = sp.sympify(rhs) + + # Form the equation + equation = sp.Eq(lhs, rhs) + + # Solve the equation for var + try: + solution = sp.solve(equation, var) + except: + ### equation is not solvable with variables means it is not as expected + return False + + # Given solution to compare + expected_solution_str = f"-{tau_name}*d{var_name}/dt" + expected_solution = sp.sympify(expected_solution_str) + + # Check if the solution is as expected + is_solution_correct = solution[0] == expected_solution + + return is_solution_correct + + class SpikeProbCalcNeuron(Neuron): + """ + Neuron model to calculate the spike probabilities of the presynaptic neurons. + """ + + def __init__(self, reduced_size=1): + """ + Args: + reduced_size (int): + Reduced size of the associated presynaptic population + """ + parameters = f""" + reduced_size = {reduced_size} : population + tau= 1.0 : population + """ + equations = """ + tau*dr/dt = g_ampa/reduced_size - r + g_ampa = 0 + """ + super().__init__(parameters=parameters, equations=equations) + + class InputCalcNeuron(Neuron): + """ + This neurons gets the spike probabilities of the pre neurons and calculates the + incoming spikes for each projection. 
It accumulates the incoming spikes of all + projections (of the same target type) and calculates the conductance increase + for the post neuron. + """ + + def __init__(self, projection_dict): + """ + Args: + projection_dict (dict): + keys: names of afferent projections (of the same target type) + values: dict with keys "weights", "pre_name" + """ + + ### create parameters + parameters = [ + f""" + number_synapses_{proj_name} = 0 + weights_{proj_name} = {vals['weights']} + """ + for proj_name, vals in projection_dict.items() + ] + parameters = "\n".join(parameters) + + ### create equations + equations = [ + f""" + incoming_spikes_{proj_name} = number_synapses_{proj_name} * sum(spikeprob_{vals['pre_name']}) + Normal(0, 1)*sqrt(number_synapses_{proj_name} * sum(spikeprob_{vals['pre_name']}) * (1 - sum(spikeprob_{vals['pre_name']}))) : min=0, max=number_synapses_{proj_name} + """ + for proj_name, vals in projection_dict.items() + ] + equations = "\n".join(equations) + sum_of_conductance_increase = ( + "r = " + + "".join( + [ + f"incoming_spikes_{proj_name} * weights_{proj_name} + " + for proj_name in projection_dict.keys() + ] + )[:-3] + ) + equations = equations + "\n" + sum_of_conductance_increase + + super().__init__(parameters=parameters, equations=equations) + + +class PreparePSP: + """ + Find v_rest, corresponding I_hold (in case of self-active neurons) and an + init_sampler to initialize the neuron model for the PSP calculation for each + population. 
+ """ + + def __init__( + self, + simulator: Simulator, + model: CompNeuroModel, + single_nets: CreateSingleNeuronNetworks, + do_not_config_list: list[str], + logger: Logger, + do_plot: bool, + ): + + for pop_name in model.populations: + ### skip populations which should not be configured + if pop_name in do_not_config_list: + continue + ### find initial v_rest + logger.log( + f"search v_rest with y(X) = delta_v_2000(v=X) using grid search for pop {pop_name}" + ) + v_rest, delta_v_v_rest, variables_v_rest = self.find_v_rest_initial( + pop_name=pop_name, + simulator=simulator, + single_nets=single_nets, + do_plot=do_plot, + ) + logger.log( + f"for {pop_name} found v_rest={v_rest} with delta_v_2000(v=v_rest)={delta_v_v_rest}" + ) + ### check if v is constant after setting v to v_rest + v_rest_is_constant, v_rest_arr = self.get_v_rest_is_const() + + if v_rest_is_constant: + ### v_rest found, no I_app_hold needed + v_rest = v_rest_arr[-1] + I_app_hold = 0 + else: + ### there is no resting_state i.e. 
neuron is self-active --> find + ### smallest negative I_app to silence neuron + logger.log( + "neuron seems to be self-active --> find smallest I_app to silence the neuron" + ) + v_rest, I_app_hold = self.find_I_app_hold() + logger.log(f"I_app_hold = {I_app_hold}, resulting v_rest = {v_rest}") + + ### get the sampler for the initial variables + variable_init_sampler = self.get_init_neuron_variables_for_psp( + net=self.net_single_dict[pop_name]["net"], + pop=self.net_single_dict[pop_name]["population"], + v_rest=v_rest, + I_app_hold=I_app_hold, + ) + + return { + "v_rest": v_rest, + "I_app_hold": I_app_hold, + "variable_init_sampler": variable_init_sampler, + } + + def find_I_app_hold(self): + # TODO continue + ### negative current initially reduces v + ### then v climbs back up + ### check if the second half of v is constant if yes fine if not increase negative I_app + ### find I_app_hold with incremental_continuous_bound_search + self.log("search I_app_hold with y(X) = CHANGE_OF_V(I_app=X)") + I_app_hold = -self.incremental_continuous_bound_search( + y_X=lambda X_val: self.get_v_rest_arr_const( + pop_name=pop_name, + obtained_variables=obtained_variables, + I_app=-X_val, + ), + y_bound=0, + X_0=0, + y_0=self.get_v_rest_arr_const( + pop_name=pop_name, + obtained_variables=obtained_variables, + I_app=0, + ), + X_increase=detla_v_rest, + accept_non_dicontinuity=True, + bound_type="greater", + ) + ### again simulate the neuron with the obtained I_app_hold to get the new v_rest + v_rest_arr = self.get_new_v_rest_2000( + pop_name, obtained_variables, I_app=I_app_hold + ) + v_rest = v_rest_arr[-1] + + def find_v_rest_initial( + self, + pop_name: str, + simulator: Simulator, + single_nets: CreateSingleNeuronNetworks, + do_plot: bool, + ): + """ + Find the initial v_rest with the voltage clamp single neuron network for the + given population. Furthermore, get the change of v durign setting v_rest and the + stady state variables of the neuron. 
+ + Args: + pop_name (str): + Name of the population + simulator (Simulator): + Simulator object + single_nets (CreateSingleNeuronNetworks): + Single neuron networks + do_plot (bool): + True if plots should be created, False otherwise + + Returns: + v_rest (float): + Resting membrane potential + detla_v_v_rest (float): + Change of the membrane potential during setting v_rest as membrane + potential + variables_v_rest (dict): + Stady state variables of the neuron during setting v_rest as membrane + potential + """ + ### find v where dv/dt is minimal with voltage clamp network (best = 0, it can + ### only be >= 0) + v_arr = np.linspace(-90, -20, 200) + v_clamp_arr = np.array( + [simulator.get_v_clamp_2000(pop_name=pop_name, v=v_val) for v_val in v_arr] + ) + v_clamp_min_idx = argrelmin(v_clamp_arr)[0] + v_rest = np.min(v_arr[v_clamp_min_idx]) + if do_plot: + plt.figure() + plt.plot(v_arr, v_clamp_arr) + plt.axvline(v_rest, color="k") + plt.axhline(0, color="k", ls="dashed") + plt.savefig(f"v_clamp_{pop_name}.png") + plt.close("all") + + ### do again the simulation only with the obtained v_rest to get the detla_v for + ### v_rest + detla_v_v_rest = simulator.get_v_clamp_2000(pop_name=pop_name, v=v_rest) * dt() + population = single_nets.single_net_dict( + pop_name=pop_name, mode="v_clamp" + ).population + ### and the stady state variables of the neuron + variables_v_rest = { + var_name: getattr(population, var_name) for var_name in population.variables + } + return v_rest, detla_v_v_rest, variables_v_rest + + def get_v_rest_is_const( + self, simulator: Simulator, pop_name: str, variables_v_rest: dict, do_plot=bool + ): + """ + Check if the membrane potential is constant after setting it to v_rest. 
+ + Returns: + v_rest_is_constant (bool): + True if the membrane potential is constant, False otherwise + v_rest_arr (np.array): + Membrane potential for the 2000 ms simulation with shape: (time_steps,) + """ + ### check if the neuron stays at v_rest with normal neuron + v_rest_arr = simulator.get_v_2000( + pop_name=pop_name, + initial_variables=variables_v_rest, + I_app=0, + do_plot=do_plot, + ) + v_rest_arr_is_const = ( + np.std(v_rest_arr) <= np.mean(np.absolute(v_rest_arr)) / 1000 + ) + return v_rest_arr_is_const, v_rest_arr class GetMaxSyn: @@ -338,3 +1464,263 @@ def __init__(self): class GetWeightTemplates: def __init__(self): pass + + +class ArrSampler: + """ + Class to store an array and sample from it. + """ + + def __init__(self, arr: np.ndarray, var_name_list: list[str]) -> None: + """ + Args: + arr (np.ndarray) + array with shape (n_samples, n_variables) + var_name_list (list[str]) + list of variable names + """ + self.arr_shape = arr.shape + self.var_name_list = var_name_list + ### check values of any variable are constant + self.is_const = np.std(arr, axis=0) <= np.mean(np.absolute(arr), axis=0) / 1000 + ### for the constant variables only the first value is used + self.constant_arr = arr[0, self.is_const] + ### array without the constant variables + self.not_constant_val_arr = arr[:, np.logical_not(self.is_const)] + + def sample(self, n=1, seed=0): + """ + Sample n samples from the array. 
+ + Args: + n (int) + number of samples to be drawn + seed (int) + seed for the random number generator + + Returns: + ret_arr (np.ndarray) + array with shape (n, n_variables) + """ + ### get n random indices along the n_samples axis + rng = np.random.default_rng(seed=seed) + random_idx_arr = rng.integers(low=0, high=self.arr_shape[0], size=n) + ### sample with random idx + sample_arr = self.not_constant_val_arr[random_idx_arr] + ### create return array + ret_arr = np.zeros((n,) + self.arr_shape[1:]) + ### add samples to return array + ret_arr[:, np.logical_not(self.is_const)] = sample_arr + ### add constant values to return array + ret_arr[:, self.is_const] = self.constant_arr + + return ret_arr + + def set_init_variables(self, population: Population): + """ + Set the initial variables of the given population to the given values. + """ + variable_init_arr = self.sample(len(population), seed=0) + var_name_list = self.var_name_list + for var_name in population.variables: + if var_name in var_name_list: + set_val = variable_init_arr[:, var_name_list.index(var_name)] + setattr(population, var_name, set_val) + + +class CreateVoltageClampEquations: + """ + Class to create voltage clamp equations from the given equations of a neuron model. + The equations of the neuron model have to contain the voltage change equation in the + form of ... dv/dt ... = ... 
+ + Attributes: + eq_new (list[str]) + new equations of the neuron model with the voltage clamp + """ + + def __ini__(self, eq: list[str], neuron_model_attributes_name_list: list[str]): + """ + Args: + eq (list[str]) + equations of the neuron model + neuron_model_attributes_name_list (list[str]) + list of the names of the attributes of the neuron model + """ + ### get the dv/dt equation from equations + eq_v, eq_v_idx = self.get_eq_v(eq=eq) + + ### prepare the equation string for solving + ### TODO replace random distributions and mathematical expressions which may be on the left side + eq_v, tags = self.prepare_eq_v(eq_v=eq_v) + + ### solve equation to delta_v (which is dv/dt) + result = self.solve_delta_v(eq_v, neuron_model_attributes_name_list) + + ### insert the new equation lines for v_clamp and remove the old dv/dt line + self.eq_new = self.replace_delta_v( + result=result, eq=eq, eq_v_idx=eq_v_idx, tags=tags + ) + + def replace_delta_v( + self, result: str, eq: list[str], eq_v_idx: int, tags: str = None + ): + """ + Replace the dv/dt line with the voltage clamp lines. 
+ + Args: + result (str) + right side of the dv/dt equation + eq (list[str]) + equations of the neuron model + eq_v_idx (int) + index of the dv/dt line + tags (str) + tags of the dv/dt line + + Returns: + eq (list[str]) + new equations of the neuron model with the voltage clamp + """ + ### create the line for recording voltage clamp (right side of dv/dt) + eq_new_0 = f"v_clamp_rec_sign = {result}" + ### create the line for the absolute voltage clamp + eq_new_1 = f"v_clamp_rec = fabs({result})" + ### create the line for the absolute voltage clamp from the previous time step + eq_new_2 = "v_clamp_rec_pre = v_clamp_rec" + ### create the voltage clamp line "dv/dt=0" with tags if they exist + if not isinstance(tags, type(None)): + eq_new_3 = f"dv/dt=0 : {tags}" + else: + eq_new_3 = "dv/dt=0" + ### remove old v line and insert new three lines, order is important + del eq[eq_v_idx] + eq.insert(eq_v_idx, eq_new_0) + eq.insert(eq_v_idx, eq_new_1) + eq.insert(eq_v_idx, eq_new_2) + eq.insert(eq_v_idx, eq_new_3) + ### return new neuron equations + return eq + + def get_line_is_v(self, line: str): + """ + Check if the line contains the definition of dv/dt. + + Args: + line (str) + line of the equations of the neuron model + + Returns: + line_is_v (bool) + True if the line contains the definition of dv/dt, False otherwise + """ + if "v" not in line: + return False + + ### remove whitespaces + line = line.replace(" ", "") + + ### check for dv/dt + if "dv/dt" in line: + return True + + return False + + def get_eq_v(self, eq: list[str]): + """ + Get the dv/dt equation from the equations of the neuron model. 
+ + Args: + eq (list[str]) + equations of the neuron model + + Returns: + eq_v (str) + dv/dt equation + eq_v_idx (int) + index of the dv/dt line + """ + ### get the dv/dt equation from equations + ### find the line with dv/dt= or v+= or v= + line_is_v_list = [False] * len(eq) + ### check in which lines v is defined + for line_idx, line in enumerate(eq): + line_is_v_list[line_idx] = self.get_line_is_v(line) + ### raise error if no v or multiple times v + if True not in line_is_v_list or sum(line_is_v_list) > 1: + raise ValueError( + "In the equations of the neurons has to be exactly a single line which defines dv/dt!" + ) + ### get the index of the line with dv/dt + eq_v_idx = line_is_v_list.index(True) + ### set the v equation + eq_v = eq.copy()[eq_v_idx] + return eq_v, eq_v_idx + + def prepare_eq_v(self, eq_v: str): + """ + Prepare the equation string for solving with sympy. + + Args: + eq_v (str) + dv/dt equation + + Returns: + eq_v (str) + dv/dt equation + tags (str) + tags of the dv/dt equation + """ + ### remove whitespaces + eq_v = eq_v.replace(" ", "") + ### replace dv/dt by delta_v + eq_v = eq_v.replace("dv/dt", "delta_v") + ### separate equation and tags + eq_tags_list = eq_v.split(":") + eq_v = eq_tags_list[0] + if len(eq_tags_list) > 1: + tags = eq_tags_list[1] + else: + tags = None + return eq_v, tags + + def solve_delta_v(self, eq_v: str, neuron_model_attributes_name_list: list[str]): + """ + Solve the dv/dt equation for delta_v (which is dv/dt). 
+ + Args: + eq_v (str) + dv/dt equation + neuron_model_attributes_name_list (list[str]) + list of the names of the attributes of the neuron model + + Returns: + solution_str (str) + right side of the dv/dt equation + """ + ### Define the attributes of the neuron model as sympy symbols + sp.symbols(",".join(neuron_model_attributes_name_list)) + ### Define delta_v and right_side as sympy symbols + delta_v, _ = sp.symbols("delta_v right_side") + + ### Parse the equation string + lhs, rhs_string = eq_v.split("=") + lhs = sp.sympify(lhs) + rhs = sp.sympify(rhs_string) + + ### Form the equation + equation = sp.Eq(lhs, rhs) + + ### Solve the equation for delta_v + try: + solution = sp.solve(equation, delta_v)[0] + except: + raise ValueError("Could not find solution for dv/dt!") + + ### Get the solution as a string + solution_str = str(solution) + + ### replace right_side by the actual right side string + solution_str = solution_str.replace("right_side", f"({rhs_string})") + + return solution_str From 8e6dbd08337a465edd725415657ff707054aabf7 Mon Sep 17 00:00:00 2001 From: olmai Date: Thu, 6 Jun 2024 17:31:31 +0200 Subject: [PATCH 31/39] model_configurator: continued with new structur system_functions: new class Logger extra_functions: new function find_x_bound --- .gitignore | 1 + src/CompNeuroPy/__init__.py | 2 + .../model_configurator_cnp.py | 615 +++++++++++------- .../model_configurator_user.py | 1 + src/CompNeuroPy/extra_functions.py | 121 ++++ src/CompNeuroPy/system_functions.py | 49 ++ 6 files changed, 569 insertions(+), 220 deletions(-) diff --git a/.gitignore b/.gitignore index 36290a3..69bc5cd 100644 --- a/.gitignore +++ b/.gitignore @@ -16,3 +16,4 @@ dist/ !site/* *.pkl *json +*.log diff --git a/src/CompNeuroPy/__init__.py b/src/CompNeuroPy/__init__.py index 22a7638..fa8a1bd 100644 --- a/src/CompNeuroPy/__init__.py +++ b/src/CompNeuroPy/__init__.py @@ -38,6 +38,7 @@ node_cl, # TODO remove efel_loss, RNG, + find_x_bound, ) from CompNeuroPy.model_functions import 
( compile_in_folder, @@ -64,6 +65,7 @@ timing_decorator, run_script_parallel, create_data_raw_folder, + Logger, ) from CompNeuroPy.simulation_requirements import req_pop_attr, ReqPopHasAttr from CompNeuroPy.statistic_functions import anova_between_groups diff --git a/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py b/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py index 38842c2..03ae5c7 100644 --- a/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py +++ b/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py @@ -1,6 +1,7 @@ from CompNeuroPy.generate_model import CompNeuroModel from CompNeuroPy import model_functions as mf from CompNeuroPy import extra_functions as ef +from CompNeuroPy import system_functions as sf from ANNarchy import ( Population, @@ -49,78 +50,66 @@ import sympy as sp -class Logger: - def __init__(self, log_file: str): - self.log_file = log_file - self.caller_name = "" - with open(log_file, "w") as f: - print("Logger file:", file=f) +class ArrSampler: + """ + Class to store an array and sample from it. 
+ """ - def log(self, txt): - caller_frame = inspect.currentframe().f_back - caller_name = caller_frame.f_code.co_name + def __init__(self, arr: np.ndarray, var_name_list: list[str]) -> None: + """ + Args: + arr (np.ndarray) + array with shape (n_samples, n_variables) + var_name_list (list[str]) + list of variable names + """ + self.arr_shape = arr.shape + self.var_name_list = var_name_list + ### check values of any variable are constant + self.is_const = np.std(arr, axis=0) <= np.mean(np.absolute(arr), axis=0) / 1000 + ### for the constant variables only the first value is used + self.constant_arr = arr[0, self.is_const] + ### array without the constant variables + self.not_constant_val_arr = arr[:, np.logical_not(self.is_const)] - if caller_name == self.caller_name: - txt = f"{textwrap.indent(str(txt), ' ')}" - else: - txt = f"[{caller_name}]:\n{textwrap.indent(str(txt), ' ')}" + def sample(self, n=1, seed=0): + """ + Sample n samples from the array. - self.caller_name = caller_name + Args: + n (int) + number of samples to be drawn + seed (int) + seed for the random number generator - with open(self.log_file, "a") as f: - print(txt, file=f) + Returns: + ret_arr (np.ndarray) + array with shape (n, n_variables) + """ + ### get n random indices along the n_samples axis + rng = np.random.default_rng(seed=seed) + random_idx_arr = rng.integers(low=0, high=self.arr_shape[0], size=n) + ### sample with random idx + sample_arr = self.not_constant_val_arr[random_idx_arr] + ### create return array + ret_arr = np.zeros((n,) + self.arr_shape[1:]) + ### add samples to return array + ret_arr[:, np.logical_not(self.is_const)] = sample_arr + ### add constant values to return array + ret_arr[:, self.is_const] = self.constant_arr + return ret_arr -class ModelConfigurator: - def __init__( - self, - model: CompNeuroModel, - target_firing_rate_dict: dict, - max_psp: float = 10.0, - do_not_config_list: list[str] = [], - print_guide: bool = False, - I_app_variable: str = "I_app", - 
cache: bool = False, - clear_cache: bool = False, - log_file: str | None = None, - ): - ### create logger - self._logger = Logger(log_file=log_file) - ### analyze the given model, create model before analyzing, then clear ANNarchy - self._analyze_model = AnalyzeModel(model=model) - ### create the CompNeuroModel object for the reduced model (the model itself is - ### not created yet) - self._model_reduced = CreateReducedModel( - model=model, - analyze_model=self._analyze_model, - reduced_size=100, - do_create=False, - do_compile=False, - verbose=True, - ) - ### create the single neuron networks (networks are compiled and ready to be - ### simulated), normal model for searching for max conductances, max input - ### current, resting firing rate; voltage clamp model for preparing the PSP - ### simulationssearching, i.e., for resting potential and corresponding input - ### current I_hold (for self-active neurons) - self._single_nets = CreateSingleNeuronNetworks( - model=model, - analyze_model=self._analyze_model, - do_not_config_list=do_not_config_list, - ) - ### define the simulator with all simulations with the single neuron networks - self._simulator = Simulator(single_nets=self._single_nets) - ### get the resting potential and corresponding I_hold for each population using - ### the voltage clamp networks - self._v_rest = PreparePSP( - simulator=self._simulator, - model=model, - single_nets=self._single_nets, - do_not_config_list=do_not_config_list, - logger=self._logger, - ) - self._max_syn = GetMaxSyn() - self._weight_templates = GetWeightTemplates() + def set_init_variables(self, population: Population): + """ + Set the initial variables of the given population to the given values. 
+ """ + variable_init_arr = self.sample(len(population), seed=0) + var_name_list = self.var_name_list + for var_name in population.variables: + if var_name in var_name_list: + set_val = variable_init_arr[:, var_name_list.index(var_name)] + setattr(population, var_name, set_val) class AnalyzeModel: @@ -158,7 +147,8 @@ def __init__(self, model: CompNeuroModel): def _clear_model(self, model: CompNeuroModel, do_create: bool = True): mf.cnp_clear(functions=False, neurons=True, synapses=True, constants=False) - model.create(do_create=do_create, do_compile=False) + if do_create: + model.create(do_compile=False) def _analyze_populations(self, model: CompNeuroModel): """ @@ -415,26 +405,29 @@ def __init__( ### create the single neuron networks for normal and voltage clamp mode for mode in ["normal", "v_clamp"]: self._single_net_dict[mode] = {} - self.create_single_neuron_networks( + self._create_single_neuron_networks( model=model, analyze_model=analyze_model, do_not_config_list=do_not_config_list, mode=mode, ) - def single_net_dict(self, pop_name: str, mode: str): + def single_net(self, pop_name: str, mode: str): """ - Get the single neuron network dict for the given population and mode. + Return the information of the single neuron network for the given population and + mode. 
Args: pop_name (str): Name of the population mode (str): - Mode for which the network should be returned + Mode for which the single neuron network should be returned (normal or + v_clamp) Returns: - net_single_dict (dict): - Dict containing the Network, Population, Monitor and ArrSampler objects + ReturnSingleNeuronNetworks: + Information of the single neuron network with Attributes: net, + population, monitor, init_sampler """ return self.ReturnSingleNeuronNetworks(self._single_net_dict[mode][pop_name]) @@ -445,7 +438,7 @@ def __init__(self, single_net_dict): self.monitor: Monitor = single_net_dict["monitor"] self.init_sampler: ArrSampler = single_net_dict["init_sampler"] - def create_single_neuron_networks( + def _create_single_neuron_networks( self, model: CompNeuroModel, analyze_model: AnalyzeModel, @@ -472,11 +465,11 @@ def create_single_neuron_networks( if pop_name in do_not_config_list: continue ### store the dict containing the network etc - self._single_net_dict[mode][pop_name] = self.create_net_single( + self._single_net_dict[mode][pop_name] = self._create_net_single( pop_name=pop_name, analyze_model=analyze_model, mode=mode ) - def create_net_single(self, pop_name: str, analyze_model: AnalyzeModel, mode: str): + def _create_net_single(self, pop_name: str, analyze_model: AnalyzeModel, mode: str): """ Creates a network with the neuron type of the population given by pop_name for the given mode. The population size is set to 1. 
@@ -494,12 +487,12 @@ def create_net_single(self, pop_name: str, analyze_model: AnalyzeModel, mode: st Dict containing the Network, Population, Monitor and ArrSampler objects """ ### create the adjusted neuron model for the stop condition - neuron_model_new = self.get_single_neuron_neuron_model( + neuron_model_new = self._get_single_neuron_neuron_model( pop_name=pop_name, analyze_model=analyze_model, mode=mode ) ### create the single neuron population - pop_single_neuron = self.get_single_neuron_population( + pop_single_neuron = self._get_single_neuron_population( pop_name=pop_name, neuron_model_new=neuron_model_new, analyze_model=analyze_model, @@ -533,14 +526,14 @@ def create_net_single(self, pop_name: str, analyze_model: AnalyzeModel, mode: st ### for normal neuron get the init sampler for the variables of the neuron model ### (to initialize a population of the neuron model) - init_sampler = self.get_neuron_model_init_sampler( + init_sampler = self._get_neuron_model_init_sampler( net=net_single, pop=net_single.get(pop_single_neuron) ) net_single_dict["init_sampler"] = init_sampler return net_single_dict - def get_single_neuron_neuron_model( + def _get_single_neuron_neuron_model( self, pop_name: str, analyze_model: AnalyzeModel, mode=str ): """ @@ -568,7 +561,7 @@ def get_single_neuron_neuron_model( ) ### add v_before_psp and v_psp_thresh to equations/parameters, for the stop ### condition below - self.adjust_neuron_model( + self._adjust_neuron_model( neuron_model_init_parameter_dict, neuron_model_attributes_name_list, mode=mode, @@ -577,7 +570,7 @@ def get_single_neuron_neuron_model( neuron_model_new = Neuron(**neuron_model_init_parameter_dict) return neuron_model_new - def get_single_neuron_population( + def _get_single_neuron_population( self, pop_name: str, neuron_model_new: Neuron, @@ -623,7 +616,7 @@ def get_single_neuron_population( setattr(pop_single_neuron, attr_name, attr_val) return pop_single_neuron - def get_neuron_model_init_sampler(self, net: 
Network, pop: Population): + def _get_neuron_model_init_sampler(self, net: Network, pop: Population): """ Create a sampler for the initial values of the variables of the neuron model by simulating the neuron for 10000 ms and afterwards simulating 2000 ms and @@ -659,11 +652,11 @@ def get_neuron_model_init_sampler(self, net: Network, pop: Population): ### reset network after simulation net.reset() - ### create a sampler with the data samples of from the 1000 ms simulation + ### create a sampler with the data samples from the21000 ms simulation sampler = ArrSampler(arr=var_arr, var_name_list=var_name_list) return sampler - def adjust_neuron_model( + def _adjust_neuron_model( self, neuron_model_init_parameter_dict: dict, neuron_model_attributes_name_list: list[str], @@ -713,8 +706,12 @@ def adjust_neuron_model( class Simulator: - def __init__(self, single_nets: CreateSingleNeuronNetworks): - self.single_nets = single_nets + _instance = None + + def __new__(cls): + if cls._instance is None: + cls._instance = super(Simulator, cls).__new__(cls) + return cls._instance def get_v_clamp_2000( self, @@ -741,11 +738,11 @@ def get_v_clamp_2000( v_clamp_rec value of the single neuron after 2000 ms """ ### get the network, population, init_sampler - net = self.single_nets.single_net_dict(pop_name=pop_name, mode="v_clamp").net - population = self.single_nets.single_net_dict( + net = single_nets.single_net(pop_name=pop_name, mode="v_clamp").net + population = single_nets.single_net( pop_name=pop_name, mode="v_clamp" ).population - init_sampler = self.single_nets.single_net_dict( + init_sampler = single_nets.single_net( pop_name=pop_name, mode="v_clamp" ).init_sampler ### reset network @@ -764,7 +761,9 @@ def get_v_clamp_2000( ### return the v_clamp_rec value of the single neuron after 2000 ms return population.v_clamp_rec[0] - def get_v_2000(self, pop_name, initial_variables, I_app=None, do_plot=True): + def get_v_2000( + self, pop_name, initial_variables, I_app=None, do_plot=True 
+ ) -> np.ndarray: """ Simulate normal single neuron 2000 ms and return v for this duration. @@ -783,13 +782,9 @@ def get_v_2000(self, pop_name, initial_variables, I_app=None, do_plot=True): Membrane potential for the 2000 ms simulation with shape: (time_steps,) """ ### get the network, population, monitor - net = self.single_nets.single_net_dict(pop_name=pop_name, mode="normal").net - population = self.single_nets.single_net_dict( - pop_name=pop_name, mode="normal" - ).population - monitor = self.single_nets.single_net_dict( - pop_name=pop_name, mode="normal" - ).monitor + net = single_nets.single_net(pop_name=pop_name, mode="normal").net + population = single_nets.single_net(pop_name=pop_name, mode="normal").population + monitor = single_nets.single_net(pop_name=pop_name, mode="normal").monitor ### reset network net.reset() net.set_seed(0) @@ -813,6 +808,106 @@ def get_v_2000(self, pop_name, initial_variables, I_app=None, do_plot=True): return v_arr + def get_v_psp(self, v_rest: float, I_app_hold: float, pop_name: str) -> np.ndarray: + """ + Simulate the single neuron network of the given pop_name for 5000 ms and return + the variables of the neuron model after 5000 ms. 
+ + Args: + v_rest (float): + Resting potential + I_app_hold (float): + Applied current to hold the resting potential + pop_name (str): + Name of the population + + Returns: + var_arr (np.array): + Variables of the neuron model after 5000 ms with shape: (1, n_vars) + """ + + ### get the network, population, monitor + net = single_nets.single_net(pop_name=pop_name, mode="normal").net + population = single_nets.single_net(pop_name=pop_name, mode="normal").population + ### reset network + net.reset() + net.set_seed(0) + ### set the initial variables of the neuron model + population.v = v_rest + population.I_app = I_app_hold + ### simulate + net.simulate(5000) + ### get the variables of the neuron after 5000 ms in the shape (1, n_vars) + var_name_list = list(population.variables) + var_arr = np.zeros((1, len(var_name_list))) + get_arr = np.array( + [getattr(population, var_name) for var_name in population.variables] + ) + var_arr[0, :] = get_arr[:, 0] + return var_arr + + +class ModelConfigurator: + def __init__( + self, + model: CompNeuroModel, + target_firing_rate_dict: dict, + max_psp: float = 10.0, + do_not_config_list: list[str] = [], + print_guide: bool = False, + I_app_variable: str = "I_app", + cache: bool = False, + clear_cache: bool = False, + log_file: str | None = None, + ): + ### initialize logger + sf.Logger(log_file=log_file) + ### analyze the given model, create model before analyzing, then clear ANNarchy + self._analyze_model = AnalyzeModel(model=model) + ### create the CompNeuroModel object for the reduced model (the model itself is + ### not created yet) + self._model_reduced = CreateReducedModel( + model=model, + analyze_model=self._analyze_model, + reduced_size=100, + do_create=False, + do_compile=False, + verbose=True, + ) + ### create the single neuron networks (networks are compiled and ready to be + ### simulated), normal model for searching for max conductances, max input + ### current, resting firing rate; voltage clamp model for preparing 
the PSP + ### simulationssearching, i.e., for resting potential and corresponding input + ### current I_hold (for self-active neurons) + global single_nets + single_nets = CreateSingleNeuronNetworks( + model=model, + analyze_model=self._analyze_model, + do_not_config_list=do_not_config_list, + ) + ### define the simulator with all simulations for the single neuron networks + Simulator() + ### get the resting potential and corresponding I_hold for each population using + ### the voltage clamp networks + global prepare_psp + prepare_psp = PreparePSP( + model=model, + do_not_config_list=do_not_config_list, + do_plot=False, + ) + ### tmp test + for pop_name in model.populations: + if pop_name in do_not_config_list: + continue + print(f"pop_name: {pop_name}") + print(f"I_app_hold: {prepare_psp.get(pop_name=pop_name).I_app_hold}") + print(f"v_rest: {prepare_psp.get(pop_name=pop_name).v_rest}") + + quit() # TODO seems to work until here, continue here + + self._max_syn = GetMaxSyn() + self._weight_templates = GetWeightTemplates() + class CreateReducedModel: """ @@ -1286,110 +1381,205 @@ class PreparePSP: def __init__( self, - simulator: Simulator, model: CompNeuroModel, - single_nets: CreateSingleNeuronNetworks, do_not_config_list: list[str], - logger: Logger, do_plot: bool, ): - + """ + Args: + model (CompNeuroModel): + Model to be prepared + do_not_config_list (list[str]): + List of populations which should not be configured + do_plot (bool): + If True, plot the membrane potential + """ + self._prepare_psp_dict = {} + ### loop over all populations for pop_name in model.populations: ### skip populations which should not be configured if pop_name in do_not_config_list: continue - ### find initial v_rest - logger.log( + self._prepare_psp_dict[pop_name] = {} + ### find initial v_rest using the voltage clamp network + sf.Logger().log( f"search v_rest with y(X) = delta_v_2000(v=X) using grid search for pop {pop_name}" ) - v_rest, delta_v_v_rest, variables_v_rest = 
self.find_v_rest_initial( + v_rest, delta_v_v_rest, variables_v_rest = self._find_v_rest_initial( pop_name=pop_name, - simulator=simulator, - single_nets=single_nets, do_plot=do_plot, ) - logger.log( + sf.Logger().log( f"for {pop_name} found v_rest={v_rest} with delta_v_2000(v=v_rest)={delta_v_v_rest}" ) - ### check if v is constant after setting v to v_rest - v_rest_is_constant, v_rest_arr = self.get_v_rest_is_const() + ### check if v is constant after setting v to v_rest by simulating the normal + ### single neuron network for 2000 ms + v_rest_is_constant, v_rest_arr = self._get_v_rest_is_const( + pop_name=pop_name, + variables_v_rest=variables_v_rest, + do_plot=do_plot, + ) if v_rest_is_constant: - ### v_rest found, no I_app_hold needed + ### v_rest found (last v value of the previous simulation), no + ### I_app_hold needed v_rest = v_rest_arr[-1] I_app_hold = 0 else: ### there is no resting_state i.e. neuron is self-active --> find ### smallest negative I_app to silence neuron - logger.log( - "neuron seems to be self-active --> find smallest I_app to silence the neuron" + sf.Logger().log( + f"neuron of {pop_name} seems to be self-active --> find smallest I_app to silence the neuron" + ) + v_rest, I_app_hold = self._find_I_app_hold( + pop_name=pop_name, + variables_v_rest=variables_v_rest, ) - v_rest, I_app_hold = self.find_I_app_hold() - logger.log(f"I_app_hold = {I_app_hold}, resulting v_rest = {v_rest}") + sf.Logger().log( + f"final values for {pop_name}: I_app_hold = {I_app_hold}, v_rest = {v_rest}" + ) ### get the sampler for the initial variables - variable_init_sampler = self.get_init_neuron_variables_for_psp( - net=self.net_single_dict[pop_name]["net"], - pop=self.net_single_dict[pop_name]["population"], + psp_init_sampler = self._get_init_neuron_variables_for_psp( + pop_name=pop_name, v_rest=v_rest, I_app_hold=I_app_hold, ) + ### store the prepare PSP information + self._prepare_psp_dict[pop_name]["v_rest"] = v_rest + 
self._prepare_psp_dict[pop_name]["I_app_hold"] = I_app_hold + self._prepare_psp_dict[pop_name]["psp_init_sampler"] = psp_init_sampler - return { - "v_rest": v_rest, - "I_app_hold": I_app_hold, - "variable_init_sampler": variable_init_sampler, - } + def get(self, pop_name: str): + """ + Return the prepare PSP information for the given population. + + Args: + pop_name (str): + Name of the population + + Returns: + ReturnPreparePSP: + Prepare PSP information for the given population with Attributes: v_rest, + I_app_hold, psp_init_sampler + """ + return self.ReturnPreparePSP( + v_rest=self._prepare_psp_dict[pop_name]["v_rest"], + I_app_hold=self._prepare_psp_dict[pop_name]["I_app_hold"], + psp_init_sampler=self._prepare_psp_dict[pop_name]["psp_init_sampler"], + ) + + class ReturnPreparePSP: + def __init__( + self, v_rest: float, I_app_hold: float, psp_init_sampler: ArrSampler + ): + self.v_rest = v_rest + self.I_app_hold = I_app_hold + self.psp_init_sampler = psp_init_sampler + + def _get_init_neuron_variables_for_psp( + self, pop_name: str, v_rest: float, I_app_hold: float + ): + """ + Get the initial variables of the neuron model for the PSP calculation. 
+ + Args: + pop_name (str): + Name of the population + v_rest (float): + Resting membrane potential + I_app_hold (float): + Current which silences the neuron - def find_I_app_hold(self): - # TODO continue - ### negative current initially reduces v - ### then v climbs back up - ### check if the second half of v is constant if yes fine if not increase negative I_app + Returns: + sampler (ArrSampler): + Sampler with the initial variables of the neuron model + """ + ### get the names of the variables of the neuron model + var_name_list = single_nets.single_net( + pop_name=pop_name, mode="normal" + ).population.variables + ### get the variables of the neuron model after 5000 ms + var_arr = Simulator().get_v_psp( + v_rest=v_rest, I_app_hold=I_app_hold, pop_name=pop_name + ) + ### create a sampler with this single data sample + sampler = ArrSampler(arr=var_arr, var_name_list=var_name_list) + return sampler + + def _find_I_app_hold( + self, + pop_name: str, + variables_v_rest: dict, + ): + """ + Find the current which silences the neuron. 
+ + Args: + pop_name (str): + Name of the population + variables_v_rest (dict): + Stady state variables of the neuron during setting v_rest as membrane + potential + + Returns: + v_rest (float): + Resting membrane potential + I_app_hold (float): + Current which silences the neuron + """ ### find I_app_hold with incremental_continuous_bound_search - self.log("search I_app_hold with y(X) = CHANGE_OF_V(I_app=X)") - I_app_hold = -self.incremental_continuous_bound_search( - y_X=lambda X_val: self.get_v_rest_arr_const( + sf.Logger().log("search I_app_hold with y(X) = CHANGE_OF_V(I_app=X)") + + I_app_hold = -ef.find_x_bound( + ### negative current initially reduces v then v climbs back up --> + ### get_v_change_after_v_rest checks how much v changes during second half of + ### 2000 ms simulation + y_=lambda X_val: -self._get_v_change_after_v_rest( pop_name=pop_name, - obtained_variables=obtained_variables, + variables_v_rest=variables_v_rest, + ### incremental_continuous_bound_search only uses positive values for X and + ### increases them, expecting to increase y, therefore use -X for I_app + ### (increasing X will "increase" negative current) and negative sign for + ### the returned value (for no current input the change is positive, this + ### should decrease to zero, with negative sign: for no current input the + ### change is negative, this should increase above zero) I_app=-X_val, ), + ### y is initially negative and should increase above 0, therefore search for + ### y_bound=0 with bound_type="greater" + x0=0, y_bound=0, - X_0=0, - y_0=self.get_v_rest_arr_const( - pop_name=pop_name, - obtained_variables=obtained_variables, - I_app=0, - ), - X_increase=detla_v_rest, - accept_non_dicontinuity=True, + tolerance=0.01, bound_type="greater", ) + # y_bound = 0 + # y: -10, -2, +0.1 + # x: 0, 5, 7 + # bound type greater should find value which is slightly larger than 0 TODO test this ### again simulate the neuron with the obtained I_app_hold to get the new v_rest - 
v_rest_arr = self.get_new_v_rest_2000( - pop_name, obtained_variables, I_app=I_app_hold + v_rest_arr = Simulator().get_v_2000( + pop_name=pop_name, + initial_variables=variables_v_rest, + I_app=I_app_hold, + do_plot=False, ) v_rest = v_rest_arr[-1] + return v_rest, I_app_hold - def find_v_rest_initial( + def _find_v_rest_initial( self, pop_name: str, - simulator: Simulator, - single_nets: CreateSingleNeuronNetworks, do_plot: bool, ): """ Find the initial v_rest with the voltage clamp single neuron network for the given population. Furthermore, get the change of v durign setting v_rest and the - stady state variables of the neuron. + stady state variables of the neuron (at the end of the simulation). Args: pop_name (str): Name of the population - simulator (Simulator): - Simulator object - single_nets (CreateSingleNeuronNetworks): - Single neuron networks do_plot (bool): True if plots should be created, False otherwise @@ -1407,7 +1597,10 @@ def find_v_rest_initial( ### only be >= 0) v_arr = np.linspace(-90, -20, 200) v_clamp_arr = np.array( - [simulator.get_v_clamp_2000(pop_name=pop_name, v=v_val) for v_val in v_arr] + [ + Simulator().get_v_clamp_2000(pop_name=pop_name, v=v_val) + for v_val in v_arr + ] ) v_clamp_min_idx = argrelmin(v_clamp_arr)[0] v_rest = np.min(v_arr[v_clamp_min_idx]) @@ -1421,8 +1614,10 @@ def find_v_rest_initial( ### do again the simulation only with the obtained v_rest to get the detla_v for ### v_rest - detla_v_v_rest = simulator.get_v_clamp_2000(pop_name=pop_name, v=v_rest) * dt() - population = single_nets.single_net_dict( + detla_v_v_rest = ( + Simulator().get_v_clamp_2000(pop_name=pop_name, v=v_rest) * dt() + ) + population = single_nets.single_net( pop_name=pop_name, mode="v_clamp" ).population ### and the stady state variables of the neuron @@ -1431,12 +1626,19 @@ def find_v_rest_initial( } return v_rest, detla_v_v_rest, variables_v_rest - def get_v_rest_is_const( - self, simulator: Simulator, pop_name: str, variables_v_rest: dict, 
do_plot=bool
-    ):
+    def _get_v_rest_is_const(self, pop_name: str, variables_v_rest: dict, do_plot=bool):
         """
         Check if the membrane potential is constant after setting it to v_rest.
 
+        Args:
+            pop_name (str):
+                Name of the population
+            variables_v_rest (dict):
+                Steady state variables of the neuron during setting v_rest as membrane
+                potential, used as initial variables for the simulation
+            do_plot (bool):
+                True if plots should be created, False otherwise
+
         Returns:
             v_rest_is_constant (bool):
                 True if the membrane potential is constant, False otherwise
@@ -1444,7 +1646,7 @@ def get_v_rest_is_const(
                 Membrane potential for the 2000 ms simulation with shape: (time_steps,)
         """
         ### check if the neuron stays at v_rest with normal neuron
-        v_rest_arr = simulator.get_v_2000(
+        v_rest_arr = Simulator().get_v_2000(
             pop_name=pop_name,
             initial_variables=variables_v_rest,
             I_app=0,
@@ -1455,6 +1657,41 @@ def get_v_rest_is_const(
         )
         return v_rest_arr_is_const, v_rest_arr
 
+    def _get_v_change_after_v_rest(
+        self, pop_name: str, variables_v_rest: dict, I_app: float
+    ):
+        """
+        Check how much the membrane potential changes after setting it to v_rest.
+
+        Args:
+            pop_name (str):
+                Name of the population
+            variables_v_rest (dict):
+                Steady state variables of the neuron during setting v_rest as membrane
+                potential, used as initial variables for the simulation
+            I_app (float):
+                Applied current during the 2000 ms simulation
+
+        Returns:
+            change_after_v_rest (np.array):
+                Change of the membrane potential after setting it to v_rest
+        """
+        ### simulate 2000 ms after setting v_rest
+        v_rest_arr = Simulator().get_v_2000(
+            pop_name=pop_name,
+            initial_variables=variables_v_rest,
+            I_app=I_app,
+            do_plot=False,
+        )
+        ### check how much v changes during the second half
+        ### std(v) - mean(v)/1000 should be close to 0, the larger the value the more v
+        ### changes
+        change_after_v_rest = (
+            np.std(v_rest_arr[len(v_rest_arr) // 2 :], axis=0)
+            - np.mean(np.absolute(v_rest_arr[len(v_rest_arr) // 2 :]), axis=0) / 1000
+        )
+        return change_after_v_rest
+
 
 class GetMaxSyn:
     def __init__(self):
@@ -1466,68 +1703,6 @@ def __init__(self):
         pass
 
 
-class ArrSampler:
-    """
-    Class to store an array and sample from it.
-    """
-
-    def __init__(self, arr: np.ndarray, var_name_list: list[str]) -> None:
-        """
-        Args:
-            arr (np.ndarray)
-                array with shape (n_samples, n_variables)
-            var_name_list (list[str])
-                list of variable names
-        """
-        self.arr_shape = arr.shape
-        self.var_name_list = var_name_list
-        ### check values of any variable are constant
-        self.is_const = np.std(arr, axis=0) <= np.mean(np.absolute(arr), axis=0) / 1000
-        ### for the constant variables only the first value is used
-        self.constant_arr = arr[0, self.is_const]
-        ### array without the constant variables
-        self.not_constant_val_arr = arr[:, np.logical_not(self.is_const)]
-
-    def sample(self, n=1, seed=0):
-        """
-        Sample n samples from the array.
- - Args: - n (int) - number of samples to be drawn - seed (int) - seed for the random number generator - - Returns: - ret_arr (np.ndarray) - array with shape (n, n_variables) - """ - ### get n random indices along the n_samples axis - rng = np.random.default_rng(seed=seed) - random_idx_arr = rng.integers(low=0, high=self.arr_shape[0], size=n) - ### sample with random idx - sample_arr = self.not_constant_val_arr[random_idx_arr] - ### create return array - ret_arr = np.zeros((n,) + self.arr_shape[1:]) - ### add samples to return array - ret_arr[:, np.logical_not(self.is_const)] = sample_arr - ### add constant values to return array - ret_arr[:, self.is_const] = self.constant_arr - - return ret_arr - - def set_init_variables(self, population: Population): - """ - Set the initial variables of the given population to the given values. - """ - variable_init_arr = self.sample(len(population), seed=0) - var_name_list = self.var_name_list - for var_name in population.variables: - if var_name in var_name_list: - set_val = variable_init_arr[:, var_name_list.index(var_name)] - setattr(population, var_name, set_val) - - class CreateVoltageClampEquations: """ Class to create voltage clamp equations from the given equations of a neuron model. 
@@ -1539,7 +1714,7 @@ class CreateVoltageClampEquations: new equations of the neuron model with the voltage clamp """ - def __ini__(self, eq: list[str], neuron_model_attributes_name_list: list[str]): + def __init__(self, eq: list[str], neuron_model_attributes_name_list: list[str]): """ Args: eq (list[str]) diff --git a/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py b/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py index 299f2c2..9c2aef9 100644 --- a/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py +++ b/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py @@ -366,6 +366,7 @@ def BGM_part_function(params): do_not_config_list=do_not_config_list, print_guide=True, I_app_variable="I_app", + log_file="model_configurator.log", ) ### obtain the maximum synaptic loads for the populations and the diff --git a/src/CompNeuroPy/extra_functions.py b/src/CompNeuroPy/extra_functions.py index f7841ae..ddfded0 100644 --- a/src/CompNeuroPy/extra_functions.py +++ b/src/CompNeuroPy/extra_functions.py @@ -23,6 +23,7 @@ from ANNarchy import Neuron, Population, simulate, setup, get_population from sympy import symbols, Symbol, solve, sympify, Eq, lambdify, factor from scipy.interpolate import griddata +from scipy.optimize import brentq, minimize_scalar import re from typingchecker import check_types import warnings @@ -2626,3 +2627,123 @@ def reset(self): self.rng.bit_generator.state = np.random.default_rng( seed=self._original_seed ).bit_generator.state + + +def find_x_bound( + y_: Callable[[float], float], + x0: float, + y_bound: float, + tolerance: float = 1e-5, + bound_type: str = "equal", +) -> float: + """ + Find the x value such that y(x) is closest to y_bound within a given tolerance. The + value y_bound should be reachable by y(x) by increasing x from the initial value x0. 
+ + Args: + y (Callable[[float], float]): + A function that takes a single float argument and returns a single float + value. + x0 (float): + The initial value of x to start the search. + y_bound (float): + The target value of y. + tolerance (float, optional): + The tolerance for the difference between y(x) and y_bound. Defaults to 1e-5. + bound_type (str, optional): + The type of bound to find. Can be 'equal'(y(x) should be close to y_bound), + 'greater'(y(x) should be close to y_bound and greater), or 'less'(y(x) should + be close to y_bound and less). Defaults to 'equal'. + + Returns: + x_bound (float): + The x value such that y(x) is closest to y_bound within the tolerance. + """ + + def y(x): + x_y = y_(x) + sf.Logger().log(f"x: {x}; y: {x_y}") + return x_y + + # Catch invalid bound type + if bound_type not in ["equal", "greater", "less"]: + raise ValueError("bound_type should be 'equal', 'greater', or 'less'.") + + # Check if the initial value y(x0) is already y_bound + y0 = y(x0) + if np.isclose(y0, y_bound, atol=tolerance): + print("Warning: The initial value is already equal to y_bound.") + return x0, x0 + + sf.Logger().log(f"x0: {x0}, y0: {y0}, y_bound: {y_bound}") + + # Define a helper function to find x such that y(x) - y_bound = 0 + def func(x): + return y(x) - y_bound + + # Exponential search to find an interval [a, b] where y(a) < y_bound < y(b) + a = x0 + b = x0 + 1 + while y(b) < y_bound: + a = b + b *= 2 + if b > 1e6: # Avoid infinite loop in case y_bound is not reachable + break + if b > 1e6: + raise ValueError( + "y_bound cannot be reached, the function saturates below y_bound." + ) + sf.Logger().log(f"a: {a}, b: {b}") + + # Check if the maximum value is less than y_bound + res = minimize_scalar(lambda x: -y(x), bounds=(x0, b), method="bounded") + y_max = -res.fun + if y_max < y_bound: + raise ValueError( + "y_bound cannot be reached, the function saturates below y_bound." 
+ ) + sf.Logger().log(f"y_max: {y_max}") + + # Use brentq to find the root within the interval [a, b] + x_root: float = brentq(func, a, b, xtol=tolerance, full_output=False) + sf.Logger().log(f"x_root: {x_root}") + + if bound_type == "equal": + # Return the x value such that y(x) = y_bound + return x_root + + # Calculate the gradient at x_root + dx = np.abs(x_root - x0) * 1e-3 + grad_y = (y(x_root + dx) - y(x_root - dx)) / (2 * dx) + + # Define epsilon based on the gradient + epsilon = tolerance / np.abs(grad_y) if grad_y != 0 else tolerance + + if bound_type == "greater": + # Find the x value such that y(x) > y_bound (thus maybe increase x) + # do this by incrementaly increasing x by epsilon until y(x) is greater than + # y_bound + # if y(x+epsilon)-y(x) is less than the tolerance, increase epsilon + x = x_root + y_val = y(x) + while y_val < y_bound: + y_val_prev = y_val + x += epsilon + y_val = y(x) + if y_val - y_val_prev < tolerance / 10: + epsilon *= 2 + return x + elif bound_type == "less": + # Find the x value such that y(x) < y_bound (thus maybe decrease x) + # do this by incrementaly decreasing x by epsilon until y(x) is less than + # y_bound + # if y(x)-y(x-epsilon) is less than the tolerance, increase epsilon + x = x_root + y_val = y(x) + while y_val > y_bound: + y_val_prev = y_val + x -= epsilon + y_val = y(x) + if y_val_prev - y_val < tolerance / 10: + epsilon *= 2 + return x diff --git a/src/CompNeuroPy/system_functions.py b/src/CompNeuroPy/system_functions.py index ea4a448..eb89abf 100644 --- a/src/CompNeuroPy/system_functions.py +++ b/src/CompNeuroPy/system_functions.py @@ -7,6 +7,7 @@ from joblib import Parallel, delayed import inspect import subprocess +import textwrap def clear_dir(path): @@ -536,3 +537,51 @@ def _find_folder_with_prefix(base_path, prefix): # If no folder with the specified prefix is found, return None return None + + +class Logger: + """ + Logger singleton class to log the progress of the model configuration. 
Has to be + initialized with the path to the log file once.""" + + _instance = None + _log_file: str | None + _caller_name = "" + + def __new__(cls, log_file: str | None = None): + """ + Args: + log_file (str): + Path to the log file + """ + if cls._instance is None: + cls._instance = super(Logger, cls).__new__(cls) + cls._log_file = log_file + if log_file is not None: + with open(log_file, "w") as f: + print("Logger file:", file=f) + return cls._instance + + def log(self, txt): + """ + Log the given text to the log file. Only if the log file was given during + the first initialization. + + Args: + txt (str): + Text to be logged + """ + if self._log_file is None: + return + caller_frame = inspect.currentframe().f_back + caller_name = caller_frame.f_code.co_name + + if caller_name == self._caller_name: + txt = f"{textwrap.indent(str(txt), ' ')}" + else: + txt = f"[{caller_name}]:\n{textwrap.indent(str(txt), ' ')}" + + self._caller_name = caller_name + + with open(self._log_file, "a") as f: + print(txt, file=f) From c67d71812c8d8b5996ccd5854be5737f500cd7f8 Mon Sep 17 00:00:00 2001 From: olimaol Date: Fri, 7 Jun 2024 12:02:08 +0200 Subject: [PATCH 32/39] model configurator: continued new structure --- .../model_configurator_cnp.py | 196 ++++++++++++++++-- .../model_configurator_user.py | 5 +- src/CompNeuroPy/extra_functions.py | 47 +++-- src/CompNeuroPy/system_functions.py | 54 ++++- 4 files changed, 255 insertions(+), 47 deletions(-) diff --git a/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py b/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py index 03ae5c7..b3ad883 100644 --- a/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py +++ b/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py @@ -846,6 +846,86 @@ def get_v_psp(self, v_rest: float, I_app_hold: float, pop_name: str) -> np.ndarr var_arr[0, :] = get_arr[:, 0] return var_arr + def get_ipsp( + self, + pop_name: str, + g_ampa: float = 
0, + g_gaba: float = 0, + do_plot: bool = False, + ): + """ + Simulate the single neuron network of the given pop_name for max 5000 ms. The + neuron is hold at the resting potential by setting the applied current to + I_app_hold. Then the conductances g_ampa and g_gaba are applied (simulating a + single incoming ampa/gaba spike). The maximum of the (negative) difference of + the membrane potential and the resting potential is returned as the IPSP. + + Args: + pop_name (str): + Name of the population + g_ampa (float): + Conductance of the ampa synapse + g_gaba (float): + Conductance of the gaba synapse + do_plot (bool): + If True, plot the membrane potential + + Returns: + psp (float): + Maximum of the (negative) difference of the membrane potential and the + resting potential + """ + ### get the network, population, monitor from single nets + net = single_nets.single_net(pop_name=pop_name, mode="normal").net + population = single_nets.single_net(pop_name=pop_name, mode="normal").population + monitor = single_nets.single_net(pop_name=pop_name, mode="normal").monitor + ### get init_sampler, I_app_hold from prepare_psp + init_sampler = prepare_psp.get(pop_name=pop_name).psp_init_sampler + I_app_hold = prepare_psp.get(pop_name=pop_name).I_app_hold + ### reset network + net.reset() + net.set_seed(0) + ### set the initial variables of the neuron model + if init_sampler is not None: + init_sampler.set_init_variables(population) + ### set I_app (I_app_hold) to hold the resting potential + population.I_app = I_app_hold + ### simulate 50 ms initial duration + net.simulate(50) + ### get the current v and set it as v_psp_thresh for the population's stop + ### condition + v_rec_rest = population.v[0] + population.v_psp_thresh = v_rec_rest + ### apply given conductances --> changes v, causes psp + population.g_ampa = g_ampa + population.g_gaba = g_gaba + ### simulate until v is near v_rec_rest again or until 5000 ms + net.simulate_until(max_duration=5000, population=population) 
+ ### get v and spike dict to calculate psp + v_rec = monitor.get("v")[:, 0] + spike_dict = monitor.get("spike") + ### if neuron spiked only check psps until spike time, otherwise until last + ### (current) time step + spike_timestep_list = spike_dict[0] + [net.get_current_step()] + end_timestep = int(round(min(spike_timestep_list), 0)) + ### calculate psp, maximum of negative difference of v_rec and v_rec_rest + psp = float( + np.absolute(np.clip(v_rec[:end_timestep] - v_rec_rest, None, 0)).max() + ) + + if do_plot: + plt.figure() + plt.title( + f"g_ampa={g_ampa}\ng_gaba={g_gaba}\nv_rec_rest={v_rec_rest}\npsp={psp}" + ) + plt.plot(v_rec) + plt.savefig( + f"tmp_psp_{population.name}_{int(g_ampa*1000)}_{int(g_gaba*1000)}.png" + ) + plt.close("all") + + return psp + class ModelConfigurator: def __init__( @@ -860,7 +940,7 @@ def __init__( clear_cache: bool = False, log_file: str | None = None, ): - ### initialize logger + ### initialize logger TODO test if no Logger works sf.Logger(log_file=log_file) ### analyze the given model, create model before analyzing, then clear ANNarchy self._analyze_model = AnalyzeModel(model=model) @@ -885,8 +965,6 @@ def __init__( analyze_model=self._analyze_model, do_not_config_list=do_not_config_list, ) - ### define the simulator with all simulations for the single neuron networks - Simulator() ### get the resting potential and corresponding I_hold for each population using ### the voltage clamp networks global prepare_psp @@ -895,17 +973,10 @@ def __init__( do_not_config_list=do_not_config_list, do_plot=False, ) - ### tmp test - for pop_name in model.populations: - if pop_name in do_not_config_list: - continue - print(f"pop_name: {pop_name}") - print(f"I_app_hold: {prepare_psp.get(pop_name=pop_name).I_app_hold}") - print(f"v_rest: {prepare_psp.get(pop_name=pop_name).v_rest}") - - quit() # TODO seems to work until here, continue here - self._max_syn = GetMaxSyn() + self._max_syn = GetMaxSyn( + model=model, 
do_not_config_list=do_not_config_list, max_psp=max_psp + ) self._weight_templates = GetWeightTemplates() @@ -1400,7 +1471,6 @@ def __init__( ### skip populations which should not be configured if pop_name in do_not_config_list: continue - self._prepare_psp_dict[pop_name] = {} ### find initial v_rest using the voltage clamp network sf.Logger().log( f"search v_rest with y(X) = delta_v_2000(v=X) using grid search for pop {pop_name}" @@ -1446,6 +1516,7 @@ def __init__( I_app_hold=I_app_hold, ) ### store the prepare PSP information + self._prepare_psp_dict[pop_name] = {} self._prepare_psp_dict[pop_name]["v_rest"] = v_rest self._prepare_psp_dict[pop_name]["I_app_hold"] = I_app_hold self._prepare_psp_dict[pop_name]["psp_init_sampler"] = psp_init_sampler @@ -1535,7 +1606,7 @@ def _find_I_app_hold( ### negative current initially reduces v then v climbs back up --> ### get_v_change_after_v_rest checks how much v changes during second half of ### 2000 ms simulation - y_=lambda X_val: -self._get_v_change_after_v_rest( + y=lambda X_val: -self._get_v_change_after_v_rest( pop_name=pop_name, variables_v_rest=variables_v_rest, ### incremental_continuous_bound_search only uses positive values for X and @@ -1694,8 +1765,99 @@ def _get_v_change_after_v_rest( class GetMaxSyn: - def __init__(self): - pass + def __init__( + self, model: CompNeuroModel, do_not_config_list: list[str], max_psp: float + ): + self._max_syn_dict = {} + ### loop over all populations + for pop_name in model.populations: + ### skip populations which should not be configured + if pop_name in do_not_config_list: + continue + + ### get max g_gabe + g_gaba_max = self._get_max_g_gaba(pop_name=pop_name, max_psp=max_psp) + + ### get max g_ampa + g_ampa_max = self._get_max_g_ampa(pop_name=pop_name, g_gaba_max=g_gaba_max) + print(f"g_gaba_max: {g_gaba_max}, g_ampa_max: {g_ampa_max}") + quit() ### TODO seems to work until here, continue here + ### get max I_app + I_app_max = self._get_max_I_app(pop_name=pop_name) + + ### 
store the maximal synaptic input in dict + self._max_syn_dict[pop_name] = {} + self._max_syn_dict[pop_name]["g_gaba"] = g_gaba_max + self._max_syn_dict[pop_name]["g_ampa"] = g_ampa_max + self._max_syn_dict[pop_name]["I_app"] = I_app_max + + def get(self, pop_name: str): + """ + Return the maximal synaptic input for the given population. + + Args: + pop_name (str): + Name of the population + + Returns: + ReturnMaxSyn: + Maximal synaptic input for the given population with Attributes: g_gaba, + g_ampa, I_app + """ + return self.ReturnMaxSyn( + g_gaba=self._max_syn_dict[pop_name]["g_gaba"], + g_ampa=self._max_syn_dict[pop_name]["g_ampa"], + I_app=self._max_syn_dict[pop_name]["I_app"], + ) + + class ReturnMaxSyn: + def __init__(self, g_gaba: float, g_ampa: float, I_app: float): + self.g_gaba = g_gaba + self.g_ampa = g_ampa + self.I_app = I_app + + def _get_max_g_gaba(self, pop_name: str, max_psp: float): + ### find g_gaba max using max IPSP + sf.Logger().log("search g_gaba_max with y(X) = PSP(g_ampa=0, g_gaba=X)") + return ef.find_x_bound( + y=lambda X_val: Simulator().get_ipsp( + pop_name=pop_name, + g_gaba=X_val, + ), + x0=0, + y_bound=max_psp, + tolerance=0.005, + ) + + def _get_max_g_ampa(self, pop_name: str, g_gaba_max: float): + ### find g_ampa max by "overriding" IPSP of g_gaba max + sf.Logger().log( + f"search g_ampa_max with y(X) = PSP(g_ampa=X, g_gaba=g_gaba_max={g_gaba_max})" + ) + + def func(x): + ipsp = Simulator().get_ipsp( + pop_name=pop_name, + g_gaba=g_gaba_max, + g_ampa=x, + ) + ### find_x_bound tries to increase x to increase y, therefore, use + ### -ipsp, since initially the ipsp is maximal, thus, y is negative and by + ### increasing x it should increase to 0 + y = -ipsp + ### next problem: find_x_bound expects a function which increases beyond the + ### bound but -ipsp can maximum reach 0, thus, use -ipsp + x + if y >= 0: + return y + x + else: + return y + + return ef.find_x_bound( + y=func, + x0=0, + y_bound=0, + tolerance=0.005, + ) class 
GetWeightTemplates: diff --git a/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py b/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py index 9c2aef9..24ea2d5 100644 --- a/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py +++ b/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py @@ -22,6 +22,9 @@ from CompNeuroPy.examples.model_configurator.model_configurator_cnp import ( ModelConfigurator, ) +from CompNeuroPy.examples.model_configurator.model_configurator_cnp_old import ( + model_configurator, +) import matplotlib.pyplot as plt import numpy as np @@ -371,7 +374,7 @@ def BGM_part_function(params): ### obtain the maximum synaptic loads for the populations and the ### maximum weights of their afferent projections - model_conf.get_max_syn(cache=True, clear=False) + model_conf.get_max_syn(cache=False, clear=False) ### now either set weights directly weights = { diff --git a/src/CompNeuroPy/extra_functions.py b/src/CompNeuroPy/extra_functions.py index ddfded0..234f8b8 100644 --- a/src/CompNeuroPy/extra_functions.py +++ b/src/CompNeuroPy/extra_functions.py @@ -2630,7 +2630,7 @@ def reset(self): def find_x_bound( - y_: Callable[[float], float], + y: Callable[[float], float], x0: float, y_bound: float, tolerance: float = 1e-5, @@ -2659,12 +2659,6 @@ def find_x_bound( x_bound (float): The x value such that y(x) is closest to y_bound within the tolerance. 
""" - - def y(x): - x_y = y_(x) - sf.Logger().log(f"x: {x}; y: {x_y}") - return x_y - # Catch invalid bound type if bound_type not in ["equal", "greater", "less"]: raise ValueError("bound_type should be 'equal', 'greater', or 'less'.") @@ -2672,10 +2666,10 @@ def y(x): # Check if the initial value y(x0) is already y_bound y0 = y(x0) if np.isclose(y0, y_bound, atol=tolerance): - print("Warning: The initial value is already equal to y_bound.") + sf.Logger().log("Warning: The initial value is already equal to y_bound.") return x0, x0 - sf.Logger().log(f"x0: {x0}, y0: {y0}, y_bound: {y_bound}") + sf.Logger().log(f"x0: {x0}, y0: {y0}, y_bound: {bound_type} {y_bound}") # Define a helper function to find x such that y(x) - y_bound = 0 def func(x): @@ -2684,7 +2678,7 @@ def func(x): # Exponential search to find an interval [a, b] where y(a) < y_bound < y(b) a = x0 b = x0 + 1 - while y(b) < y_bound: + while func(b) < 0: a = b b *= 2 if b > 1e6: # Avoid infinite loop in case y_bound is not reachable @@ -2695,21 +2689,30 @@ def func(x): ) sf.Logger().log(f"a: {a}, b: {b}") - # Check if the maximum value is less than y_bound - res = minimize_scalar(lambda x: -y(x), bounds=(x0, b), method="bounded") - y_max = -res.fun - if y_max < y_bound: - raise ValueError( - "y_bound cannot be reached, the function saturates below y_bound." - ) - sf.Logger().log(f"y_max: {y_max}") - # Use brentq to find the root within the interval [a, b] - x_root: float = brentq(func, a, b, xtol=tolerance, full_output=False) - sf.Logger().log(f"x_root: {x_root}") + x_root: float = brentq(func, a, b, full_output=False) + y_root = y(x_root) + sf.Logger().log(f"y(x_root={x_root}) = {y_root}") + + # check if y(x_root) is not within the tolerance of y_bound + if not np.isclose(y_root, y_bound, atol=tolerance): + sf.Logger().log( + f"Warning: y(x_root) is not within the tolerance of y_bound (y(x_root)={y_root}, y_bound={y_bound}, tolerance={tolerance})!" 
+ ) if bound_type == "equal": # Return the x value such that y(x) = y_bound + sf.Logger().log(f"Returning y(x={x_root}) = {y_root}") + return x_root + + if bound_type == "greater" and y_root > y_bound: + # Return the x value such that y(x) > y_bound + sf.Logger().log(f"Returning y(x={x_root}) = {y_root}") + return x_root + + if bound_type == "less" and y_root < y_bound: + # Return the x value such that y(x) < y_bound + sf.Logger().log(f"Returning y(x={x_root}) = {y_root}") return x_root # Calculate the gradient at x_root @@ -2732,6 +2735,7 @@ def func(x): y_val = y(x) if y_val - y_val_prev < tolerance / 10: epsilon *= 2 + sf.Logger().log(f"Returning y(x={x}) = {y_val}") return x elif bound_type == "less": # Find the x value such that y(x) < y_bound (thus maybe decrease x) @@ -2746,4 +2750,5 @@ def func(x): y_val = y(x) if y_val_prev - y_val < tolerance / 10: epsilon *= 2 + sf.Logger().log(f"Returning y(x={x}) = {y_val}") return x diff --git a/src/CompNeuroPy/system_functions.py b/src/CompNeuroPy/system_functions.py index eb89abf..ce4121e 100644 --- a/src/CompNeuroPy/system_functions.py +++ b/src/CompNeuroPy/system_functions.py @@ -546,7 +546,7 @@ class Logger: _instance = None _log_file: str | None - _caller_name = "" + _call_stack = "" def __new__(cls, log_file: str | None = None): """ @@ -571,17 +571,55 @@ def log(self, txt): txt (str): Text to be logged """ - if self._log_file is None: - return - caller_frame = inspect.currentframe().f_back - caller_name = caller_frame.f_code.co_name - if caller_name == self._caller_name: + _, call_stack = self.trace_calls() + + if call_stack == self._call_stack: txt = f"{textwrap.indent(str(txt), ' ')}" else: - txt = f"[{caller_name}]:\n{textwrap.indent(str(txt), ' ')}" + txt = f"\n[{call_stack}]:\n{textwrap.indent(str(txt), ' ')}" - self._caller_name = caller_name + self._call_stack = call_stack with open(self._log_file, "a") as f: print(txt, file=f) + + def trace_calls(self): + # Get the call stack + stack = inspect.stack() + 
+ call_stack = [] + for frame in stack: + # Get the function name + function_name = frame.function + # Check if it's a method of a class by looking for 'self' or 'cls' + locals = frame.frame.f_locals + if "self" in locals: + class_name = locals["self"].__class__.__name__ + full_name = f"{class_name}.{function_name}" + elif "cls" in locals: + class_name = locals["cls"].__name__ + full_name = f"{class_name}.{function_name}" + else: + # If function_name is '', replace it with the module name + if function_name == "": + module_name = frame.frame.f_globals["__name__"] + full_name = f"{module_name}" + else: + full_name = function_name + call_stack.append(full_name) + + # Remove the first two elements of the call stack, which are the functions of + # the Logger class + call_stack = call_stack[2:] + + # Get the name of the current function + current_function_name = call_stack[0] + + # Reverse the call stack to get the order of the calls + call_stack = call_stack[::-1] + + # Convert the call stack to a string + call_stack = " -> ".join(call_stack) + + return current_function_name, call_stack From ce3aacc815947a887af9e2b8071229cc7436712d Mon Sep 17 00:00:00 2001 From: olmai Date: Mon, 10 Jun 2024 17:12:06 +0200 Subject: [PATCH 33/39] model configurator: continued new structure --- .../model_configurator_cnp.py | 2524 +++++++++++------ .../model_configurator_user.py | 49 +- 2 files changed, 1665 insertions(+), 908 deletions(-) diff --git a/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py b/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py index b3ad883..dc7f1f5 100644 --- a/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py +++ b/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py @@ -1,7 +1,10 @@ from CompNeuroPy.generate_model import CompNeuroModel +from CompNeuroPy.experiment import CompNeuroExp +from CompNeuroPy.monitors import CompNeuroMonitors from CompNeuroPy import model_functions as mf from 
CompNeuroPy import extra_functions as ef from CompNeuroPy import system_functions as sf +from CompNeuroPy import analysis_functions as af from ANNarchy import ( Population, @@ -23,6 +26,7 @@ populations, Binomial, CurrentInjection, + raster_plot, ) from ANNarchy.core import ConnectorMethods @@ -30,7 +34,7 @@ # from ANNarchy.core.Global import _network import numpy as np from scipy.interpolate import interp1d, interpn -from scipy.signal import find_peaks, argrelmin +from scipy.signal import find_peaks, argrelmin, argrelextrema import matplotlib.pyplot as plt import inspect import textwrap @@ -48,6 +52,7 @@ from ANNarchy.extensions.bold import BoldMonitor from sklearn.linear_model import LinearRegression import sympy as sp +from scipy.optimize import minimize, Bounds class ArrSampler: @@ -438,6 +443,51 @@ def __init__(self, single_net_dict): self.monitor: Monitor = single_net_dict["monitor"] self.init_sampler: ArrSampler = single_net_dict["init_sampler"] + def init_sampler(self, model: CompNeuroModel, do_not_config_list: list[str]): + """ + Return the init samplers for all populations of the normal mode. All samplers + are returned in an object with a get method to get the sampler for a specific + population. + + Args: + model (CompNeuroModel): + Model to be analyzed + do_not_config_list (list[str]): + List of population names which should not be configured + + Returns: + AllSampler: + Object with a get method to get the init sampler for a specific + population + """ + init_sampler_dict = {} + for pop_name in model.populations: + if pop_name in do_not_config_list: + continue + init_sampler_dict[pop_name] = self._single_net_dict["normal"][pop_name][ + "init_sampler" + ] + return self.AllSampler(init_sampler_dict) + + class AllSampler: + def __init__(self, init_sampler_dict: dict[str, ArrSampler]): + self.init_sampler_dict = init_sampler_dict + + def get(self, pop_name: str): + """ + Get the init sampler for the given population. 
+ + Args: + pop_name (str): + Name of the population + + Returns: + sampler (ArrSampler): + Init sampler for the given population + """ + sampler: ArrSampler = self.init_sampler_dict[pop_name] + return sampler + def _create_single_neuron_networks( self, model: CompNeuroModel, @@ -705,1069 +755,1487 @@ def _adjust_neuron_model( ) -class Simulator: - _instance = None - - def __new__(cls): - if cls._instance is None: - cls._instance = super(Simulator, cls).__new__(cls) - return cls._instance +class PreparePSP: + """ + Find v_rest, corresponding I_hold (in case of self-active neurons) and an + init_sampler to initialize the neuron model for the PSP calculation for each + population. + """ - def get_v_clamp_2000( + def __init__( self, - pop_name: str, - v: float | None = None, - I_app: float | None = None, - ) -> float: + model: CompNeuroModel, + single_nets: CreateSingleNeuronNetworks, + do_not_config_list: list[str], + simulator: "Simulator", + do_plot: bool, + ): """ - Simulates the v_clamp single neuron network of the given pop_name for 2000 ms - and returns the v_clamp_rec value of the single neuron after 2000 ms. The - returned values is "dv/dt". Therefore, to get the hypothetical change of v for a - single time step multiply it with dt! 
- Args: - pop_name (str): - Name of the population - v (float): - Membrane potential (does not change over time due to voltage clamp) - I_app (float): - Applied current - - Returns: - v_clamp_rec (float): - v_clamp_rec value of the single neuron after 2000 ms + model (CompNeuroModel): + Model to be prepared + do_not_config_list (list[str]): + List of populations which should not be configured + do_plot (bool): + If True, plot the membrane potential """ - ### get the network, population, init_sampler - net = single_nets.single_net(pop_name=pop_name, mode="v_clamp").net - population = single_nets.single_net( - pop_name=pop_name, mode="v_clamp" - ).population - init_sampler = single_nets.single_net( - pop_name=pop_name, mode="v_clamp" - ).init_sampler - ### reset network - net.reset() - net.set_seed(0) - ### set the initial variables of the neuron model - if init_sampler is not None: - init_sampler.set_init_variables(population) - ### set v and I_app - if v is not None: - population.v = v - if I_app is not None: - population.I_app = I_app - ### simulate 2000 ms - net.simulate(2000) - ### return the v_clamp_rec value of the single neuron after 2000 ms - return population.v_clamp_rec[0] + self._single_nets = single_nets + self._prepare_psp_dict = {} + self._simulator = simulator + ### loop over all populations + for pop_name in model.populations: + ### skip populations which should not be configured + if pop_name in do_not_config_list: + continue + ### find initial v_rest using the voltage clamp network + sf.Logger().log( + f"[{pop_name}]: search v_rest with y(X) = delta_v_2000(v=X) using grid search" + ) + v_rest, delta_v_v_rest, variables_v_rest = self._find_v_rest_initial( + pop_name=pop_name, + do_plot=do_plot, + ) + sf.Logger().log( + f"[{pop_name}]: found v_rest={v_rest} with delta_v_2000(v=v_rest)={delta_v_v_rest}" + ) + ### check if v is constant after setting v to v_rest by simulating the normal + ### single neuron network for 2000 ms + v_rest_is_constant, 
v_rest_arr = self._get_v_rest_is_const( + pop_name=pop_name, + variables_v_rest=variables_v_rest, + do_plot=do_plot, + ) - def get_v_2000( - self, pop_name, initial_variables, I_app=None, do_plot=True - ) -> np.ndarray: + if v_rest_is_constant: + ### v_rest found (last v value of the previous simulation), no + ### I_app_hold needed + v_rest = v_rest_arr[-1] + I_app_hold = 0 + else: + ### there is no resting_state i.e. neuron is self-active --> find + ### smallest negative I_app to silence neuron + sf.Logger().log( + f"[{pop_name}]: neuron seems to be self-active --> find smallest I_app to silence the neuron" + ) + v_rest, I_app_hold = self._find_I_app_hold( + pop_name=pop_name, + variables_v_rest=variables_v_rest, + ) + sf.Logger().log( + f"[{pop_name}]: final values: I_app_hold = {I_app_hold}, v_rest = {v_rest}" + ) + + ### get the sampler for the initial variables + psp_init_sampler = self._get_init_neuron_variables_for_psp( + pop_name=pop_name, + v_rest=v_rest, + I_app_hold=I_app_hold, + ) + ### store the prepare PSP information + self._prepare_psp_dict[pop_name] = {} + self._prepare_psp_dict[pop_name]["v_rest"] = v_rest + self._prepare_psp_dict[pop_name]["I_app_hold"] = I_app_hold + self._prepare_psp_dict[pop_name]["psp_init_sampler"] = psp_init_sampler + + def get(self, pop_name: str): """ - Simulate normal single neuron 2000 ms and return v for this duration. + Return the prepare PSP information for the given population. 
Args: pop_name (str): Name of the population - initial_variables (dict): - Initial variables of the neuron model - I_app (float): - Applied current - do_plot (bool): - If True, plot the membrane potential Returns: - v_arr (np.array): - Membrane potential for the 2000 ms simulation with shape: (time_steps,) + ReturnPreparePSP: + Prepare PSP information for the given population with Attributes: v_rest, + I_app_hold, psp_init_sampler """ - ### get the network, population, monitor - net = single_nets.single_net(pop_name=pop_name, mode="normal").net - population = single_nets.single_net(pop_name=pop_name, mode="normal").population - monitor = single_nets.single_net(pop_name=pop_name, mode="normal").monitor - ### reset network - net.reset() - net.set_seed(0) - ### set the initial variables of the neuron model - for var_name, var_val in initial_variables.items(): - if var_name in population.variables: - setattr(population, var_name, var_val) - ### set I_app - if I_app is not None: - population.I_app = I_app - ### simulate - net.simulate(2000) - v_arr = monitor.get("v")[:, 0] - - if do_plot: - plt.figure() - plt.title(f"{population.I_app}") - plt.plot(v_arr) - plt.savefig(f"tmp_v_rest_{pop_name}.png") - plt.close("all") + return self.ReturnPreparePSP( + v_rest=self._prepare_psp_dict[pop_name]["v_rest"], + I_app_hold=self._prepare_psp_dict[pop_name]["I_app_hold"], + psp_init_sampler=self._prepare_psp_dict[pop_name]["psp_init_sampler"], + ) - return v_arr + class ReturnPreparePSP: + def __init__( + self, v_rest: float, I_app_hold: float, psp_init_sampler: ArrSampler + ): + self.v_rest = v_rest + self.I_app_hold = I_app_hold + self.psp_init_sampler = psp_init_sampler - def get_v_psp(self, v_rest: float, I_app_hold: float, pop_name: str) -> np.ndarray: + def _get_init_neuron_variables_for_psp( + self, pop_name: str, v_rest: float, I_app_hold: float + ): """ - Simulate the single neuron network of the given pop_name for 5000 ms and return - the variables of the neuron model 
after 5000 ms. + Get the initial variables of the neuron model for the PSP calculation. Args: - v_rest (float): - Resting potential - I_app_hold (float): - Applied current to hold the resting potential pop_name (str): Name of the population + v_rest (float): + Resting membrane potential + I_app_hold (float): + Current which silences the neuron Returns: - var_arr (np.array): - Variables of the neuron model after 5000 ms with shape: (1, n_vars) + sampler (ArrSampler): + Sampler with the initial variables of the neuron model """ - - ### get the network, population, monitor - net = single_nets.single_net(pop_name=pop_name, mode="normal").net - population = single_nets.single_net(pop_name=pop_name, mode="normal").population - ### reset network - net.reset() - net.set_seed(0) - ### set the initial variables of the neuron model - population.v = v_rest - population.I_app = I_app_hold - ### simulate - net.simulate(5000) - ### get the variables of the neuron after 5000 ms in the shape (1, n_vars) - var_name_list = list(population.variables) - var_arr = np.zeros((1, len(var_name_list))) - get_arr = np.array( - [getattr(population, var_name) for var_name in population.variables] + ### get the names of the variables of the neuron model + var_name_list = self._single_nets.single_net( + pop_name=pop_name, mode="normal" + ).population.variables + ### get the variables of the neuron model after 5000 ms + var_arr = self._simulator.get_v_psp( + v_rest=v_rest, I_app_hold=I_app_hold, pop_name=pop_name ) - var_arr[0, :] = get_arr[:, 0] - return var_arr + ### create a sampler with this single data sample + sampler = ArrSampler(arr=var_arr, var_name_list=var_name_list) + return sampler - def get_ipsp( + def _find_I_app_hold( self, pop_name: str, - g_ampa: float = 0, - g_gaba: float = 0, - do_plot: bool = False, + variables_v_rest: dict, ): """ - Simulate the single neuron network of the given pop_name for max 5000 ms. 
The - neuron is hold at the resting potential by setting the applied current to - I_app_hold. Then the conductances g_ampa and g_gaba are applied (simulating a - single incoming ampa/gaba spike). The maximum of the (negative) difference of - the membrane potential and the resting potential is returned as the IPSP. + Find the current which silences the neuron. Args: pop_name (str): Name of the population - g_ampa (float): - Conductance of the ampa synapse - g_gaba (float): - Conductance of the gaba synapse - do_plot (bool): - If True, plot the membrane potential + variables_v_rest (dict): + Stady state variables of the neuron during setting v_rest as membrane + potential Returns: - psp (float): - Maximum of the (negative) difference of the membrane potential and the - resting potential + v_rest (float): + Resting membrane potential + I_app_hold (float): + Current which silences the neuron """ - ### get the network, population, monitor from single nets - net = single_nets.single_net(pop_name=pop_name, mode="normal").net - population = single_nets.single_net(pop_name=pop_name, mode="normal").population - monitor = single_nets.single_net(pop_name=pop_name, mode="normal").monitor - ### get init_sampler, I_app_hold from prepare_psp - init_sampler = prepare_psp.get(pop_name=pop_name).psp_init_sampler - I_app_hold = prepare_psp.get(pop_name=pop_name).I_app_hold - ### reset network - net.reset() - net.set_seed(0) - ### set the initial variables of the neuron model - if init_sampler is not None: - init_sampler.set_init_variables(population) - ### set I_app (I_app_hold) to hold the resting potential - population.I_app = I_app_hold - ### simulate 50 ms initial duration - net.simulate(50) - ### get the current v and set it as v_psp_thresh for the population's stop - ### condition - v_rec_rest = population.v[0] - population.v_psp_thresh = v_rec_rest - ### apply given conductances --> changes v, causes psp - population.g_ampa = g_ampa - population.g_gaba = g_gaba - ### simulate 
until v is near v_rec_rest again or until 5000 ms - net.simulate_until(max_duration=5000, population=population) - ### get v and spike dict to calculate psp - v_rec = monitor.get("v")[:, 0] - spike_dict = monitor.get("spike") - ### if neuron spiked only check psps until spike time, otherwise until last - ### (current) time step - spike_timestep_list = spike_dict[0] + [net.get_current_step()] - end_timestep = int(round(min(spike_timestep_list), 0)) - ### calculate psp, maximum of negative difference of v_rec and v_rec_rest - psp = float( - np.absolute(np.clip(v_rec[:end_timestep] - v_rec_rest, None, 0)).max() + ### find I_app_hold with find_x_bound + sf.Logger().log( + f"[{pop_name}]: search I_app_hold with y(X) = CHANGE_OF_V(I_app=X)" + ) + + I_app_hold = -ef.find_x_bound( + ### negative current initially reduces v then v climbs back up --> + ### get_v_change_after_v_rest checks how much v changes during second half of + ### 2000 ms simulation + y=lambda X_val: -self._get_v_change_after_v_rest( + pop_name=pop_name, + variables_v_rest=variables_v_rest, + ### find_x_bound only uses positive values for X and + ### increases them, expecting to increase y, therefore use -X for I_app + ### (increasing X will "increase" negative current) and negative sign for + ### the returned value (for no current input the change is positive, this + ### should decrease to zero, with negative sign: for no current input the + ### change is negative, this should increase above zero) + I_app=-X_val, + ), + ### y is initially negative and should increase above 0, therefore search for + ### y_bound=0 with bound_type="greater" + x0=0, + y_bound=0, + tolerance=0.01, + bound_type="greater", + ) + # y_bound = 0 + # y: -10, -2, +0.1 + # x: 0, 5, 7 + # bound type greater should find value which is slightly larger than 0 TODO test this + ### again simulate the neuron with the obtained I_app_hold to get the new v_rest + v_rest_arr = self._simulator.get_v_2000( + pop_name=pop_name, + 
initial_variables=variables_v_rest, + I_app=I_app_hold, + do_plot=False, ) + v_rest = v_rest_arr[-1] + return v_rest, I_app_hold + + def _find_v_rest_initial( + self, + pop_name: str, + do_plot: bool, + ): + """ + Find the initial v_rest with the voltage clamp single neuron network for the + given population. Furthermore, get the change of v durign setting v_rest and the + stady state variables of the neuron (at the end of the simulation). + + Args: + pop_name (str): + Name of the population + do_plot (bool): + True if plots should be created, False otherwise + Returns: + v_rest (float): + Resting membrane potential + detla_v_v_rest (float): + Change of the membrane potential during setting v_rest as membrane + potential + variables_v_rest (dict): + Stady state variables of the neuron during setting v_rest as membrane + potential + """ + ### find v where dv/dt is minimal with voltage clamp network (best = 0, it can + ### only be >= 0) + v_arr = np.linspace(-90, -20, 200) + v_clamp_arr = np.array( + [ + self._simulator.get_v_clamp_2000(pop_name=pop_name, v=v_val) + for v_val in v_arr + ] + ) + v_clamp_min_idx = argrelmin(v_clamp_arr)[0] + v_rest = np.min(v_arr[v_clamp_min_idx]) if do_plot: plt.figure() - plt.title( - f"g_ampa={g_ampa}\ng_gaba={g_gaba}\nv_rec_rest={v_rec_rest}\npsp={psp}" - ) - plt.plot(v_rec) - plt.savefig( - f"tmp_psp_{population.name}_{int(g_ampa*1000)}_{int(g_gaba*1000)}.png" - ) + plt.plot(v_arr, v_clamp_arr) + plt.axvline(v_rest, color="k") + plt.axhline(0, color="k", ls="dashed") + plt.savefig(f"v_clamp_{pop_name}.png") plt.close("all") - return psp + ### do again the simulation only with the obtained v_rest to get the detla_v for + ### v_rest + detla_v_v_rest = ( + self._simulator.get_v_clamp_2000(pop_name=pop_name, v=v_rest) * dt() + ) + population = self._single_nets.single_net( + pop_name=pop_name, mode="v_clamp" + ).population + ### and the stady state variables of the neuron + variables_v_rest = { + var_name: getattr(population, 
var_name) for var_name in population.variables + } + return v_rest, detla_v_v_rest, variables_v_rest + def _get_v_rest_is_const(self, pop_name: str, variables_v_rest: dict, do_plot=bool): + """ + Check if the membrane potential is constant after setting it to v_rest. -class ModelConfigurator: - def __init__( - self, - model: CompNeuroModel, - target_firing_rate_dict: dict, - max_psp: float = 10.0, - do_not_config_list: list[str] = [], - print_guide: bool = False, - I_app_variable: str = "I_app", - cache: bool = False, - clear_cache: bool = False, - log_file: str | None = None, - ): - ### initialize logger TODO test if no Logger works - sf.Logger(log_file=log_file) - ### analyze the given model, create model before analyzing, then clear ANNarchy - self._analyze_model = AnalyzeModel(model=model) - ### create the CompNeuroModel object for the reduced model (the model itself is - ### not created yet) - self._model_reduced = CreateReducedModel( - model=model, - analyze_model=self._analyze_model, - reduced_size=100, - do_create=False, - do_compile=False, - verbose=True, + Args: + pop_name (str): + Name of the population + variables_v_rest (dict): + Stady state variables of the neuron during setting v_rest as membrane + potential, used as initial variables for the simulation + do_plot (bool): + True if plots should be created, False otherwise + + Returns: + v_rest_is_constant (bool): + True if the membrane potential is constant, False otherwise + v_rest_arr (np.array): + Membrane potential for the 2000 ms simulation with shape: (time_steps,) + """ + ### check if the neuron stays at v_rest with normal neuron + v_rest_arr = self._simulator.get_v_2000( + pop_name=pop_name, + initial_variables=variables_v_rest, + I_app=0, + do_plot=do_plot, ) - ### create the single neuron networks (networks are compiled and ready to be - ### simulated), normal model for searching for max conductances, max input - ### current, resting firing rate; voltage clamp model for preparing the PSP - 
### simulationssearching, i.e., for resting potential and corresponding input - ### current I_hold (for self-active neurons) - global single_nets - single_nets = CreateSingleNeuronNetworks( - model=model, - analyze_model=self._analyze_model, - do_not_config_list=do_not_config_list, + v_rest_arr_is_const = ( + np.std(v_rest_arr) <= np.mean(np.absolute(v_rest_arr)) / 1000 ) - ### get the resting potential and corresponding I_hold for each population using - ### the voltage clamp networks - global prepare_psp - prepare_psp = PreparePSP( - model=model, - do_not_config_list=do_not_config_list, + return v_rest_arr_is_const, v_rest_arr + + def _get_v_change_after_v_rest( + self, pop_name: str, variables_v_rest: dict, I_app: float + ): + """ + Check how much the membrane potential changes after setting it to v_rest. + + Args: + pop_name (str): + Name of the population + variables_v_rest (dict): + Stady state variables of the neuron during setting v_rest as membrane + potential, used as initial variables for the simulation + do_plot (bool): + True if plots should be created, False otherwise + + Returns: + change_after_v_rest (np.array): + Change of the membrane potential after setting it to v_rest + """ + ### simulate 2000 ms after setting v_rest + v_rest_arr = self._simulator.get_v_2000( + pop_name=pop_name, + initial_variables=variables_v_rest, + I_app=I_app, do_plot=False, ) - - self._max_syn = GetMaxSyn( - model=model, do_not_config_list=do_not_config_list, max_psp=max_psp + ### check how much v changes during the second half + ### std(v) - mean(v)/1000 should be close to 0, the larger the value the more v + ### changes + change_after_v_rest = ( + np.std(v_rest_arr[len(v_rest_arr) // 2 :], axis=0) + - np.mean(np.absolute(v_rest_arr[len(v_rest_arr) // 2 :]), axis=0) / 1000 ) - self._weight_templates = GetWeightTemplates() + return change_after_v_rest -class CreateReducedModel: +class Simulator: """ - Class to create a reduced model from the original model. 
It is accessable via the - attribute model_reduced. - - Attributes: - model_reduced (CompNeuroModel): - Reduced model, created but not compiled + Class with simulations for the single neuron networks. """ def __init__( self, - model: CompNeuroModel, - analyze_model: AnalyzeModel, - reduced_size: int, - do_create: bool = False, - do_compile: bool = False, - verbose: bool = False, - ) -> None: + single_nets: CreateSingleNeuronNetworks, + figure_folder: str, + prepare_psp: PreparePSP | None = None, + ): """ - Prepare model for reduction. - Args: - model (CompNeuroModel): - Model to be reduced - reduced_size (int): - Size of the reduced populations + single_nets (CreateSingleNeuronNetworks): + Single neuron networks for normal and voltage clamp mode + figure_folder (str): + Folder where the figures should be saved + prepare_psp (PreparePSP): + Prepare PSP information """ - ### set the attributes - self._model = model - self._analyze_model = analyze_model - self._reduced_size = reduced_size - self._verbose = verbose - ### recreate model with reduced populations and projections - self.model_reduced = CompNeuroModel( - model_creation_function=self.recreate_model, - name=f"{model.name}_reduced", - description=f"{model.description}\nWith reduced populations and projections.", - do_create=do_create, - do_compile=do_compile, - compile_folder_name=f"{model.compile_folder_name}_reduced", - ) + self._single_nets = single_nets + self._prepare_psp = prepare_psp + self._figure_folder = figure_folder - def recreate_model(self): - """ - Recreates the model with reduced populations and projections. 
+ def get_v_clamp_2000( + self, + pop_name: str, + v: float | None = None, + I_app: float | None = None, + ) -> float: """ - ### 1st for each population create a reduced population - for pop_name in self._model.populations: - self.create_reduced_pop(pop_name) - ### 2nd for each population which is a presynaptic population, create a spikes collecting aux population - for pop_name in self._model.populations: - self.create_spike_collecting_aux_pop(pop_name) - ## 3rd for each population which has afferents create a population for incoming spikes for each target type - for pop_name in self._model.populations: - self.create_conductance_aux_pop(pop_name, target="ampa") - self.create_conductance_aux_pop(pop_name, target="gaba") + Simulates the v_clamp single neuron network of the given pop_name for 2000 ms + and returns the v_clamp_rec value of the single neuron after 2000 ms. The + returned values is "dv/dt". Therefore, to get the hypothetical change of v for a + single time step multiply it with dt! - def create_reduced_pop(self, pop_name: str): + Args: + pop_name (str): + Name of the population + v (float): + Membrane potential (does not change over time due to voltage clamp) + I_app (float): + Applied current + + Returns: + v_clamp_rec (float): + v_clamp_rec value of the single neuron after 2000 ms """ - Create a reduced population from the given population. 
+ ### get the network, population, init_sampler + net = self._single_nets.single_net(pop_name=pop_name, mode="v_clamp").net + population = self._single_nets.single_net( + pop_name=pop_name, mode="v_clamp" + ).population + init_sampler = self._single_nets.single_net( + pop_name=pop_name, mode="v_clamp" + ).init_sampler + ### reset network + net.reset() + net.set_seed(0) + ### set the initial variables of the neuron model + if init_sampler is not None: + init_sampler.set_init_variables(population) + ### set v and I_app + if v is not None: + population.v = v + if I_app is not None: + population.I_app = I_app + ### simulate 2000 ms + net.simulate(2000) + ### return the v_clamp_rec value of the single neuron after 2000 ms + return population.v_clamp_rec[0] + + def get_v_2000( + self, pop_name, initial_variables, I_app=None, do_plot=False + ) -> np.ndarray: + """ + Simulate normal single neuron 2000 ms and return v for this duration. Args: pop_name (str): - Name of the population to be reduced - """ - if self._verbose: - print(f"create_reduced_pop for {pop_name}") - ### 1st check how the population is connected - _, is_postsynaptic, ampa, gaba = self.how_pop_is_connected(pop_name) + Name of the population + initial_variables (dict): + Initial variables of the neuron model + I_app (float): + Applied current + do_plot (bool): + If True, plot the membrane potential - ### 2nd recreate neuron model - ### get the stored parameters of the __init__ function of the Neuron - neuron_model_init_parameter_dict = ( - self._analyze_model.neuron_model_init_parameter_dict[pop_name].copy() - ) - ### if the population is a postsynaptic population adjust the synaptic - ### conductance equations - if is_postsynaptic: - neuron_model_init_parameter_dict = self.adjust_neuron_model( - neuron_model_init_parameter_dict, ampa=ampa, gaba=gaba - ) - ### create the new neuron model - neuron_model_new = Neuron(**neuron_model_init_parameter_dict) + Returns: + v_arr (np.array): + Membrane potential for 
the 2000 ms simulation with shape: (time_steps,) + """ + ### get the network, population, monitor + net = self._single_nets.single_net(pop_name=pop_name, mode="normal").net + population = self._single_nets.single_net( + pop_name=pop_name, mode="normal" + ).population + monitor = self._single_nets.single_net(pop_name=pop_name, mode="normal").monitor + ### reset network + net.reset() + net.set_seed(0) + ### set the initial variables of the neuron model + for var_name, var_val in initial_variables.items(): + if var_name in population.variables: + setattr(population, var_name, var_val) + ### set I_app + if I_app is not None: + population.I_app = I_app + ### simulate + net.simulate(2000) + v_arr = monitor.get("v")[:, 0] - ### 3rd recreate the population - ### get the stored parameters of the __init__ function of the Population - pop_init_parameter_dict = self._analyze_model.pop_init_parameter_dict[ - pop_name - ].copy() - ### replace the neuron model with the new neuron model - pop_init_parameter_dict["neuron"] = neuron_model_new - ### replace the size with the reduced size (if reduced size is smaller than the - ### original size) - ### TODO add model requirements somewhere, here requirements = geometry = int - pop_init_parameter_dict["geometry"] = min( - [pop_init_parameter_dict["geometry"][0], self._reduced_size] - ) - ### append _reduce to the name - pop_init_parameter_dict["name"] = f"{pop_name}_reduced" - ### create the new population - pop_new = Population(**pop_init_parameter_dict) + if do_plot: + plt.figure() + plt.title(f"{population.I_app}") + plt.plot(v_arr) + plt.savefig(f"tmp_v_rest_{pop_name}.png") + plt.close("all") - ### 4th set the parameters and variables of the population's neurons - ### get the stored parameters and variables - neuron_model_attr_dict = self._analyze_model.neuron_model_attr_dict[pop_name] - ### set the parameters and variables - for attr_name, attr_val in neuron_model_attr_dict.items(): - setattr(pop_new, attr_name, attr_val) + return 
v_arr - def create_spike_collecting_aux_pop(self, pop_name: str): + def get_v_psp(self, v_rest: float, I_app_hold: float, pop_name: str) -> np.ndarray: """ - Create a spike collecting population for the given population. + Simulate the single neuron network of the given pop_name for 5000 ms and return + the variables of the neuron model after 5000 ms. Args: + v_rest (float): + Resting potential + I_app_hold (float): + Applied current to hold the resting potential pop_name (str): - Name of the population for which the spike collecting population should be created + Name of the population + + Returns: + var_arr (np.array): + Variables of the neuron model after 5000 ms with shape: (1, n_vars) """ - ### get all efferent projections - efferent_projection_list = [ - proj_name - for proj_name, pre_post_pop_name_dict in self._analyze_model.pre_post_pop_name_dict.items() - if pre_post_pop_name_dict[0] == pop_name - ] - ### check if pop has efferent projections - if len(efferent_projection_list) == 0: - return - if self._verbose: - print(f"create_spike_collecting_aux_pop for {pop_name}") - ### create the spike collecting population - pop_aux = Population( - 1, - neuron=self.SpikeProbCalcNeuron( - reduced_size=min( - [ - self._analyze_model.pop_init_parameter_dict[pop_name][ - "geometry" - ][0], - self._reduced_size, - ] - ) - ), - name=f"{pop_name}_spike_collecting_aux", - ) - ### create the projection from reduced pop to spike collecting aux pop - proj = Projection( - pre=get_population(pop_name + "_reduced"), - post=pop_aux, - target="ampa", - name=f"proj_{pop_name}_spike_collecting_aux", + + ### get the network, population, monitor + net = self._single_nets.single_net(pop_name=pop_name, mode="normal").net + population = self._single_nets.single_net( + pop_name=pop_name, mode="normal" + ).population + ### reset network + net.reset() + net.set_seed(0) + ### set the initial variables of the neuron model + population.v = v_rest + population.I_app = I_app_hold + ### simulate + 
net.simulate(5000) + ### get the variables of the neuron after 5000 ms in the shape (1, n_vars) + var_name_list = list(population.variables) + var_arr = np.zeros((1, len(var_name_list))) + get_arr = np.array( + [getattr(population, var_name) for var_name in population.variables] ) - proj.connect_all_to_all(weights=1) + var_arr[0, :] = get_arr[:, 0] + return var_arr - def create_conductance_aux_pop(self, pop_name: str, target: str): + def get_ipsp( + self, + pop_name: str, + g_ampa: float = 0, + g_gaba: float = 0, + do_plot: bool = False, + ): """ - Create a conductance calculating population for the given population and target. + Simulate the single neuron network of the given pop_name for max 5000 ms. The + neuron is hold at the resting potential by setting the applied current to + I_app_hold. Then the conductances g_ampa and g_gaba are applied (simulating a + single incoming ampa/gaba spike). The maximum of the (negative) difference of + the membrane potential and the resting potential is returned as the IPSP. 
Args: pop_name (str): - Name of the population for which the conductance calculating population should be created - target (str): - Target type of the conductance calculating population + Name of the population + g_ampa (float): + Conductance of the ampa synapse + g_gaba (float): + Conductance of the gaba synapse + do_plot (bool): + If True, plot the membrane potential + + Returns: + psp (float): + Maximum of the (negative) difference of the membrane potential and the + resting potential """ - ### get all afferent projections - afferent_projection_list = [ - proj_name - for proj_name, pre_post_pop_name_dict in self._analyze_model.pre_post_pop_name_dict.items() - if pre_post_pop_name_dict[1] == pop_name - ] - ### check if pop has afferent projections - if len(afferent_projection_list) == 0: - return - ### get all afferent projections with target type - afferent_target_projection_list = [ - proj_name - for proj_name in afferent_projection_list - if self._analyze_model.proj_init_parameter_dict[proj_name]["target"] - == target - ] - ### check if there are afferent projections with target type - if len(afferent_target_projection_list) == 0: - return - if self._verbose: - print(f"create_conductance_aux_pop for {pop_name} target {target}") - ### get projection informations - ### TODO somewhere add model requirements, here requirements = geometry = int and connection = fixed_probability i.e. 
random (with weights and probability) - projection_dict = { - proj_name: { - "pre_size": self._analyze_model.pop_init_parameter_dict[ - self._analyze_model.pre_post_pop_name_dict[proj_name][0] - ]["geometry"][0], - "connection_prob": self._analyze_model.connector_function_parameter_dict[ - proj_name - ][ - "probability" - ], - "weights": self._analyze_model.connector_function_parameter_dict[ - proj_name - ]["weights"], - "pre_name": self._analyze_model.pre_post_pop_name_dict[proj_name][0], - } - for proj_name in afferent_target_projection_list - } - ### create the conductance calculating population - pop_aux = Population( - self._analyze_model.pop_init_parameter_dict[pop_name]["geometry"][0], - neuron=self.InputCalcNeuron(projection_dict=projection_dict), - name=f"{pop_name}_{target}_aux", - ) - ### set number of synapses parameter for each projection - for proj_name, vals in projection_dict.items(): - number_synapses = Binomial( - n=vals["pre_size"], p=vals["connection_prob"] - ).get_values( - self._analyze_model.pop_init_parameter_dict[pop_name]["geometry"][0] - ) - setattr(pop_aux, f"number_synapses_{proj_name}", number_synapses) - ### create the "current injection" projection from conductance calculating - ### population to the reduced post population - proj = CurrentInjection( - pre=pop_aux, - post=get_population(f"{pop_name}_reduced"), - target=f"incomingaux{target}", - name=f"proj_{pop_name}_{target}_aux", - ) - proj.connect_current() - ### create projection from spike_prob calculating aux neurons of presynaptic - ### populations to conductance calculating aux population - for proj_name, vals in projection_dict.items(): - pre_pop_name = vals["pre_name"] - pre_pop_spike_collecting_aux = get_population( - f"{pre_pop_name}_spike_collecting_aux" - ) - proj = Projection( - pre=pre_pop_spike_collecting_aux, - post=pop_aux, - target=f"spikeprob_{pre_pop_name}", - name=f"{proj_name}_spike_collecting_to_conductance", + ### get the network, population, monitor from 
single nets + net = self._single_nets.single_net(pop_name=pop_name, mode="normal").net + population = self._single_nets.single_net( + pop_name=pop_name, mode="normal" + ).population + monitor = self._single_nets.single_net(pop_name=pop_name, mode="normal").monitor + ### get init_sampler, I_app_hold from prepare_psp + init_sampler = self._prepare_psp.get(pop_name=pop_name).psp_init_sampler + I_app_hold = self._prepare_psp.get(pop_name=pop_name).I_app_hold + ### reset network + net.reset() + net.set_seed(0) + ### set the initial variables of the neuron model + if init_sampler is not None: + init_sampler.set_init_variables(population) + ### set I_app (I_app_hold) to hold the resting potential + population.I_app = I_app_hold + ### simulate 50 ms initial duration + net.simulate(50) + ### get the current v and set it as v_psp_thresh for the population's stop + ### condition + v_rec_rest = population.v[0] + population.v_psp_thresh = v_rec_rest + ### apply given conductances --> changes v, causes psp + population.g_ampa = g_ampa + population.g_gaba = g_gaba + ### simulate until v is near v_rec_rest again or until 5000 ms + net.simulate_until(max_duration=5000, population=population) + ### get v and spike dict to calculate psp + v_rec = monitor.get("v")[:, 0] + spike_dict = monitor.get("spike") + ### if neuron spiked only check psps until spike time, otherwise until last + ### (current) time step + spike_timestep_list = spike_dict[0] + [net.get_current_step()] + end_timestep = int(round(min(spike_timestep_list), 0)) + ### find ipsp + ### 1st calculate difference of v and v_rest + v_diff = v_rec[:end_timestep] - v_rec_rest + ### clip diff between None and zero, only take negative values (ipsp) + v_diff = np.clip(v_diff, None, 0) + ### add a small value to the clipped values, thus only large enough negative + ### values considered as ipsp + v_diff = v_diff + 0.01 + ### get the minimum of the difference as ipsp + psp = np.min(v_diff) + ### multiply with -1 to get the positive 
value of the ipsp + psp = -1 * psp + + if do_plot: + plt.figure() + plt.title( + f"g_ampa={g_ampa}\ng_gaba={g_gaba}\nv_rec_rest={v_rec_rest}\npsp={psp}" ) - proj.connect_all_to_all(weights=1) + plt.plot(v_rec[:end_timestep]) + plt.plot([0, end_timestep], [v_rec_rest, v_rec_rest], "k--") + plt.xlim(0, end_timestep) + plt.tight_layout() + plt.savefig( + f"{self._figure_folder}/tmp_psp_{population.name}_{int(g_ampa*1000)}_{int(g_gaba*1000)}.png" + ) + plt.close("all") - def how_pop_is_connected(self, pop_name): + return psp + + def get_firing_rate( + self, pop_name: str, I_app: float = 0, g_ampa: float = 0, g_gaba: float = 0 + ): """ - Check how a population is connected. If the population is a postsynaptic and/or - presynaptic population, check if it gets ampa and/or gaba input. + Simulate the single neuron network of the given pop_name for 500 ms initial + duration and 5000 ms. An input current I_app and the conductances g_ampa and + g_gaba are applied. The firing rate is calculated from the spikes in the last + 5000 ms. 
Args: pop_name (str): - Name of the population to check + Name of the population + I_app (float, optional): + Applied current + g_ampa (float, optional): + Conductance of the ampa synapse + g_gaba (float, optional): + Conductance of the gaba synapse Returns: - is_presynaptic (bool): - True if the population is a presynaptic population, False otherwise - is_postsynaptic (bool): - True if the population is a postsynaptic population, False otherwise - ampa (bool): - True if the population gets ampa input, False otherwise - gaba (bool): - True if the population gets gaba input, False otherwise + rate (float): + Firing rate in Hz """ - is_presynaptic = False - is_postsynaptic = False - ampa = False - gaba = False - ### loop over all projections - for proj_name in self._model.projections: - ### check if the population is a presynaptic population in any projection - if self._analyze_model.pre_post_pop_name_dict[proj_name][0] == pop_name: - is_presynaptic = True - ### check if the population is a postsynaptic population in any projection - if self._analyze_model.pre_post_pop_name_dict[proj_name][1] == pop_name: - is_postsynaptic = True - ### check if the projection target is ampa or gaba - if ( - self._analyze_model.proj_init_parameter_dict[proj_name]["target"] - == "ampa" - ): - ampa = True - elif ( - self._analyze_model.proj_init_parameter_dict[proj_name]["target"] - == "gaba" - ): - gaba = True - return is_presynaptic, is_postsynaptic, ampa, gaba + ### get the network, population, monitor, init_sampler from single nets + net = self._single_nets.single_net(pop_name=pop_name, mode="normal").net + population = self._single_nets.single_net( + pop_name=pop_name, mode="normal" + ).population + monitor = self._single_nets.single_net(pop_name=pop_name, mode="normal").monitor + init_sampler = self._single_nets.single_net( + pop_name=pop_name, mode="normal" + ).init_sampler + ### reset network + net.reset() + net.set_seed(0) + ### set the initial variables of the neuron model + 
if init_sampler is not None: + init_sampler.set_init_variables(population) + ### slow down conductances (i.e. make them constant) + population.tau_ampa = 1e20 + population.tau_gaba = 1e20 + ### apply given variables + population.I_app = I_app + population.g_ampa = g_ampa + population.g_gaba = g_gaba + ### simulate 500 ms initial duration + 5000 ms + net.simulate(500 + 5000) + ### get rate for the last 5000 ms + spike_dict = monitor.get("spike") + time_list = np.array(spike_dict[0]) + nbr_spks = np.sum((time_list > (500 / dt())).astype(int)) + rate = nbr_spks / (5000 / 1000) - def adjust_neuron_model( - self, neuron_model_init_parameter_dict, ampa=True, gaba=True - ): - """ - Add the new synaptic input coming from the auxillary population to the neuron - model. + return rate - Args: - neuron_model_init_parameter_dict (dict): - Dictionary with the parameters of the __init__ function of the Neuron - ampa (bool): - True if the population gets ampa input and therefore the ampa conductance - needs to be adjusted, False otherwise - gaba (bool): - True if the population gets gaba input and therefore the gaba conductance - needs to be adjusted, False otherwise - Returns: - neuron_model_init_parameter_dict (dict): - Dictionary with the parameters of the __init__ function of the Neuron - with DBS mechanisms added - """ - ### 1st adjust the conductance equations - ### get the equations of the neuron model as a list of strings - equations_line_split_list = str( - neuron_model_init_parameter_dict["equations"] - ).splitlines() - ### search for equation with dg_ampa/dt and dg_gaba/dt - for line_idx, line in enumerate(equations_line_split_list): - if ( - self.get_line_is_dvardt(line, var_name="g_ampa", tau_name="tau_ampa") - and ampa - ): - ### add " + tau_ampa*g_incomingauxampa/dt" - ### TODO add model requirements somewhere, here requirements = tau_ampa * dg_ampa/dt = -g_ampa - equations_line_split_list[line_idx] = ( - "tau_ampa*dg_ampa/dt = -g_ampa + 
tau_ampa*g_incomingauxampa/dt" - ) - if ( - self.get_line_is_dvardt(line, var_name="g_gaba", tau_name="tau_gaba") - and gaba - ): - ### add " + tau_gaba*g_incomingauxgaba/dt" - ### TODO add model requirements somewhere, here requirements = tau_gaba * dg_gaba/dt = -g_gaba - equations_line_split_list[line_idx] = ( - "tau_gaba*dg_gaba/dt = -g_gaba + tau_gaba*g_incomingauxgaba/dt" - ) - ### join list to a string - neuron_model_init_parameter_dict["equations"] = "\n".join( - equations_line_split_list +class ModelConfigurator: + def __init__( + self, + model: CompNeuroModel, + target_firing_rate_dict: dict, + max_psp: float = 10.0, + do_not_config_list: list[str] = [], + print_guide: bool = False, + I_app_variable: str = "I_app", + cache: bool = False, + log_file: str | None = None, + ): + ### store the given variables + self._model = model + self._do_not_config_list = do_not_config_list + self._target_firing_rate_dict = target_firing_rate_dict + self._base_dict = None + self._figure_folder = "model_conf_figures" ### TODO add this to Simulator init + ### create the figure folder + sf.create_dir(self._figure_folder) + ### initialize logger + sf.Logger(log_file=log_file) + ### analyze the given model, create model before analyzing, then clear ANNarchy + self._analyze_model = AnalyzeModel(model=self._model) + ### create the CompNeuroModel object for the reduced model (the model itself is + ### not created yet) + self._model_reduced = CreateReducedModel( + model=self._model, + analyze_model=self._analyze_model, + reduced_size=100, + do_create=False, + do_compile=False, + verbose=True, ) - - ### 2nd extend description - neuron_model_init_parameter_dict["description"] = ( - f"{neuron_model_init_parameter_dict['description']}\nWith incoming auxillary population input implemented." 
+ ### try to load the cached variables + cache_worked = False + if cache: + try: + ### load the cached variables + cache_loaded = sf.load_variables( + name_list=["init_sampler", "max_syn"], + path=".model_config_cache", + ) + cache_worked = True + except FileNotFoundError: + pass + ### create the single neuron networks (networks are compiled and ready to be + ### simulated), normal model for searching for max conductances, max input + ### current, resting firing rate; voltage clamp model for preparing the PSP + ### simulationssearching, i.e., for resting potential and corresponding input + ### current I_hold (for self-active neurons) + if not cache_worked: + self._single_nets = CreateSingleNeuronNetworks( + model=self._model, + analyze_model=self._analyze_model, + do_not_config_list=do_not_config_list, + ) + ### get the init sampler for the populations + self._init_sampler = self._single_nets.init_sampler( + model=self._model, do_not_config_list=do_not_config_list + ) + ### create simulator with single_nets + self._simulator = Simulator( + single_nets=self._single_nets, + figure_folder=self._figure_folder, + prepare_psp=None, + ) + else: + self._init_sampler: CreateSingleNeuronNetworks.AllSampler = cache_loaded[ + "init_sampler" + ] + ### get the resting potential and corresponding I_hold for each population using + ### the voltage clamp networks + if not cache_worked: + self._prepare_psp = PreparePSP( + model=self._model, + single_nets=self._single_nets, + do_not_config_list=do_not_config_list, + simulator=self._simulator, + do_plot=False, + ) + self.simulator = Simulator( + single_nets=self._single_nets, + figure_folder=self._figure_folder, + prepare_psp=self._prepare_psp, + ) + ### get the maximum synaptic conductances and input currents for each population + if not cache_worked: + self._max_syn = GetMaxSyn( + model=self._model, + simulator=self._simulator, + do_not_config_list=do_not_config_list, + max_psp=max_psp, + 
target_firing_rate_dict=target_firing_rate_dict, + ) + else: + self._max_syn = cache_loaded["max_syn"] + ### cache single_nets, prepare_psp, max_syn + if cache and not cache_worked: + sf.save_variables( + variable_list=[ + self._init_sampler, + self._max_syn, + ], + name_list=["init_sampler", "max_syn"], + path=".model_config_cache", + ) + ### get the weights dictionaries + self._weight_dicts = GetWeights( + model=self._model, + do_not_config_list=do_not_config_list, + analyze_model=self._analyze_model, + max_syn=self._max_syn, ) - return neuron_model_init_parameter_dict - - def get_line_is_dvardt(self, line: str, var_name: str, tau_name: str): + def set_weights(self, weight_dict: dict[str, float]): """ - Check if a equation string has the form "tau*dvar/dt = -var". + Set the weights of the model. Args: - line (str): - Equation string - var_name (str): - Name of the variable - tau_name (str): - Name of the time constant - - Returns: - is_solution_correct (bool): - True if the equation is as expected, False otherwise + weight_dict (dict[str, float]): + Dict with the weights for each projection """ - if var_name not in line: - return False - - # Define the variables - var, _, _, _ = sp.symbols(f"{var_name} d{var_name} dt {tau_name}") - - # Given equation as a string - equation_str = line - - # Parse the equation string - lhs, rhs = equation_str.split("=") - lhs = sp.sympify(lhs) - rhs = sp.sympify(rhs) + self._weight_dicts.weight_dict = weight_dict - # Form the equation - equation = sp.Eq(lhs, rhs) + def set_syn_load( + self, + syn_load_dict: dict[str, float], + syn_contribution_dict: dict[str, dict[str, float]], + ): + """ + Set the synaptic load of the model. 
- # Solve the equation for var - try: - solution = sp.solve(equation, var) - except: - ### equation is not solvable with variables means it is not as expected - return False + Args: + syn_load_dict (dict[str, float]): + Dict with ampa and gaba synaptic load for each population + syn_contribution_dict (dict[str, dict[str, float]]): + Dict with the contribution of the afferent projections to the ampa and + gaba synaptic load of each population + """ + self._weight_dicts.syn_load_dict = syn_load_dict + self._weight_dicts.syn_contribution_dict = syn_contribution_dict - # Given solution to compare - expected_solution_str = f"-{tau_name}*d{var_name}/dt" - expected_solution = sp.sympify(expected_solution_str) + def set_base(self): + """ + Set the baseline currents of the model, found for the current weights to reach + the target firing rates. + """ + if self._base_dict is None: + self._base_dict = GetBase( + model_normal=self._model, + model_reduced=self._model_reduced.model_reduced, + target_firing_rate_dict=self._target_firing_rate_dict, + weight_dicts=self._weight_dicts, + do_not_config_list=self._do_not_config_list, + init_sampler=self._init_sampler, + max_syn=self._max_syn, + ).base_dict - # Check if the solution is as expected - is_solution_correct = solution[0] == expected_solution - return is_solution_correct +class GetBase: + def __init__( + self, + model_normal: CompNeuroModel, + model_reduced: CompNeuroModel, + target_firing_rate_dict: dict, + weight_dicts: "GetWeights", + do_not_config_list: list[str], + init_sampler: CreateSingleNeuronNetworks.AllSampler, + max_syn: "GetMaxSyn", + ): + self._model_normal = model_normal + self._model_reduced = model_reduced + self._weight_dicts = weight_dicts + self._do_not_config_list = do_not_config_list + self._init_sampler = init_sampler + self._max_syn = max_syn + ### get the populations names of the configured populations + self._pop_names_config = [ + pop_name + for pop_name in model_normal.populations + if pop_name not 
in do_not_config_list + ] + ### convert the target firing rate dict to a array + self._target_firing_rate_arr = [] + for pop_name in self._pop_names_config: + self._target_firing_rate_arr.append(target_firing_rate_dict[pop_name]) + self._target_firing_rate_arr = np.array(self._target_firing_rate_arr) + ### get the base currents + self._base_dict = self._get_base() + + @property + def base_dict(self): + return self._base_dict + + def _set_model_weights(self): + ### loop over all populations which should be configured + for pop_name in self._pop_names_config: + ### loop over all target types + for target_type in ["ampa", "gaba"]: + ### get afferent projections of the corresponding target type + afferent_projection_list = self._weight_dicts._get_afferent_proj_names( + pop_name=pop_name, target=target_type + ) + ### loop over all afferent projections + for proj_name in afferent_projection_list: + ### set weight of the projection in the conductance-calculating + ### input current population + proj_weight = self._weight_dicts.weight_dict[proj_name] + setattr( + get_population(f"{pop_name}_{target_type}_aux"), + f"weights_{proj_name}", + proj_weight, + ) + + def _get_base(self): + ### clear ANNarchy + mf.cnp_clear(functions=False, neurons=True, synapses=True, constants=False) + ### create and compile the model + self._model_reduced.create() + ### create monitors for recording the spikes of all populations + ### for CompNeuroMonitors we need the "_reduced" suffix + mon = CompNeuroMonitors( + mon_dict={ + f"{pop_name}_reduced": ["spike"] + for pop_name in self._model_normal.populations + } + ) + ### create the experiment + exp = self.MyExperiment(monitors=mon) + ### initialize all populations with the init sampler + for pop_name in self._pop_names_config: + ### for get_population we need the "_reduced" suffix + self._init_sampler.get(pop_name=pop_name).set_init_variables( + get_population(f"{pop_name}_reduced") + ) + ### set the model weights + self._set_model_weights() + 
### store the model state for all populations + exp.store_model_state(compartment_list=self._model_reduced.populations) + self._exp = exp + ### use objective_function to search for input base currents to reach the + ### target firing rates + lb = [] + ub = [] + x0 = [] + for pop_name in self._pop_names_config: + lb.append(-self._max_syn.get(pop_name=pop_name).I_app) + ub.append(self._max_syn.get(pop_name=pop_name).I_app) + x0.append(0) + + self.objective_function(x0) ### TODO continue here + quit() + + ### Perform the optimization using L-BFGS-B method + result = minimize( + fun=self.objective_function, x0=x0, method="L-BFGS-B", bounds=Bounds(lb, ub) + ) - class SpikeProbCalcNeuron(Neuron): - """ - Neuron model to calculate the spike probabilities of the presynaptic neurons. - """ + ### Optimized input values + optimized_inputs = result.x - def __init__(self, reduced_size=1): - """ - Args: - reduced_size (int): - Reduced size of the associated presynaptic population - """ - parameters = f""" - reduced_size = {reduced_size} : population - tau= 1.0 : population - """ - equations = """ - tau*dr/dt = g_ampa/reduced_size - r - g_ampa = 0 - """ - super().__init__(parameters=parameters, equations=equations) + print(f"Optimized inputs: {optimized_inputs}") + quit() - class InputCalcNeuron(Neuron): - """ - This neurons gets the spike probabilities of the pre neurons and calculates the - incoming spikes for each projection. It accumulates the incoming spikes of all - projections (of the same target type) and calculates the conductance increase - for the post neuron. + def objective_function(self, I_app_list: list[float]): """ + Objective function to minimize the difference between the target firing rates and + the firing rates of the model with the given input currents. 
- def __init__(self, projection_dict): - """ - Args: - projection_dict (dict): - keys: names of afferent projections (of the same target type) - values: dict with keys "weights", "pre_name" - """ + Args: + I_app_list (list[float]): + List with the input currents for each population - ### create parameters - parameters = [ - f""" - number_synapses_{proj_name} = 0 - weights_{proj_name} = {vals['weights']} - """ - for proj_name, vals in projection_dict.items() - ] - parameters = "\n".join(parameters) + Returns: + diff (float): + Difference between the target firing rates and the firing rates of the + model with the given input currents + """ + ### get the firing rates of the model with the given input currents + rate_arr = self._get_firing_rate(I_app_list) + ### calculate the difference between the target firing rates and the firing rates + ### of the model with the given input currents + diff = self._target_firing_rate_arr - rate_arr + return np.sum(diff**2) + + def _get_firing_rate(self, I_app_list: list[float]): + ### convert the I_app_list to a dict + I_app_dict = {} + counter = 0 + for pop_name in self._pop_names_config: + ### for the I_app_dict we need the "_reduced" suffix + I_app_dict[f"{pop_name}_reduced"] = I_app_list[counter] + counter += 1 + ### run the experiment + results = self._exp.run(I_app_dict) + ### get the firing rates from the recorded spikes + rate_list = [] + rate_dict = {} + for pop_name in self._pop_names_config: + ### for the spike dict we need the "_reduced" suffix + spike_dict = results.recordings[0][f"{pop_name}_reduced;spike"] + t, _ = raster_plot(spike_dict) + nbr_spikes = len(t) + ### divide number of spikes by the number of neurons and the duration in s + rate = nbr_spikes / (5.0 * get_population(f"{pop_name}_reduced").size) + rate_list.append(rate) + rate_dict[pop_name] = rate + sf.Logger().log(f"I_app_dict: {I_app_dict}") + sf.Logger().log(f"Firing rates: {rate_dict}") + + af.PlotRecordings( + figname="firing_rates.png", + 
recordings=results.recordings, + recording_times=results.recording_times, + shape=(len(self._model_normal.populations), 1), + plan={ + "position": list(range(1, len(self._model_normal.populations) + 1)), + "compartment": [ + f"{pop_name}_reduced" for pop_name in self._model_normal.populations + ], + "variable": ["spike"] * len(self._model_normal.populations), + "format": ["hybrid"] * len(self._model_normal.populations), + }, + ) + return np.array(rate_list) - ### create equations - equations = [ - f""" - incoming_spikes_{proj_name} = number_synapses_{proj_name} * sum(spikeprob_{vals['pre_name']}) + Normal(0, 1)*sqrt(number_synapses_{proj_name} * sum(spikeprob_{vals['pre_name']}) * (1 - sum(spikeprob_{vals['pre_name']}))) : min=0, max=number_synapses_{proj_name} + class MyExperiment(CompNeuroExp): + def run(self, I_app_dict: dict[str, float]): """ - for proj_name, vals in projection_dict.items() - ] - equations = "\n".join(equations) - sum_of_conductance_increase = ( - "r = " - + "".join( - [ - f"incoming_spikes_{proj_name} * weights_{proj_name} + " - for proj_name in projection_dict.keys() - ] - )[:-3] - ) - equations = equations + "\n" + sum_of_conductance_increase + Simulate the model for 5000 ms with the given input currents. 
- super().__init__(parameters=parameters, equations=equations) + Args: + I_app_dict (dict[str, float]): + Dict with the input currents for each population + + Returns: + results (CompNeuroResults): + Results of the simulation + """ + ### reset to initial state + self.reset() + ### activate monitor + self.monitors.start() + ### set the input currents + for pop_name, I_app in I_app_dict.items(): + get_population(pop_name).I_app = I_app + ### simulate 5000 ms + simulate(5000, measure_time=True) + ### return results + return self.results() -class PreparePSP: +class CreateReducedModel: """ - Find v_rest, corresponding I_hold (in case of self-active neurons) and an - init_sampler to initialize the neuron model for the PSP calculation for each - population. + Class to create a reduced model from the original model. It is accessable via the + attribute model_reduced. + + Attributes: + model_reduced (CompNeuroModel): + Reduced model, created but not compiled """ def __init__( self, model: CompNeuroModel, - do_not_config_list: list[str], - do_plot: bool, - ): + analyze_model: AnalyzeModel, + reduced_size: int, + do_create: bool = False, + do_compile: bool = False, + verbose: bool = False, + ) -> None: """ + Prepare model for reduction. 
+ Args: model (CompNeuroModel): - Model to be prepared - do_not_config_list (list[str]): - List of populations which should not be configured - do_plot (bool): - If True, plot the membrane potential + Model to be reduced + reduced_size (int): + Size of the reduced populations """ - self._prepare_psp_dict = {} - ### loop over all populations - for pop_name in model.populations: - ### skip populations which should not be configured - if pop_name in do_not_config_list: - continue - ### find initial v_rest using the voltage clamp network - sf.Logger().log( - f"search v_rest with y(X) = delta_v_2000(v=X) using grid search for pop {pop_name}" - ) - v_rest, delta_v_v_rest, variables_v_rest = self._find_v_rest_initial( - pop_name=pop_name, - do_plot=do_plot, - ) - sf.Logger().log( - f"for {pop_name} found v_rest={v_rest} with delta_v_2000(v=v_rest)={delta_v_v_rest}" - ) - ### check if v is constant after setting v to v_rest by simulating the normal - ### single neuron network for 2000 ms - v_rest_is_constant, v_rest_arr = self._get_v_rest_is_const( - pop_name=pop_name, - variables_v_rest=variables_v_rest, - do_plot=do_plot, - ) - - if v_rest_is_constant: - ### v_rest found (last v value of the previous simulation), no - ### I_app_hold needed - v_rest = v_rest_arr[-1] - I_app_hold = 0 - else: - ### there is no resting_state i.e. 
neuron is self-active --> find - ### smallest negative I_app to silence neuron - sf.Logger().log( - f"neuron of {pop_name} seems to be self-active --> find smallest I_app to silence the neuron" - ) - v_rest, I_app_hold = self._find_I_app_hold( - pop_name=pop_name, - variables_v_rest=variables_v_rest, - ) - sf.Logger().log( - f"final values for {pop_name}: I_app_hold = {I_app_hold}, v_rest = {v_rest}" - ) + ### set the attributes + self._model = model + self._analyze_model = analyze_model + self._reduced_size = reduced_size + self._verbose = verbose + ### recreate model with reduced populations and projections + self.model_reduced = CompNeuroModel( + model_creation_function=self.recreate_model, + name=f"{model.name}_reduced", + description=f"{model.description}\nWith reduced populations and projections.", + do_create=do_create, + do_compile=do_compile, + compile_folder_name=f"{model.compile_folder_name}_reduced", + ) - ### get the sampler for the initial variables - psp_init_sampler = self._get_init_neuron_variables_for_psp( - pop_name=pop_name, - v_rest=v_rest, - I_app_hold=I_app_hold, - ) - ### store the prepare PSP information - self._prepare_psp_dict[pop_name] = {} - self._prepare_psp_dict[pop_name]["v_rest"] = v_rest - self._prepare_psp_dict[pop_name]["I_app_hold"] = I_app_hold - self._prepare_psp_dict[pop_name]["psp_init_sampler"] = psp_init_sampler + def recreate_model(self): + """ + Recreates the model with reduced populations and projections. 
+ """ + ### 1st for each population create a reduced population + for pop_name in self._model.populations: + self.create_reduced_pop(pop_name) + ### 2nd for each population which is a presynaptic population, create a spikes collecting aux population + for pop_name in self._model.populations: + self.create_spike_collecting_aux_pop(pop_name) + ## 3rd for each population which has afferents create a population for incoming spikes for each target type + for pop_name in self._model.populations: + self.create_conductance_aux_pop(pop_name, target="ampa") + self.create_conductance_aux_pop(pop_name, target="gaba") - def get(self, pop_name: str): + def create_reduced_pop(self, pop_name: str): """ - Return the prepare PSP information for the given population. + Create a reduced population from the given population. Args: pop_name (str): - Name of the population - - Returns: - ReturnPreparePSP: - Prepare PSP information for the given population with Attributes: v_rest, - I_app_hold, psp_init_sampler + Name of the population to be reduced """ - return self.ReturnPreparePSP( - v_rest=self._prepare_psp_dict[pop_name]["v_rest"], - I_app_hold=self._prepare_psp_dict[pop_name]["I_app_hold"], - psp_init_sampler=self._prepare_psp_dict[pop_name]["psp_init_sampler"], + if self._verbose: + print(f"create_reduced_pop for {pop_name}") + ### 1st check how the population is connected + _, is_postsynaptic, ampa, gaba = self.how_pop_is_connected(pop_name) + + ### 2nd recreate neuron model + ### get the stored parameters of the __init__ function of the Neuron + neuron_model_init_parameter_dict = ( + self._analyze_model.neuron_model_init_parameter_dict[pop_name].copy() ) + ### if the population is a postsynaptic population adjust the synaptic + ### conductance equations + if is_postsynaptic: + neuron_model_init_parameter_dict = self.adjust_neuron_model( + neuron_model_init_parameter_dict, ampa=ampa, gaba=gaba + ) + ### create the new neuron model + neuron_model_new = 
Neuron(**neuron_model_init_parameter_dict) - class ReturnPreparePSP: - def __init__( - self, v_rest: float, I_app_hold: float, psp_init_sampler: ArrSampler - ): - self.v_rest = v_rest - self.I_app_hold = I_app_hold - self.psp_init_sampler = psp_init_sampler + ### 3rd recreate the population + ### get the stored parameters of the __init__ function of the Population + pop_init_parameter_dict = self._analyze_model.pop_init_parameter_dict[ + pop_name + ].copy() + ### replace the neuron model with the new neuron model + pop_init_parameter_dict["neuron"] = neuron_model_new + ### replace the size with the reduced size (if reduced size is smaller than the + ### original size) + ### TODO add model requirements somewhere, here requirements = geometry = int + pop_init_parameter_dict["geometry"] = min( + [pop_init_parameter_dict["geometry"][0], self._reduced_size] + ) + ### append _reduce to the name + pop_init_parameter_dict["name"] = f"{pop_name}_reduced" + ### create the new population + pop_new = Population(**pop_init_parameter_dict) - def _get_init_neuron_variables_for_psp( - self, pop_name: str, v_rest: float, I_app_hold: float - ): + ### 4th set the parameters and variables of the population's neurons + ### get the stored parameters and variables + neuron_model_attr_dict = self._analyze_model.neuron_model_attr_dict[pop_name] + ### set the parameters and variables + for attr_name, attr_val in neuron_model_attr_dict.items(): + setattr(pop_new, attr_name, attr_val) + + def create_spike_collecting_aux_pop(self, pop_name: str): """ - Get the initial variables of the neuron model for the PSP calculation. + Create a spike collecting population for the given population. 
Args: pop_name (str): - Name of the population - v_rest (float): - Resting membrane potential - I_app_hold (float): - Current which silences the neuron + Name of the population for which the spike collecting population should be created + """ + ### get all efferent projections + efferent_projection_list = [ + proj_name + for proj_name, pre_post_pop_name_dict in self._analyze_model.pre_post_pop_name_dict.items() + if pre_post_pop_name_dict[0] == pop_name + ] + ### check if pop has efferent projections + if len(efferent_projection_list) == 0: + return + if self._verbose: + print(f"create_spike_collecting_aux_pop for {pop_name}") + ### create the spike collecting population + pop_aux = Population( + 1, + neuron=self.SpikeProbCalcNeuron( + reduced_size=min( + [ + self._analyze_model.pop_init_parameter_dict[pop_name][ + "geometry" + ][0], + self._reduced_size, + ] + ) + ), + name=f"{pop_name}_spike_collecting_aux", + ) + ### create the projection from reduced pop to spike collecting aux pop + proj = Projection( + pre=get_population(pop_name + "_reduced"), + post=pop_aux, + target="ampa", + name=f"proj_{pop_name}_spike_collecting_aux", + ) + proj.connect_all_to_all(weights=1) - Returns: - sampler (ArrSampler): - Sampler with the initial variables of the neuron model + def create_conductance_aux_pop(self, pop_name: str, target: str): + """ + Create a conductance calculating population for the given population and target. 
+ + Args: + pop_name (str): + Name of the population for which the conductance calculating population should be created + target (str): + Target type of the conductance calculating population """ - ### get the names of the variables of the neuron model - var_name_list = single_nets.single_net( - pop_name=pop_name, mode="normal" - ).population.variables - ### get the variables of the neuron model after 5000 ms - var_arr = Simulator().get_v_psp( - v_rest=v_rest, I_app_hold=I_app_hold, pop_name=pop_name + ### get all afferent projections + afferent_projection_list = [ + proj_name + for proj_name, pre_post_pop_name_dict in self._analyze_model.pre_post_pop_name_dict.items() + if pre_post_pop_name_dict[1] == pop_name + ] + ### check if pop has afferent projections + if len(afferent_projection_list) == 0: + return + ### get all afferent projections with target type + afferent_target_projection_list = [ + proj_name + for proj_name in afferent_projection_list + if self._analyze_model.proj_init_parameter_dict[proj_name]["target"] + == target + ] + ### check if there are afferent projections with target type + if len(afferent_target_projection_list) == 0: + return + if self._verbose: + print(f"create_conductance_aux_pop for {pop_name} target {target}") + ### get projection informations + ### TODO somewhere add model requirements, here requirements = geometry = int and connection = fixed_probability i.e. 
random (with weights and probability) + projection_dict = { + proj_name: { + "pre_size": self._analyze_model.pop_init_parameter_dict[ + self._analyze_model.pre_post_pop_name_dict[proj_name][0] + ]["geometry"][0], + "connection_prob": self._analyze_model.connector_function_parameter_dict[ + proj_name + ][ + "probability" + ], + "weights": self._analyze_model.connector_function_parameter_dict[ + proj_name + ]["weights"], + "pre_name": self._analyze_model.pre_post_pop_name_dict[proj_name][0], + } + for proj_name in afferent_target_projection_list + } + ### create the conductance calculating population + pop_aux = Population( + self._analyze_model.pop_init_parameter_dict[pop_name]["geometry"][0], + neuron=self.InputCalcNeuron(projection_dict=projection_dict), + name=f"{pop_name}_{target}_aux", ) - ### create a sampler with this single data sample - sampler = ArrSampler(arr=var_arr, var_name_list=var_name_list) - return sampler + ### set number of synapses parameter for each projection + for proj_name, vals in projection_dict.items(): + number_synapses = Binomial( + n=vals["pre_size"], p=vals["connection_prob"] + ).get_values( + self._analyze_model.pop_init_parameter_dict[pop_name]["geometry"][0] + ) + setattr(pop_aux, f"number_synapses_{proj_name}", number_synapses) + ### create the "current injection" projection from conductance calculating + ### population to the reduced post population + proj = CurrentInjection( + pre=pop_aux, + post=get_population(f"{pop_name}_reduced"), + target=f"incomingaux{target}", + name=f"proj_{pop_name}_{target}_aux", + ) + proj.connect_current() + ### create projection from spike_prob calculating aux neurons of presynaptic + ### populations to conductance calculating aux population + for proj_name, vals in projection_dict.items(): + pre_pop_name = vals["pre_name"] + pre_pop_spike_collecting_aux = get_population( + f"{pre_pop_name}_spike_collecting_aux" + ) + proj = Projection( + pre=pre_pop_spike_collecting_aux, + post=pop_aux, + 
target=f"spikeprob_{pre_pop_name}", + name=f"{proj_name}_spike_collecting_to_conductance", + ) + proj.connect_all_to_all(weights=1) - def _find_I_app_hold( - self, - pop_name: str, - variables_v_rest: dict, - ): + def how_pop_is_connected(self, pop_name): """ - Find the current which silences the neuron. + Check how a population is connected. If the population is a postsynaptic and/or + presynaptic population, check if it gets ampa and/or gaba input. Args: pop_name (str): - Name of the population - variables_v_rest (dict): - Stady state variables of the neuron during setting v_rest as membrane - potential + Name of the population to check Returns: - v_rest (float): - Resting membrane potential - I_app_hold (float): - Current which silences the neuron + is_presynaptic (bool): + True if the population is a presynaptic population, False otherwise + is_postsynaptic (bool): + True if the population is a postsynaptic population, False otherwise + ampa (bool): + True if the population gets ampa input, False otherwise + gaba (bool): + True if the population gets gaba input, False otherwise """ - ### find I_app_hold with incremental_continuous_bound_search - sf.Logger().log("search I_app_hold with y(X) = CHANGE_OF_V(I_app=X)") + is_presynaptic = False + is_postsynaptic = False + ampa = False + gaba = False + ### loop over all projections + for proj_name in self._model.projections: + ### check if the population is a presynaptic population in any projection + if self._analyze_model.pre_post_pop_name_dict[proj_name][0] == pop_name: + is_presynaptic = True + ### check if the population is a postsynaptic population in any projection + if self._analyze_model.pre_post_pop_name_dict[proj_name][1] == pop_name: + is_postsynaptic = True + ### check if the projection target is ampa or gaba + if ( + self._analyze_model.proj_init_parameter_dict[proj_name]["target"] + == "ampa" + ): + ampa = True + elif ( + self._analyze_model.proj_init_parameter_dict[proj_name]["target"] + == "gaba" + ): 
+ gaba = True - I_app_hold = -ef.find_x_bound( - ### negative current initially reduces v then v climbs back up --> - ### get_v_change_after_v_rest checks how much v changes during second half of - ### 2000 ms simulation - y=lambda X_val: -self._get_v_change_after_v_rest( - pop_name=pop_name, - variables_v_rest=variables_v_rest, - ### incremental_continuous_bound_search only uses positive values for X and - ### increases them, expecting to increase y, therefore use -X for I_app - ### (increasing X will "increase" negative current) and negative sign for - ### the returned value (for no current input the change is positive, this - ### should decrease to zero, with negative sign: for no current input the - ### change is negative, this should increase above zero) - I_app=-X_val, - ), - ### y is initially negative and should increase above 0, therefore search for - ### y_bound=0 with bound_type="greater" - x0=0, - y_bound=0, - tolerance=0.01, - bound_type="greater", - ) - # y_bound = 0 - # y: -10, -2, +0.1 - # x: 0, 5, 7 - # bound type greater should find value which is slightly larger than 0 TODO test this - ### again simulate the neuron with the obtained I_app_hold to get the new v_rest - v_rest_arr = Simulator().get_v_2000( - pop_name=pop_name, - initial_variables=variables_v_rest, - I_app=I_app_hold, - do_plot=False, - ) - v_rest = v_rest_arr[-1] - return v_rest, I_app_hold + return is_presynaptic, is_postsynaptic, ampa, gaba - def _find_v_rest_initial( - self, - pop_name: str, - do_plot: bool, + def adjust_neuron_model( + self, neuron_model_init_parameter_dict, ampa=True, gaba=True ): """ - Find the initial v_rest with the voltage clamp single neuron network for the - given population. Furthermore, get the change of v durign setting v_rest and the - stady state variables of the neuron (at the end of the simulation). + Add the new synaptic input coming from the auxillary population to the neuron + model. 
Args: - pop_name (str): - Name of the population - do_plot (bool): - True if plots should be created, False otherwise + neuron_model_init_parameter_dict (dict): + Dictionary with the parameters of the __init__ function of the Neuron + ampa (bool): + True if the population gets ampa input and therefore the ampa conductance + needs to be adjusted, False otherwise + gaba (bool): + True if the population gets gaba input and therefore the gaba conductance + needs to be adjusted, False otherwise Returns: - v_rest (float): - Resting membrane potential - detla_v_v_rest (float): - Change of the membrane potential during setting v_rest as membrane - potential - variables_v_rest (dict): - Stady state variables of the neuron during setting v_rest as membrane - potential + neuron_model_init_parameter_dict (dict): + Dictionary with the parameters of the __init__ function of the Neuron + with DBS mechanisms added """ - ### find v where dv/dt is minimal with voltage clamp network (best = 0, it can - ### only be >= 0) - v_arr = np.linspace(-90, -20, 200) - v_clamp_arr = np.array( - [ - Simulator().get_v_clamp_2000(pop_name=pop_name, v=v_val) - for v_val in v_arr - ] + ### 1st adjust the conductance equations + ### get the equations of the neuron model as a list of strings + equations_line_split_list = str( + neuron_model_init_parameter_dict["equations"] + ).splitlines() + ### search for equation with dg_ampa/dt and dg_gaba/dt + for line_idx, line in enumerate(equations_line_split_list): + if ( + self.get_line_is_dvardt(line, var_name="g_ampa", tau_name="tau_ampa") + and ampa + ): + ### add " + tau_ampa*g_incomingauxampa/dt" + ### TODO add model requirements somewhere, here requirements = tau_ampa * dg_ampa/dt = -g_ampa + equations_line_split_list[line_idx] = ( + "tau_ampa*dg_ampa/dt = -g_ampa + tau_ampa*g_incomingauxampa/dt" + ) + if ( + self.get_line_is_dvardt(line, var_name="g_gaba", tau_name="tau_gaba") + and gaba + ): + ### add " + tau_gaba*g_incomingauxgaba/dt" + ### TODO add 
model requirements somewhere, here requirements = tau_gaba * dg_gaba/dt = -g_gaba + equations_line_split_list[line_idx] = ( + "tau_gaba*dg_gaba/dt = -g_gaba + tau_gaba*g_incomingauxgaba/dt" + ) + ### join list to a string + neuron_model_init_parameter_dict["equations"] = "\n".join( + equations_line_split_list ) - v_clamp_min_idx = argrelmin(v_clamp_arr)[0] - v_rest = np.min(v_arr[v_clamp_min_idx]) - if do_plot: - plt.figure() - plt.plot(v_arr, v_clamp_arr) - plt.axvline(v_rest, color="k") - plt.axhline(0, color="k", ls="dashed") - plt.savefig(f"v_clamp_{pop_name}.png") - plt.close("all") - ### do again the simulation only with the obtained v_rest to get the detla_v for - ### v_rest - detla_v_v_rest = ( - Simulator().get_v_clamp_2000(pop_name=pop_name, v=v_rest) * dt() + ### 2nd extend description + neuron_model_init_parameter_dict["description"] = ( + f"{neuron_model_init_parameter_dict['description']}\nWith incoming auxillary population input implemented." ) - population = single_nets.single_net( - pop_name=pop_name, mode="v_clamp" - ).population - ### and the stady state variables of the neuron - variables_v_rest = { - var_name: getattr(population, var_name) for var_name in population.variables - } - return v_rest, detla_v_v_rest, variables_v_rest - def _get_v_rest_is_const(self, pop_name: str, variables_v_rest: dict, do_plot=bool): + return neuron_model_init_parameter_dict + + def get_line_is_dvardt(self, line: str, var_name: str, tau_name: str): """ - Check if the membrane potential is constant after setting it to v_rest. + Check if a equation string has the form "tau*dvar/dt = -var". 
Args: - pop_name (str): - Name of the population - variables_v_rest (dict): - Stady state variables of the neuron during setting v_rest as membrane - potential, used as initial variables for the simulation - do_plot (bool): - True if plots should be created, False otherwise + line (str): + Equation string + var_name (str): + Name of the variable + tau_name (str): + Name of the time constant Returns: - v_rest_is_constant (bool): - True if the membrane potential is constant, False otherwise - v_rest_arr (np.array): - Membrane potential for the 2000 ms simulation with shape: (time_steps,) + is_solution_correct (bool): + True if the equation is as expected, False otherwise """ - ### check if the neuron stays at v_rest with normal neuron - v_rest_arr = Simulator().get_v_2000( - pop_name=pop_name, - initial_variables=variables_v_rest, - I_app=0, - do_plot=do_plot, - ) - v_rest_arr_is_const = ( - np.std(v_rest_arr) <= np.mean(np.absolute(v_rest_arr)) / 1000 - ) - return v_rest_arr_is_const, v_rest_arr + if var_name not in line: + return False - def _get_v_change_after_v_rest( - self, pop_name: str, variables_v_rest: dict, I_app: float - ): + # Define the variables + var, _, _, _ = sp.symbols(f"{var_name} d{var_name} dt {tau_name}") + + # Given equation as a string + equation_str = line + + # Parse the equation string + lhs, rhs = equation_str.split("=") + lhs = sp.sympify(lhs) + rhs = sp.sympify(rhs) + + # Form the equation + equation = sp.Eq(lhs, rhs) + + # Solve the equation for var + try: + solution = sp.solve(equation, var) + except: + ### equation is not solvable with variables means it is not as expected + return False + + # Given solution to compare + expected_solution_str = f"-{tau_name}*d{var_name}/dt" + expected_solution = sp.sympify(expected_solution_str) + + # Check if the solution is as expected + is_solution_correct = solution[0] == expected_solution + + return is_solution_correct + + class SpikeProbCalcNeuron(Neuron): + """ + Neuron model to calculate the 
spike probabilities of the presynaptic neurons. """ - Check how much the membrane potential changes after setting it to v_rest. - Args: - pop_name (str): - Name of the population - variables_v_rest (dict): - Stady state variables of the neuron during setting v_rest as membrane - potential, used as initial variables for the simulation - do_plot (bool): - True if plots should be created, False otherwise + def __init__(self, reduced_size=1): + """ + Args: + reduced_size (int): + Reduced size of the associated presynaptic population + """ + parameters = f""" + reduced_size = {reduced_size} : population + tau= 1.0 : population + """ + equations = """ + tau*dr/dt = g_ampa/reduced_size - r + g_ampa = 0 + """ + super().__init__(parameters=parameters, equations=equations) - Returns: - change_after_v_rest (np.array): - Change of the membrane potential after setting it to v_rest + class InputCalcNeuron(Neuron): """ - ### simulate 2000 ms after setting v_rest - v_rest_arr = Simulator().get_v_2000( - pop_name=pop_name, - initial_variables=variables_v_rest, - I_app=I_app, - do_plot=False, - ) - ### check how much v changes during the second half - ### std(v) - mean(v)/1000 should be close to 0, the larger the value the more v - ### changes - change_after_v_rest = ( - np.std(v_rest_arr[len(v_rest_arr) // 2 :], axis=0) - - np.mean(np.absolute(v_rest_arr[len(v_rest_arr) // 2 :]), axis=0) / 1000 - ) - return change_after_v_rest + This neurons gets the spike probabilities of the pre neurons and calculates the + incoming spikes for each projection. It accumulates the incoming spikes of all + projections (of the same target type) and calculates the conductance increase + for the post neuron. 
+ """ + + def __init__(self, projection_dict): + """ + Args: + projection_dict (dict): + keys: names of afferent projections (of the same target type) + values: dict with keys "weights", "pre_name" + """ + + ### create parameters + parameters = [ + f""" + number_synapses_{proj_name} = 0 + weights_{proj_name} = {vals['weights']} + """ + for proj_name, vals in projection_dict.items() + ] + parameters = "\n".join(parameters) + + ### create equations + equations = [ + f""" + incoming_spikes_{proj_name} = number_synapses_{proj_name} * sum(spikeprob_{vals['pre_name']}) + Normal(0, 1)*sqrt(number_synapses_{proj_name} * sum(spikeprob_{vals['pre_name']}) * (1 - sum(spikeprob_{vals['pre_name']}))) : min=0, max=number_synapses_{proj_name} + """ + for proj_name, vals in projection_dict.items() + ] + equations = "\n".join(equations) + sum_of_conductance_increase = ( + "r = " + + "".join( + [ + f"incoming_spikes_{proj_name} * weights_{proj_name} + " + for proj_name in projection_dict.keys() + ] + )[:-3] + ) + equations = equations + "\n" + sum_of_conductance_increase + + super().__init__(parameters=parameters, equations=equations) class GetMaxSyn: + """ + Find the maximal synaptic input for each population. 
+ """ + def __init__( - self, model: CompNeuroModel, do_not_config_list: list[str], max_psp: float + self, + model: CompNeuroModel, + simulator: Simulator, + do_not_config_list: list[str], + max_psp: float, + target_firing_rate_dict: dict[str, float], ): + """ + Args: + model (CompNeuroModel): + Model to be analyzed + simulator (Simulator): + Simulator object for simulations with the single neuron networks + do_not_config_list (list[str]): + List of populations which should not be configured + max_psp (float): + Maximal postsynaptic potential in mV + target_firing_rate_dict (dict[str, float]): + Target firing rate for each population + """ + self._simulator = simulator self._max_syn_dict = {} ### loop over all populations for pop_name in model.populations: @@ -1780,10 +2248,11 @@ def __init__( ### get max g_ampa g_ampa_max = self._get_max_g_ampa(pop_name=pop_name, g_gaba_max=g_gaba_max) - print(f"g_gaba_max: {g_gaba_max}, g_ampa_max: {g_ampa_max}") - quit() ### TODO seems to work until here, continue here + ### get max I_app - I_app_max = self._get_max_I_app(pop_name=pop_name) + I_app_max = self._get_max_I_app( + pop_name=pop_name, target_firing_rate_dict=target_firing_rate_dict + ) ### store the maximal synaptic input in dict self._max_syn_dict[pop_name] = {} @@ -1817,10 +2286,26 @@ def __init__(self, g_gaba: float, g_ampa: float, I_app: float): self.I_app = I_app def _get_max_g_gaba(self, pop_name: str, max_psp: float): + """ + Find the maximal g_gaba for the given population. A single spike with maximal + g_gaba should result in a inhibitory postsynaptic potential of max_psp. 
+ + Args: + pop_name (str): + Name of the population + max_psp (float): + Maximal postsynaptic potential in mV + + Returns: + g_gaba_max (float): + Maximal g_gaba + """ ### find g_gaba max using max IPSP - sf.Logger().log("search g_gaba_max with y(X) = PSP(g_ampa=0, g_gaba=X)") + sf.Logger().log( + f"[{pop_name}]: search g_gaba_max with y(X) = PSP(g_ampa=0, g_gaba=X)" + ) return ef.find_x_bound( - y=lambda X_val: Simulator().get_ipsp( + y=lambda X_val: self._simulator.get_ipsp( pop_name=pop_name, g_gaba=X_val, ), @@ -1830,13 +2315,27 @@ def _get_max_g_gaba(self, pop_name: str, max_psp: float): ) def _get_max_g_ampa(self, pop_name: str, g_gaba_max: float): + """ + Find the maximal g_ampa for the given population. The maximal g_ampa should + override the maximal IPSP of g_gaba. + + Args: + pop_name (str): + Name of the population + g_gaba_max (float): + Maximal g_gaba + + Returns: + g_ampa_max (float): + Maximal g_ampa + """ ### find g_ampa max by "overriding" IPSP of g_gaba max sf.Logger().log( - f"search g_ampa_max with y(X) = PSP(g_ampa=X, g_gaba=g_gaba_max={g_gaba_max})" + f"[{pop_name}]: search g_ampa_max with y(X) = PSP(g_ampa=X, g_gaba=g_gaba_max={g_gaba_max})" ) def func(x): - ipsp = Simulator().get_ipsp( + ipsp = self._simulator.get_ipsp( pop_name=pop_name, g_gaba=g_gaba_max, g_ampa=x, @@ -1859,10 +2358,253 @@ def func(x): tolerance=0.005, ) + def _get_max_I_app(self, pop_name: str, target_firing_rate_dict: dict[str, float]): + """ + Find the maximal current input for the given population. The maximal current + input should result in "resting" firing rate + target firing rate + 100 Hz. 
+ + Args: + pop_name (str): + Name of the population + target_firing_rate_dict (dict[str, float]): + Target firing rate for each population + + Returns: + I_app_max (float): + Maximal current input + """ + ### get f_0 and f_max + f_0 = self._simulator.get_firing_rate(pop_name=pop_name) + f_max = f_0 + target_firing_rate_dict[pop_name] + 100 + + ### find I_max with f_0, and f_max using find_x_bound + sf.Logger().log( + f"[{pop_name}]: search I_app_max with y(X) = f(I_app=X, g_ampa=0, g_gaba=0)" + ) + I_max = ef.find_x_bound( + y=lambda X_val: self._simulator.get_firing_rate( + pop_name=pop_name, + I_app=X_val, + ), + x0=0, + y_bound=f_max, + tolerance=1, + ) + + return I_max + + +class GetWeights: + def __init__( + self, + model: CompNeuroModel, + do_not_config_list: list[str], + analyze_model: AnalyzeModel, + max_syn: GetMaxSyn, + ): + self._model = model + self._do_not_config_list = do_not_config_list + self._analyze_model = analyze_model + self._max_syn = max_syn + ### initialize the weight_dict with the maximal weights + weight_dict_init = {} + for proj_name in model.projections: + post_pop_name = analyze_model.pre_post_pop_name_dict[proj_name][1] + target_type = analyze_model.proj_init_parameter_dict[proj_name]["target"] + if target_type == "ampa": + weight = self._max_syn.get(pop_name=post_pop_name).g_ampa + elif target_type == "gaba": + weight = self._max_syn.get(pop_name=post_pop_name).g_gaba + weight_dict_init[proj_name] = weight + ### first set the interal weight dict variable, then the property to calculate + ### syn_load_dict and syn_contribution_dict + self._weight_dict = weight_dict_init + self.weight_dict = weight_dict_init + + @property + def weight_dict(self): + return self._weight_dict + + @weight_dict.setter + def weight_dict(self, value: dict[str, float]): + ### check if the dictionary "value" has the same keys as the internal weight_dict + if set(value.keys()) != set(self._weight_dict.keys()): + raise ValueError( + f"The keys of the weight_dict 
must be: {set(self._weight_dict.keys())}" + ) + ### if weight_dict is set, recalculate syn_load_dict and syn_contribution_dict + self._weight_dict = value + self._syn_load_dict = self._get_syn_load_dict() + self._syn_contribution_dict = self._get_syn_contribution_dict() + + @property + def syn_load_dict(self): + return self._syn_load_dict + + @syn_load_dict.setter + def syn_load_dict(self, value: dict[str, dict[str, float]]): + ### check if the dictionary "value" has the same structure as the internal + ### nested dict syn_load_dict + if set(value.keys()) != set(self._syn_load_dict.keys()): + raise ValueError( + f"The syn_load_dict must have this structure: {self._syn_load_dict}" + ) + for pop_name in value.keys(): + if set(value[pop_name].keys()) != set(self._syn_load_dict[pop_name].keys()): + raise ValueError( + f"The syn_load_dict must have this structure: {self._syn_load_dict}" + ) + ### check if values are between 0 and 1 + for pop_name in value.keys(): + for target in value[pop_name].keys(): + if not 0 <= value[pop_name][target] <= 1: + raise ValueError( + "The values of the syn_load_dict must be between 0 and 1" + ) + + ### if syn_load_dict is set, recalculate weight_dict + self._syn_load_dict = value + self._weight_dict = self._get_weight_dict() + + @property + def syn_contribution_dict(self): + return self._syn_contribution_dict + + @syn_contribution_dict.setter + def syn_contribution_dict(self, value: dict[str, dict[str, dict[str, float]]]): + ### check if the dictionary "value" has the same structure as the internal + ### nested dict syn_contribution_dict + if set(value.keys()) != set(self._syn_contribution_dict.keys()): + raise ValueError( + f"The syn_contribution_dict must have this structure: {self._syn_contribution_dict}" + ) + for pop_name in value.keys(): + if set(value[pop_name].keys()) != set( + self._syn_contribution_dict[pop_name].keys() + ): + raise ValueError( + f"The syn_contribution_dict must have this structure: 
{self._syn_contribution_dict}" + ) + for target in value[pop_name].keys(): + if set(value[pop_name][target].keys()) != set( + self._syn_contribution_dict[pop_name][target].keys() + ): + raise ValueError( + f"The syn_contribution_dict must have this structure: {self._syn_contribution_dict}" + ) + ### check if values are between 0 and 1 + for pop_name in value.keys(): + for target in value[pop_name].keys(): + for proj_name in value[pop_name][target].keys(): + if not 0 <= value[pop_name][target][proj_name] <= 1: + raise ValueError( + "The values of the syn_contribution_dict must be between 0 and 1" + ) + + ### if syn_contribution_dict is set, recalculate weight_dict + self._syn_contribution_dict = value + self._weight_dict = self._get_weight_dict() + + def _get_weight_dict(self): + ### set the weights population wise for the afferent projections + weight_dict = {} + ### loop over all populations + for pop_name in self._model.populations: + ### skip populations which should not be configured + if pop_name in self._do_not_config_list: + continue + synaptic_load = self._syn_load_dict[pop_name] + ### loop over target types + for target, load in synaptic_load.items(): + synaptic_contribution = self._syn_contribution_dict[pop_name][target] + ### loop over afferebt projections with target type + for proj_name in synaptic_contribution.keys(): + max_conductance = ( + self._max_syn.get(pop_name=pop_name).g_ampa + if target == "ampa" + else self._max_syn.get(pop_name=pop_name).g_gaba + ) + weight_dict[proj_name] = ( + load * synaptic_contribution[proj_name] * max_conductance + ) + + return weight_dict + + def _get_syn_load_dict(self): + syn_load_dict = {} + ### loop over populations + for pop_name in self._model.populations: + ### skip populations which should not be configured + if pop_name in self._do_not_config_list: + continue + syn_load_dict[pop_name] = {} + ### loop over target types + for target in ["ampa", "gaba"]: + ### get all afferent projections with target type + 
proj_name_list = self._get_afferent_proj_names( + pop_name=pop_name, target=target + ) + if len(proj_name_list) == 0: + continue + ### get the maximal weight of the afferent projections + max_weight = max( + [self._weight_dict[proj_name] for proj_name in proj_name_list] + ) + ### get the synaptic load + if target == "ampa": + syn_load_dict[pop_name][target] = ( + max_weight / self._max_syn.get(pop_name=pop_name).g_ampa + ) + elif target == "gaba": + syn_load_dict[pop_name][target] = ( + max_weight / self._max_syn.get(pop_name=pop_name).g_gaba + ) + + return syn_load_dict + + def _get_syn_contribution_dict(self): + syn_contribution_dict = {} + ### loop over populations + for pop_name in self._model.populations: + ### skip populations which should not be configured + if pop_name in self._do_not_config_list: + continue + syn_contribution_dict[pop_name] = {} + ### loop over target types + for target in ["ampa", "gaba"]: + ### get all afferent projections with target type + proj_name_list = self._get_afferent_proj_names( + pop_name=pop_name, target=target + ) + if len(proj_name_list) == 0: + continue + ### get the synaptic contribution + syn_contribution_dict[pop_name][target] = {} + for proj_name in proj_name_list: + syn_contribution_dict[pop_name][target][ + proj_name + ] = self._weight_dict[proj_name] / max( + [self._weight_dict[proj_name] for proj_name in proj_name_list] + ) + + return syn_contribution_dict + + ### synaptic load for each population between 0 and 1, determines the largest weight of incoming synapses, 1 means maximal conductance + ### to get synaptic load of a population/target, get all afferent projections of the population/target and take the maximal weight divided by the (global) maximal weight + ### synaptic contribution for each if a population has for a target type multiple afferent projections --> array with numbers for these projections + ### divide the array by tjhe max value --> e.g. 
result is [0.6,1.0] weights of the projections then are 0.6*max_weight and 1.0*max_weight, where max weight is determined by the synaptic load + + def _get_afferent_proj_names(self, pop_name: str, target: str): + proj_name_list = [] + for proj_name in self._model.projections: + if ( + self._analyze_model.pre_post_pop_name_dict[proj_name][1] == pop_name + and self._analyze_model.proj_init_parameter_dict[proj_name]["target"] + == target + ): + proj_name_list.append(proj_name) -class GetWeightTemplates: - def __init__(self): - pass + return proj_name_list class CreateVoltageClampEquations: diff --git a/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py b/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py index 24ea2d5..326090c 100644 --- a/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py +++ b/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py @@ -369,29 +369,44 @@ def BGM_part_function(params): do_not_config_list=do_not_config_list, print_guide=True, I_app_variable="I_app", + cache=True, log_file="model_configurator.log", ) - ### obtain the maximum synaptic loads for the populations and the - ### maximum weights of their afferent projections - model_conf.get_max_syn(cache=False, clear=False) - - ### now either set weights directly - weights = { - "stn": { - "cor_exc__stn": 0.1420716334652917 * 0, - "cor_inh__stn": 0.3210113100293368 * 0, + ### set syn load + model_conf.set_syn_load( + syn_load_dict={ + "stn": {"ampa": 0.0, "gaba": 0.0}, + "snr": {"ampa": 0.0, "gaba": 0.0}, + "gpe": {"ampa": 1.0}, + "thal": {"gaba": 0.0}, }, - "gpe": {"stn__gpe": 0.14456939170522481 * 0}, - "snr": { - "stn__snr": 0.14456939170522481 * 0, - "gpe__snr": 0.04, - "snr__snr": 0.3258095138891384 * 0, + syn_contribution_dict={ + "stn": {"ampa": {"cor_exc__stn": 1.0}, "gaba": {"cor_inh__stn": 1.0}}, + "snr": { + "ampa": {"stn__snr": 1.0}, + "gaba": {"gpe__snr": 1.0, "snr__snr": 1.0}, + }, + "gpe": {"ampa": 
{"stn__gpe": 1.0}}, + "thal": {"gaba": {"snr__thal": 1.0}}, }, - "thal": {"snr__thal": 0.33855115254020435 * 0}, - } + ) + print(model_conf._weight_dicts.weight_dict) + # ### or set weights + # model_conf.set_weights( + # weight_dict={ + # "cor_exc__stn": 0.14017251511767667, + # "cor_inh__stn": 0.3185158233680059, + # "stn__snr": 0.1411802181516728, + # "gpe__snr": 0.3210042713120005, + # "snr__snr": 0.3210042713120005, + # "stn__gpe": 0.1411802181516728, + # "snr__thal": 1.169558816450918, + # } + # ) - model_conf.set_weights(weights) + I_base_dict = model_conf.set_base() + quit() ### or define synaptic load of populations # synaptic_load_dict = { From 9b551f270dd1e181af6346db9e51db4693639d75 Mon Sep 17 00:00:00 2001 From: olimaol Date: Tue, 11 Jun 2024 15:01:18 +0200 Subject: [PATCH 34/39] model configurator started implementign get_base --- .../model_configurator_cnp.py | 294 ++++++++++++++---- .../model_configurator_user.py | 3 +- 2 files changed, 230 insertions(+), 67 deletions(-) diff --git a/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py b/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py index dc7f1f5..ede4bea 100644 --- a/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py +++ b/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py @@ -27,6 +27,7 @@ Binomial, CurrentInjection, raster_plot, + set_seed, ) from ANNarchy.core import ConnectorMethods @@ -1403,6 +1404,7 @@ def __init__( print_guide: bool = False, I_app_variable: str = "I_app", cache: bool = False, + clear_cache: bool = False, log_file: str | None = None, ): ### store the given variables @@ -1428,6 +1430,8 @@ def __init__( verbose=True, ) ### try to load the cached variables + if clear_cache: + sf.clear_dir(".model_config_cache") cache_worked = False if cache: try: @@ -1474,7 +1478,7 @@ def __init__( simulator=self._simulator, do_plot=False, ) - self.simulator = Simulator( + self._simulator = Simulator( 
single_nets=self._single_nets, figure_folder=self._figure_folder, prepare_psp=self._prepare_psp, @@ -1487,7 +1491,7 @@ def __init__( do_not_config_list=do_not_config_list, max_psp=max_psp, target_firing_rate_dict=target_firing_rate_dict, - ) + ).max_syn_getter else: self._max_syn = cache_loaded["max_syn"] ### cache single_nets, prepare_psp, max_syn @@ -1553,6 +1557,91 @@ def set_base(self): ).base_dict +class Minimize: + def __init__(self, func, yt, x0, lb, ub, tol, max_it) -> None: + """ + Args: + func (Callable): + Function which takes a vector as input and returns a vector as output + target_values (np.array): + Target output vector of the function + x0 (np.array): + Initial input vector + lb (np.array): + Lower bounds of the input vector + ub (np.array): + Upper bounds of the input vector + tol (float): + If the maximum absolute error of the output vector is smaller than this + value, the optimization stops + max_it (int): + Maximum number of iterations + """ + ### TODO continue here, I think it works but neuron models explode + x = x0 + error = np.inf + it = 0 + search_gradient_diff = np.ones(x0.shape) + alpha = 0.1 + while np.max(np.abs(error)) > tol and it < max_it: + print("\n\nnext iteration") + y = func(x) + print(f"x: {x}") + print(f"y: {y}\n") + ### calculate the gradient i.e. 
change of the output values for each input + grad = np.zeros((yt.shape[0], x0.shape[0])) + for i in range(len(x0)): + ### search for the gradient of the i-th input, increase the stepwidth + ### which is used to calculate the gradient if the gradient for the + ### associated output value is below 1 + while grad[i, i] < 1: + x_plus = x.copy() + ### change only the currently selected input whose gradient should be + ### calculated + x_plus[i] += search_gradient_diff[i] + y_plus = func(x_plus) + print(f"x_plus: {x_plus}") + print(f"y_plus: {y_plus}\n") + grad[:, i] = y_plus - y + ### if gradient is above 10 reduce the search gradient diff + if grad[i, i] >= 10: + search_gradient_diff[i] /= 1.5 + ### if gradient is below 1 increase the search gradient diff + elif grad[i, i] < 1: + search_gradient_diff[i] *= 2 + ### calculate the wanted change of the output values + delta_y = yt - y + print(f"delta_y: {delta_y}") + print(f"grad:\n{grad}") + + # Example coefficient matrix A (m x n matrix) + A = grad + + # Right-hand side vector b (m-dimensional vector) + b = delta_y + + # Solve the system using least squares method + solution, residuals, rank, s = np.linalg.lstsq(A, b, rcond=None) + + # Output the results + print("Solution vector x:", solution) + print("Residuals:", residuals) + print("Rank of matrix A:", rank) + print("Singular values of A:", s) + + # Calculate the matrix-vector product Ax + Ax = np.dot(A, solution) + + # Output the matrix-vector product and compare with b + print("Matrix-vector product Ax:", Ax) + print("Original vector b:", b) + + ### solution contains the info how much each input should change (how many + ### times the change of gradient is needed to reach the target values) + x = x + solution * search_gradient_diff * alpha + it += 1 + + class GetBase: def __init__( self, @@ -1562,7 +1651,7 @@ def __init__( weight_dicts: "GetWeights", do_not_config_list: list[str], init_sampler: CreateSingleNeuronNetworks.AllSampler, - max_syn: "GetMaxSyn", + max_syn: 
"GetMaxSyn.MaxSynGetter", ): self._model_normal = model_normal self._model_reduced = model_reduced @@ -1576,12 +1665,14 @@ def __init__( for pop_name in model_normal.populations if pop_name not in do_not_config_list ] - ### convert the target firing rate dict to a array + ### convert the target firing rate dict to an array self._target_firing_rate_arr = [] + print(self._pop_names_config) for pop_name in self._pop_names_config: self._target_firing_rate_arr.append(target_firing_rate_dict[pop_name]) self._target_firing_rate_arr = np.array(self._target_firing_rate_arr) ### get the base currents + self._prepare_get_base() self._base_dict = self._get_base() @property @@ -1608,7 +1699,7 @@ def _set_model_weights(self): proj_weight, ) - def _get_base(self): + def _prepare_get_base(self): ### clear ANNarchy mf.cnp_clear(functions=False, neurons=True, synapses=True, constants=False) ### create and compile the model @@ -1622,7 +1713,7 @@ def _get_base(self): } ) ### create the experiment - exp = self.MyExperiment(monitors=mon) + self._exp = self.MyExperiment(monitors=mon) ### initialize all populations with the init sampler for pop_name in self._pop_names_config: ### for get_population we need the "_reduced" suffix @@ -1632,33 +1723,95 @@ def _get_base(self): ### set the model weights self._set_model_weights() ### store the model state for all populations - exp.store_model_state(compartment_list=self._model_reduced.populations) - self._exp = exp - ### use objective_function to search for input base currents to reach the - ### target firing rates - lb = [] - ub = [] - x0 = [] + self._exp.store_model_state(compartment_list=self._model_reduced.populations) + ### set lower and upper bounds and initial guess + self._lb = [] + self._ub = [] + self._x0 = [] for pop_name in self._pop_names_config: - lb.append(-self._max_syn.get(pop_name=pop_name).I_app) - ub.append(self._max_syn.get(pop_name=pop_name).I_app) - x0.append(0) + 
self._lb.append(-self._max_syn.get(pop_name=pop_name).I_app) + self._ub.append(self._max_syn.get(pop_name=pop_name).I_app) + self._x0.append(0.0) - self.objective_function(x0) ### TODO continue here - quit() + def _get_base(self): + """ + Perform the optimization to find the base currents for the target firing rates. - ### Perform the optimization using L-BFGS-B method - result = minimize( - fun=self.objective_function, x0=x0, method="L-BFGS-B", bounds=Bounds(lb, ub) + Returns: + optimized_inputs (np.array): + Optimized input currents + """ + + ### Perform the optimization using Minimize class + Minimize( + func=self._get_firing_rate, + yt=self._target_firing_rate_arr, + x0=np.array(self._x0), + lb=np.array(self._lb), + ub=np.array(self._ub), + tol=1, + max_it=20, ) - ### Optimized input values - optimized_inputs = result.x - - print(f"Optimized inputs: {optimized_inputs}") + ### Perform the optimization using L-BFGS-B method + # result = minimize( + # fun=self._objective_function, + # x0=self._x0, + # method="L-BFGS-B", + # bounds=Bounds(self._lb, self._ub), + # ) + + # ### Perform the optimization using Powell method + # result = minimize(self._objective_function, self._x0, method='Powell') + + # ### Optimized input values + # optimized_inputs = result.x + + # print(f"Optimized inputs: {optimized_inputs}") + + # ### Perform the optimization using DeapCma method + # ### define lower bounds of paramters to optimize + # lb = np.array(self._lb) + + # ### define upper bounds of paramters to optimize + # ub = np.array(self._ub) + + # ### create an "minimal" instance of the DeapCma class + # deap_cma = ef.DeapCma( + # lower=lb, + # upper=ub, + # evaluate_function=self.objective_function_deap, + # ) + + # ### run the optimization + # deap_cma_result = deap_cma.run(max_evals=1000) + + # for key, val in deap_cma_result.items(): + # if key in ["logbook", "deap_pop"]: + # continue + # print(f"{key}: {val}") quit() - def objective_function(self, I_app_list: list[float]): + 
def _objective_function_deap(self, population): + """ + Objective function wrapper for the DeapCma optimization. + + Args: + population (list): + List of individuals with input currents for each model population + + Returns: + loss_list (list): + List of losses for each individual of the population + """ + loss_list = [] + ### the population is a list of individuals which are lists of parameters + for individual in population: + loss_of_individual = self._objective_function(I_app_list=individual) + loss_list.append((loss_of_individual,)) + return loss_list + + def _objective_function(self, I_app_list: list[float]): """ Objective function to minimize the difference between the target firing rates and the firing rates of the model with the given input currents. @@ -1701,23 +1854,23 @@ def _get_firing_rate(self, I_app_list: list[float]): rate = nbr_spikes / (5.0 * get_population(f"{pop_name}_reduced").size) rate_list.append(rate) rate_dict[pop_name] = rate - sf.Logger().log(f"I_app_dict: {I_app_dict}") - sf.Logger().log(f"Firing rates: {rate_dict}") - - af.PlotRecordings( - figname="firing_rates.png", - recordings=results.recordings, - recording_times=results.recording_times, - shape=(len(self._model_normal.populations), 1), - plan={ - "position": list(range(1, len(self._model_normal.populations) + 1)), - "compartment": [ - f"{pop_name}_reduced" for pop_name in self._model_normal.populations - ], - "variable": ["spike"] * len(self._model_normal.populations), - "format": ["hybrid"] * len(self._model_normal.populations), - }, - ) + # sf.Logger().log(f"I_app_dict: {I_app_dict}") + # sf.Logger().log(f"Firing rates: {rate_dict}") + + # af.PlotRecordings( + # figname="firing_rates.png", + # recordings=results.recordings, + # recording_times=results.recording_times, + # shape=(len(self._model_normal.populations), 1), + # plan={ + # "position": list(range(1, len(self._model_normal.populations) + 1)), + # "compartment": [ + # f"{pop_name}_reduced" for pop_name in 
self._model_normal.populations + # ], + # "variable": ["spike"] * len(self._model_normal.populations), + # "format": ["hybrid"] * len(self._model_normal.populations), + # }, + # ) return np.array(rate_list) class MyExperiment(CompNeuroExp): @@ -1735,13 +1888,14 @@ def run(self, I_app_dict: dict[str, float]): """ ### reset to initial state self.reset() + set_seed(0) ### activate monitor self.monitors.start() ### set the input currents for pop_name, I_app in I_app_dict.items(): get_population(pop_name).I_app = I_app ### simulate 5000 ms - simulate(5000, measure_time=True) + simulate(5000) ### return results return self.results() @@ -2260,30 +2414,38 @@ def __init__( self._max_syn_dict[pop_name]["g_ampa"] = g_ampa_max self._max_syn_dict[pop_name]["I_app"] = I_app_max - def get(self, pop_name: str): - """ - Return the maximal synaptic input for the given population. + @property + def max_syn_getter(self): + return self.MaxSynGetter(self._max_syn_dict) - Args: - pop_name (str): - Name of the population + class MaxSynGetter: + def __init__(self, max_syn_dict: dict) -> None: + self._max_syn_dict = max_syn_dict - Returns: - ReturnMaxSyn: - Maximal synaptic input for the given population with Attributes: g_gaba, - g_ampa, I_app - """ - return self.ReturnMaxSyn( - g_gaba=self._max_syn_dict[pop_name]["g_gaba"], - g_ampa=self._max_syn_dict[pop_name]["g_ampa"], - I_app=self._max_syn_dict[pop_name]["I_app"], - ) + def get(self, pop_name: str): + """ + Return the maximal synaptic input for the given population. 
+ + Args: + pop_name (str): + Name of the population + + Returns: + ReturnMaxSyn: + Maximal synaptic input for the given population with Attributes: g_gaba, + g_ampa, I_app + """ + return self.ReturnMaxSyn( + g_gaba=self._max_syn_dict[pop_name]["g_gaba"], + g_ampa=self._max_syn_dict[pop_name]["g_ampa"], + I_app=self._max_syn_dict[pop_name]["I_app"], + ) - class ReturnMaxSyn: - def __init__(self, g_gaba: float, g_ampa: float, I_app: float): - self.g_gaba = g_gaba - self.g_ampa = g_ampa - self.I_app = I_app + class ReturnMaxSyn: + def __init__(self, g_gaba: float, g_ampa: float, I_app: float): + self.g_gaba = g_gaba + self.g_ampa = g_ampa + self.I_app = I_app def _get_max_g_gaba(self, pop_name: str, max_psp: float): """ @@ -2400,7 +2562,7 @@ def __init__( model: CompNeuroModel, do_not_config_list: list[str], analyze_model: AnalyzeModel, - max_syn: GetMaxSyn, + max_syn: GetMaxSyn.MaxSynGetter, ): self._model = model self._do_not_config_list = do_not_config_list diff --git a/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py b/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py index 326090c..5a548d5 100644 --- a/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py +++ b/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py @@ -370,6 +370,7 @@ def BGM_part_function(params): print_guide=True, I_app_variable="I_app", cache=True, + clear_cache=False, log_file="model_configurator.log", ) @@ -378,7 +379,7 @@ def BGM_part_function(params): syn_load_dict={ "stn": {"ampa": 0.0, "gaba": 0.0}, "snr": {"ampa": 0.0, "gaba": 0.0}, - "gpe": {"ampa": 1.0}, + "gpe": {"ampa": 0.0}, "thal": {"gaba": 0.0}, }, syn_contribution_dict={ From f7e62eb16f2f0820a0d2e2c456438a69b2f34557 Mon Sep 17 00:00:00 2001 From: olmai Date: Wed, 12 Jun 2024 17:10:59 +0200 Subject: [PATCH 35/39] model_conf: continued with get_base optimization neuron models: dv/dt needs min/max! 
--- .../model_configurator_cnp.py | 347 ++++++++++++++---- .../model_configurator_user.py | 66 +--- .../final_models/izhikevich_2003_like_nm.py | 4 +- 3 files changed, 286 insertions(+), 131 deletions(-) diff --git a/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py b/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py index ede4bea..05f67fa 100644 --- a/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py +++ b/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py @@ -152,7 +152,7 @@ def __init__(self, model: CompNeuroModel): self._clear_model(model=model, do_create=False) def _clear_model(self, model: CompNeuroModel, do_create: bool = True): - mf.cnp_clear(functions=False, neurons=True, synapses=True, constants=False) + mf.cnp_clear(functions=False, constants=False) if do_create: model.create(do_compile=False) @@ -196,10 +196,6 @@ def _analyze_populations(self, model: CompNeuroModel): for param in init_params if param != "self" and param != "storage_order" and param != "copied" } - ### get the afferent projections dict of the population TODO do we still need this? 
- # self.afferent_projection_dict[pop_name] = ( - # self._get_afferent_projection_dict(pop_name=pop_name) - # ) def _analyze_projections(self, model: CompNeuroModel): """ @@ -770,6 +766,7 @@ def __init__( do_not_config_list: list[str], simulator: "Simulator", do_plot: bool, + figure_folder: str, ): """ Args: @@ -783,6 +780,7 @@ def __init__( self._single_nets = single_nets self._prepare_psp_dict = {} self._simulator = simulator + self._figure_folder = figure_folder ### loop over all populations for pop_name in model.populations: ### skip populations which should not be configured @@ -943,10 +941,6 @@ def _find_I_app_hold( tolerance=0.01, bound_type="greater", ) - # y_bound = 0 - # y: -10, -2, +0.1 - # x: 0, 5, 7 - # bound type greater should find value which is slightly larger than 0 TODO test this ### again simulate the neuron with the obtained I_app_hold to get the new v_rest v_rest_arr = self._simulator.get_v_2000( pop_name=pop_name, @@ -999,7 +993,7 @@ def _find_v_rest_initial( plt.plot(v_arr, v_clamp_arr) plt.axvline(v_rest, color="k") plt.axhline(0, color="k", ls="dashed") - plt.savefig(f"v_clamp_{pop_name}.png") + plt.savefig(f"{self._figure_folder}/v_clamp_{pop_name}.png") plt.close("all") ### do again the simulation only with the obtained v_rest to get the detla_v for @@ -1412,7 +1406,7 @@ def __init__( self._do_not_config_list = do_not_config_list self._target_firing_rate_dict = target_firing_rate_dict self._base_dict = None - self._figure_folder = "model_conf_figures" ### TODO add this to Simulator init + self._figure_folder = "model_conf_figures" ### TODO add this to figures ### create the figure folder sf.create_dir(self._figure_folder) ### initialize logger @@ -1477,6 +1471,7 @@ def __init__( do_not_config_list=do_not_config_list, simulator=self._simulator, do_plot=False, + figure_folder=self._figure_folder, ) self._simulator = Simulator( single_nets=self._single_nets, @@ -1521,6 +1516,7 @@ def set_weights(self, weight_dict: dict[str, float]): Dict 
with the weights for each projection """ self._weight_dicts.weight_dict = weight_dict + self._check_if_not_config_pops_have_correct_rates() def set_syn_load( self, @@ -1539,26 +1535,119 @@ def set_syn_load( """ self._weight_dicts.syn_load_dict = syn_load_dict self._weight_dicts.syn_contribution_dict = syn_contribution_dict + self._check_if_not_config_pops_have_correct_rates() + + def _check_if_not_config_pops_have_correct_rates(self): + """ + Check if the populations which should not be configured have the correct firing + rates. + """ + ### initialize the normal model + compile the model + self._init_model_with_fitted_base(base_dict=self._base_dict) + + ### record spikes of the do_not_config populations + mon = CompNeuroMonitors( + mon_dict={ + pop_name: ["spike"] for pop_name in self._do_not_config_list + } # _model.populations # tmp test + ) + mon.start() + ### simulate the model for 5000 ms + # get_population("stn").I_app = 8 # tmp test + simulate(5000) + + ### get the firing rates + recordings = mon.get_recordings() + for pop_name in self._do_not_config_list: + spike_dict = recordings[0][f"{pop_name};spike"] + t, _ = raster_plot(spike_dict) + spike_count = len(t) + pop_size = len(get_population(pop_name)) + firing_rate = spike_count / (5 * pop_size) + if np.abs(firing_rate - self._target_firing_rate_dict[pop_name]) > 1: + sf.Logger().log( + f"Warning: Population {pop_name} has a firing rate of {firing_rate} instead of {self._target_firing_rate_dict[pop_name]}" + ) + print( + f"Warning: Population {pop_name} has a firing rate of {firing_rate} instead of {self._target_firing_rate_dict[pop_name]}" + ) + + # ### tmp plot + # recording_times = mon.get_recording_times() + + # af.PlotRecordings( + # figname="tmp.png", + # recordings=recordings, + # recording_times=recording_times, + # shape=(len(self._model.populations), 1), + # plan={ + # "position": list(range(1, len(self._model.populations) + 1)), + # "compartment": self._model.populations, + # "variable": 
["spike"] * len(self._model.populations), + # "format": ["hybrid"] * len(self._model.populations), + # }, + # ) + # quit() def set_base(self): """ Set the baseline currents of the model, found for the current weights to reach - the target firing rates. + the target firing rates. The model is compiled after setting the baselines. """ + ### get the base dict if self._base_dict is None: - self._base_dict = GetBase( - model_normal=self._model, - model_reduced=self._model_reduced.model_reduced, - target_firing_rate_dict=self._target_firing_rate_dict, - weight_dicts=self._weight_dicts, - do_not_config_list=self._do_not_config_list, - init_sampler=self._init_sampler, - max_syn=self._max_syn, - ).base_dict + self.get_base() + + ### initialize the normal model + set the baselines with the base dict + self._init_model_with_fitted_base(base_dict=self._base_dict) + + def get_base(self): + """ + Get the baseline currents of the model. + + Returns: + base_dict (dict[str, float]): + Dict with the baseline currents for each population + """ + ### get the base dict + self._base_dict = GetBase( + model_normal=self._model, + model_reduced=self._model_reduced.model_reduced, + target_firing_rate_dict=self._target_firing_rate_dict, + weight_dicts=self._weight_dicts, + do_not_config_list=self._do_not_config_list, + init_sampler=self._init_sampler, + max_syn=self._max_syn, + ).base_dict + return self._base_dict + + def _init_model_with_fitted_base(self, base_dict: dict[str, float] | None = None): + """ + Initialize the neurons of the model using the init_sampler, set the baseline + currents of the model from the base dict (containing fitted baselines) and the + weights from the weight dicts and compile the model. 
+ """ + ### clear ANNarchy and create the normal model + mf.cnp_clear(functions=False, constants=False) + self._model.create(do_compile=False) + ### set the initial variables of the neurons + for pop_name, init_sampler in self._init_sampler.init_sampler_dict.items(): + init_sampler.set_init_variables(get_population(pop_name)) + ### set the baseline currents + if base_dict is not None: + for pop_name, I_app in base_dict.items(): + setattr(get_population(pop_name), "I_app", I_app) + ### compile the model + self._model.compile() + ### set the weights + for proj_name, weight in self._weight_dicts.weight_dict.items(): + setattr(get_projection(proj_name), "w", weight) class Minimize: - def __init__(self, func, yt, x0, lb, ub, tol, max_it) -> None: + def __init__( + self, func, yt, x0, lb, ub, tol_error, tol_convergence, max_it + ) -> None: """ Args: func (Callable): @@ -1571,23 +1660,117 @@ def __init__(self, func, yt, x0, lb, ub, tol, max_it) -> None: Lower bounds of the input vector ub (np.array): Upper bounds of the input vector - tol (float): - If the maximum absolute error of the output vector is smaller than this - value, the optimization stops + tol_error (float): + If the error is below this value the optimization stops + tol_convergence (float): + If the change of the error stays below this value the optimization stops max_it (int): Maximum number of iterations """ ### TODO continue here, I think it works but neuron models explode x = x0 - error = np.inf + x_old = x0 + y = yt + error = np.ones(x0.shape) * 20 + error_old = np.ones(x0.shape) * 20 it = 0 search_gradient_diff = np.ones(x0.shape) - alpha = 0.1 - while np.max(np.abs(error)) > tol and it < max_it: + alpha = np.ones(x0.shape) + error_list = [] + dx_list = [] + dy_list = [] + x_list = [] + y_list = [] + it_list = [] + + def error_changed(error_list, tol, n=3): + if len(error_list) < 2: + return True + return (np.max(error_list[-n:]) - np.min(error_list[-n:])) > tol + + ### TODO not check if error is 
small enough but if the change of the error + ### converges, for this, check the mean of the last 10 error changes + while it < max_it and error_changed(error_list, tol_convergence): print("\n\nnext iteration") + y_old = y y = func(x) + dx_list.append(x - x_old) + dy_list.append(y - y_old) + ### TODO if x did not change much, use the previous gradient again print(f"x: {x}") - print(f"y: {y}\n") + print(f"y: {y}") + x_list.append(x) + y_list.append(y) + it_list.append(it) + ### here we know the new y(x) + ### check if the error sign changed + error_old = error + error = yt - y + ### if error is small enough stop the optimization + if np.all(np.abs(error) < tol_error): + break + error_sign_changed = np.sign(error) != np.sign(error_old) + print(f"error_sign_changed: {error_sign_changed}") + ### get how much the error (in total, not for individual inputs) changed + error_list.append(np.mean(np.abs(error))) + print(f"error_list: {error_list}\n") + ### if the error sign changed: + ### - TODO check if error is larger as before, if yes -> use again the previous x, if use previous x also compute current y + ### - we calculate (as usual) a new gradient + ### - we reduce alpha, so this time the step is smaller + error_increased = np.abs(error) > np.abs(error_old) + x[error_sign_changed & error_increased] = x_old[ + error_sign_changed & error_increased + ] + if np.any(error_sign_changed & error_increased): + y = func(x) + + # TODO I do not understand this example, this message was printed but x did not change + # next iteration + # x: [12.56441496 40.92615539 18.96717589 90.30010779] + # y: [30.00888889 59.99777778 50.01333333 96.85333333] + # error_sign_changed: [False False False False] + # error_list: [23.759444444444448, 2.517777777777779, 78.90388888888889, 22.96944444444445] + + # x_plus: [13.56441496 40.92615539 18.96717589 90.30010779] + # y_plus: [32.22222222 60.10888889 50.08666667 97.41111111] + + # x_plus: [12.56441496 42.92615539 18.96717589 90.30010779] + # 
y_plus: [30.00888889 62.06666667 50.01333333 91.96666667] + + # x_plus: [12.56441496 40.92615539 19.96717589 90.30010779] + # y_plus: [30.00888889 59.89333333 51.14666667 96.79333333] + + # x_plus: [ 12.56441496 40.92615539 18.96717589 132.96677446] + # y_plus: [ 30.00888889 59.99777778 50.01333333 214.46666667] + + # delta_y: [-8.88888889e-03 2.22222222e-03 -1.33333333e-02 -9.18533333e+01] + # grad: + # [[ 2.21333333e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00] + # [ 1.11111111e-01 2.06888889e+00 -1.04444444e-01 0.00000000e+00] + # [ 7.33333333e-02 0.00000000e+00 1.13333333e+00 0.00000000e+00] + # [ 5.57777778e-01 -4.88666667e+00 -6.00000000e-02 1.17613333e+02]] + # Solution vector x: [-4.01606426e-03 7.08996344e-04 -1.15048429e-02 -7.80934579e-01] + # delta_y from solution: [-8.88888889e-03 2.22222222e-03 -1.33333333e-02 -9.18533333e+01] + + # next iteration + # x: [12.56200532 40.92757338 18.96027299 76.97215765] + # y: [30.01333333 60.00444444 50.00444444 61.82444444] + # error_sign_changed: [False True False False] + # error_list: [23.759444444444448, 2.517777777777779, 78.90388888888889, 22.96944444444445, 14.211666666666666] + + # some errors changed sign and increased + # x: [12.56200532 40.92615539 18.96027299 76.97215765] + # y: [30.01333333 60.02 50.00444444 61.86 ] + + print("some errors changed sign and increased") + print(f"x: {x}") + print(f"y: {y}\n") + x_list.append(x) + y_list.append(y) + it_list.append(it) + alpha[error_sign_changed] /= 2 + alpha[~error_sign_changed] += (1 - alpha[~error_sign_changed]) / 5 ### calculate the gradient i.e. 
change of the output values for each input grad = np.zeros((yt.shape[0], x0.shape[0])) for i in range(len(x0)): @@ -1625,22 +1808,56 @@ def __init__(self, func, yt, x0, lb, ub, tol, max_it) -> None: # Output the results print("Solution vector x:", solution) - print("Residuals:", residuals) - print("Rank of matrix A:", rank) - print("Singular values of A:", s) # Calculate the matrix-vector product Ax Ax = np.dot(A, solution) # Output the matrix-vector product and compare with b - print("Matrix-vector product Ax:", Ax) - print("Original vector b:", b) + print("delta_y from solution:", Ax) ### solution contains the info how much each input should change (how many ### times the change of gradient is needed to reach the target values) + x_old = x x = x + solution * search_gradient_diff * alpha it += 1 + self.x = x + self.success = np.all(np.abs(error) < tol_error) + + x_arr = np.array(x_list) + y_arr = np.array(y_list) + it_arr = np.array(it_list) + + plt.close("all") + plt.figure() + for idx in range(4): + ax = plt.subplot(4, 1, idx + 1) + ### plot the x values + plt.plot(it_arr, x_arr[:, idx]) + plt.ylabel(f"x{idx}") + ### second y axis on the right for the y values + ax2 = ax.twinx() + ax2.plot(it_arr, y_arr[:, idx], color="red") + ax2.set_ylabel(f"y{idx}", color="red") + plt.xlabel("iteration") + plt.tight_layout() + plt.savefig("optimization.png") + + plt.close("all") + plt.figure() + dx_arr = x_arr[1:] - x_arr[:-1] + dx_ausgehend_von = x_arr[:-1] + dy_arr = y_arr[1:] - y_arr[:-1] + dy_ausgehend_von = y_arr[:-1] + for idx in range(4): + ax = plt.subplot(4, 1, idx + 1) + ### plot the x values + plt.plot(dx_ausgehend_von[:, idx], dy_arr[:, idx] / dx_arr[:, idx]) + plt.ylabel(f"dy{idx}/dx{idx}") + plt.xlabel("x") + plt.tight_layout() + plt.savefig("dy_dx_asugehend_x.png") + class GetBase: def __init__( @@ -1701,7 +1918,7 @@ def _set_model_weights(self): def _prepare_get_base(self): ### clear ANNarchy - mf.cnp_clear(functions=False, neurons=True, synapses=True, 
constants=False) + mf.cnp_clear(functions=False, constants=False) ### create and compile the model self._model_reduced.create() ### create monitors for recording the spikes of all populations @@ -1738,59 +1955,31 @@ def _get_base(self): Perform the optimization to find the base currents for the target firing rates. Returns: - optimized_inputs (np.array): - Optimized input currents + base_dict (dict): + Dict with the base currents for each population """ ### Perform the optimization using Minimize class - Minimize( + result = Minimize( func=self._get_firing_rate, yt=self._target_firing_rate_arr, x0=np.array(self._x0), lb=np.array(self._lb), ub=np.array(self._ub), - tol=1, + tol_error=1, + tol_convergence=0.1, max_it=20, ) - ### Perform the optimization using L-BFGS-B method - # result = minimize( - # fun=self._objective_function, - # x0=self._x0, - # method="L-BFGS-B", - # bounds=Bounds(self._lb, self._ub), - # ) - - # ### Perform the optimization using Powell method - # result = minimize(self._objective_function, self._x0, method='Powell') - - # ### Optimized input values - # optimized_inputs = result.x - - # print(f"Optimized inputs: {optimized_inputs}") - - # ### Perform the optimization using DeapCma method - # ### define lower bounds of paramters to optimize - # lb = np.array(self._lb) - - # ### define upper bounds of paramters to optimize - # ub = np.array(self._ub) - - # ### create an "minimal" instance of the DeapCma class - # deap_cma = ef.DeapCma( - # lower=lb, - # upper=ub, - # evaluate_function=self.objective_function_deap, - # ) - - # ### run the optimization - # deap_cma_result = deap_cma.run(max_evals=1000) - - # for key, val in deap_cma_result.items(): - # if key in ["logbook", "deap_pop"]: - # continue - # print(f"{key}: {val}") - quit() + optimized_inputs = result.x + if not result.success: + sf.Logger().log("Optimization failed, target firing rates not reached!") + print("Optimization failed, target firing rates not reached!") + base_dict = { + 
pop_name: optimized_inputs[idx] + for idx, pop_name in enumerate(self._pop_names_config) + } + return base_dict def _objective_function_deap(self, population): """ @@ -1849,9 +2038,11 @@ def _get_firing_rate(self, I_app_list: list[float]): ### for the spike dict we need the "_reduced" suffix spike_dict = results.recordings[0][f"{pop_name}_reduced;spike"] t, _ = raster_plot(spike_dict) + ### only take spikes after the first 500 ms + t = t[t > 500] nbr_spikes = len(t) ### divide number of spikes by the number of neurons and the duration in s - rate = nbr_spikes / (5.0 * get_population(f"{pop_name}_reduced").size) + rate = nbr_spikes / (4.5 * get_population(f"{pop_name}_reduced").size) rate_list.append(rate) rate_dict[pop_name] = rate # sf.Logger().log(f"I_app_dict: {I_app_dict}") diff --git a/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py b/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py index 5a548d5..5830cde 100644 --- a/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py +++ b/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py @@ -39,6 +39,7 @@ def BGM_part_function(params): ) cor_exc.tau_up = params["cor_exc.tau_up"] cor_exc.tau_down = params["cor_exc.tau_down"] + cor_exc.rates = params["cor_exc.rates"] cor_inh = Population( params["cor_inh.size"], poisson_neuron_up_down, @@ -46,6 +47,7 @@ def BGM_part_function(params): ) cor_inh.tau_up = params["cor_inh.tau_up"] cor_inh.tau_down = params["cor_inh.tau_down"] + cor_inh.rates = params["cor_inh.rates"] ### BG Populations stn = Population( params["stn.size"], @@ -223,9 +225,11 @@ def BGM_part_function(params): params["cor_exc.size"] = 100 params["cor_exc.tau_up"] = 10 params["cor_exc.tau_down"] = 30 + params["cor_exc.rates"] = 15 params["cor_inh.size"] = 100 params["cor_inh.tau_up"] = 10 params["cor_inh.tau_down"] = 30 + params["cor_inh.rates"] = 30 ### BG Populations params["snr.size"] = 100 params["snr.a"] = 0.005 @@ -349,6 +353,7 
@@ def BGM_part_function(params): ### model configurator should get target resting-state firing rates for the ### model populations one wants to configure and their afferents as input + ### TODO allow for target range target_firing_rate_dict = { "cor_exc": 15, "cor_inh": 30, @@ -359,13 +364,11 @@ def BGM_part_function(params): } do_not_config_list = ["cor_exc", "cor_inh"] - ### TODO for the do not config populations: check if the populations have the - ### given rates, if not, maybe print warning - ### initialize model_configurator model_conf = ModelConfigurator( - model, - target_firing_rate_dict, + model=model, + target_firing_rate_dict=target_firing_rate_dict, + max_psp=0.7, do_not_config_list=do_not_config_list, print_guide=True, I_app_variable="I_app", @@ -377,10 +380,10 @@ def BGM_part_function(params): ### set syn load model_conf.set_syn_load( syn_load_dict={ - "stn": {"ampa": 0.0, "gaba": 0.0}, - "snr": {"ampa": 0.0, "gaba": 0.0}, - "gpe": {"ampa": 0.0}, - "thal": {"gaba": 0.0}, + "stn": {"ampa": 1.0, "gaba": 1.0}, + "snr": {"ampa": 1.0, "gaba": 1.0}, + "gpe": {"ampa": 1.0}, + "thal": {"gaba": 1.0}, }, syn_contribution_dict={ "stn": {"ampa": {"cor_exc__stn": 1.0}, "gaba": {"cor_inh__stn": 1.0}}, @@ -406,45 +409,11 @@ def BGM_part_function(params): # } # ) - I_base_dict = model_conf.set_base() - quit() - - ### or define synaptic load of populations - # synaptic_load_dict = { - # "stn": [0.3, 0.3], - # "gpe": [0.4], - # "snr": [0.5, 0.3], - # "thal": [0.1], - # } - # ### and define the contributions of their afferent projections - # synaptic_contribution_dict = {"snr": {"gaba": {"gpe__snr": 0.7, "snr__snr": 0.3}}} - # synaptic_contribution_dict = model_conf.set_syn_load( - # synaptic_load_dict, - # synaptic_contribution_dict, - # ) - - ### after setting the weights i.e. 
the synaptic load/contributions - ### get the baseline currents - I_base_dict = model_conf.set_base(I_base_variable="base_mean") - print("user I_base:") + I_base_dict = model_conf.get_base() + print("I_base:") print(I_base_dict) - print("model cor_stn_weight:") - print(get_projection("cor_exc__stn").w) - for pop_name in model.populations: - if "cor" in pop_name: - continue - get_population(pop_name).rate_base_noise = 10 - get_population(pop_name).base_noise = max( - [ - model_conf.I_app_max_dict[pop_name] * 0.02, - abs(get_population(pop_name).base_mean[0]) * 0.02, - ] - ) - print(f"pop_name: {pop_name}") - print(f"base_mean: {get_population(pop_name).base_mean[0]}") - print(f"base_noise: {get_population(pop_name).base_noise[0]}") - print(f"rate_base_noise: {get_population(pop_name).rate_base_noise[0]}\n") + model_conf.set_base() ### do a test simulation mon = Monitors( @@ -525,8 +494,3 @@ def BGM_part_function(params): "15;thal;g_gaba;line", ], ) - - # TODO - # it seems that there are problems with snr - # it even gets wotse if I deactivate the lateral inhib - # maybe check which g_ampa, g_gaba are expected based on weights and which actually are present diff --git a/src/CompNeuroPy/neuron_models/final_models/izhikevich_2003_like_nm.py b/src/CompNeuroPy/neuron_models/final_models/izhikevich_2003_like_nm.py index 30096bf..2647fa9 100644 --- a/src/CompNeuroPy/neuron_models/final_models/izhikevich_2003_like_nm.py +++ b/src/CompNeuroPy/neuron_models/final_models/izhikevich_2003_like_nm.py @@ -581,11 +581,11 @@ def __init__( ### power of signal tau_power * dpower_I_signal/dt = I_signal**2 - power_I_signal ### membrane potential and recovery variable - dv/dt = n2 * v * v + n1 * v + n0 - u + I + dv/dt = n2 * v * v + n1 * v + n0 - u + I : min=-100, max=0 du/dt = a * (b * v - u) """, spike=""" - v >= 30 + v >= 0 """, reset=""" v = c From 09a3dfa9a037afc148432f394a823dff79419bfb Mon Sep 17 00:00:00 2001 From: olmai Date: Mon, 17 Jun 2024 09:07:05 +0200 Subject: [PATCH 
36/39] moved get_spike_features_of_chunk and get_spike_features_loss_of_chunk to analysis_functions --- src/CompNeuroPy/analysis_functions.py | 178 +++++++++++++++++++++++++ src/CompNeuroPy/extra_functions.py | 180 +------------------------- src/CompNeuroPy/system_functions.py | 2 + 3 files changed, 181 insertions(+), 179 deletions(-) diff --git a/src/CompNeuroPy/analysis_functions.py b/src/CompNeuroPy/analysis_functions.py index eb96744..ba76ea4 100644 --- a/src/CompNeuroPy/analysis_functions.py +++ b/src/CompNeuroPy/analysis_functions.py @@ -7,6 +7,7 @@ from CompNeuroPy import system_functions as sf from CompNeuroPy import extra_functions as ef from CompNeuroPy.monitors import RecordingTimes +from CompNeuroPy.experiment import CompNeuroExp from scipy.interpolate import interp1d from multiprocessing import Process from typingchecker import check_types @@ -2674,3 +2675,180 @@ def _matrix_plot(self, compartment, variable, time_arr, data_arr, plot_idx, mean plt.title( f"Variable {variable} of {compartment} ({nr_neurons}) [{ef.sci(np.nanmin(data_arr))}, {ef.sci(np.nanmax(data_arr))}]" ) + + +def get_spike_features_of_chunk(chunk: int, results: CompNeuroExp._ResultsCl): + """ + Get the features of the spikes of a chunk of the results of a CompNeuroExp. + + !!! warning + The results data dict has to contain the population name as key "pop_name". + The spikes have to be recorded. 
+ + Args: + chunk (int): + index of the chunk + results (CompNeuroExp._ResultsCl): + results of the experiment + + Returns: + spike_features (dict): + dictionary with the features of the spikes + """ + ### get number of spikes + spike_dict = results.recordings[chunk][f"{results.data['pop_name']};spike"] + t, _ = my_raster_plot(spike_dict) + nbr_spikes = len(t) + ### get time of 1st, 2nd, 3rd spike + if nbr_spikes > 0: + time_1st_spike = t[0] * results.recordings[chunk]["dt"] + if nbr_spikes > 1: + time_2nd_spike = t[1] * results.recordings[chunk]["dt"] + if nbr_spikes > 2: + time_3rd_spike = t[2] * results.recordings[chunk]["dt"] + else: + time_3rd_spike = None + else: + time_2nd_spike = None + time_3rd_spike = None + else: + time_1st_spike = None + time_2nd_spike = None + time_3rd_spike = None + ### get time of last spike + if nbr_spikes > 0: + time_last_spike = t[-1] * results.recordings[chunk]["dt"] + else: + time_last_spike = None + ### get CV of ISI + if nbr_spikes > 1: + isi = np.diff(t * results.recordings[chunk]["dt"]) + cv_isi = np.std(isi) / np.mean(isi) + else: + cv_isi = None + + return { + "spike_count": nbr_spikes, + "time_to_first_spike": time_1st_spike, + "time_to_second_spike": time_2nd_spike, + "time_to_third_spike": time_3rd_spike, + "time_to_last_spike": time_last_spike, + "ISI_CV": cv_isi, + } + + +def get_spike_features_loss_of_chunk( + chunk: int, + results1: CompNeuroExp._ResultsCl, + results2: CompNeuroExp._ResultsCl, + chunk2: None | int = None, + feature_list: list[str] | None = None, +): + """ + Calculate the loss/difference between the spike features of two chunks of the + results of CompNeuroExp. + + !!! warning + The results data dict has to contain the population name as key "pop_name". + The spikes have to be recorded. 
+ + Args: + chunk (int): + index of the chunk + results1 (CompNeuroExp._ResultsCl): + results of the first experiment + results2 (CompNeuroExp._ResultsCl): + results of the second experiment + chunk2 (None|int): + index of the chunk of the second results, if None the same as chunk + feature_list (list[str]|None): + list of feature names which should be used to calculate the loss, if None + the default list is used + + Returns: + loss (float): + loss/difference between the spike features of the two chunks + """ + verbose = False + if chunk2 is None: + chunk2 = chunk + + ### get recording duration of chunk + nbr_periods = results1.recording_times.nbr_periods( + chunk=chunk, compartment=results1.data["pop_name"] + ) + chunk_duration_ms = 0 + chunk_duration_idx = 0 + for period in range(nbr_periods): + chunk_duration_ms += np.abs( + np.diff( + results1.recording_times.time_lims( + chunk=chunk, compartment=results1.data["pop_name"], period=period + ) + ) + ) + chunk_duration_idx += np.abs( + np.diff( + results1.recording_times.idx_lims( + chunk=chunk, compartment=results1.data["pop_name"], period=period + ) + ) + ) + + ### set a plausible "maximum" absolute difference for each feature + diff_max: dict[str, float] = { + "spike_count": chunk_duration_idx, + "time_to_first_spike": chunk_duration_ms, + "time_to_second_spike": chunk_duration_ms, + "time_to_third_spike": chunk_duration_ms, + "time_to_last_spike": chunk_duration_ms, + "ISI_CV": 1, + } + if verbose: + print(f"\ndiff_max: {diff_max}") + + ### set a plausible "close" absolute difference for each feature + diff_close: dict[str, float] = { + "spike_count": np.ceil(chunk_duration_ms / 200), + "time_to_first_spike": np.clip(chunk_duration_ms * 0.1, 5, 50), + "time_to_second_spike": np.clip(chunk_duration_ms * 0.1, 5, 50), + "time_to_third_spike": np.clip(chunk_duration_ms * 0.1, 5, 50), + "time_to_last_spike": np.clip(chunk_duration_ms * 0.1, 5, 50), + "ISI_CV": 0.1, + } + if verbose: + print(f"\ndiff_close: 
{diff_close}\n") + + ### catch if features from feature_list are not supported + if feature_list is None: + feature_list = list(diff_max.keys()) + features_not_supported = [ + feature for feature in feature_list if feature not in diff_max + ] + if features_not_supported: + raise ValueError(f"Features not supported: {features_not_supported}") + + ### calculate and return the mean of the differences of the features + features_1 = get_spike_features_of_chunk(chunk, results1) + features_2 = get_spike_features_of_chunk(chunk2, results2) + + if verbose: + print(f"\nfeatures_1: {features_1}\n") + print(f"features_2: {features_2}\n") + loss = 0.0 + for feature in feature_list: + ### if both features are None use 0 + if features_1[feature] is None and features_2[feature] is None: + diff = 0.0 + ### if single feature is None use diff_max + elif features_1[feature] is None or features_2[feature] is None: + diff = diff_max[feature] + else: + diff = float(np.absolute(features_1[feature] - features_2[feature])) + ### scale the difference by diff_close and add to loss + loss += diff / diff_close[feature] + loss /= len(feature_list) + + if verbose: + print(f"loss: {loss}") + return loss diff --git a/src/CompNeuroPy/extra_functions.py b/src/CompNeuroPy/extra_functions.py index 234f8b8..1a39bec 100644 --- a/src/CompNeuroPy/extra_functions.py +++ b/src/CompNeuroPy/extra_functions.py @@ -6,7 +6,6 @@ from CompNeuroPy import system_functions as sf from CompNeuroPy import model_functions as mf from CompNeuroPy.generate_model import CompNeuroModel -from CompNeuroPy.experiment import CompNeuroExp import matplotlib as mpl import matplotlib.pyplot as plt from matplotlib import cm @@ -23,7 +22,7 @@ from ANNarchy import Neuron, Population, simulate, setup, get_population from sympy import symbols, Symbol, solve, sympify, Eq, lambdify, factor from scipy.interpolate import griddata -from scipy.optimize import brentq, minimize_scalar +from scipy.optimize import brentq import re from typingchecker 
import check_types import warnings @@ -2352,183 +2351,6 @@ def efel_loss(trace1, trace2, feature_list): return loss -def get_spike_features_of_chunk(chunk: int, results: CompNeuroExp._ResultsCl): - """ - Get the features of the spikes of a chunk of the results of a CompNeuroExp. - - !!! warning - The results data dict has to contain the population name as key "pop_name". - The spikes have to be recorded. - - Args: - chunk (int): - index of the chunk - results (CompNeuroExp._ResultsCl): - results of the experiment - - Returns: - spike_features (dict): - dictionary with the features of the spikes - """ - ### get number of spikes - spike_dict = results.recordings[chunk][f"{results.data['pop_name']};spike"] - t, _ = af.my_raster_plot(spike_dict) - nbr_spikes = len(t) - ### get time of 1st, 2nd, 3rd spike - if nbr_spikes > 0: - time_1st_spike = t[0] * results.recordings[chunk]["dt"] - if nbr_spikes > 1: - time_2nd_spike = t[1] * results.recordings[chunk]["dt"] - if nbr_spikes > 2: - time_3rd_spike = t[2] * results.recordings[chunk]["dt"] - else: - time_3rd_spike = None - else: - time_2nd_spike = None - time_3rd_spike = None - else: - time_1st_spike = None - time_2nd_spike = None - time_3rd_spike = None - ### get time of last spike - if nbr_spikes > 0: - time_last_spike = t[-1] * results.recordings[chunk]["dt"] - else: - time_last_spike = None - ### get CV of ISI - if nbr_spikes > 1: - isi = np.diff(t * results.recordings[chunk]["dt"]) - cv_isi = np.std(isi) / np.mean(isi) - else: - cv_isi = None - - return { - "spike_count": nbr_spikes, - "time_to_first_spike": time_1st_spike, - "time_to_second_spike": time_2nd_spike, - "time_to_third_spike": time_3rd_spike, - "time_to_last_spike": time_last_spike, - "ISI_CV": cv_isi, - } - - -def get_spike_features_loss_of_chunk( - chunk: int, - results1: CompNeuroExp._ResultsCl, - results2: CompNeuroExp._ResultsCl, - chunk2: None | int = None, - feature_list: list[str] | None = None, -): - """ - Calculate the loss/difference between 
the spike features of two chunks of the - results of CompNeuroExp. - - !!! warning - The results data dict has to contain the population name as key "pop_name". - The spikes have to be recorded. - - Args: - chunk (int): - index of the chunk - results1 (CompNeuroExp._ResultsCl): - results of the first experiment - results2 (CompNeuroExp._ResultsCl): - results of the second experiment - chunk2 (None|int): - index of the chunk of the second results, if None the same as chunk - feature_list (list[str]|None): - list of feature names which should be used to calculate the loss, if None - the default list is used - - Returns: - loss (float): - loss/difference between the spike features of the two chunks - """ - verbose = False - if chunk2 is None: - chunk2 = chunk - - ### get recording duration of chunk - nbr_periods = results1.recording_times.nbr_periods( - chunk=chunk, compartment=results1.data["pop_name"] - ) - chunk_duration_ms = 0 - chunk_duration_idx = 0 - for period in range(nbr_periods): - chunk_duration_ms += np.abs( - np.diff( - results1.recording_times.time_lims( - chunk=chunk, compartment=results1.data["pop_name"], period=period - ) - ) - ) - chunk_duration_idx += np.abs( - np.diff( - results1.recording_times.idx_lims( - chunk=chunk, compartment=results1.data["pop_name"], period=period - ) - ) - ) - - ### set a plausible "maximum" absolute difference for each feature - diff_max: dict[str, float] = { - "spike_count": chunk_duration_idx, - "time_to_first_spike": chunk_duration_ms, - "time_to_second_spike": chunk_duration_ms, - "time_to_third_spike": chunk_duration_ms, - "time_to_last_spike": chunk_duration_ms, - "ISI_CV": 1, - } - if verbose: - print(f"\ndiff_max: {diff_max}") - - ### set a plausible "close" absolute difference for each feature - diff_close: dict[str, float] = { - "spike_count": np.ceil(chunk_duration_ms / 200), - "time_to_first_spike": np.clip(chunk_duration_ms * 0.1, 5, 50), - "time_to_second_spike": np.clip(chunk_duration_ms * 0.1, 5, 50), - 
"time_to_third_spike": np.clip(chunk_duration_ms * 0.1, 5, 50), - "time_to_last_spike": np.clip(chunk_duration_ms * 0.1, 5, 50), - "ISI_CV": 0.1, - } - if verbose: - print(f"\ndiff_close: {diff_close}\n") - - ### catch if features from feature_list are not supported - if feature_list is None: - feature_list = list(diff_max.keys()) - features_not_supported = [ - feature for feature in feature_list if feature not in diff_max - ] - if features_not_supported: - raise ValueError(f"Features not supported: {features_not_supported}") - - ### calculate and return the mean of the differences of the features - features_1 = get_spike_features_of_chunk(chunk, results1) - features_2 = get_spike_features_of_chunk(chunk2, results2) - - if verbose: - print(f"\nfeatures_1: {features_1}\n") - print(f"features_2: {features_2}\n") - loss = 0.0 - for feature in feature_list: - ### if both features are None use 0 - if features_1[feature] is None and features_2[feature] is None: - diff = 0.0 - ### if single feature is None use diff_max - elif features_1[feature] is None or features_2[feature] is None: - diff = diff_max[feature] - else: - diff = float(np.absolute(features_1[feature] - features_2[feature])) - ### scale the difference by diff_close and add to loss - loss += diff / diff_close[feature] - loss /= len(feature_list) - - if verbose: - print(f"loss: {loss}") - return loss - - class _Waiter: """ Class that waits for a certain duration while the rest of the code continues to run. 
diff --git a/src/CompNeuroPy/system_functions.py b/src/CompNeuroPy/system_functions.py index ce4121e..efbfa08 100644 --- a/src/CompNeuroPy/system_functions.py +++ b/src/CompNeuroPy/system_functions.py @@ -571,6 +571,8 @@ def log(self, txt): txt (str): Text to be logged """ + if self._log_file is None: + return _, call_stack = self.trace_calls() From 6c1e444ba6279e992f870d094b6bd5f887b156bd Mon Sep 17 00:00:00 2001 From: olmai Date: Mon, 17 Jun 2024 09:11:56 +0200 Subject: [PATCH 37/39] updated doc strings --- src/CompNeuroPy/extra_functions.py | 8 ++++++++ src/CompNeuroPy/system_functions.py | 4 ++-- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/src/CompNeuroPy/extra_functions.py b/src/CompNeuroPy/extra_functions.py index 1a39bec..c5bcdbd 100644 --- a/src/CompNeuroPy/extra_functions.py +++ b/src/CompNeuroPy/extra_functions.py @@ -2442,10 +2442,18 @@ class RNG: """ def __init__(self, seed): + """ + Args: + seed (int): + Seed for the random number generator. + """ self.rng = np.random.default_rng(seed=seed) self._original_seed = seed def reset(self): + """ + Reset the random number generator to the original seed. 
+ """ self.rng.bit_generator.state = np.random.default_rng( seed=self._original_seed ).bit_generator.state diff --git a/src/CompNeuroPy/system_functions.py b/src/CompNeuroPy/system_functions.py index efbfa08..125147d 100644 --- a/src/CompNeuroPy/system_functions.py +++ b/src/CompNeuroPy/system_functions.py @@ -574,7 +574,7 @@ def log(self, txt): if self._log_file is None: return - _, call_stack = self.trace_calls() + _, call_stack = self._trace_calls() if call_stack == self._call_stack: txt = f"{textwrap.indent(str(txt), ' ')}" @@ -586,7 +586,7 @@ def log(self, txt): with open(self._log_file, "a") as f: print(txt, file=f) - def trace_calls(self): + def _trace_calls(self): # Get the call stack stack = inspect.stack() From d5bc89019877728e39d89381a247fb6ecaedccdb Mon Sep 17 00:00:00 2001 From: olmai Date: Mon, 17 Jun 2024 10:36:05 +0200 Subject: [PATCH 38/39] cleaned up code --- docs/built_in/neuron_models.md | 8 + docs/main/generate_models.md | 38 +- docs/main/generate_simulations.md | 124 ++-- docs/main/monitors_recordings.md | 82 +-- docs/main/optimize_neuron.md | 241 +++---- src/CompNeuroPy/__init__.py | 15 +- src/CompNeuroPy/analysis_functions.py | 826 +--------------------- src/CompNeuroPy/dbs.py | 10 +- src/CompNeuroPy/experiment.py | 4 - src/CompNeuroPy/extra_functions.py | 21 +- src/CompNeuroPy/full_models/bgm_22/bgm.py | 8 +- src/CompNeuroPy/generate_model.py | 4 - src/CompNeuroPy/generate_simulation.py | 4 - src/CompNeuroPy/monitors.py | 10 +- 14 files changed, 270 insertions(+), 1125 deletions(-) diff --git a/docs/built_in/neuron_models.md b/docs/built_in/neuron_models.md index e03825f..e1e1225 100644 --- a/docs/built_in/neuron_models.md +++ b/docs/built_in/neuron_models.md @@ -67,6 +67,10 @@ options: heading_level: 3 show_root_full_path: false +::: CompNeuroPy.neuron_models.final_models.izhikevich_2003_like_nm.Izhikevich2003NoisyBaseSNR + options: + heading_level: 3 + show_root_full_path: false ## Izhikevich (2007)-like Neurons ::: 
CompNeuroPy.neuron_models.final_models.izhikevich_2007_like_nm.Izhikevich2007 @@ -106,6 +110,10 @@ heading_level: 3 show_root_full_path: false ::: CompNeuroPy.neuron_models.final_models.izhikevich_2007_like_nm.Izhikevich2007NoisyAmpaOscillating + options: + heading_level: 3 + show_root_full_path: false +::: CompNeuroPy.neuron_models.final_models.izhikevich_2007_like_nm.IzhikevichGolomb options: heading_level: 3 show_root_full_path: false \ No newline at end of file diff --git a/docs/main/generate_models.md b/docs/main/generate_models.md index b2f1956..5cfb3d4 100644 --- a/docs/main/generate_models.md +++ b/docs/main/generate_models.md @@ -5,26 +5,26 @@ One can create a CompNeuroPy-model using the `CompNeuroModel` class. The `CompNe 2. **model creation**: create the ANNarchy objects (populations, projections), i.e., run the `model_creation function` 3. **model compilation**: compile all created models -## Example -
from CompNeuroPy import CompNeuroModel
-my_model = CompNeuroModel(model_creation_function=create_model,  ### the most important part, this function creates the model (populations, projections)
-                          model_kwargs={'a':1, 'b':2},           ### define the two arguments a and b of function create_model
-                          name='my_model',                       ### you can give the model a name
-                          description='my simple example model', ### you can give the model a description
-                          do_create=True,                        ### create the model directly
-                          do_compile=True,                       ### let the model (and all models created before) compile directly
-                          compile_folder_name='my_model')        ### name of the saved compilation folder
-
+!!! example +
from CompNeuroPy import CompNeuroModel
+    my_model = CompNeuroModel(model_creation_function=create_model,  ### the most important part, this function creates the model (populations, projections)
+                            model_kwargs={'a':1, 'b':2},           ### define the two arguments a and b of function create_model
+                            name='my_model',                       ### you can give the model a name
+                            description='my simple example model', ### you can give the model a description
+                            do_create=True,                        ### create the model directly
+                            do_compile=True,                       ### let the model (and all models created before) compile directly
+                            compile_folder_name='my_model')        ### name of the saved compilation folder
+    
-The following function could be the corresponding model_creation_function: -
from ANNarchy import Population, Izhikevich
-def create_model(a, b):
-    pop = Population(geometry=a, neuron=Izhikevich, name='Izh_pop_a') ### first population, size a
-    pop.b = 0                                                         ### some parameter adjustment
-    Population(geometry=b, neuron=Izhikevich, name='Izh_pop_b')       ### second population, size b
-
-Here, two populations are created (both use built-in Izhikevich neuron model of ANNarchy). The function does not require a return value. It is important that all populations and projections have unique names. + The following function could be the corresponding model_creation_function: +
from ANNarchy import Population, Izhikevich
+    def create_model(a, b):
+        pop = Population(geometry=a, neuron=Izhikevich, name='Izh_pop_a') ### first population, size a
+        pop.b = 0                                                         ### some parameter adjustment
+        Population(geometry=b, neuron=Izhikevich, name='Izh_pop_b')       ### second population, size b
+    
+ Here, two populations are created (both use built-in Izhikevich neuron model of ANNarchy). The function does not require a return value. It is important that all populations and projections have unique names. -A more detailed example is available in the [Examples](../examples/generate_models.md). + A more detailed example is available in the [Examples](../examples/generate_models.md). ::: CompNeuroPy.generate_model.CompNeuroModel \ No newline at end of file diff --git a/docs/main/generate_simulations.md b/docs/main/generate_simulations.md index 1fc6fa8..69348af 100644 --- a/docs/main/generate_simulations.md +++ b/docs/main/generate_simulations.md @@ -1,38 +1,38 @@ ## Introduction A CompNeuroPy-simulation can be created using the [`CompNeuroSim`](#CompNeuroPy.generate_simulation.CompNeuroSim) class. Similar to the [`CompNeuroModel`](generate_models.md#CompNeuroPy.generate_model.CompNeuroModel) class, a function must be defined that contains the actual simulation (the _simulation_function_) and the [`CompNeuroSim`](#CompNeuroPy.generate_simulation.CompNeuroSim) object adds a clear framework. A [`CompNeuroSim`](#CompNeuroPy.generate_simulation.CompNeuroSim) is first initialized and can then be run multiple times. 
-## Example: -```python -from CompNeuroPy import CompNeuroSim -my_simulation = CompNeuroSim(simulation_function=some_simulation, ### the most important part, this function defines the simulation - simulation_kwargs={'pop':pop1, 'duration':100}, ### define the two arguments pop and duration of simulation_function - name='my_simulation', ### you can give the simulation a name - description='my simple example simulation', ### you can give the simulation a description - requirements=[req], ### a list of requirements for the simulation (here only a single requirement) - kwargs_warning=True, ### should a warning be printed if simulation kwargs change in future runs - monitor_object = mon) ### the Monitors object which is used to record variables -``` - -A possible _simulation_function_ could be: -```python -def some_simulation(pop, duration=1): - get_population(pop).a = 5 ### adjust paramter a of pop - get_population(pop).b = 5 ### adjust paramter b of pop - simulate(duration) ### simulate the duration in ms - - ### return some info - ### will later be accessible for each run - return {'paramter a': a, 'paramter b': b, 'a_x_duration': a*duration} -``` - -And a corresponding requirement could be: -```python -from CompNeuroPy import ReqPopHasAttr -req = {'req':ReqPopHasAttr, 'pop':pop1, 'attr':['a', 'b']} -``` -Here, one checks if the population _pop1_ contains the attributes _a_ and _b_. The [`ReqPopHasAttr`](../additional/simulation_requirements.md#CompNeuroPy.simulation_requirements.ReqPopHasAttr) is a built-in requirements-class of CompNeuroPy (see below). - -A more detailed example is available in the [Examples](../examples/run_and_monitor_simulations.md). +!!! 
example + ```python + from CompNeuroPy import CompNeuroSim + my_simulation = CompNeuroSim(simulation_function=some_simulation, ### the most important part, this function defines the simulation + simulation_kwargs={'pop':pop1, 'duration':100}, ### define the two arguments pop and duration of simulation_function + name='my_simulation', ### you can give the simulation a name + description='my simple example simulation', ### you can give the simulation a description + requirements=[req], ### a list of requirements for the simulation (here only a single requirement) + kwargs_warning=True, ### should a warning be printed if simulation kwargs change in future runs + monitor_object = mon) ### the Monitors object which is used to record variables + ``` + + A possible _simulation_function_ could be: + ```python + def some_simulation(pop, duration=1): + get_population(pop).a = 5 ### adjust paramter a of pop + get_population(pop).b = 5 ### adjust paramter b of pop + simulate(duration) ### simulate the duration in ms + + ### return some info + ### will later be accessible for each run + return {'paramter a': a, 'paramter b': b, 'a_x_duration': a*duration} + ``` + + And a corresponding requirement could be: + ```python + from CompNeuroPy import ReqPopHasAttr + req = {'req':ReqPopHasAttr, 'pop':pop1, 'attr':['a', 'b']} + ``` + Here, one checks if the population _pop1_ contains the attributes _a_ and _b_. The [`ReqPopHasAttr`](../additional/simulation_requirements.md#CompNeuroPy.simulation_requirements.ReqPopHasAttr) is a built-in requirements-class of CompNeuroPy (see below). + + A more detailed example is available in the [Examples](../examples/run_and_monitor_simulations.md). ## Simulation information The function _simulation_info()_ returns a [`SimInfo`](#CompNeuroPy.generate_simulation.SimInfo) object which contains usefull information about the simulation runs (see below). 
The [`SimInfo`](#CompNeuroPy.generate_simulation.SimInfo) object also provides usefull analysis functions associated with specific simulation functions. Currently it provides the _get_current_arr()_ which returns arrays containing the input current for each time step of the built-in simulation functions _current_step()_, _current_stim()_, and _current_ramp()_. @@ -40,37 +40,37 @@ The function _simulation_info()_ returns a [`SimInfo`](#CompNeuroPy.generate_sim ## Simulation functions Just define a classic ANNarchy simulation in a function. Within the functions, the ANNarchy functions _get_population()_ and _get_projection()_ can be used to access the populations and projections using the population and projection names provided by a [`CompNeuroModel`](generate_models.md#CompNeuroPy.generate_model.CompNeuroModel). The return value of the simulation function can later be retrieved from the [`SimInfo`](#CompNeuroPy.generate_simulation.SimInfo) object (the _info_ attribute) in a list containing the return value for each run of the simulation. -### Example: -```python -from ANNarchy import simulate, get_population - -def current_step(pop, t1=500, t2=500, a1=0, a2=100): - """ - stimulates a given population in two periods with two input currents +!!! 
example + ```python + from ANNarchy import simulate, get_population + + def current_step(pop, t1=500, t2=500, a1=0, a2=100): + """ + stimulates a given population in two periods with two input currents + + pop: population name of population, which should be stimulated with input current + neuron model of population has to contain "I_app" as input current in pA + t1/t2: times in ms before/after current step + a1/a2: current amplitudes before/after current step in pA + """ - pop: population name of population, which should be stimulated with input current - neuron model of population has to contain "I_app" as input current in pA - t1/t2: times in ms before/after current step - a1/a2: current amplitudes before/after current step in pA - """ - - ### save prev input current - I_prev = get_population(pop).I_app - - ### first/pre current step simulation - get_population(pop).I_app = a1 - simulate(t1) - - ### second/post current step simulation - get_population(pop).I_app = a2 - simulate(t2) - - ### reset input current to previous value - get_population(pop).I_app = I_prev - - ### return some additional information which could be usefull - return {'duration':t1+t2} -``` + ### save prev input current + I_prev = get_population(pop).I_app + + ### first/pre current step simulation + get_population(pop).I_app = a1 + simulate(t1) + + ### second/post current step simulation + get_population(pop).I_app = a2 + simulate(t2) + + ### reset input current to previous value + get_population(pop).I_app = I_prev + + ### return some additional information which could be usefull + return {'duration':t1+t2} + ``` ## Requirements In order to perform simulations with models, the models must almost always fulfill certain requirements. For example, if the input current of a population is to be set, this population (or the neuron model) must of course have the corresponding variable. Such preconditions can be tested in advance with the `simulation_requirements` classes. 
They only need to contain a function _run()_ to test the requirements (if requirements are not met, cause an error). In CompNeuroPy predefined [`simulation_requirements`](../additional/simulation_requirements.md) classes are available (CompNeuroPy.simulation_requirements; currently only [`ReqPopHasAttr`](../additional/simulation_requirements.md#CompNeuroPy.simulation_requirements.ReqPopHasAttr)). In the [`CompNeuroSim`](#CompNeuroPy.generate_simulation.CompNeuroSim) class, the requirements are passed as arguments in a list (see above). Each requirement (list entry) must be defined as a dictionary with keys _req_ (the requirement class) and the arguments of the requirement class (e.g., _pop_ and _attr_ for the [`ReqPopHasAttr`](../additional/simulation_requirements.md#CompNeuroPy.simulation_requirements.ReqPopHasAttr)). diff --git a/docs/main/monitors_recordings.md b/docs/main/monitors_recordings.md index c5e1f66..98dd6fc 100644 --- a/docs/main/monitors_recordings.md +++ b/docs/main/monitors_recordings.md @@ -1,64 +1,64 @@ ## Create Monitors -CompNeuroPy provides a [`CompNeuroMonitors`](#CompNeuroPy.monitors.CompNeuroMonitors) class that can be used to easily create and control multiple ANNarchy monitors at once. To create a [`CompNeuroMonitors`](#CompNeuroPy.monitors.CompNeuroMonitors) object, all that is needed is a monitors_dictionary that defines which variables should be recorded for each model component. All populations and projections have to have unique names to work with [`CompNeuroMonitors`](#CompNeuroPy.monitors.CompNeuroMonitors). The keys of the monitor_dictionary are the names of the model components (in example below _"my_pop1"_ and _"my_pop2"_). The key can also include a recording period (the time between two recordings, given after a ";"), e.g. record the variables of _my_pop1_ only every 10 ms would look like this: _'pop;my_pop1;10':['v', 'spike']_. 
The default period is the time step of the simulation for populations and 1000 times the timestep for projections. The values of the monitor_dictionary are lists of all the variables that should be recorded from the corresponding components. The names of components (populations, projections) could be provided by a [`CompNeuroModel`](generate_models.md#CompNeuroPy.generate_model.CompNeuroModel). +CompNeuroPy provides a [`CompNeuroMonitors`](#CompNeuroPy.monitors.CompNeuroMonitors) class that can be used to easily create and control multiple ANNarchy monitors at once. To create a [`CompNeuroMonitors`](#CompNeuroPy.monitors.CompNeuroMonitors) object, all that is needed is a monitors_dictionary that defines which variables should be recorded for each model component. All populations and projections have to have unique names to work with [`CompNeuroMonitors`](#CompNeuroPy.monitors.CompNeuroMonitors). The keys of the monitor_dictionary are the names of the model components (in example below _"my_pop1"_ and _"my_pop2"_). The key can also include a recording period (the time between two recordings, given after a ";"), e.g. record the variables of _my_pop1_ only every 10 ms would look like this: _'my_pop1;10':['v', 'spike']_. The default period is the time step of the simulation for populations and 1000 times the timestep for projections. The values of the monitor_dictionary are lists of all the variables that should be recorded from the corresponding components. The names of components (populations, projections) could be provided by a [`CompNeuroModel`](generate_models.md#CompNeuroPy.generate_model.CompNeuroModel). -### Example: -Here the variables _v_ and _spike_ should be recorded of the population with the name _"my_pop1"_ and the variable _v_ should be recorded from the population with the name _"my_pop2"_: +!!! 
example + Here the variables _v_ and _spike_ should be recorded of the population with the name _"my_pop1"_ and the variable _v_ should be recorded from the population with the name _"my_pop2"_: -```python -from CompNeuroPy import CompNeuroMonitors -monitor_dictionary = {'my_pop1':['v', 'spike'], 'my_pop2':['v']} -mon = CompNeuroMonitors(monitor_dictionary) -``` + ```python + from CompNeuroPy import CompNeuroMonitors + monitor_dictionary = {'my_pop1':['v', 'spike'], 'my_pop2':['v']} + mon = CompNeuroMonitors(monitor_dictionary) + ``` -A full example is available in the [Examples](../examples/monitor_recordings.md). + A full example is available in the [Examples](../examples/monitor_recordings.md). ## Chunks and periods In CompNeuroPy, recordings are divided into so-called chunks and periods. Chunks are simulation sections that are separated by monitor resets (optionally also reset the model). A chunk can consist of several periods. A period represents the time span between the start and pause of a monitor recording. To divide a simulation into chunks and periods, a [`CompNeuroMonitors`](#CompNeuroPy.monitors.CompNeuroMonitors) object provides the three functions _start()_, _pause()_ and _reset()_. At the beginning of a simulation, the monitors do not start automatically which is why the _start()_ function must be called at least once. The _start()_ function can also be used to resume paused recordings. With the function _pause()_ recordings are paused. The function _reset()_ starts a new chunk for the recordings (the end of a chunk is also always the end of a period, i.e. the last period of the corresponding chunk). After calling _reset()_ the monitors remain in their current mode (active or paused). By default _reset()_ also resets the model to the compile status (time = 0) by calling the ANNarchy _reset()_ function and has the same arguments. If the argument _model_ is set to False, the ANNarchy _reset()_ function is not called and only a new chunk is created. 
-### Example: -```python -### first chunk, one period -simulate(100) # 100 ms not recorded -mon.start() # start all monitors -simulate(100) # 100 ms recorded +!!! example + ```python + ### first chunk, one period + simulate(100) # 100 ms not recorded + mon.start() # start all monitors + simulate(100) # 100 ms recorded -### second chunk, two periods -mon.reset() # model reset, beginning of new chunk -simulate(100) # 100 ms recorded (monitors were active before reset --> still active) -mon.pause() # pause all monitors -simulate(100) # 100 ms not recorded -mon.start() # start all monitors -simulate(100) # 100 ms recorded -``` + ### second chunk, two periods + mon.reset() # model reset, beginning of new chunk + simulate(100) # 100 ms recorded (monitors were active before reset --> still active) + mon.pause() # pause all monitors + simulate(100) # 100 ms not recorded + mon.start() # start all monitors + simulate(100) # 100 ms recorded + ``` ## Get recordings The recordings can be obtained from the [`CompNeuroMonitors`](#CompNeuroPy.monitors.CompNeuroMonitors) object using the _get_recordings()_ function. This returns a list of dictionaries (one for each chunk). The dictionaries contain the recorded data defined with the monitor_dictionary at the [`CompNeuroMonitors`](#CompNeuroPy.monitors.CompNeuroMonitors) initialization. In the recordings dictionaries the keys have the following structure: ";variable"; the corresponding dictionary values are the recordings of the respective variable. The dictionaries always contain the time step of the simulation (key = _"dt"_), the periods (time between recorded values) for each component (key = _";period"_) and the attributes of each component (key = _";parameter_dict"_). -### Example: -```python -recordings = mon.get_recordings() -y1 = recordings[0]['my_pop1;v'] ### variable v of my_pop1 from 1st chunk -y2 = recordings[1]['my_pop1;v'] ### variable v of my_pop1 from 2nd chunk -``` +!!! 
example + ```python + recordings = mon.get_recordings() + y1 = recordings[0]['my_pop1;v'] ### variable v of my_pop1 from 1st chunk + y2 = recordings[1]['my_pop1;v'] ### variable v of my_pop1 from 2nd chunk + ``` ## Get recording times In addition to the recordings themselves, recording times can also be obtained from the [`CompNeuroMonitors`](#CompNeuroPy.monitors.CompNeuroMonitors) object, which is very useful for later analyses. With the function _get_recording_times()_ of the [`CompNeuroMonitors`](#CompNeuroPy.monitors.CompNeuroMonitors) object a [`RecordingTimes`](#CompNeuroPy.monitors.RecordingTimes) object can be obtained. From the [`RecordingTimes`](#CompNeuroPy.monitors.RecordingTimes) object one can get time limits (in ms) and coresponding indizes for the recordings. -### Example: -```python -recording_times = mon.get_recording_times() -start_time = recording_times.time_lims(chunk=1, period=1)[0] ### 200 ms -start_idx = recording_times.idx_lims(chunk=1, period=1)[0] ### 1000, if dt == 0.1 -end_time = recording_times.time_lims(chunk=1, period=1)[1] ### 300 ms -end_idx = recording_times.idx_lims(chunk=1, period=1)[1] ### 2000 -``` +!!! 
example + ```python + recording_times = mon.get_recording_times() + start_time = recording_times.time_lims(chunk=1, period=1)[0] ### 200 ms + start_idx = recording_times.idx_lims(chunk=1, period=1)[0] ### 1000, if dt == 0.1 + end_time = recording_times.time_lims(chunk=1, period=1)[1] ### 300 ms + end_idx = recording_times.idx_lims(chunk=1, period=1)[1] ### 2000 + ``` -You can combine the recordings of both chunks of the example simulation shown above into a single time array and a single value array using the [`RecordingTimes`](#CompNeuroPy.monitors.RecordingTimes) object's combine_chunks function: -```python -time_arr, value_arr = recording_times.combine_chunks(recordings, 'my_pop1;v', 'consecutive') -``` + You can combine the recordings of both chunks of the example simulation shown above into a single time array and a single value array using the [`RecordingTimes`](#CompNeuroPy.monitors.RecordingTimes) object's combine_chunks function: + ```python + time_arr, value_arr = recording_times.combine_chunks(recordings, 'my_pop1;v', 'consecutive') + ``` ## Plot recordings To get a quick overview of the recordings, CompNeuroPy provides the [`PlotRecordings`](../additional/analysis_functions.md#CompNeuroPy.analysis_functions.PlotRecordings) class. diff --git a/docs/main/optimize_neuron.md b/docs/main/optimize_neuron.md index 91ba519..bbb5ffd 100644 --- a/docs/main/optimize_neuron.md +++ b/docs/main/optimize_neuron.md @@ -23,20 +23,21 @@ Used optimization methods: * Singh, G. S., & Acerbi, L. (2023). PyBADS: Fast and robust black-box optimization in Python. arXiv preprint [arXiv:2306.15576](https://arxiv.org/abs/2306.15576). * Acerbi, L., & Ma, W. J. (2017). Practical Bayesian optimization for model fitting with Bayesian adaptive direct search. Advances in neural information processing systems, 30. 
[pdf](https://proceedings.neurips.cc/paper_files/paper/2017/file/df0aab058ce179e4f7ab135ed4e641a9-Paper.pdf) -### Example: -```python -opt = OptNeuron( - experiment=my_exp, - get_loss_function=get_loss, - variables_bounds=variables_bounds, - results_soll=experimental_data["results_soll"], - time_step=experimental_data["time_step"], - compile_folder_name="annarchy_opt_neuron_example", - neuron_model=my_neuron, - method="hyperopt", - record=["r"], -) -``` + +!!! example + ```python + opt = OptNeuron( + experiment=my_exp, + get_loss_function=get_loss, + variables_bounds=variables_bounds, + results_soll=experimental_data["results_soll"], + time_step=experimental_data["time_step"], + compile_folder_name="annarchy_opt_neuron_example", + neuron_model=my_neuron, + method="hyperopt", + record=["r"], + ) + ``` A full example is available in the [Examples](../examples/opt_neuron.md). @@ -60,77 +61,77 @@ You have to define a [`CompNeuroExp`](define_experiment.md#CompNeuroPy.experimen - do not call the functions _store_model_state()_ and _reset_model_state()_ of the [`CompNeuroExp`](define_experiment.md#CompNeuroPy.experiment.CompNeuroExp) class within the _run()_ function! -### Example: -```python -class my_exp(CompNeuroExp): - """ - Define an experiment by inheriting from CompNeuroExp. - - CompNeuroExp provides the attributes: - - monitors (CompNeuroMonitors): - a CompNeuroMonitors object to do recordings, define during init otherwise - None - data (dict): - a dictionary for storing any optional data - - and the functions: - reset(): - resets the model and monitors - results(): - returns a results object - """ - - def run(self, population_name): +!!! example + ```python + class my_exp(CompNeuroExp): """ - Do the simulations and recordings. - - To use the CompNeuroExp class, you need to define a run function which - does the simulations and recordings. The run function should return the - results object which can be obtained by calling self.results(). 
+ Define an experiment by inheriting from CompNeuroExp. - For using the CompNeuroExp for OptNeuron, the run function should have - one argument which is the name of the population which is automatically created - by OptNeuron, containing a single neuron of the model which should be optimized. + CompNeuroExp provides the attributes: - Args: - population_name (str): - name of the population which contains a single neuron, this will be - automatically provided by opt_neuron + monitors (CompNeuroMonitors): + a CompNeuroMonitors object to do recordings, define during init otherwise + None + data (dict): + a dictionary for storing any optional data - Returns: - results (CompNeuroExp._ResultsCl): - results object with attributes: - recordings (list): - list of recordings - recording_times (recording_times_cl): - recording times object - mon_dict (dict): - dict of recorded variables of the monitors - data (dict): - dict with optional data stored during the experiment + and the functions: + reset(): + resets the model and monitors + results(): + returns a results object """ - ### you have to start monitors within the run function, otherwise nothing will - ### be recorded - self.monitors.start() - - ### run the simulation, if you reset the monitors/model the model_state argument - ### has to be True (Default) - ... - simulate(100) - self.reset() - ... - - ### optional: store anything you want in the data dict. For example infomration - ### about the simulations. This is not used for the optimization but can be - ### retrieved after the optimization is finished - self.data["sim"] = sim_step.simulation_info() - self.data["population_name"] = population_name - self.data["time_step"] = dt() - - ### return results, use the object's self.results() - return self.results() -``` + + def run(self, population_name): + """ + Do the simulations and recordings. + + To use the CompNeuroExp class, you need to define a run function which + does the simulations and recordings. 
The run function should return the + results object which can be obtained by calling self.results(). + + For using the CompNeuroExp for OptNeuron, the run function should have + one argument which is the name of the population which is automatically created + by OptNeuron, containing a single neuron of the model which should be optimized. + + Args: + population_name (str): + name of the population which contains a single neuron, this will be + automatically provided by opt_neuron + + Returns: + results (CompNeuroExp._ResultsCl): + results object with attributes: + recordings (list): + list of recordings + recording_times (recording_times_cl): + recording times object + mon_dict (dict): + dict of recorded variables of the monitors + data (dict): + dict with optional data stored during the experiment + """ + ### you have to start monitors within the run function, otherwise nothing will + ### be recorded + self.monitors.start() + + ### run the simulation, if you reset the monitors/model the model_state argument + ### has to be True (Default) + ... + simulate(100) + self.reset() + ... + + ### optional: store anything you want in the data dict. For example infomration + ### about the simulations. This is not used for the optimization but can be + ### retrieved after the optimization is finished + self.data["sim"] = sim_step.simulation_info() + self.data["population_name"] = population_name + self.data["time_step"] = dt() + + ### return results, use the object's self.results() + return self.results() + ``` ## The get_loss_function The _get_loss_function_ must have two arguments. When this function is called during optimization, the first argument is always the _results_ object returned by the _experiment_, i.e. the results of the neuron you want to optimize. The second argument depends on whether you have specified _results_soll_, i.e. 
data to be reproduced by the _neuron_model_, or whether you have specified a _target_neuron_model_ whose results are to be reproduced by the _neuron_model_. Thus, the second argument is either _results_soll_ provided to the [`OptNeuron`](#CompNeuroPy.opt_neuron.OptNeuron) class during initialization or another _results_ object (returned by the [`CompNeuroExp`](define_experiment.md#CompNeuroPy.experiment.CompNeuroExp) _run_ function), generated with the _target_neuron_model_. @@ -138,45 +139,45 @@ The _get_loss_function_ must have two arguments. When this function is called du !!! warning You always have to work with the neuron rank 0 within the _get_loss_function_! -### Example: -In this example we assume, that _results_soll_ was provided during initialization of the [`OptNeuron`](#CompNeuroPy.opt_neuron.OptNeuron) class (no _target_neuron_model_ used). -```python -def get_loss(results_ist: CompNeuroExp._ResultsCl, results_soll): - """ - Function which has to have the arguments results_ist and results_soll and should - calculates and return the loss. This structure is needed for the OptNeuron class. - - Args: - results_ist (object): - the results object returned by the run function of experiment (see above) - results_soll (any): - the target data directly provided to OptNeuron during initialization - - Returns: - loss (float or list of floats): - the loss - """ - ### get the recordings and other important things for calculating the loss from - ### results_ist, we do not use all available information here, but you could - rec_ist = results_ist.recordings - pop_ist = results_ist.data["population_name"] - neuron = 0 - - ### get the data for calculating the loss from the results_soll - r_target_0 = results_soll[0] - r_target_1 = results_soll[1] - - ### get the data for calculating the loss from the recordings - r_ist_0 = rec_ist[0][f"{pop_ist};r"][:, neuron] - r_ist_1 = rec_ist[1][f"{pop_ist};r"][:, neuron] - - ### calculate the loss, e.g. 
the root mean squared error - rmse1 = rmse(r_target_0, r_ist_0) - rmse2 = rmse(r_target_1, r_ist_1) - - ### return the loss, one can return a singel value or a list of values which will - ### be summed during the optimization - return [rmse1, rmse2] -``` +!!! example + In this example we assume, that _results_soll_ was provided during initialization of the [`OptNeuron`](#CompNeuroPy.opt_neuron.OptNeuron) class (no _target_neuron_model_ used). + ```python + def get_loss(results_ist: CompNeuroExp._ResultsCl, results_soll): + """ + Function which has to have the arguments results_ist and results_soll and should + calculates and return the loss. This structure is needed for the OptNeuron class. + + Args: + results_ist (object): + the results object returned by the run function of experiment (see above) + results_soll (any): + the target data directly provided to OptNeuron during initialization + + Returns: + loss (float or list of floats): + the loss + """ + ### get the recordings and other important things for calculating the loss from + ### results_ist, we do not use all available information here, but you could + rec_ist = results_ist.recordings + pop_ist = results_ist.data["population_name"] + neuron = 0 + + ### get the data for calculating the loss from the results_soll + r_target_0 = results_soll[0] + r_target_1 = results_soll[1] + + ### get the data for calculating the loss from the recordings + r_ist_0 = rec_ist[0][f"{pop_ist};r"][:, neuron] + r_ist_1 = rec_ist[1][f"{pop_ist};r"][:, neuron] + + ### calculate the loss, e.g. 
the root mean squared error + rmse1 = rmse(r_target_0, r_ist_0) + rmse2 = rmse(r_target_1, r_ist_1) + + ### return the loss, one can return a singel value or a list of values which will + ### be summed during the optimization + return [rmse1, rmse2] + ``` ::: CompNeuroPy.opt_neuron.OptNeuron \ No newline at end of file diff --git a/src/CompNeuroPy/__init__.py b/src/CompNeuroPy/__init__.py index fa8a1bd..d704cac 100644 --- a/src/CompNeuroPy/__init__.py +++ b/src/CompNeuroPy/__init__.py @@ -6,11 +6,9 @@ get_population_power_spectrum, get_power_spektrum_from_time_array, get_pop_rate, - plot_recordings, get_number_of_zero_decimals, get_number_of_decimals, sample_data_with_timestep, - time_data_add_nan, rmse, rsse, get_minimum, @@ -30,12 +28,7 @@ evaluate_expression_with_dict, VClampParamSearch, DeapCma, - interactive_plot, # TODO remove InteractivePlot, - data_obj, # TODO remove - my_linear_cmap_obj, # TODO remove - decision_tree, # TODO remove - node_cl, # TODO remove efel_loss, RNG, find_x_bound, @@ -71,10 +64,10 @@ from CompNeuroPy.statistic_functions import anova_between_groups ### classes -from CompNeuroPy.monitors import Monitors, CompNeuroMonitors -from CompNeuroPy.experiment import Experiment, CompNeuroExp -from CompNeuroPy.generate_model import generate_model, CompNeuroModel -from CompNeuroPy.generate_simulation import generate_simulation, CompNeuroSim +from CompNeuroPy.monitors import CompNeuroMonitors +from CompNeuroPy.experiment import CompNeuroExp +from CompNeuroPy.generate_model import CompNeuroModel +from CompNeuroPy.generate_simulation import CompNeuroSim from CompNeuroPy.dbs import DBSstimulator ### modules diff --git a/src/CompNeuroPy/analysis_functions.py b/src/CompNeuroPy/analysis_functions.py index ba76ea4..9741955 100644 --- a/src/CompNeuroPy/analysis_functions.py +++ b/src/CompNeuroPy/analysis_functions.py @@ -1,7 +1,6 @@ import numpy as np import matplotlib.pyplot as plt from matplotlib.ticker import MaxNLocator -import matplotlib from ANNarchy 
import raster_plot, dt, inter_spike_interval, coefficient_of_variation import warnings from CompNeuroPy import system_functions as sf @@ -9,7 +8,6 @@ from CompNeuroPy.monitors import RecordingTimes from CompNeuroPy.experiment import CompNeuroExp from scipy.interpolate import interp1d -from multiprocessing import Process from typingchecker import check_types @@ -724,788 +722,6 @@ def get_pop_rate( return (np.arange(t_start, t_start + duration, dt), ret) -@check_types() -def plot_recordings( - figname: str, - recordings: list, - recording_times: RecordingTimes, - chunk: int, - shape: tuple, - plan: list[str], - time_lim: None | tuple = None, - dpi: int = 300, -): - """ - Plots the recordings of a single chunk from recordings. Plotted variables are - specified in plan. - - Args: - figname (str): - path + name of figure (e.g. "figures/my_figure.png") - recordings (list): - a recordings list from CompNeuroPy obtained with the function - get_recordings() from a CompNeuroMonitors object. - recording_times (object): - recording_times object from CompNeuroPy obtained with the - function get_recording_times() from a CompNeuroMonitors object. - chunk (int): - which chunk of recordings should be used (the index of chunk) - shape (tuple): - Defines the subplot arrangement e.g. (3,2) = 3 rows, 2 columns - plan (list of strings): - Defines which recordings are plotted in which subplot and how. - Entries of the list have the structure: - "subplot_nr;model_component_name;variable_to_plot;format", - e.g. "1,my_pop1;v;line". - mode: defines how the data is plotted, available modes: - - for spike data: raster, mean, hybrid - - for other data: line, mean, matrix - - only for projection data: matrix_mean - time_lim (tuple, optional): - Defines the x-axis for all subplots. The list contains two - numbers: start and end time in ms. The times have to be - within the chunk. Default: None, i.e., time lims of chunk - dpi (int, optional): - The dpi of the saved figure. 
Default: 300 - """ - proc = Process( - target=_plot_recordings, - args=(figname, recordings, recording_times, chunk, shape, plan, time_lim, dpi), - ) - proc.start() - proc.join() - if proc.exitcode != 0: - quit() - - -def _plot_recordings( - figname: str, - recordings: list, - recording_times: RecordingTimes, - chunk: int, - shape: tuple, - plan: list[str], - time_lim: None | tuple, - dpi: int, -): - """ - Plots the recordings for the given recording_times specified in plan. - - Args: - figname (str): - path + name of figure (e.g. "figures/my_figure.png") - recordings (list): - a recordings list from CompNeuroPy obtained with the function - get_recordings() from a CompNeuroMonitors object. - recording_times (object): - recording_times object from CompNeuroPy obtained with the - function get_recording_times() from a CompNeuroMonitors object. - chunk (int): - which chunk of recordings should be used (the index of chunk) - shape (tuple): - Defines the subplot arrangement e.g. (3,2) = 3 rows, 2 columns - plan (list of strings): - Defines which recordings are plotted in which subplot and how. - Entries of the list have the structure: - "subplot_nr;model_component_name;variable_to_plot;format", - e.g. "1,my_pop1;v;line". - mode: defines how the data is plotted, available modes: - - for spike data: raster, mean, hybrid - - for other data: line, mean, matrix - - only for projection data: matrix_mean - time_lim (tuple): - Defines the x-axis for all subplots. The list contains two - numbers: start and end time in ms. The times have to be - within the chunk. - dpi (int): - The dpi of the saved figure. - """ - print(f"generate fig {figname}", end="... 
", flush=True) - recordings = recordings[chunk] - if isinstance(time_lim, type(None)): - time_lim = recording_times.time_lims(chunk=chunk) - start_time = time_lim[0] - end_time = time_lim[1] - compartment_list = [] - for plan_str in plan: - compartment = plan_str.split(";")[1] - if not (compartment in compartment_list): - compartment_list.append(compartment) - - ### get idx_lim for all compartments, in parallel check that there are no pauses - time_arr_dict = {} - time_step = recordings["dt"] - for compartment in compartment_list: - actual_period = recordings[f"{compartment};period"] - - time_arr_part = [] - - ### loop over periods - nr_periods = recording_times._get_nr_periods( - chunk=chunk, compartment=compartment - ) - for period in range(nr_periods): - ### get the time_lim and idx_lim of the period - time_lims = recording_times.time_lims( - chunk=chunk, compartment=compartment, period=period - ) - time_arr_part.append( - np.arange(time_lims[0], time_lims[1] + actual_period, actual_period) - ) - - time_arr_dict[compartment] = np.concatenate(time_arr_part) - - plt.figure(figsize=([6.4 * shape[1], 4.8 * shape[0]])) - for subplot in plan: - try: - nr, part, variable, mode = subplot.split(";") - nr = int(nr) - style = "" - except: - try: - nr, part, variable, mode, style = subplot.split(";") - nr = int(nr) - except: - print( - '\nERROR plot_recordings: for each subplot give plan-string as: "nr;part;variable;mode" or "nr;part;variable;mode;style" if style is available!\nWrong string: ' - + subplot - + "\n" - ) - quit() - try: - ### check if variable is equation - variable_is_equation = ( - "+" in variable or "-" in variable or "*" in variable or "/" in variable - ) - if variable_is_equation: - ### evalueate the equation - value_dict = {} - for rec_key, rec_val in recordings.items(): - if rec_key is f"{part};parameter_dict": - continue - if ";" in rec_key: - rec_var_name = rec_key.split(";")[1] - else: - rec_var_name = rec_key - value_dict[rec_var_name] = rec_val - 
for param_key, param_val in recordings[ - f"{part};parameter_dict" - ].items(): - value_dict[param_key] = param_val - ### evaluate - evaluated_variable = ef.evaluate_expression_with_dict( - expression=variable, value_dict=value_dict - ) - ### add the evaluated variable to the recordings - recordings[f"{part};{variable}"] = evaluated_variable - - ### set data - data = recordings[f"{part};{variable}"] - except: - print( - "\nWARNING plot_recordings: data", - ";".join([part, variable]), - "not in recordings\n", - ) - plt.subplot(shape[0], shape[1], nr) - plt.text( - 0.5, - 0.5, - " ".join([part, variable]) + " not available", - va="center", - ha="center", - ) - continue - - plt.subplot(shape[0], shape[1], nr) - if (variable == "spike" or variable == "axon_spike") and ( - mode == "raster" or mode == "single" - ): # "single" only for down compatibility - nr_neurons = len(list(data.keys())) - t, n = my_raster_plot(data) - t = t * time_step # convert time steps into ms - mask = ((t >= start_time).astype(int) * (t <= end_time).astype(int)).astype( - bool - ) - if mask.size == 0: - plt.title("Spikes " + part) - print( - "\nWARNING plot_recordings: data", - ";".join([part, variable]), - "does not contain any spikes in the given time interval.\n", - ) - plt.text( - 0.5, - 0.5, - " ".join([part, variable]) + " does not contain any spikes.", - va="center", - ha="center", - ) - else: - if np.unique(n).size == 1: - marker, size = ["|", 3000] - else: - marker, size = [".", 3] - if style != "": - color = style - else: - color = "k" - - plt.scatter( - t[mask], n[mask], color=color, marker=marker, s=size, linewidth=0.1 - ) - plt.xlim(start_time, end_time) - plt.ylim(0 - 0.5, nr_neurons - 0.5) - plt.xlabel("time [ms]") - plt.ylabel("# neurons") - plt.title("Spikes " + part) - elif (variable == "spike" or variable == "axon_spike") and mode == "mean": - time_arr, firing_rate = get_pop_rate( - spikes=data, - t_start=start_time, - t_end=end_time, - time_step=time_step, - ) - 
plt.plot(time_arr, firing_rate, color="k") - plt.xlim(start_time, end_time) - plt.xlabel("time [ms]") - plt.ylabel("Mean firing rate [Hz]") - plt.title("Mean firing rate " + part) - elif (variable == "spike" or variable == "axon_spike") and mode == "hybrid": - nr_neurons = len(list(data.keys())) - t, n = my_raster_plot(data) - t = t * time_step # convert time steps into ms - mask = ((t >= start_time).astype(int) * (t <= end_time).astype(int)).astype( - bool - ) - if mask.size == 0: - plt.title("Spikes " + part) - print( - "\nWARNING plot_recordings: data", - ";".join([part, variable]), - "does not contain any spikes in the given time interval.\n", - ) - plt.text( - 0.5, - 0.5, - " ".join([part, variable]) + " does not contain any spikes.", - va="center", - ha="center", - ) - else: - if np.unique(n).size == 1: - marker, size = ["|", np.sqrt(3000)] - else: - marker, size = [".", np.sqrt(3)] - - plt.plot( - t[mask], n[mask], f"k{marker}", markersize=size, markeredgewidth=0.1 - ) - plt.ylabel("# neurons") - plt.ylim(0 - 0.5, nr_neurons - 0.5) - ax = plt.gca().twinx() - time_arr, firing_rate = get_pop_rate( - spikes=data, - t_start=start_time, - t_end=end_time, - time_step=time_step, - ) - ax.plot( - time_arr, - firing_rate, - color="r", - ) - plt.ylabel("Mean firing rate [Hz]", color="r") - ax.tick_params(axis="y", colors="r") - plt.xlim(start_time, end_time) - plt.xlabel("time [ms]") - plt.title("Activity " + part) - elif (variable != "spike" and variable != "axon_spike") and mode == "line": - if len(data.shape) == 1: - plt.plot(time_arr_dict[part], data, color="k") - plt.title(f"Variable {variable} of {part} (1)") - elif len(data.shape) == 2 and isinstance(data[0, 0], list) is not True: - ### population: data[time,neurons] - for neuron in range(data.shape[1]): - # in case of gaps file time gaps and add nan to data TODO also for other plots - plot_x, plot_y = time_data_add_nan( - time_arr_dict[part], data[:, neuron] - ) - - plt.plot( - plot_x, - plot_y, - color="k", - 
) - plt.title(f"Variable {variable} of {part} ({data.shape[1]})") - elif len(data.shape) == 3 or ( - len(data.shape) == 2 and isinstance(data[0, 0], list) is True - ): - if len(data.shape) == 3: - ### projection data: data[time, postneurons, preneurons] - for post_neuron in range(data.shape[1]): - for pre_neuron in range(data.shape[2]): - plt.plot( - time_arr_dict[part], - data[:, post_neuron, pre_neuron], - color="k", - ) - else: - ### data[time, postneurons][preneurons] (with different number of preneurons) - for post_neuron in range(data.shape[1]): - for pre_neuron in range(len(data[0, post_neuron])): - plt.plot( - time_arr_dict[part], - np.array( - [ - data[t, post_neuron][pre_neuron] - for t in range(data.shape[0]) - ] - ), - color="k", - ) - - plt.title(f"Variable {variable} of {part} ({data.shape[1]})") - else: - print( - "\nERROR plot_recordings: shape not accepted,", - ";".join([part, variable]), - "\n", - ) - plt.xlim(start_time, end_time) - plt.xlabel("time [ms]") - elif (variable != "spike" and variable != "axon_spike") and mode == "mean": - if len(data.shape) == 1: - plt.plot(time_arr_dict[part], data, color="k") - plt.title(f"Variable {variable} of {part} (1)") - elif len(data.shape) == 2 and isinstance(data[0, 0], list) is not True: - ### population: data[time,neurons] - nr_neurons = data.shape[1] - data = np.mean(data, 1) - plt.plot( - time_arr_dict[part], - data[:], - color="k", - ) - plt.title(f"Variable {variable} of {part} ({nr_neurons}, mean)") - elif len(data.shape) == 3 or ( - len(data.shape) == 2 and isinstance(data[0, 0], list) is True - ): - if len(data.shape) == 3: - ### projection data: data[time, postneurons, preneurons] - for post_neuron in range(data.shape[1]): - plt.plot( - time_arr_dict[part], - np.mean(data[:, post_neuron, :], 1), - color="k", - ) - else: - ### data[time, postneurons][preneurons] (with different number of preneurons) - for post_neuron in range(data.shape[1]): - avg_data = [] - for pre_neuron in range(len(data[0, 
post_neuron])): - avg_data.append( - np.array( - [ - data[t, post_neuron][pre_neuron] - for t in range(data.shape[0]) - ] - ) - ) - plt.plot( - time_arr_dict[part], - np.mean(avg_data, 0), - color="k", - ) - - plt.title( - f"Variable {variable} of {part}, mean for {data.shape[1]} post neurons" - ) - else: - print( - "\nERROR plot_recordings: shape not accepted,", - ";".join([part, variable]), - "\n", - ) - plt.xlim(start_time, end_time) - plt.xlabel("time [ms]") - - elif ( - variable != "spike" and variable != "axon_spike" - ) and mode == "matrix_mean": - if len(data.shape) == 3 or ( - len(data.shape) == 2 and isinstance(data[0, 0], list) is True - ): - if len(data.shape) == 3: - ### average over pre neurons --> get 2D array [time, postneuron] - data_avg = np.mean(data, 2) - - ### after cerating 2D array --> same procedure as for populations - ### get the times and data between time_lims - mask = ( - (time_arr_dict[part] >= start_time).astype(int) - * (time_arr_dict[part] <= end_time).astype(int) - ).astype(bool) - time_arr = time_arr_dict[part][mask] - data_arr = data_avg[mask, :] - - ### check with the actual_period and the times array if there is data missing - ### from time_lims and actual period opne should get all times at which data points should be - actual_period = recordings[f"{part};period"] - actual_start_time = ( - np.ceil(start_time / actual_period) * actual_period - ) - actual_end_time = ( - np.ceil(end_time / actual_period - 1) * actual_period - ) - soll_times = np.arange( - actual_start_time, - actual_end_time + actual_period, - actual_period, - ) - - ### check if there are time points, where data is missing - plot_data_arr = np.empty((soll_times.size, data_arr.shape[1])) - plot_data_arr[:] = None - for time_point_idx, time_point in enumerate(soll_times): - if time_point in time_arr: - ### data at time point is available --> use data - idx_available_data = time_arr == time_point - plot_data_arr[time_point_idx, :] = data_arr[ - idx_available_data, : 
- ] - ### if data is not available it stays none - - vmin = np.nanmin(plot_data_arr) - vmax = np.nanmax(plot_data_arr) - - masked_array = np.ma.array( - plot_data_arr.T, mask=np.isnan(plot_data_arr.T) - ) - cmap = matplotlib.cm.viridis - cmap.set_bad("red", 1.0) - - plt.title( - f"Variable {variable} of {part} ({data.shape[1]}) [{ef.sci(vmin)}, {ef.sci(vmax)}]" - ) - - plt.gca().imshow( - masked_array, - aspect="auto", - vmin=vmin, - vmax=vmax, - extent=[ - np.min(soll_times) - 0.5, - np.max(soll_times) - 0.5, - data.shape[1] - 0.5, - -0.5, - ], - cmap=cmap, - interpolation="none", - ) - if data.shape[1] == 1: - plt.yticks([0]) - else: - plt.gca().yaxis.set_major_locator(MaxNLocator(integer=True)) - plt.xlabel("time [ms]") - - else: - ### data[time, postneurons][preneurons] (with different number of preneurons) - ### average over pre neurons --> get 2D array [time, postneuron] - data_avg = np.empty((data.shape[0], data.shape[1])) - for post_neuron in range(data.shape[1]): - avg_post = [] - for pre_neuron in range(len(data[0, post_neuron])): - avg_post.append( - np.array( - [ - data[t, post_neuron][pre_neuron] - for t in range(data.shape[0]) - ] - ) - ) - data_avg[:, post_neuron] = np.mean(avg_post, 0) - - ### after cerating 2D array --> same procedure as for populations - ### get the times and data between time_lims - mask = ( - (time_arr_dict[part] >= start_time).astype(int) - * (time_arr_dict[part] <= end_time).astype(int) - ).astype(bool) - time_arr = time_arr_dict[part][mask] - data_arr = data_avg[mask, :] - - ### check with the actual_period and the times array if there is data missing - ### from time_lims and actual period opne should get all times at which data points should be - actual_period = recordings[f"{part};period"] - actual_start_time = ( - np.ceil(start_time / actual_period) * actual_period - ) - actual_end_time = ( - np.ceil(end_time / actual_period - 1) * actual_period - ) - soll_times = np.arange( - actual_start_time, - actual_end_time + 
actual_period, - actual_period, - ) - - ### check if there are time points, where data is missing - plot_data_arr = np.empty((soll_times.size, data_arr.shape[1])) - plot_data_arr[:] = None - for time_point_idx, time_point in enumerate(soll_times): - if time_point in time_arr: - ### data at time point is available --> use data - idx_available_data = time_arr == time_point - plot_data_arr[time_point_idx, :] = data_arr[ - idx_available_data, : - ] - ### if data is not available it stays none - - vmin = np.nanmin(plot_data_arr) - vmax = np.nanmax(plot_data_arr) - - masked_array = np.ma.array( - plot_data_arr.T, mask=np.isnan(plot_data_arr.T) - ) - cmap = matplotlib.cm.viridis - cmap.set_bad("red", 1.0) - - plt.title( - f"Variable {variable} of {part} ({data.shape[1]}) [{ef.sci(vmin)}, {ef.sci(vmax)}]" - ) - - plt.gca().imshow( - masked_array, - aspect="auto", - vmin=vmin, - vmax=vmax, - extent=[ - np.min(soll_times) - 0.5, - np.max(soll_times) - 0.5, - data.shape[1] - 0.5, - -0.5, - ], - cmap=cmap, - interpolation="none", - ) - if data.shape[1] == 1: - plt.yticks([0]) - else: - plt.gca().yaxis.set_major_locator(MaxNLocator(integer=True)) - plt.xlabel("time [ms]") - - plt.title( - f"Variable {variable} of {part}, mean for {data.shape[1]} post neurons [{ef.sci(vmin)}, {ef.sci(vmax)}]" - ) - else: - print( - "\nERROR plot_recordings: shape not accepted,", - ";".join([part, variable]), - "\n", - ) - plt.xlim(start_time, end_time) - plt.xlabel("time [ms]") - - elif (variable != "spike" and variable != "axon_spike") and mode == "matrix": - # data[start_step:end_step,neuron] - if len(data.shape) == 2 and isinstance(data[0, 0], list) is not True: - ### data from population [times,neurons] - ### get the times and data between time_lims - mask = ( - (time_arr_dict[part] >= start_time).astype(int) - * (time_arr_dict[part] <= end_time).astype(int) - ).astype(bool) - - time_decimals = get_number_of_decimals(time_step) - - time_arr = np.round(time_arr_dict[part][mask], 
time_decimals) - data_arr = data[mask, :] - - ### check with the actual_period and the times array if there is data missing - ### from time_lims and actual period opne should get all times at which data points should be - actual_period = recordings[f"{part};period"] - actual_start_time = np.ceil(start_time / actual_period) * actual_period - actual_end_time = np.ceil(end_time / actual_period - 1) * actual_period - soll_times = np.round( - np.arange( - actual_start_time, - actual_end_time + actual_period, - actual_period, - ), - time_decimals, - ) - - ### check if there are time points, where data is missing - plot_data_arr = np.empty((soll_times.size, data_arr.shape[1])) - plot_data_arr[:] = None - for time_point_idx, time_point in enumerate(soll_times): - if time_point in time_arr: - ### data at time point is available --> use data - idx_available_data = time_arr == time_point - plot_data_arr[time_point_idx, :] = data_arr[ - idx_available_data, : - ] - ### if data is not available it stays none - - vmin = np.nanmin(plot_data_arr) - vmax = np.nanmax(plot_data_arr) - - masked_array = np.ma.array( - plot_data_arr.T, mask=np.isnan(plot_data_arr.T) - ) - cmap = matplotlib.cm.viridis - cmap.set_bad("red", 1.0) - - plt.title( - f"Variable {part} {variable} ({data.shape[1]}) [{ef.sci(vmin)}, {ef.sci(vmax)}]" - ) - - plt.gca().imshow( - masked_array, - aspect="auto", - vmin=vmin, - vmax=vmax, - extent=[ - np.min(soll_times) - 0.5, - np.max(soll_times) - 0.5, - data.shape[1] - 0.5, - -0.5, - ], - cmap=cmap, - interpolation="none", - ) - if data.shape[1] == 1: - plt.yticks([0]) - else: - plt.gca().yaxis.set_major_locator(MaxNLocator(integer=True)) - plt.xlabel("time [ms]") - elif len(data.shape) == 3 or ( - len(data.shape) == 2 and isinstance(data[0, 0], list) is True - ): - ### data from projection - if len(data.shape) == 3: - ### projection data: data[time, postneurons, preneurons] - ### create a 2D array from the 3D array - data_resh = data.reshape( - (data.shape[0], 
int(data.shape[1] * data.shape[2])) - ) - data_split = np.split(data_resh, data.shape[1], axis=1) - ### separate the post_neurons by nan vectors - data_with_none = np.concatenate( - [ - np.concatenate( - [ - data_split[idx], - np.zeros((data.shape[0], 1)) * np.nan, - ], - axis=1, - ) - for idx in range(len(data_split)) - ], - axis=1, - )[:, :-1] - - ### after cerating 2D array --> same procedure as for populations - ### get the times and data between time_lims - mask = ( - (time_arr_dict[part] >= start_time).astype(int) - * (time_arr_dict[part] <= end_time).astype(int) - ).astype(bool) - time_arr = time_arr_dict[part][mask] - data_arr = data_with_none[mask, :] - - ### check with the actual_period and the times array if there is data missing - ### from time_lims and actual period opne should get all times at which data points should be - actual_period = recordings[f"{part};period"] - actual_start_time = ( - np.ceil(start_time / actual_period) * actual_period - ) - actual_end_time = ( - np.ceil(end_time / actual_period - 1) * actual_period - ) - soll_times = np.arange( - actual_start_time, - actual_end_time + actual_period, - actual_period, - ) - - ### check if there are time points, where data is missing - plot_data_arr = np.empty((soll_times.size, data_arr.shape[1])) - plot_data_arr[:] = None - for time_point_idx, time_point in enumerate(soll_times): - if time_point in time_arr: - ### data at time point is available --> use data - idx_available_data = time_arr == time_point - plot_data_arr[time_point_idx, :] = data_arr[ - idx_available_data, : - ] - ### if data is not available it stays none - - vmin = np.nanmin(plot_data_arr) - vmax = np.nanmax(plot_data_arr) - - masked_array = np.ma.array( - plot_data_arr.T, mask=np.isnan(plot_data_arr.T) - ) - cmap = matplotlib.cm.viridis - cmap.set_bad("red", 1.0) - - plt.title( - f"Variable {variable} of {part} ({data.shape[1]}) [{ef.sci(vmin)}, {ef.sci(vmax)}]" - ) - - plt.gca().imshow( - masked_array, - aspect="auto", - 
vmin=vmin, - vmax=vmax, - extent=[ - np.min(soll_times) - 0.5, - np.max(soll_times) - 0.5, - data.shape[1] - 0.5, - -0.5, - ], - cmap=cmap, - interpolation="none", - ) - if data.shape[1] == 1: - plt.yticks([0]) - else: - plt.gca().yaxis.set_major_locator(MaxNLocator(integer=True)) - plt.xlabel("time [ms]") - else: - ### data[time, postneurons][preneurons] (with different number of preneurons) - pass - - else: - print( - "\nERROR plot_recordings: shape not accepted,", - ";".join([part, variable]), - "\n", - ) - quit() - else: - print( - "\nERROR plot_recordings: mode", - mode, - "not available for variable", - variable, - "\n", - ) - quit() - plt.tight_layout() - - ### save plot - figname_parts = figname.split("/") - if len(figname_parts) > 1: - save_dir = "/".join(figname_parts[:-1]) - sf.create_dir(save_dir) - plt.savefig(figname, dpi=dpi) - plt.close() - print("Done") - - def get_number_of_zero_decimals(nr): """ For numbers which are smaller than zero get the number of digits after the decimal @@ -1600,43 +816,6 @@ def sample_data_with_timestep(time_arr, data_arr, timestep): return (new_time_arr, new_data_arr) -def time_data_add_nan(time_arr, data_arr, fill_time_step=None, axis=0): - """ - If there are gaps in time_arr --> fill them with respective time values. - Fill the corresponding data_arr values with nan. - - By default it is tried to fill the time array with continuously increasing times - based on the smallest time difference found there can still be discontinuities after - filling the arrays (because existing time values are not changed). - - But one can also give a fixed fill time step. 
- - Args: - time_arr (1D array): - times of data_arr in ms - data_arr (nD array): - the size of the specified dimension of data array must have the same length - as time_arr - fill_time_step (number, optional, default=None): - if there are gaps they are filled with this time step - axis (int): - which dimension of the data_arr belongs to the time_arr - - Returns: - time_arr (1D array): - time array with gaps filled - data_arr (nD array): - data array with gaps filled - """ - return time_data_fill_gaps( - time_arr, - data_arr, - fill_time_step=fill_time_step, - axis=axis, - fill="nan", - ) - - def time_data_fill_gaps( time_arr, data_arr, fill_time_step=None, axis=0, fill: str | float = "nan" ): @@ -1820,10 +999,9 @@ def get_maximum(input_data: list | np.ndarray | tuple | float): class PlotRecordings: """ Plot recordings from CompNeuroMonitors. - - TODO: CHeck if there are memory issues with large recordings or many subplots. """ + ### TODO: Check if there are memory issues with large recordings or many subplots. 
@check_types() def __init__( self, @@ -2453,7 +1631,7 @@ def _fill_subplot_other(self, plot_idx): ).astype(bool) ### fill gaps in time_arr and data_arr with nan - time_arr, data_arr = time_data_add_nan( + time_arr, data_arr = time_data_fill_gaps( time_arr=time_arr[mask], data_arr=data_arr[mask], axis=0 ) diff --git a/src/CompNeuroPy/dbs.py b/src/CompNeuroPy/dbs.py index e0643d8..0aed84a 100644 --- a/src/CompNeuroPy/dbs.py +++ b/src/CompNeuroPy/dbs.py @@ -14,7 +14,7 @@ from ANNarchy.core import ConnectorMethods import numpy as np from CompNeuroPy import model_functions as mf -from CompNeuroPy.generate_model import generate_model +from CompNeuroPy.generate_model import CompNeuroModel from typingchecker import check_types import inspect @@ -883,7 +883,7 @@ class _CreateDBSmodelcnp(_CreateDBSmodel): def __init__( self, - model: generate_model, + model: CompNeuroModel, stimulated_population: Population, excluded_populations_list: list[Population], passing_fibres_list: list[Projection], @@ -893,7 +893,7 @@ def __init__( Prepare model for DBS stimulation. Args: - model (generate_model): + model (CompNeuroModel): CompNeuroPy model stimulated_population (Population): Population which is stimulated by DBS @@ -921,7 +921,7 @@ def recreate_model(self): parent class as model_creation_function. The new model can be accessed via the model attribute. """ - self.model = generate_model( + self.model = CompNeuroModel( model_creation_function=super().recreate_model, name=f"{self.model.name}_dbs", description=f"{self.model.description}\nWith DBS mechanisms implemented.", @@ -1016,7 +1016,7 @@ def __init__( axon_rate_amp: float | dict[Population | str, float] = 1.0, seed: int | None = None, auto_implement: bool = False, - model: generate_model | None = None, + model: CompNeuroModel | None = None, ) -> None: """ Initialize DBS stimulator. 
diff --git a/src/CompNeuroPy/experiment.py b/src/CompNeuroPy/experiment.py index 39ac3b7..952c0ce 100644 --- a/src/CompNeuroPy/experiment.py +++ b/src/CompNeuroPy/experiment.py @@ -220,7 +220,3 @@ def run(self) -> _ResultsCl: the experiment by calling the results function of the CompNeuroExp class. """ ) - - -### old name for backward compatibility, TODO remove -Experiment = CompNeuroExp diff --git a/src/CompNeuroPy/extra_functions.py b/src/CompNeuroPy/extra_functions.py index c5bcdbd..269b8b4 100644 --- a/src/CompNeuroPy/extra_functions.py +++ b/src/CompNeuroPy/extra_functions.py @@ -57,7 +57,7 @@ def flatten_list(lst): Retuns flattened list Args: - lst (list of lists or mixed: values and lists): + lst (list of lists or mixed values and lists): List to be flattened Returns: @@ -223,10 +223,6 @@ def __getattribute__(self, __name: str): return super().__getattribute__(__name) -### keep old name for compatibility -data_obj = _DataCl - - def create_cm(colors, name="my_cmap", N=256, gamma=1.0, vmin=0, vmax=1): """ Create a `LinearSegmentedColormap` from a list of colors. @@ -370,10 +366,6 @@ def __call__(self, X, alpha=None, bytes=False): return super().__call__(X, alpha, bytes) -### keep old name for compatibility -my_linear_cmap_obj = _LinearColormapClass - - class DecisionTree: """ Class to create a decision tree. @@ -472,10 +464,6 @@ def _get_path_prod_rec(self, node): return [path_str + "/" + node.name, prob * node.prob] -### keep old name for compatibility -decision_tree = DecisionTree - - class DecisionTreeNode: """ Class to create a node in a decision tree. @@ -539,10 +527,6 @@ def get_path_prod(self): return self.tree._get_path_prod_rec(self) -### keep old name for compatibility -node_cl = DecisionTreeNode - - def evaluate_expression_with_dict(expression, value_dict): """ Evaluate a mathematical expression using values from a dictionary. 
@@ -2222,9 +2206,6 @@ def _recreate_plot(self): self.ani.event_source.start() -interactive_plot = InteractivePlot - - def efel_loss(trace1, trace2, feature_list): """ Calculate the loss between two traces using the features from the feature_list. diff --git a/src/CompNeuroPy/full_models/bgm_22/bgm.py b/src/CompNeuroPy/full_models/bgm_22/bgm.py index 6d260ce..d798ad1 100644 --- a/src/CompNeuroPy/full_models/bgm_22/bgm.py +++ b/src/CompNeuroPy/full_models/bgm_22/bgm.py @@ -62,8 +62,8 @@ def __init__( """ Args: name (str, optional): - name of the model, syntax: "BGM_v_p" - replace and with the versions you + name of the model, syntax: "BGM_v*model_version*_p*parameters_version*" + replace *model_version* and *parameters_version* with the versions you want to use, see CompNeuroPy.full_models.BGM_22.parameters for available versions. Default: "BGM_v01_p01" do_create (bool, optional): @@ -72,7 +72,7 @@ def __init__( if True, the model is compiled after creation. Default: True compile_folder_name (str, optional): name of the folder in which the compiled model is saved. Default: None, - i.e. "annarchy_BGM_v" is used + i.e. "annarchy_BGM_v*model_version*" is used seed (int, optional): the seed for the random number generator used during model creation. Default: None, i.e. 
random seed is used @@ -89,7 +89,7 @@ def __init__( and name.split("_")[2][0] == "p" ): raise ValueError( - "name has to be of the form 'BGM_v_p'" + "name has to be of the form 'BGM_v*model_version*_p*parameters_version*'" ) ### set attributes (except the ones which are set in the super().__init__()) diff --git a/src/CompNeuroPy/generate_model.py b/src/CompNeuroPy/generate_model.py index 21bc097..3bf969e 100644 --- a/src/CompNeuroPy/generate_model.py +++ b/src/CompNeuroPy/generate_model.py @@ -410,7 +410,3 @@ def _get_attribute_df(self): ### return dataframe return pd.DataFrame(attribute_dict) - - -### old name for compatibility, TODO: remove -generate_model = CompNeuroModel diff --git a/src/CompNeuroPy/generate_simulation.py b/src/CompNeuroPy/generate_simulation.py index 3b845d5..a420312 100644 --- a/src/CompNeuroPy/generate_simulation.py +++ b/src/CompNeuroPy/generate_simulation.py @@ -249,10 +249,6 @@ def simulation_info(self): return simulation_info_obj -### old name for backward compatibility, TODO: remove -generate_simulation = CompNeuroSim - - class SimInfo: """ Class for storing the simulation information. diff --git a/src/CompNeuroPy/monitors.py b/src/CompNeuroPy/monitors.py index 75f09de..d28504c 100644 --- a/src/CompNeuroPy/monitors.py +++ b/src/CompNeuroPy/monitors.py @@ -618,10 +618,6 @@ def _unpack_mon_dict_keys(self, s: str, warning: bool = False): return compartment_type, compartment_name, period -### old name for backwards compatibility, TODO: remove in future -Monitors = CompNeuroMonitors - - class RecordingTimes: def __init__(self, recording_times_list): """ @@ -774,7 +770,7 @@ def combine_periods( ### fill gaps with nan or interpolate if fill == "nan": - time_arr, data_arr = af.time_data_add_nan( + time_arr, data_arr = af.time_data_fill_gaps( time_arr, data_arr, fill_time_step=period_time, @@ -805,7 +801,7 @@ def combine_chunks( considered and filled with nan values. !!! 
warning - If you use mode="consecutive": Missing recordings at the end of chunks + If you use mode="consecutive": Missing recordings AT THE END OF chunks (simulated but not recorded) are not considered, this leads to times which differ from the original simulation times (these time periods without recording are simply ignored)! @@ -897,7 +893,7 @@ def combine_chunks( ### check if there are gaps in the time array ### fill them with the corersponding times and ### the data array with nan values - time_arr, data_arr = af.time_data_add_nan( + time_arr, data_arr = af.time_data_fill_gaps( time_arr, data_arr, fill_time_step=period_time, From 6eb8d18ce948ec176bde8be8ece119c91a827300 Mon Sep 17 00:00:00 2001 From: olmai Date: Mon, 17 Jun 2024 10:45:09 +0200 Subject: [PATCH 39/39] updated version --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 4cbae30..9a389af 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ build-backend = "setuptools.build_meta" [project] name = "CompNeuroPy" -version = "1.0.1" +version = "1.0.2" description = 'General package for computational neuroscience with ANNarchy.' readme = "README.md" requires-python = ">=3.10"