From d5843a9f76f4391c396a11a739cbd06ae706cf41 Mon Sep 17 00:00:00 2001 From: olmai Date: Wed, 19 Jun 2024 16:53:11 +0200 Subject: [PATCH 01/21] model_configurator: fixed error: needed to round the incoming spikes which are calculated by approximating a binomial distribution with a normal distribution (you have to round the normal distribution values!) --- .../model_configurator_cnp.py | 341 ++++++++++++------ .../model_configurator_user.py | 90 ++--- .../model_configurator/reduce_model.py | 3 +- .../examples/model_configurator/test.py | 20 +- .../examples/model_configurator/test2.py | 42 ++- 5 files changed, 311 insertions(+), 185 deletions(-) diff --git a/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py b/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py index 05f67fa..c4abd90 100644 --- a/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py +++ b/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py @@ -6,6 +6,8 @@ from CompNeuroPy import system_functions as sf from CompNeuroPy import analysis_functions as af +from CompNeuroPy.examples.model_configurator.reduce_model import _CreateReducedModel + from ANNarchy import ( Population, Projection, @@ -1414,15 +1416,69 @@ def __init__( ### analyze the given model, create model before analyzing, then clear ANNarchy self._analyze_model = AnalyzeModel(model=self._model) ### create the CompNeuroModel object for the reduced model (the model itself is - ### not created yet) - self._model_reduced = CreateReducedModel( - model=self._model, - analyze_model=self._analyze_model, - reduced_size=100, - do_create=False, - do_compile=False, - verbose=True, + ### not created yet) TODO: tmp change to old _CreateReducedModel, does this work for thal? 
+ # self._model_reduced = CreateReducedModel( + # model=self._model, + # analyze_model=self._analyze_model, + # reduced_size=100, + # do_create=False, + # do_compile=False, + # verbose=True, + # ) + self._model_reduced = _CreateReducedModel( + model=model, reduced_size=100, do_create=False, do_compile=False ) + + # ### TODO: tmp compare the reduced model with the normal model, normal model part + # mf.cnp_clear(functions=False, constants=False) + # self._model.create() + # mon = CompNeuroMonitors( + # mon_dict={pop_name: ["spike"] for pop_name in self._model.populations} + # ) + # mon.start() + # simulate(5000) + # recordings = mon.get_recordings() + # recording_times = mon.get_recording_times() + # af.PlotRecordings( + # figname="tmp_compare_normal.png", + # recordings=recordings, + # recording_times=recording_times, + # shape=(len(self._model.populations), 1), + # plan={ + # "position": list(range(1, len(self._model.populations) + 1)), + # "compartment": self._model.populations, + # "variable": ["spike"] * len(self._model.populations), + # "format": ["hybrid"] * len(self._model.populations), + # }, + # ) + # ### TODO: tmp compare the reduced model with the normal model, reduced model part + # mf.cnp_clear(functions=False, constants=False) + # self._model_reduced.model_reduced.create() + # mon = CompNeuroMonitors( + # mon_dict={ + # f"{pop_name}_reduced": ["spike"] for pop_name in self._model.populations + # } + # ) + # mon.start() + # simulate(5000) + # recordings = mon.get_recordings() + # recording_times = mon.get_recording_times() + # af.PlotRecordings( + # figname="tmp_compare_reduced.png", + # recordings=recordings, + # recording_times=recording_times, + # shape=(len(self._model.populations), 1), + # plan={ + # "position": list(range(1, len(self._model.populations) + 1)), + # "compartment": [ + # f"{pop_name}_reduced" for pop_name in self._model.populations + # ], + # "variable": ["spike"] * len(self._model.populations), + # "format": ["hybrid"] * 
len(self._model.populations), + # }, + # ) + # quit() + ### try to load the cached variables if clear_cache: sf.clear_dir(".model_config_cache") @@ -1543,7 +1599,7 @@ def _check_if_not_config_pops_have_correct_rates(self): rates. """ ### initialize the normal model + compile the model - self._init_model_with_fitted_base(base_dict=self._base_dict) + self._init_model_with_fitted_base() ### record spikes of the do_not_config populations mon = CompNeuroMonitors( @@ -1630,7 +1686,8 @@ def _init_model_with_fitted_base(self, base_dict: dict[str, float] | None = None ### clear ANNarchy and create the normal model mf.cnp_clear(functions=False, constants=False) self._model.create(do_compile=False) - ### set the initial variables of the neurons + ### set the initial variables of the neurons #TODO small problem = init sampler + ### initializes the neurons in resting-state, but here they get an input current for pop_name, init_sampler in self._init_sampler.init_sampler_dict.items(): init_sampler.set_init_variables(get_population(pop_name)) ### set the baseline currents @@ -1688,15 +1745,15 @@ def error_changed(error_list, tol, n=3): return True return (np.max(error_list[-n:]) - np.min(error_list[-n:])) > tol - ### TODO not check if error is small enough but if the change of the error - ### converges, for this, check the mean of the last 10 error changes + ### run until the error does not change anymore or the maximum number of + ### iterations is reached, also break if the error is small enough while it < max_it and error_changed(error_list, tol_convergence): print("\n\nnext iteration") y_old = y y = func(x) dx_list.append(x - x_old) dy_list.append(y - y_old) - ### TODO if x did not change much, use the previous gradient again + ### TODO if x did not change much, use the previous gradient again, but maybe not a good idea, or at least not easy to implement, sicne gradient depends on all inputs print(f"x: {x}") print(f"y: {y}") x_list.append(x) @@ -1715,60 +1772,27 @@ def 
error_changed(error_list, tol, n=3): error_list.append(np.mean(np.abs(error))) print(f"error_list: {error_list}\n") ### if the error sign changed: - ### - TODO check if error is larger as before, if yes -> use again the previous x, if use previous x also compute current y + ### - check if error is larger as before + ### - if yes -> check if error is also larger than tolerance + ### - if yes -> use again the previous x and compute current y again ### - we calculate (as usual) a new gradient ### - we reduce alpha, so this time the step is smaller error_increased = np.abs(error) > np.abs(error_old) - x[error_sign_changed & error_increased] = x_old[ - error_sign_changed & error_increased - ] - if np.any(error_sign_changed & error_increased): + error_is_large = np.abs(error) > tol_error + change_x = error_sign_changed & error_increased & error_is_large + x[change_x] = x_old[change_x] + if np.any(change_x): y = func(x) - - # TODO I do not understand this example, this message was printed but x did not change - # next iteration - # x: [12.56441496 40.92615539 18.96717589 90.30010779] - # y: [30.00888889 59.99777778 50.01333333 96.85333333] - # error_sign_changed: [False False False False] - # error_list: [23.759444444444448, 2.517777777777779, 78.90388888888889, 22.96944444444445] - - # x_plus: [13.56441496 40.92615539 18.96717589 90.30010779] - # y_plus: [32.22222222 60.10888889 50.08666667 97.41111111] - - # x_plus: [12.56441496 42.92615539 18.96717589 90.30010779] - # y_plus: [30.00888889 62.06666667 50.01333333 91.96666667] - - # x_plus: [12.56441496 40.92615539 19.96717589 90.30010779] - # y_plus: [30.00888889 59.89333333 51.14666667 96.79333333] - - # x_plus: [ 12.56441496 40.92615539 18.96717589 132.96677446] - # y_plus: [ 30.00888889 59.99777778 50.01333333 214.46666667] - - # delta_y: [-8.88888889e-03 2.22222222e-03 -1.33333333e-02 -9.18533333e+01] - # grad: - # [[ 2.21333333e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00] - # [ 1.11111111e-01 2.06888889e+00 
-1.04444444e-01 0.00000000e+00] - # [ 7.33333333e-02 0.00000000e+00 1.13333333e+00 0.00000000e+00] - # [ 5.57777778e-01 -4.88666667e+00 -6.00000000e-02 1.17613333e+02]] - # Solution vector x: [-4.01606426e-03 7.08996344e-04 -1.15048429e-02 -7.80934579e-01] - # delta_y from solution: [-8.88888889e-03 2.22222222e-03 -1.33333333e-02 -9.18533333e+01] - - # next iteration - # x: [12.56200532 40.92757338 18.96027299 76.97215765] - # y: [30.01333333 60.00444444 50.00444444 61.82444444] - # error_sign_changed: [False True False False] - # error_list: [23.759444444444448, 2.517777777777779, 78.90388888888889, 22.96944444444445, 14.211666666666666] - - # some errors changed sign and increased - # x: [12.56200532 40.92615539 18.96027299 76.97215765] - # y: [30.01333333 60.02 50.00444444 61.86 ] - - print("some errors changed sign and increased") + print( + "some errors changed sign, increased, and are larger than tolerance" + ) print(f"x: {x}") print(f"y: {y}\n") x_list.append(x) y_list.append(y) it_list.append(it) + ### reduce alpha for the inputs where the error sign changed + ### for the others alpha reaches 1 alpha[error_sign_changed] /= 2 alpha[~error_sign_changed] += (1 - alpha[~error_sign_changed]) / 5 ### calculate the gradient i.e. change of the output values for each input @@ -1856,7 +1880,7 @@ def error_changed(error_list, tol, n=3): plt.ylabel(f"dy{idx}/dx{idx}") plt.xlabel("x") plt.tight_layout() - plt.savefig("dy_dx_asugehend_x.png") + plt.savefig("dy_dx_ausgehend_x.png") class GetBase: @@ -1917,6 +1941,77 @@ def _set_model_weights(self): ) def _prepare_get_base(self): + ### TODO I compare here reduced model with normal model and yes, in the reduced model, the thalamus rate is lower... but why + ### if snr is silent --> no diff in thalamus rate between reduced and normal model --> the snr-->thal input is not reduced well + ### its strange that the thalamus neurons in the reduced model seem to have all the same g_ampa... 
but why + ### it should be different since the population calculating the conductance has different number synapses and uses random numbers for incoming spikes + ### it is actually different, but more similiar then in the normal model + ### I think the implementation actually works as epected but the replacement of the binomial distribution with the normal distribution for calculating incoming spikes does not work well here + ### because the spike probabilities are low + ### FOUND PROBLEM: I had to round the incoming spikes to the nearest integer, the binomial distribution to approximate was around n=10, p=0.01 --> many zeros few ones, with the not-rounded normal distribution you got many values between 0 and 1 --> more then 0 --> higher conductance + ### TODO tmp test, compare normal and reduced model, reduced model part + ### clear and create model + mf.cnp_clear(functions=False, constants=False) + self._model_normal.create() + ### create monitors + mon_dict = {} + plot_position_list = [] + plot_compartment_list = [] + plot_variable_list = [] + plot_format_list = [] + for idx, pop_name in enumerate(self._model_normal.populations): + mon_dict[pop_name] = [] + if True: + mon_dict[pop_name].append("spike") + plot_position_list.append(3 * idx + 1) + plot_compartment_list.append(pop_name) + plot_variable_list.append("spike") + plot_format_list.append("hybrid") + if "g_gaba" in get_population(pop_name).variables: + mon_dict[pop_name].append("g_gaba") + plot_position_list.append(3 * idx + 2) + plot_compartment_list.append(pop_name) + plot_variable_list.append("g_gaba") + plot_format_list.append("line") + if "g_ampa" in get_population(pop_name).variables: + mon_dict[pop_name].append("g_ampa") + plot_position_list.append(3 * idx + 3) + plot_compartment_list.append(pop_name) + plot_variable_list.append("g_ampa") + plot_format_list.append("line") + mon = CompNeuroMonitors(mon_dict=mon_dict) + ### initialize the populations with the init sampler + for pop_name in 
self._pop_names_config: + self._init_sampler.get(pop_name=pop_name).set_init_variables( + get_population(pop_name) + ) + ### set the weights + for proj_name, weight in self._weight_dicts.weight_dict.items(): + setattr(get_projection(proj_name), "w", weight) + ### simulate + mon.start() + get_population("thal").I_app = 20 + simulate(5000) + ### plot + recordings = mon.get_recordings() + recording_times = mon.get_recording_times() + af.PlotRecordings( + figname="tmp_compare_normal.png", + recordings=recordings, + recording_times=recording_times, + shape=(len(self._model_normal.populations), 3), + plan={ + "position": plot_position_list, + "compartment": plot_compartment_list, + "variable": plot_variable_list, + "format": plot_format_list, + }, + ) + print( + "thal rate:", len(raster_plot(recordings[0]["thal;spike"])[0]) / (5 * 100) + ) + print("snr rate:", len(raster_plot(recordings[0]["snr;spike"])[0]) / (5 * 100)) + ### clear ANNarchy mf.cnp_clear(functions=False, constants=False) ### create and compile the model @@ -1929,6 +2024,55 @@ def _prepare_get_base(self): for pop_name in self._model_normal.populations } ) + ### TODO: tmp monitors to compare normal and reduced model + mon_dict = {} + plot_position_list = [] + plot_compartment_list = [] + plot_variable_list = [] + plot_format_list = [] + for idx, pop_name in enumerate(self._model_normal.populations): + mon_dict[f"{pop_name}_reduced"] = [] + if True: + mon_dict[f"{pop_name}_reduced"].append("spike") + plot_position_list.append(3 * idx + 1) + plot_compartment_list.append(f"{pop_name}_reduced") + plot_variable_list.append("spike") + plot_format_list.append("hybrid") + if "g_gaba" in get_population(f"{pop_name}_reduced").variables: + mon_dict[f"{pop_name}_reduced"].append("g_gaba") + plot_position_list.append(3 * idx + 2) + plot_compartment_list.append(f"{pop_name}_reduced") + plot_variable_list.append("g_gaba") + plot_format_list.append("line") + if "g_ampa" in get_population(f"{pop_name}_reduced").variables: + 
mon_dict[f"{pop_name}_reduced"].append("g_ampa") + plot_position_list.append(3 * idx + 3) + plot_compartment_list.append(f"{pop_name}_reduced") + plot_variable_list.append("g_ampa") + plot_format_list.append("line") + + # if f"{pop_name}_spike_collecting_aux" in self._model_reduced.populations: + # mon_dict[f"{pop_name}_spike_collecting_aux"] = ["r"] + # plot_position_list.append(5 * idx + 3) + # plot_compartment_list.append(f"{pop_name}_spike_collecting_aux") + # plot_variable_list.append("r") + # plot_format_list.append("line") + + # if f"{pop_name}_ampa_aux" in self._model_reduced.populations: + # mon_dict[f"{pop_name}_ampa_aux"] = ["r"] + # plot_position_list.append(5 * idx + 4) + # plot_compartment_list.append(f"{pop_name}_ampa_aux") + # plot_variable_list.append("r") + # plot_format_list.append("line") + + # if f"{pop_name}_gaba_aux" in self._model_reduced.populations: + # mon_dict[f"{pop_name}_gaba_aux"] = ["r"] + # plot_position_list.append(5 * idx + 5) + # plot_compartment_list.append(f"{pop_name}_gaba_aux") + # plot_variable_list.append("r") + # plot_format_list.append("line") + ### TODO record incoming_spikes_proj_name of snr__thal of the thal input aux population + mon = CompNeuroMonitors(mon_dict=mon_dict) ### create the experiment self._exp = self.MyExperiment(monitors=mon) ### initialize all populations with the init sampler @@ -1950,6 +2094,39 @@ def _prepare_get_base(self): self._ub.append(self._max_syn.get(pop_name=pop_name).I_app) self._x0.append(0.0) + ### TODO tmp test, compare normal and reduced model, reduced model part + ### simulate + mon.start() + get_population("thal_reduced").I_app = 20 + simulate(5000) + ### plot + recordings = mon.get_recordings() + recording_times = mon.get_recording_times() + af.PlotRecordings( + figname="tmp_compare_reduced.png", + recordings=recordings, + recording_times=recording_times, + shape=(len(self._model_normal.populations), 3), + plan={ + "position": plot_position_list, + "compartment": 
plot_compartment_list, + "variable": plot_variable_list, + "format": plot_format_list, + }, + ) + print( + "thal rate:", + len(raster_plot(recordings[0]["thal_reduced;spike"])[0]) / (5 * 100), + ) + print( + "snr rate:", + len(raster_plot(recordings[0]["snr_reduced;spike"])[0]) / (5 * 100), + ) + print(self._model_reduced.populations) + print(recordings[0]["gpe_reduced;g_ampa"][:, 0]) + print(recordings[0]["gpe_reduced;g_ampa"][:, 1]) + quit() + def _get_base(self): """ Perform the optimization to find the base currents for the target firing rates. @@ -1967,7 +2144,7 @@ def _get_base(self): lb=np.array(self._lb), ub=np.array(self._ub), tol_error=1, - tol_convergence=0.1, + tol_convergence=0.01, max_it=20, ) @@ -1981,46 +2158,6 @@ def _get_base(self): } return base_dict - def _objective_function_deap(self, population): - """ - Objective function wrapper for the DeapCma optimization. - - Args: - population (list): - List of individuals with input currents for each model population - - Returns: - loss_list (list): - List of losses for each individual of the population - """ - loss_list = [] - ### the population is a list of individuals which are lists of parameters - for individual in population: - loss_of_individual = self._objective_function(I_app_list=individual) - loss_list.append((loss_of_individual,)) - return loss_list - - def _objective_function(self, I_app_list: list[float]): - """ - Objective function to minimize the difference between the target firing rates and - the firing rates of the model with the given input currents. 
- - Args: - I_app_list (list[float]): - List with the input currents for each population - - Returns: - diff (float): - Difference between the target firing rates and the firing rates of the - model with the given input currents - """ - ### get the firing rates of the model with the given input currents - rate_arr = self._get_firing_rate(I_app_list) - ### calculate the difference between the target firing rates and the firing rates - ### of the model with the given input currents - diff = self._target_firing_rate_arr - rate_arr - return np.sum(diff**2) - def _get_firing_rate(self, I_app_list: list[float]): ### convert the I_app_list to a dict I_app_dict = {} @@ -2038,7 +2175,9 @@ def _get_firing_rate(self, I_app_list: list[float]): ### for the spike dict we need the "_reduced" suffix spike_dict = results.recordings[0][f"{pop_name}_reduced;spike"] t, _ = raster_plot(spike_dict) - ### only take spikes after the first 500 ms + ### only take spikes after the first 500 ms, because the neurons are + ### initialized in resting-state and with an input current there can be + ### drastic activity changes at the beginning t = t[t > 500] nbr_spikes = len(t) ### divide number of spikes by the number of neurons and the duration in s diff --git a/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py b/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py index 5830cde..91cb906 100644 --- a/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py +++ b/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py @@ -4,7 +4,6 @@ setup, simulate, get_population, - get_projection, dt, ) from CompNeuroPy.neuron_models import ( @@ -12,20 +11,14 @@ Izhikevich2003NoisyBaseSNR, ) from CompNeuroPy import ( - generate_model, - Monitors, - plot_recordings, - my_raster_plot, + CompNeuroModel, CompNeuroMonitors, PlotRecordings, + my_raster_plot, ) from CompNeuroPy.examples.model_configurator.model_configurator_cnp import ( 
ModelConfigurator, ) -from CompNeuroPy.examples.model_configurator.model_configurator_cnp_old import ( - model_configurator, -) -import matplotlib.pyplot as plt import numpy as np @@ -293,7 +286,7 @@ def BGM_part_function(params): ### create model which should be configurated ### create or compile have no effect setup(dt=0.1) - model = generate_model( + model = CompNeuroModel( model_creation_function=BGM_part_function, model_kwargs={"params": params}, name="BGM_part_model", @@ -360,7 +353,7 @@ def BGM_part_function(params): "stn": 30, "gpe": 50, "snr": 60, - "thal": 5, + "thal": 20, } do_not_config_list = ["cor_exc", "cor_inh"] @@ -373,7 +366,7 @@ def BGM_part_function(params): print_guide=True, I_app_variable="I_app", cache=True, - clear_cache=False, + clear_cache=True, log_file="model_configurator.log", ) @@ -416,81 +409,60 @@ def BGM_part_function(params): model_conf.set_base() ### do a test simulation - mon = Monitors( + mon = CompNeuroMonitors( { "cor_exc": ["spike"], "cor_inh": ["spike"], - "stn": ["spike", "g_ampa", "g_gaba"], - "gpe": ["spike", "g_ampa", "g_gaba"], - "snr": ["spike", "g_ampa", "g_gaba"], - "thal": ["spike", "g_ampa", "g_gaba"], + "stn": ["spike"], + "gpe": ["spike"], + "snr": ["spike"], + "thal": ["spike"], } ) - get_population("cor_exc").rates = target_firing_rate_dict["cor_exc"] - get_population("cor_inh").rates = target_firing_rate_dict["cor_inh"] + ### initial simulation simulate(1000) mon.start() + ### first simulation with default inputs simulate(4000) get_population("cor_exc").rates = 0 - get_population("cor_inh").rates = 0 + ### second simulation with changed inputs simulate(2000) ### get recordings recordings = mon.get_recordings() recording_times = mon.get_recording_times() - stn_g_ampa = recordings[0]["stn;g_ampa"] - stn_g_gaba = recordings[0]["stn;g_gaba"] - cor_spike = recordings[0]["cor_exc;spike"] - cor_spike_arr = np.zeros(stn_g_ampa.shape[0]) - t, n = my_raster_plot(cor_spike) - values, counts = np.unique(t - 10000, 
return_counts=True) - t = values.astype(int) - cor_spike_arr[t] = counts - plt.figure(figsize=(6.4, 4.8 * 2)) - plt.subplot(211) - plt.ylabel("g_ampa") - plt.plot(stn_g_ampa[:, 0], "k.") - plt.subplot(212) - plt.ylabel("g_gaba") - plt.plot(stn_g_gaba[:, 0], "k.") - plt.tight_layout() - plt.savefig("stn_input_model.png", dpi=300) - plt.close("all") - ### print rates for pop_name in model.populations: spike_dict = recordings[0][f"{pop_name};spike"] t, n = my_raster_plot(spike_dict) nr_spikes_1st = np.sum( - (t > int(round(1000 / dt(), 0))) * (t < int(round(5000 / dt(), 0))) + (t > int(round(1000 / dt()))) * (t < int(round(5000 / dt()))) ) - nr_spikes_2nd = np.sum((t > int(round(5000 / dt(), 0)))) + nr_spikes_2nd = np.sum((t > int(round(5000 / dt())))) rate_1st = nr_spikes_1st / (4 * params[f"{pop_name}.size"]) rate_2nd = nr_spikes_2nd / (2 * params[f"{pop_name}.size"]) print(f"pop_name: {pop_name}, rate_1st: {rate_1st}, rate_2nd: {rate_2nd}") ### plot recordings - plot_recordings( + PlotRecordings( figname="model_recordings.png", recordings=recordings, recording_times=recording_times, chunk=0, - shape=(5, 3), - plan=[ - "1;cor_exc;spike;hybrid", - "2;cor_inh;spike;hybrid", - "4;stn;spike;hybrid", - "5;stn;g_ampa;line", - "6;stn;g_gaba;line", - "7;gpe;spike;hybrid", - "8;gpe;g_ampa;line", - "9;gpe;g_gaba;line", - "10;snr;spike;hybrid", - "11;snr;g_ampa;line", - "12;snr;g_gaba;line", - "13;thal;spike;hybrid", - "14;thal;g_ampa;line", - "15;thal;g_gaba;line", - ], + shape=(len(model.populations), 1), + plan={ + "position": list(range(1, len(model.populations) + 1)), + "compartment": model.populations, + "variable": ["spike"] * len(model.populations), + "format": ["hybrid"] * len(model.populations), + }, ) + + """ + TODO: there is a systematic error with Thal + - somehow it is above its target firing rate after optimization + - before the optimization the weights seem to be correctly set in the reduced model + TODO: compare normal and reduced model: do they behave the 
same? + - I think this needs to be done exctly before the get_base optimization + """ diff --git a/src/CompNeuroPy/examples/model_configurator/reduce_model.py b/src/CompNeuroPy/examples/model_configurator/reduce_model.py index ee0962d..770fa16 100644 --- a/src/CompNeuroPy/examples/model_configurator/reduce_model.py +++ b/src/CompNeuroPy/examples/model_configurator/reduce_model.py @@ -16,7 +16,6 @@ from ANNarchy.core import ConnectorMethods import numpy as np from CompNeuroPy import model_functions as mf -from CompNeuroPy.generate_model import generate_model from typingchecker import check_types import inspect from CompNeuroPy import CompNeuroModel @@ -691,7 +690,7 @@ def __init__(self, projection_dict): ### create equations equations = [ f""" - incoming_spikes_{proj_name} = number_synapses_{proj_name} * sum(spikeprob_{vals['pre_name']}) + Normal(0, 1)*sqrt(number_synapses_{proj_name} * sum(spikeprob_{vals['pre_name']}) * (1 - sum(spikeprob_{vals['pre_name']}))) : min=0, max=number_synapses_{proj_name} + incoming_spikes_{proj_name} = round(number_synapses_{proj_name} * sum(spikeprob_{vals['pre_name']}) + Normal(0, 1)*sqrt(number_synapses_{proj_name} * sum(spikeprob_{vals['pre_name']}) * (1 - sum(spikeprob_{vals['pre_name']})))) : min=0, max=number_synapses_{proj_name} """ for proj_name, vals in projection_dict.items() ] diff --git a/src/CompNeuroPy/examples/model_configurator/test.py b/src/CompNeuroPy/examples/model_configurator/test.py index dac9db5..b173216 100644 --- a/src/CompNeuroPy/examples/model_configurator/test.py +++ b/src/CompNeuroPy/examples/model_configurator/test.py @@ -18,7 +18,6 @@ from CompNeuroPy import ( CompNeuroMonitors, PlotRecordings, - interactive_plot, timing_decorator, annarchy_compiled, CompNeuroModel, @@ -38,9 +37,9 @@ setup(dt=0.1) -CONNECTION_PROB = 0.01 +CONNECTION_PROB = 0.6 WEIGHTS = 0.1 -POP_PRE_SIZE = 1000 +POP_PRE_SIZE = 100 POP_POST_SIZE = 100 POP_REDUCED_SIZE = 100 @@ -153,9 +152,9 @@ def create_model(): ### run simulation start 
= time.time() simulate(50.0) - get_population("pop_pre1").rates = 1000.0 + get_population("pop_pre1").rates = 30.0 simulate(50.0) - get_population("pop_pre2").rates = 1000.0 + get_population("pop_pre2").rates = 30.0 simulate(100.0) print("simulate time:", time.time() - start) recordings_normal = monitors.get_recordings() @@ -165,9 +164,8 @@ def create_model(): print("reduced model") ### create model model = _CreateReducedModel( - model=model, reduced_size=POP_REDUCED_SIZE + model=model, reduced_size=POP_REDUCED_SIZE, do_create=True, do_compile=True ).model_reduced - model.compile(warn=False) ### create monitors mon_dict = { "pop_pre1_reduced": ["spike"], @@ -188,9 +186,9 @@ def create_model(): ### run simulation start = time.time() simulate(50.0) - get_population("pop_pre1_reduced").rates = 1000.0 + get_population("pop_pre1_reduced").rates = 30.0 simulate(50.0) - get_population("pop_pre2_reduced").rates = 1000.0 + get_population("pop_pre2_reduced").rates = 30.0 simulate(100.0) print("simulate time:", time.time() - start) recordings_reduced = monitors.get_recordings() @@ -220,7 +218,7 @@ def create_model(): "hybrid", "hybrid", "hybrid", - "line_mean", + "line", ], }, ) @@ -256,7 +254,7 @@ def create_model(): "hybrid", "hybrid", "hybrid", - "line_mean", + "line", "line_mean", "line_mean", "line_mean", diff --git a/src/CompNeuroPy/examples/model_configurator/test2.py b/src/CompNeuroPy/examples/model_configurator/test2.py index 7eddd1e..2c259f6 100644 --- a/src/CompNeuroPy/examples/model_configurator/test2.py +++ b/src/CompNeuroPy/examples/model_configurator/test2.py @@ -3,8 +3,8 @@ import scipy.stats as stats # Parameters -n = 15 # number of trials -p = 0.95 # probability of success +n = 10 # number of trials +p = 0.01 # probability of success N = 10000 # number of samples # Generate data samples @@ -13,14 +13,14 @@ std_dev = np.sqrt(n * p * (1 - p)) normal_sample = np.random.normal(mean, std_dev, N) -### scale normal sample above mean and below mean 
-normal_sample_original = normal_sample.copy() -normal_sample[normal_sample_original >= mean] = ( - normal_sample_original[normal_sample_original >= mean] * 1.1 -) -normal_sample[normal_sample_original < mean] = ( - normal_sample_original[normal_sample_original < mean] * 0.9 -) +# ### scale normal sample above mean and below mean +# normal_sample_original = normal_sample.copy() +# normal_sample[normal_sample_original >= mean] = ( +# normal_sample_original[normal_sample_original >= mean] * 1.1 +# ) +# normal_sample[normal_sample_original < mean] = ( +# normal_sample_original[normal_sample_original < mean] * 0.9 +# ) ### round and clip the normal sample normal_sample = np.round(normal_sample) @@ -66,7 +66,16 @@ bins=n + 1, range=(-0.5, n + 0.5), density=True, - alpha=0.6, + alpha=0.5, + color="b", + label="Binomial", +) +plt.hist( + binomial_sample, + bins=n * 50, + range=(-0.5, n + 0.5), + density=True, + alpha=0.5, color="b", label="Binomial", ) @@ -82,7 +91,16 @@ bins=n + 1, range=(-0.5, n + 0.5), density=True, - alpha=0.6, + alpha=0.5, + color="r", + label="Normal", +) +plt.hist( + normal_sample, + bins=n * 50, + range=(-0.5, n + 0.5), + density=True, + alpha=0.5, color="r", label="Normal", ) From 167fb0f0714d9ec4c73187a89c25fa6ebdd5d4e2 Mon Sep 17 00:00:00 2001 From: olmai Date: Thu, 20 Jun 2024 08:37:01 +0200 Subject: [PATCH 02/21] model_configurator: cleaned upd code --- .../model_configurator_cnp.py | 275 +----------------- .../model_configurator_user.py | 8 - 2 files changed, 13 insertions(+), 270 deletions(-) diff --git a/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py b/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py index c4abd90..bdb9fb0 100644 --- a/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py +++ b/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py @@ -4,10 +4,6 @@ from CompNeuroPy import model_functions as mf from CompNeuroPy import extra_functions as ef from 
CompNeuroPy import system_functions as sf -from CompNeuroPy import analysis_functions as af - -from CompNeuroPy.examples.model_configurator.reduce_model import _CreateReducedModel - from ANNarchy import ( Population, Projection, @@ -17,45 +13,19 @@ Network, get_projection, dt, - parallel_run, simulate, - reset, Neuron, - simulate_until, - Uniform, - get_current_step, - projections, - populations, Binomial, CurrentInjection, raster_plot, set_seed, ) - from ANNarchy.core import ConnectorMethods - -# from ANNarchy.core.Global import _network import numpy as np -from scipy.interpolate import interp1d, interpn -from scipy.signal import find_peaks, argrelmin, argrelextrema +from scipy.signal import argrelmin import matplotlib.pyplot as plt import inspect -import textwrap -import os -import itertools -from tqdm import tqdm -import multiprocessing -import importlib.util -from time import time, strftime -import datetime -from sympy import symbols, Symbol, sympify, solve -from hyperopt import fmin, tpe, hp -import pandas as pd -from scipy.stats import poisson -from ANNarchy.extensions.bold import BoldMonitor -from sklearn.linear_model import LinearRegression import sympy as sp -from scipy.optimize import minimize, Bounds class ArrSampler: @@ -1408,7 +1378,8 @@ def __init__( self._do_not_config_list = do_not_config_list self._target_firing_rate_dict = target_firing_rate_dict self._base_dict = None - self._figure_folder = "model_conf_figures" ### TODO add this to figures + ### TODO add this to figures + self._figure_folder = "model_conf_figures" ### create the figure folder sf.create_dir(self._figure_folder) ### initialize logger @@ -1416,69 +1387,16 @@ def __init__( ### analyze the given model, create model before analyzing, then clear ANNarchy self._analyze_model = AnalyzeModel(model=self._model) ### create the CompNeuroModel object for the reduced model (the model itself is - ### not created yet) TODO: tmp change to old _CreateReducedModel, does this work for thal? 
- # self._model_reduced = CreateReducedModel( - # model=self._model, - # analyze_model=self._analyze_model, - # reduced_size=100, - # do_create=False, - # do_compile=False, - # verbose=True, - # ) - self._model_reduced = _CreateReducedModel( - model=model, reduced_size=100, do_create=False, do_compile=False + ### not created yet) + self._model_reduced = CreateReducedModel( + model=self._model, + analyze_model=self._analyze_model, + reduced_size=100, + do_create=False, + do_compile=False, + verbose=True, ) - # ### TODO: tmp compare the reduced model with the normal model, normal model part - # mf.cnp_clear(functions=False, constants=False) - # self._model.create() - # mon = CompNeuroMonitors( - # mon_dict={pop_name: ["spike"] for pop_name in self._model.populations} - # ) - # mon.start() - # simulate(5000) - # recordings = mon.get_recordings() - # recording_times = mon.get_recording_times() - # af.PlotRecordings( - # figname="tmp_compare_normal.png", - # recordings=recordings, - # recording_times=recording_times, - # shape=(len(self._model.populations), 1), - # plan={ - # "position": list(range(1, len(self._model.populations) + 1)), - # "compartment": self._model.populations, - # "variable": ["spike"] * len(self._model.populations), - # "format": ["hybrid"] * len(self._model.populations), - # }, - # ) - # ### TODO: tmp compare the reduced model with the normal model, reduced model part - # mf.cnp_clear(functions=False, constants=False) - # self._model_reduced.model_reduced.create() - # mon = CompNeuroMonitors( - # mon_dict={ - # f"{pop_name}_reduced": ["spike"] for pop_name in self._model.populations - # } - # ) - # mon.start() - # simulate(5000) - # recordings = mon.get_recordings() - # recording_times = mon.get_recording_times() - # af.PlotRecordings( - # figname="tmp_compare_reduced.png", - # recordings=recordings, - # recording_times=recording_times, - # shape=(len(self._model.populations), 1), - # plan={ - # "position": list(range(1, 
len(self._model.populations) + 1)), - # "compartment": [ - # f"{pop_name}_reduced" for pop_name in self._model.populations - # ], - # "variable": ["spike"] * len(self._model.populations), - # "format": ["hybrid"] * len(self._model.populations), - # }, - # ) - # quit() - ### try to load the cached variables if clear_cache: sf.clear_dir(".model_config_cache") @@ -1852,6 +1770,7 @@ def error_changed(error_list, tol, n=3): y_arr = np.array(y_list) it_arr = np.array(it_list) + ### TODO remove or make this optimal (for debugging), also the prints above plt.close("all") plt.figure() for idx in range(4): @@ -1867,21 +1786,6 @@ def error_changed(error_list, tol, n=3): plt.tight_layout() plt.savefig("optimization.png") - plt.close("all") - plt.figure() - dx_arr = x_arr[1:] - x_arr[:-1] - dx_ausgehend_von = x_arr[:-1] - dy_arr = y_arr[1:] - y_arr[:-1] - dy_ausgehend_von = y_arr[:-1] - for idx in range(4): - ax = plt.subplot(4, 1, idx + 1) - ### plot the x values - plt.plot(dx_ausgehend_von[:, idx], dy_arr[:, idx] / dx_arr[:, idx]) - plt.ylabel(f"dy{idx}/dx{idx}") - plt.xlabel("x") - plt.tight_layout() - plt.savefig("dy_dx_ausgehend_x.png") - class GetBase: def __init__( @@ -1941,77 +1845,6 @@ def _set_model_weights(self): ) def _prepare_get_base(self): - ### TODO I compare here reduced model with normal model and yes, in the reduced model, the thalamus rate is lower... but why - ### if snr is silent --> no diff in thalamus rate between reduced and normal model --> the snr-->thal input is not reduced well - ### its strange that the thalamus neurons in the reduced model seem to have all the same g_ampa... 
but why - ### it should be different since the population calculating the conductance has different number synapses and uses random numbers for incoming spikes - ### it is actually different, but more similiar then in the normal model - ### I think the implementation actually works as epected but the replacement of the binomial distribution with the normal distribution for calculating incoming spikes does not work well here - ### because the spike probabilities are low - ### FOUND PROBLEM: I had to round the incoming spikes to the nearest integer, the binomial distribution to approximate was around n=10, p=0.01 --> many zeros few ones, with the not-rounded normal distribution you got many values between 0 and 1 --> more then 0 --> higher conductance - ### TODO tmp test, compare normal and reduced model, reduced model part - ### clear and create model - mf.cnp_clear(functions=False, constants=False) - self._model_normal.create() - ### create monitors - mon_dict = {} - plot_position_list = [] - plot_compartment_list = [] - plot_variable_list = [] - plot_format_list = [] - for idx, pop_name in enumerate(self._model_normal.populations): - mon_dict[pop_name] = [] - if True: - mon_dict[pop_name].append("spike") - plot_position_list.append(3 * idx + 1) - plot_compartment_list.append(pop_name) - plot_variable_list.append("spike") - plot_format_list.append("hybrid") - if "g_gaba" in get_population(pop_name).variables: - mon_dict[pop_name].append("g_gaba") - plot_position_list.append(3 * idx + 2) - plot_compartment_list.append(pop_name) - plot_variable_list.append("g_gaba") - plot_format_list.append("line") - if "g_ampa" in get_population(pop_name).variables: - mon_dict[pop_name].append("g_ampa") - plot_position_list.append(3 * idx + 3) - plot_compartment_list.append(pop_name) - plot_variable_list.append("g_ampa") - plot_format_list.append("line") - mon = CompNeuroMonitors(mon_dict=mon_dict) - ### initialize the populations with the init sampler - for pop_name in 
self._pop_names_config: - self._init_sampler.get(pop_name=pop_name).set_init_variables( - get_population(pop_name) - ) - ### set the weights - for proj_name, weight in self._weight_dicts.weight_dict.items(): - setattr(get_projection(proj_name), "w", weight) - ### simulate - mon.start() - get_population("thal").I_app = 20 - simulate(5000) - ### plot - recordings = mon.get_recordings() - recording_times = mon.get_recording_times() - af.PlotRecordings( - figname="tmp_compare_normal.png", - recordings=recordings, - recording_times=recording_times, - shape=(len(self._model_normal.populations), 3), - plan={ - "position": plot_position_list, - "compartment": plot_compartment_list, - "variable": plot_variable_list, - "format": plot_format_list, - }, - ) - print( - "thal rate:", len(raster_plot(recordings[0]["thal;spike"])[0]) / (5 * 100) - ) - print("snr rate:", len(raster_plot(recordings[0]["snr;spike"])[0]) / (5 * 100)) - ### clear ANNarchy mf.cnp_clear(functions=False, constants=False) ### create and compile the model @@ -2024,55 +1857,6 @@ def _prepare_get_base(self): for pop_name in self._model_normal.populations } ) - ### TODO: tmp monitors to compare normal and reduced model - mon_dict = {} - plot_position_list = [] - plot_compartment_list = [] - plot_variable_list = [] - plot_format_list = [] - for idx, pop_name in enumerate(self._model_normal.populations): - mon_dict[f"{pop_name}_reduced"] = [] - if True: - mon_dict[f"{pop_name}_reduced"].append("spike") - plot_position_list.append(3 * idx + 1) - plot_compartment_list.append(f"{pop_name}_reduced") - plot_variable_list.append("spike") - plot_format_list.append("hybrid") - if "g_gaba" in get_population(f"{pop_name}_reduced").variables: - mon_dict[f"{pop_name}_reduced"].append("g_gaba") - plot_position_list.append(3 * idx + 2) - plot_compartment_list.append(f"{pop_name}_reduced") - plot_variable_list.append("g_gaba") - plot_format_list.append("line") - if "g_ampa" in get_population(f"{pop_name}_reduced").variables: - 
mon_dict[f"{pop_name}_reduced"].append("g_ampa") - plot_position_list.append(3 * idx + 3) - plot_compartment_list.append(f"{pop_name}_reduced") - plot_variable_list.append("g_ampa") - plot_format_list.append("line") - - # if f"{pop_name}_spike_collecting_aux" in self._model_reduced.populations: - # mon_dict[f"{pop_name}_spike_collecting_aux"] = ["r"] - # plot_position_list.append(5 * idx + 3) - # plot_compartment_list.append(f"{pop_name}_spike_collecting_aux") - # plot_variable_list.append("r") - # plot_format_list.append("line") - - # if f"{pop_name}_ampa_aux" in self._model_reduced.populations: - # mon_dict[f"{pop_name}_ampa_aux"] = ["r"] - # plot_position_list.append(5 * idx + 4) - # plot_compartment_list.append(f"{pop_name}_ampa_aux") - # plot_variable_list.append("r") - # plot_format_list.append("line") - - # if f"{pop_name}_gaba_aux" in self._model_reduced.populations: - # mon_dict[f"{pop_name}_gaba_aux"] = ["r"] - # plot_position_list.append(5 * idx + 5) - # plot_compartment_list.append(f"{pop_name}_gaba_aux") - # plot_variable_list.append("r") - # plot_format_list.append("line") - ### TODO record incoming_spikes_proj_name of snr__thal of the thal input aux population - mon = CompNeuroMonitors(mon_dict=mon_dict) ### create the experiment self._exp = self.MyExperiment(monitors=mon) ### initialize all populations with the init sampler @@ -2094,39 +1878,6 @@ def _prepare_get_base(self): self._ub.append(self._max_syn.get(pop_name=pop_name).I_app) self._x0.append(0.0) - ### TODO tmp test, compare normal and reduced model, reduced model part - ### simulate - mon.start() - get_population("thal_reduced").I_app = 20 - simulate(5000) - ### plot - recordings = mon.get_recordings() - recording_times = mon.get_recording_times() - af.PlotRecordings( - figname="tmp_compare_reduced.png", - recordings=recordings, - recording_times=recording_times, - shape=(len(self._model_normal.populations), 3), - plan={ - "position": plot_position_list, - "compartment": 
plot_compartment_list, - "variable": plot_variable_list, - "format": plot_format_list, - }, - ) - print( - "thal rate:", - len(raster_plot(recordings[0]["thal_reduced;spike"])[0]) / (5 * 100), - ) - print( - "snr rate:", - len(raster_plot(recordings[0]["snr_reduced;spike"])[0]) / (5 * 100), - ) - print(self._model_reduced.populations) - print(recordings[0]["gpe_reduced;g_ampa"][:, 0]) - print(recordings[0]["gpe_reduced;g_ampa"][:, 1]) - quit() - def _get_base(self): """ Perform the optimization to find the base currents for the target firing rates. @@ -2674,7 +2425,7 @@ def __init__(self, projection_dict): ### create equations equations = [ f""" - incoming_spikes_{proj_name} = number_synapses_{proj_name} * sum(spikeprob_{vals['pre_name']}) + Normal(0, 1)*sqrt(number_synapses_{proj_name} * sum(spikeprob_{vals['pre_name']}) * (1 - sum(spikeprob_{vals['pre_name']}))) : min=0, max=number_synapses_{proj_name} + incoming_spikes_{proj_name} = round(number_synapses_{proj_name} * sum(spikeprob_{vals['pre_name']}) + Normal(0, 1)*sqrt(number_synapses_{proj_name} * sum(spikeprob_{vals['pre_name']}) * (1 - sum(spikeprob_{vals['pre_name']})))) : min=0, max=number_synapses_{proj_name} """ for proj_name, vals in projection_dict.items() ] diff --git a/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py b/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py index 91cb906..d1ff13a 100644 --- a/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py +++ b/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py @@ -458,11 +458,3 @@ def BGM_part_function(params): "format": ["hybrid"] * len(model.populations), }, ) - - """ - TODO: there is a systematic error with Thal - - somehow it is above its target firing rate after optimization - - before the optimization the weights seem to be correctly set in the reduced model - TODO: compare normal and reduced model: do they behave the same? 
Subject: [PATCH 03/21] model_configurator: - improved code structure and
 logging/printing - found new problem with model reduction if p is small and n
 is large - started to find a better approximation of the binomial
 distribution
- """ - - def __init__(self, arr: np.ndarray, var_name_list: list[str]) -> None: - """ - Args: - arr (np.ndarray) - array with shape (n_samples, n_variables) - var_name_list (list[str]) - list of variable names - """ - self.arr_shape = arr.shape - self.var_name_list = var_name_list - ### check values of any variable are constant - self.is_const = np.std(arr, axis=0) <= np.mean(np.absolute(arr), axis=0) / 1000 - ### for the constant variables only the first value is used - self.constant_arr = arr[0, self.is_const] - ### array without the constant variables - self.not_constant_val_arr = arr[:, np.logical_not(self.is_const)] +_p_g_after_get_weights = ( + lambda template_weight_dict, template_synaptic_load_dict, template_synaptic_contribution_dict: f"""Now either set the weights of all projections directly or first set the synaptic load of the populations and the synaptic contributions of the afferent projections. +You can set the weights using the function .set_weights() which requires a weight_dict as argument. +Use this template for the weight_dict: - def sample(self, n=1, seed=0): - """ - Sample n samples from the array. +{template_weight_dict} - Args: - n (int) - number of samples to be drawn - seed (int) - seed for the random number generator +The values within the template are the maximum weight values. 
- Returns: - ret_arr (np.ndarray) - array with shape (n, n_variables) - """ - ### get n random indices along the n_samples axis - rng = np.random.default_rng(seed=seed) - random_idx_arr = rng.integers(low=0, high=self.arr_shape[0], size=n) - ### sample with random idx - sample_arr = self.not_constant_val_arr[random_idx_arr] - ### create return array - ret_arr = np.zeros((n,) + self.arr_shape[1:]) - ### add samples to return array - ret_arr[:, np.logical_not(self.is_const)] = sample_arr - ### add constant values to return array - ret_arr[:, self.is_const] = self.constant_arr - return ret_arr +You can set the synaptic load and contribution using the function .set_syn_load() which requires a synaptic_load_dict for the synaptic load of the populations and a synaptic_contribution_dict for the synaptic contributions to the synaptic load of the afferent projections. +Use this template for the synaptic_load_dict: - def set_init_variables(self, population: Population): - """ - Set the initial variables of the given population to the given values. - """ - variable_init_arr = self.sample(len(population), seed=0) - var_name_list = self.var_name_list - for var_name in population.variables: - if var_name in var_name_list: - set_val = variable_init_arr[:, var_name_list.index(var_name)] - setattr(population, var_name, set_val) +{template_synaptic_load_dict} +These values scale the maximum possible weights of the afferent projections of the corresponing target types for each population. The value 1 means that the highest possible weight in the afferent projections is the maximum weight of the population/target type (obtained previously by the ModelConfigurator). -class AnalyzeModel: - """ - Class to analyze the given model to be able to reproduce it. 
- """ +Use this template for the synaptic_contribution_dict: - _connector_methods_dict = { - "One-to-One": ConnectorMethods.connect_one_to_one, - "All-to-All": ConnectorMethods.connect_all_to_all, - "Gaussian": ConnectorMethods.connect_gaussian, - "Difference-of-Gaussian": ConnectorMethods.connect_dog, - "Random": ConnectorMethods.connect_fixed_probability, - "Random Convergent": ConnectorMethods.connect_fixed_number_pre, - "Random Divergent": ConnectorMethods.connect_fixed_number_post, - "User-defined": ConnectorMethods.connect_with_func, - "MatrixMarket": ConnectorMethods.connect_from_matrix_market, - "Connectivity matrix": ConnectorMethods.connect_from_matrix, - "Sparse connectivity matrix": ConnectorMethods.connect_from_sparse, - "From File": ConnectorMethods.connect_from_file, - } +{template_synaptic_contribution_dict} - def __init__(self, model: CompNeuroModel): - ### clear ANNarchy and create the model - self._clear_model(model=model, do_create=True) +These values scale the maximum weights which are set by the synaptic_load_dict. So values between 0 and 1 make sense (1 means, take the maximum possible weight, 0.5 means take half of the maximum possible weight, etc.). +""" +) - ### get population info (eq, params etc.) - self._analyze_populations(model=model) - ### get projection info - self._analyze_projections(model=model) +class ModelConfigurator: + """ + The external applied current has to be the variable I_app. 
+ """ - ### clear ANNarchy - self._clear_model(model=model, do_create=False) + def __init__( + self, + model: CompNeuroModel, + target_firing_rate_dict: dict, + max_psp: float = 1.0, + v_tol: float = 1.0, + do_not_config_list: list[str] = [], + cache: bool = False, + clear_cache: bool = False, + log_file: str | None = None, + verbose: bool = False, + do_plot: bool = False, + ): + """ + Args: + model (CompNeuroModel): + Model to be configured + target_firing_rate_dict (dict): + Dict with the target firing rates for each population + max_psp (float, optional): + Maximum inhibitory post-synaptic potential caused by single spikes in mV + Default is 1.0 + v_tol (float, optional): + Tolerance for the firing rate in Hz for the optimization of the baseline + currents. Default is 1.0 + do_not_config_list (list[str], optional): + List with the names of the populations which should not be configured + (they should already have the firing rates given in + target_firing_rate_dict). Default is [] + cache (bool, optional): + If True, it is tried to load cached variables. If not possible variables + created during the initialization are cached. Default is False + clear_cache (bool, optional): + If True, the cache is cleared and new variables are created. Set this to + True if you changed the model, max_psp or target_firing_rate_dict. + Default is False + log_file (str | None, optional): + Name of the log file to be created. If None, no log file is created. + Default is None + verbose (bool, optional): + If True, the log is printed to the console. Default is False + do_plot (bool, optional): + If True, the plots of simulated data are saved in the folder + 'model_configurator_figures'. Drastically increases the runtime. 
+ Default is False + """ + ### update global verbose and do_plot + global _model_configurator_verbose + _model_configurator_verbose = verbose + global _model_configurator_do_plot + _model_configurator_do_plot = do_plot + ### store the given variables + self._model = model + self._do_not_config_list = do_not_config_list + self._target_firing_rate_dict = target_firing_rate_dict + self._base_dict = None + self.v_tol = v_tol + ### store the state of the given model + self._model_was_created = model.created + self._model_was_compiled = model.compiled + ### create the figure folder + sf.create_dir(_model_configurator_figure_folder) + ### initialize logger + sf.Logger(log_file=log_file) + ### analyze the given model, create model before analyzing, then clear ANNarchy + self._analyze_model = AnalyzeModel(model=self._model) + ### create the CompNeuroModel object for the reduced model (the model itself is + ### not created yet) + self._model_reduced = CreateReducedModel( + model=self._model, + analyze_model=self._analyze_model, + reduced_size=100, + do_create=False, + do_compile=False, + verbose=_model_configurator_verbose, + ) + ### try to load the cached variables + if clear_cache: + sf.clear_dir(".model_config_cache") + cache_worked = False + if cache: + try: + ### load the cached variables + cache_loaded = sf.load_variables( + name_list=["init_sampler", "max_syn"], + path=".model_config_cache", + ) + cache_worked = True + sf.Logger().log( + "Loaded cached variables.", verbose=_model_configurator_verbose + ) + except FileNotFoundError: + pass + if not cache_worked: + sf.Logger().log( + "No cached variables loaded, creating new ones.", + verbose=_model_configurator_verbose, + ) + ### create the single neuron networks (networks are compiled and ready to be + ### simulated), normal model for searching for max conductances, max input + ### current, resting firing rate; voltage clamp model for preparing the PSP + ### simulationssearching, i.e., for resting potential and 
corresponding input + ### current I_hold (for self-active neurons) + if not cache_worked: + self._single_nets = CreateSingleNeuronNetworks( + model=self._model, + analyze_model=self._analyze_model, + do_not_config_list=do_not_config_list, + ) + ### get the init sampler for the populations + self._init_sampler = self._single_nets.init_sampler( + model=self._model, do_not_config_list=do_not_config_list + ) + ### create simulator with single_nets + self._simulator = Simulator( + single_nets=self._single_nets, + prepare_psp=None, + ) + else: + self._init_sampler: CreateSingleNeuronNetworks.AllSampler = cache_loaded[ + "init_sampler" + ] + ### get the resting potential and corresponding I_hold for each population using + ### the voltage clamp networks + if not cache_worked: + self._prepare_psp = PreparePSP( + model=self._model, + single_nets=self._single_nets, + do_not_config_list=do_not_config_list, + simulator=self._simulator, + ) + self._simulator = Simulator( + single_nets=self._single_nets, + prepare_psp=self._prepare_psp, + ) + ### get the maximum synaptic conductances and input currents for each population + if not cache_worked: + self._max_syn = GetMaxSyn( + model=self._model, + simulator=self._simulator, + do_not_config_list=do_not_config_list, + max_psp=max_psp, + target_firing_rate_dict=target_firing_rate_dict, + ).max_syn_getter + else: + self._max_syn = cache_loaded["max_syn"] + ### cache single_nets, prepare_psp, max_syn + if cache and not cache_worked: + sf.Logger().log("Caching variables.", verbose=_model_configurator_verbose) + sf.save_variables( + variable_list=[ + self._init_sampler, + self._max_syn, + ], + name_list=["init_sampler", "max_syn"], + path=".model_config_cache", + ) + ### get the weights dictionaries + self._weight_dicts = GetWeights( + model=self._model, + do_not_config_list=do_not_config_list, + analyze_model=self._analyze_model, + max_syn=self._max_syn, + ) - def _clear_model(self, model: CompNeuroModel, do_create: bool = True): + ### 
recreate the state of the given model + sf.Logger().log( + "Clearing ANNarchy and recreating the given model's state.", + verbose=_model_configurator_verbose, + ) mf.cnp_clear(functions=False, constants=False) - if do_create: - model.create(do_compile=False) + if self._model_was_created: + self._model.create(do_compile=self._model_was_compiled) + + ### TODO print an explanation for setting the weight dict or the syn load/contribution dicts + if _model_configurator_verbose: + self._p_g( + _p_g_after_get_weights( + self._weight_dicts.weight_dict, + self._weight_dicts.syn_load_dict, + self._weight_dicts.syn_contribution_dict, + ) + ) - def _analyze_populations(self, model: CompNeuroModel): + def _p_g(self, txt): """ - Get info of each population - - Args: - model (CompNeuroModel): - Model to be analyzed + prints guiding text """ - ### values of the paramters and variables of the population's neurons, keys are - ### the names of paramters and variables - self.neuron_model_attr_dict: dict[str, dict] = {} - ### arguments of the __init__ function of the Neuron class - self.neuron_model_init_parameter_dict: dict[str, dict] = {} - ### arguments of the __init__ function of the Population class - self.pop_init_parameter_dict: dict[str, dict] = {} + print_width = min([os.get_terminal_size().columns, 80]) - ### for loop over all populations - for pop_name in model.populations: - pop: Population = get_population(pop_name) - ### get the neuron model attributes (parameters/variables) - ### old: self.neuron_model_parameters_dict - ### old: self.neuron_model_attributes_dict = keys() - self.neuron_model_attr_dict[pop.name] = pop.init - ### get a dict of all arguments of the __init__ function of the Neuron - ### ignore self - ### old: self.neuron_model_dict[pop_name] - init_params = inspect.signature(Neuron.__init__).parameters - self.neuron_model_init_parameter_dict[pop.name] = { - param: getattr(pop.neuron_type, param) - for param in init_params - if param != "self" - } - ### get a 
dict of all arguments of the __init__ function of the Population - ### ignore self, storage_order and copied - init_params = inspect.signature(Population.__init__).parameters - self.pop_init_parameter_dict[pop.name] = { - param: getattr(pop, param) - for param in init_params - if param != "self" and param != "storage_order" and param != "copied" - } + print("\n[model_configurator guide]:") + for line in txt.splitlines(): + wrapped_text = textwrap.fill( + line, width=print_width - 5, replace_whitespace=False + ) + wrapped_text = textwrap.indent(wrapped_text, " |") + print(wrapped_text) + print("") - def _analyze_projections(self, model: CompNeuroModel): + def set_weights(self, weight_dict: dict[str, float]): """ - Get info of each projection + Set the weights of the model. Args: - model (CompNeuroModel): - Model to be analyzed + weight_dict (dict[str, float]): + Dict with the weights for each projection """ + self._weight_dicts.weight_dict = weight_dict + self._check_if_not_config_pops_have_correct_rates() + + def set_syn_load( + self, + syn_load_dict: dict[str, float], + syn_contribution_dict: dict[str, dict[str, float]], + ): + """ + Set the synaptic load of the model. + + Args: + syn_load_dict (dict[str, float]): + Dict with ampa and gaba synaptic load for each population + syn_contribution_dict (dict[str, dict[str, float]]): + Dict with the contribution of the afferent projections to the ampa and + gaba synaptic load of each population + """ + self._weight_dicts.syn_load_dict = syn_load_dict + self._weight_dicts.syn_contribution_dict = syn_contribution_dict + self._check_if_not_config_pops_have_correct_rates() + + def _check_if_not_config_pops_have_correct_rates(self): + """ + Check if the populations which should not be configured have the correct firing + rates. 
+ """ + ### initialize the normal model + compile the model + self._init_model_with_fitted_base() + + ### record spikes of the do_not_config populations + mon = CompNeuroMonitors( + mon_dict={ + pop_name: ["spike"] for pop_name in self._do_not_config_list + } # _model.populations # tmp test + ) + mon.start() + ### simulate the model for 5000 ms + # get_population("stn").I_app = 8 # tmp test + simulate(5000) + + ### get the firing rates + recordings = mon.get_recordings() + for pop_name in self._do_not_config_list: + spike_dict = recordings[0][f"{pop_name};spike"] + t, _ = raster_plot(spike_dict) + spike_count = len(t) + pop_size = len(get_population(pop_name)) + firing_rate = spike_count / (5 * pop_size) + if np.abs(firing_rate - self._target_firing_rate_dict[pop_name]) > 1: + sf.Logger().log( + f"Warning: Population {pop_name} has a firing rate of {firing_rate} instead of {self._target_firing_rate_dict[pop_name]}" + ) + print( + f"Warning: Population {pop_name} has a firing rate of {firing_rate} instead of {self._target_firing_rate_dict[pop_name]}" + ) + + # ### tmp plot + # recording_times = mon.get_recording_times() + + # af.PlotRecordings( + # figname="tmp.png", + # recordings=recordings, + # recording_times=recording_times, + # shape=(len(self._model.populations), 1), + # plan={ + # "position": list(range(1, len(self._model.populations) + 1)), + # "compartment": self._model.populations, + # "variable": ["spike"] * len(self._model.populations), + # "format": ["hybrid"] * len(self._model.populations), + # }, + # ) + # quit() + + def set_base(self, base_dict: dict[str, float] | None = None): + """ + Set the baseline currents of the model, found for the current weights to reach + the target firing rates. The model is compiled after setting the baselines. + + Args: + base_dict (dict[str, float], optional): + Dict with the baseline currents for each population. If None, the + optimized baseline currents are used. 
Default is None + """ + ### either use the given base dict or get the base dict (or use the stored one) + if base_dict is None: + if self._base_dict is None: + self.get_base() + base_dict = self._base_dict + + ### initialize the normal model + set the baselines with the base dict + self._init_model_with_fitted_base(base_dict=base_dict) + + def get_base(self): + """ + Get the baseline currents of the model. + + Returns: + base_dict (dict[str, float]): + Dict with the baseline currents for each population + """ + ### get the base dict + self._base_dict = GetBase( + model_normal=self._model, + model_reduced=self._model_reduced, + target_firing_rate_dict=self._target_firing_rate_dict, + weight_dicts=self._weight_dicts, + do_not_config_list=self._do_not_config_list, + init_sampler=self._init_sampler, + max_syn=self._max_syn, + v_tol=self.v_tol, + ).base_dict + return self._base_dict + + def _init_model_with_fitted_base(self, base_dict: dict[str, float] | None = None): + """ + Initialize the neurons of the model using the init_sampler, set the baseline + currents of the model from the base dict (containing fitted baselines) and the + weights from the weight dicts and compile the model. 
+ """ + ### clear ANNarchy and create the normal model + mf.cnp_clear(functions=False, constants=False) + self._model.create(do_compile=False) + ### set the initial variables of the neurons #TODO small problem = init sampler + ### initializes the neurons in resting-state, but here they get an input current + for pop_name, init_sampler in self._init_sampler.init_sampler_dict.items(): + init_sampler.set_init_variables(get_population(pop_name)) + ### set the baseline currents + if base_dict is not None: + for pop_name, I_app in base_dict.items(): + setattr(get_population(pop_name), "I_app", I_app) + ### compile the model + self._model.compile() + ### set the weights + for proj_name, weight in self._weight_dicts.weight_dict.items(): + setattr(get_projection(proj_name), "w", weight) + + +class AnalyzeModel: + """ + Class to analyze the given model to be able to reproduce it. + """ + + _connector_methods_dict = { + "One-to-One": ConnectorMethods.connect_one_to_one, + "All-to-All": ConnectorMethods.connect_all_to_all, + "Gaussian": ConnectorMethods.connect_gaussian, + "Difference-of-Gaussian": ConnectorMethods.connect_dog, + "Random": ConnectorMethods.connect_fixed_probability, + "Random Convergent": ConnectorMethods.connect_fixed_number_pre, + "Random Divergent": ConnectorMethods.connect_fixed_number_post, + "User-defined": ConnectorMethods.connect_with_func, + "MatrixMarket": ConnectorMethods.connect_from_matrix_market, + "Connectivity matrix": ConnectorMethods.connect_from_matrix, + "Sparse connectivity matrix": ConnectorMethods.connect_from_sparse, + "From File": ConnectorMethods.connect_from_file, + } + + def __init__(self, model: CompNeuroModel): + sf.Logger().log( + "Analyzing model to be able to reproduce it", + verbose=_model_configurator_verbose, + ) + ### clear ANNarchy and create the model + self._clear_model(model=model, do_create=True) + + ### get population info (eq, params etc.) 
+ self._analyze_populations(model=model) + + ### get projection info + self._analyze_projections(model=model) + + ### clear ANNarchy + self._clear_model(model=model, do_create=False) + + def _clear_model(self, model: CompNeuroModel, do_create: bool = True): + if do_create: + sf.Logger().log( + "Clearing ANNarchy and creating model before analyzing", + verbose=_model_configurator_verbose, + ) + else: + sf.Logger().log( + "Clearing ANNarchy after analyzing", + verbose=_model_configurator_verbose, + ) + mf.cnp_clear(functions=False, constants=False) + if do_create: + model.create(do_compile=False) + + def _analyze_populations(self, model: CompNeuroModel): + """ + Get info of each population + + Args: + model (CompNeuroModel): + Model to be analyzed + """ + sf.Logger().log( + "Analyzing populations to be able to reproduce them", + verbose=_model_configurator_verbose, + ) + ### values of the paramters and variables of the population's neurons, keys are + ### the names of paramters and variables + self.neuron_model_attr_dict: dict[str, dict] = {} + ### arguments of the __init__ function of the Neuron class + self.neuron_model_init_parameter_dict: dict[str, dict] = {} + ### arguments of the __init__ function of the Population class + self.pop_init_parameter_dict: dict[str, dict] = {} + + ### for loop over all populations + for pop_name in model.populations: + sf.Logger().log( + f"Analyzing population {pop_name}", verbose=_model_configurator_verbose + ) + pop: Population = get_population(pop_name) + ### get the neuron model attributes (parameters/variables) + ### old: self.neuron_model_parameters_dict + ### old: self.neuron_model_attributes_dict = keys() + self.neuron_model_attr_dict[pop.name] = pop.init + ### get a dict of all arguments of the __init__ function of the Neuron + ### ignore self + ### old: self.neuron_model_dict[pop_name] + init_params = inspect.signature(Neuron.__init__).parameters + self.neuron_model_init_parameter_dict[pop.name] = { + param: 
getattr(pop.neuron_type, param) + for param in init_params + if param != "self" + } + ### get a dict of all arguments of the __init__ function of the Population + ### ignore self, storage_order and copied + init_params = inspect.signature(Population.__init__).parameters + self.pop_init_parameter_dict[pop.name] = { + param: getattr(pop, param) + for param in init_params + if param != "self" and param != "storage_order" and param != "copied" + } + + def _analyze_projections(self, model: CompNeuroModel): + """ + Get info of each projection + + Args: + model (CompNeuroModel): + Model to be analyzed + """ + sf.Logger().log( + "Analyzing projections to be able to reproduce them", + verbose=_model_configurator_verbose, + ) ### parameters of the __init__ function of the Projection class self.proj_init_parameter_dict: dict[str, dict] = {} ### parameters of the __init__ function of the Synapse class @@ -197,6 +541,9 @@ def _analyze_projections(self, model: CompNeuroModel): ### loop over all projections for proj_name in model.projections: + sf.Logger().log( + f"Analyzing projection {proj_name}", verbose=_model_configurator_verbose + ) proj: Projection = get_projection(proj_name) ### get the synapse model attributes (parameters/variables) self.synapse_model_attr_dict[proj.name] = proj.init @@ -342,807 +689,1279 @@ def _get_connector_parameters(self, proj: Projection): } -class CreateSingleNeuronNetworks: +class CreateReducedModel: """ - Class to create single neuron networks for normal and voltage clamp mode. + Class to create a reduced model from the original model. It is accessable via the + attribute model_reduced. 
Attributes: - single_net_dict (dict): - Nested dict containing the single neuron networks for normal and voltage - clamp mode - keys: mode (str) - normal or v_clamp - values: dict - keys: pop_name (str) - population name - values: dict - keys: net, population, monitor, init_sampler - values: Network, Population, Monitor, ArrSampler + model_reduced (CompNeuroModel): + Reduced model, created but not compiled """ def __init__( self, model: CompNeuroModel, analyze_model: AnalyzeModel, - do_not_config_list: list[str], - ): + reduced_size: int, + do_create: bool = False, + do_compile: bool = False, + verbose: bool = False, + ) -> None: """ + Prepare model for reduction. + Args: model (CompNeuroModel): - Model to be analyzed - analyze_model (AnalyzeModel): - Analyzed model - do_not_config_list (list[str]): - List of population names which should not be configured - """ - self._single_net_dict = {} - ### create the single neuron networks for normal and voltage clamp mode - for mode in ["normal", "v_clamp"]: - self._single_net_dict[mode] = {} - self._create_single_neuron_networks( - model=model, - analyze_model=analyze_model, - do_not_config_list=do_not_config_list, - mode=mode, - ) - - def single_net(self, pop_name: str, mode: str): - """ - Return the information of the single neuron network for the given population and - mode. 
- - Args: - pop_name (str): - Name of the population - mode (str): - Mode for which the single neuron network should be returned (normal or - v_clamp) - - Returns: - ReturnSingleNeuronNetworks: - Information of the single neuron network with Attributes: net, - population, monitor, init_sampler + Model to be reduced + reduced_size (int): + Size of the reduced populations """ - return self.ReturnSingleNeuronNetworks(self._single_net_dict[mode][pop_name]) - - class ReturnSingleNeuronNetworks: - def __init__(self, single_net_dict): - self.net: Network = single_net_dict["net"] - self.population: Population = single_net_dict["population"] - self.monitor: Monitor = single_net_dict["monitor"] - self.init_sampler: ArrSampler = single_net_dict["init_sampler"] + ### set the attributes + self._model = model + self._analyze_model = analyze_model + self._reduced_size = reduced_size + self._verbose = verbose + ### recreate model with reduced populations and projections + sf.Logger().log( + "Initializing CompNeuroModel with reduced populations and projections.", + verbose=self._verbose, + ) + self.model_reduced = CompNeuroModel( + model_creation_function=self.recreate_model, + name=f"{model.name}_reduced", + description=f"{model.description}\nWith reduced populations and projections.", + do_create=do_create, + do_compile=do_compile, + compile_folder_name=f"{model.compile_folder_name}_reduced", + ) - def init_sampler(self, model: CompNeuroModel, do_not_config_list: list[str]): + def set_weights(self, weight_dict: dict[str, float]): """ - Return the init samplers for all populations of the normal mode. All samplers - are returned in an object with a get method to get the sampler for a specific - population. + Set the weights of the reduced model. 
Args: - model (CompNeuroModel): - Model to be analyzed - do_not_config_list (list[str]): - List of population names which should not be configured - - Returns: - AllSampler: - Object with a get method to get the init sampler for a specific - population + weight_dict (dict[str, float]): + Dict with the weights for each projection of the original model """ - init_sampler_dict = {} - for pop_name in model.populations: - if pop_name in do_not_config_list: - continue - init_sampler_dict[pop_name] = self._single_net_dict["normal"][pop_name][ - "init_sampler" + ### return if the model was not created yet + if not self.model_reduced.created: + print("Warning: Model was not created yet. Cannot set weights.") + return + ### loop over all projections with a given weight + for proj_name, weight in weight_dict.items(): + ### get the name of the post population + post_pop_name = self._analyze_model.pre_post_pop_name_dict[proj_name][1] + ### get the target type of the projection + target_type = self._analyze_model.proj_init_parameter_dict[proj_name][ + "target" ] - return self.AllSampler(init_sampler_dict) - - class AllSampler: - def __init__(self, init_sampler_dict: dict[str, ArrSampler]): - self.init_sampler_dict = init_sampler_dict - - def get(self, pop_name: str): - """ - Get the init sampler for the given population. - - Args: - pop_name (str): - Name of the population - - Returns: - sampler (ArrSampler): - Init sampler for the given population - """ - sampler: ArrSampler = self.init_sampler_dict[pop_name] - return sampler + ### set the weight in the conductance-calculating input current population + setattr( + get_population(f"{post_pop_name}_{target_type}_aux"), + f"weights_{proj_name}", + weight, + ) - def _create_single_neuron_networks( - self, - model: CompNeuroModel, - analyze_model: AnalyzeModel, - do_not_config_list: list[str], - mode: str, - ): + def recreate_model(self): """ - Create the single neuron networks for the given mode. Sets the single_net_dict. 
- - Args: - model (CompNeuroModel): - Model to be analyzed - analyze_model (AnalyzeModel): - Analyzed model - do_not_config_list (list[str]): - List of population names which should not be configured - mode (str): - Mode for which the single neuron networks should be created + Recreates the model with reduced populations and projections. """ + ### 1st for each population create a reduced population + for pop_name in self._model.populations: + self.create_reduced_pop(pop_name) + ### 2nd for each population which is a presynaptic population, create a spikes collecting aux population + for pop_name in self._model.populations: + self.create_spike_collecting_aux_pop(pop_name) + ## 3rd for each population which has afferents create a population for incoming spikes for each target type + for pop_name in self._model.populations: + self.create_conductance_aux_pop(pop_name, target="ampa") + self.create_conductance_aux_pop(pop_name, target="gaba") - ### loop over populations which should be configured - for pop_name in model.populations: - ### skip populations which should not be configured - if pop_name in do_not_config_list: - continue - ### store the dict containing the network etc - self._single_net_dict[mode][pop_name] = self._create_net_single( - pop_name=pop_name, analyze_model=analyze_model, mode=mode - ) - - def _create_net_single(self, pop_name: str, analyze_model: AnalyzeModel, mode: str): + def create_reduced_pop(self, pop_name: str): """ - Creates a network with the neuron type of the population given by pop_name for - the given mode. The population size is set to 1. + Create a reduced population from the given population. 
Args: pop_name (str): - Name of the population - analyze_model (AnalyzeModel): - Analyzed model - mode (str): - Mode for which the network should be created - - Returns: - net_single_dict (dict): - Dict containing the Network, Population, Monitor and ArrSampler objects + Name of the population to be reduced """ - ### create the adjusted neuron model for the stop condition - neuron_model_new = self._get_single_neuron_neuron_model( - pop_name=pop_name, analyze_model=analyze_model, mode=mode - ) - - ### create the single neuron population - pop_single_neuron = self._get_single_neuron_population( - pop_name=pop_name, - neuron_model_new=neuron_model_new, - analyze_model=analyze_model, - mode=mode, + sf.Logger().log( + f"Create reduced population for {pop_name}.", verbose=self._verbose ) + ### 1st check how the population is connected + _, is_postsynaptic, ampa, gaba = self.how_pop_is_connected(pop_name) - ### create Monitor for single neuron - if mode == "normal": - mon_single = Monitor(pop_single_neuron, ["spike", "v"]) - elif mode == "v_clamp": - mon_single = Monitor(pop_single_neuron, ["v_clamp_rec_sign"]) - - ### create network with single neuron and compile it - net_single = Network() - net_single.add([pop_single_neuron, mon_single]) - mf.compile_in_folder( - folder_name=f"single_net_{mode}_{pop_name}", silent=True, net=net_single + ### 2nd recreate neuron model + ### get the stored parameters of the __init__ function of the Neuron + neuron_model_init_parameter_dict = ( + self._analyze_model.neuron_model_init_parameter_dict[pop_name].copy() ) + ### if the population is a postsynaptic population adjust the synaptic + ### conductance equations + if is_postsynaptic: + neuron_model_init_parameter_dict = self.adjust_neuron_model( + neuron_model_init_parameter_dict, ampa=ampa, gaba=gaba + ) + ### create the new neuron model + neuron_model_new = Neuron(**neuron_model_init_parameter_dict) - ### network dict - net_single_dict = { - "net": net_single, - "population": 
net_single.get(pop_single_neuron), - "monitor": net_single.get(mon_single), - "init_sampler": None, - } - - ### for v_clamp we are done here - if mode == "v_clamp": - return net_single_dict - - ### for normal neuron get the init sampler for the variables of the neuron model - ### (to initialize a population of the neuron model) - init_sampler = self._get_neuron_model_init_sampler( - net=net_single, pop=net_single.get(pop_single_neuron) + ### 3rd recreate the population + ### get the stored parameters of the __init__ function of the Population + pop_init_parameter_dict = self._analyze_model.pop_init_parameter_dict[ + pop_name + ].copy() + ### replace the neuron model with the new neuron model + pop_init_parameter_dict["neuron"] = neuron_model_new + ### replace the size with the reduced size (if reduced size is smaller than the + ### original size) + ### TODO add model requirements somewhere, here requirements = geometry = int + pop_init_parameter_dict["geometry"] = min( + [pop_init_parameter_dict["geometry"][0], self._reduced_size] ) - net_single_dict["init_sampler"] = init_sampler + ### append _reduce to the name + pop_init_parameter_dict["name"] = f"{pop_name}_reduced" + ### create the new population + pop_new = Population(**pop_init_parameter_dict) - return net_single_dict + ### 4th set the parameters and variables of the population's neurons + ### get the stored parameters and variables + neuron_model_attr_dict = self._analyze_model.neuron_model_attr_dict[pop_name] + ### set the parameters and variables + for attr_name, attr_val in neuron_model_attr_dict.items(): + setattr(pop_new, attr_name, attr_val) - def _get_single_neuron_neuron_model( - self, pop_name: str, analyze_model: AnalyzeModel, mode=str - ): + def create_spike_collecting_aux_pop(self, pop_name: str): """ - Create the adjusted neuron model for the given mode. + Create a spike collecting population for the given population. 
Args: pop_name (str): - Name of the population - analyze_model (AnalyzeModel): - Analyzed model - mode (str): - Mode for which the neuron model should be created - - Returns: - neuron_model_new (Neuron): - Adjusted neuron model + Name of the population for which the spike collecting population should be created """ - ### get the stored parameters of the __init__ function of the Neuron - neuron_model_init_parameter_dict = ( - analyze_model.neuron_model_init_parameter_dict[pop_name].copy() - ) - ### Define the attributes of the neuron model as sympy symbols - neuron_model_attributes_name_list = list( - analyze_model.neuron_model_attr_dict[pop_name].keys() + ### get all efferent projections + efferent_projection_list = [ + proj_name + for proj_name, pre_post_pop_name_dict in self._analyze_model.pre_post_pop_name_dict.items() + if pre_post_pop_name_dict[0] == pop_name + ] + ### check if pop has efferent projections + if len(efferent_projection_list) == 0: + return + sf.Logger().log( + f"Create spike collecting aux population for {pop_name}.", + verbose=self._verbose, ) - ### add v_before_psp and v_psp_thresh to equations/parameters, for the stop - ### condition below - self._adjust_neuron_model( - neuron_model_init_parameter_dict, - neuron_model_attributes_name_list, - mode=mode, + ### create the spike collecting population + pop_aux = Population( + 1, + neuron=self.SpikeProbCalcNeuron( + reduced_size=get_population(f"{pop_name}_reduced").geometry[0], + ), + name=f"{pop_name}_spike_collecting_aux", ) - ### create the adjusted neuron model - neuron_model_new = Neuron(**neuron_model_init_parameter_dict) - return neuron_model_new + ### create the projection from reduced pop to spike collecting aux pop + sf.Logger().log( + f"Create projection from {pop_name}_reduced to {pop_name}_spike_collecting_aux to collect spikes.", + verbose=self._verbose, + ) + proj = Projection( + pre=get_population(f"{pop_name}_reduced"), + post=pop_aux, + target="ampa", + 
name=f"proj_{pop_name}_spike_collecting_aux", + ) + proj.connect_all_to_all(weights=1) - def _get_single_neuron_population( - self, - pop_name: str, - neuron_model_new: Neuron, - analyze_model: AnalyzeModel, - mode: str, - ): + def create_conductance_aux_pop(self, pop_name: str, target: str): """ - Create the single neuron population for the given mode. + Create a conductance calculating population for the given population and target. Args: pop_name (str): - Name of the population - neuron_model_new (Neuron): - Adjusted neuron model - analyze_model (AnalyzeModel): - Analyzed model - mode (str): - Mode for which the population should be created - - Returns: - pop_single_neuron (Population): - Single neuron population + Name of the population for which the conductance calculating population should be created + target (str): + Target type of the conductance calculating population """ - if mode == "normal": - pop_single_neuron = Population( - 1, - neuron=neuron_model_new, - name=f"single_neuron_{pop_name}", - stop_condition="((abs(v-v_psp_thresh)<0.01) and (abs(v_before_psp-v_psp_thresh)>0.01)): any", + ### get all afferent projections + afferent_projection_list = [ + proj_name + for proj_name, pre_post_pop_name_dict in self._analyze_model.pre_post_pop_name_dict.items() + if pre_post_pop_name_dict[1] == pop_name + ] + ### check if pop has afferent projections + if len(afferent_projection_list) == 0: + return + ### get all afferent projections with target type + afferent_target_projection_list = [ + proj_name + for proj_name in afferent_projection_list + if self._analyze_model.proj_init_parameter_dict[proj_name]["target"] + == target + ] + ### check if there are afferent projections with target type + if len(afferent_target_projection_list) == 0: + return + sf.Logger().log( + f"Create conductance calculating aux population for {pop_name} target {target}.", + verbose=self._verbose, + ) + ### get projection informations + ### TODO somewhere add model requirements, here 
requirements = geometry = int and connection = fixed_probability i.e. random (with weights and probability) + projection_dict = { + proj_name: { + "pre_size": self._analyze_model.pop_init_parameter_dict[ + self._analyze_model.pre_post_pop_name_dict[proj_name][0] + ]["geometry"][0], + "connection_prob": self._analyze_model.connector_function_parameter_dict[ + proj_name + ][ + "probability" + ], + "weights": self._analyze_model.connector_function_parameter_dict[ + proj_name + ]["weights"], + "pre_name": self._analyze_model.pre_post_pop_name_dict[proj_name][0], + } + for proj_name in afferent_target_projection_list + } + ### create the conductance calculating population + pop_aux = Population( + get_population(f"{pop_name}_reduced").geometry[0], + neuron=self.InputCalcNeuron(projection_dict=projection_dict), + name=f"{pop_name}_{target}_aux", + ) + ### set number of synapses parameter for each projection + for proj_name, vals in projection_dict.items(): + number_synapses = Binomial( + n=vals["pre_size"], p=vals["connection_prob"] + ).get_values(get_population(f"{pop_name}_reduced").geometry[0]) + setattr(pop_aux, f"number_synapses_{proj_name}", number_synapses) + ### create the "current injection" projection from conductance calculating + ### population to the reduced post population + sf.Logger().log( + f"Create projection from {pop_name}_{target}_aux to {pop_name}_reduced to forward the calculated conductance to the post population ({pop_name}_reduced)", + verbose=self._verbose, + ) + proj = CurrentInjection( + pre=pop_aux, + post=get_population(f"{pop_name}_reduced"), + target=f"incomingaux{target}", + name=f"proj_{pop_name}_{target}_aux", + ) + proj.connect_current() + ### create projection from spike_prob calculating aux neurons of presynaptic + ### populations to conductance calculating aux population + sf.Logger().log( + f"Create projection from spike collecting, i.e. 
spike probability calculating, aux populations of presynaptic populations of {pop_name}_reduced to the conductance calculating population '{pop_name}_{target}_aux'.", + verbose=self._verbose, + ) + for proj_name, vals in projection_dict.items(): + pre_pop_name = vals["pre_name"] + pre_pop_spike_collecting_aux = get_population( + f"{pre_pop_name}_spike_collecting_aux" ) - elif mode == "v_clamp": - ### create the single neuron population - pop_single_neuron = Population( - 1, - neuron=neuron_model_new, - name=f"single_neuron_v_clamp_{pop_name}", + proj = Projection( + pre=pre_pop_spike_collecting_aux, + post=pop_aux, + target=f"spikeprob_{pre_pop_name}", + name=f"{proj_name}_spike_collecting_to_conductance", ) + proj.connect_all_to_all(weights=1) - ### get the stored parameters and variables - neuron_model_attr_dict = analyze_model.neuron_model_attr_dict[pop_name] - ### set the parameters and variables - for attr_name, attr_val in neuron_model_attr_dict.items(): - setattr(pop_single_neuron, attr_name, attr_val) - return pop_single_neuron - - def _get_neuron_model_init_sampler(self, net: Network, pop: Population): + def how_pop_is_connected(self, pop_name): """ - Create a sampler for the initial values of the variables of the neuron model by - simulating the neuron for 10000 ms and afterwards simulating 2000 ms and - sampling the variables every dt. + Check how a population is connected. If the population is a postsynaptic and/or + presynaptic population, check if it gets ampa and/or gaba input. 
Args: - net (Network): - Network with the single neuron population - pop (Population): - Single neuron population + pop_name (str): + Name of the population to check Returns: - sampler (ArrSampler): - Sampler for the initial values of the variables of the neuron model + is_presynaptic (bool): + True if the population is a presynaptic population, False otherwise + is_postsynaptic (bool): + True if the population is a postsynaptic population, False otherwise + ampa (bool): + True if the population gets ampa input, False otherwise + gaba (bool): + True if the population gets gaba input, False otherwise """ + is_presynaptic = False + is_postsynaptic = False + ampa = False + gaba = False + ### loop over all projections + for proj_name in self._model.projections: + ### check if the population is a presynaptic population in any projection + if self._analyze_model.pre_post_pop_name_dict[proj_name][0] == pop_name: + is_presynaptic = True + ### check if the population is a postsynaptic population in any projection + if self._analyze_model.pre_post_pop_name_dict[proj_name][1] == pop_name: + is_postsynaptic = True + ### check if the projection target is ampa or gaba + if ( + self._analyze_model.proj_init_parameter_dict[proj_name]["target"] + == "ampa" + ): + ampa = True + elif ( + self._analyze_model.proj_init_parameter_dict[proj_name]["target"] + == "gaba" + ): + gaba = True - ### reset network and deactivate input - net.reset() - pop.I_app = 0 - - ### 10000 ms init duration - net.simulate(10000) - - ### simulate 2000 ms and check every dt the variables of the neuron - time_steps = int(2000 / dt()) - var_name_list = list(pop.variables) - var_arr = np.zeros((time_steps, len(var_name_list))) - for time_idx in range(time_steps): - net.simulate(dt()) - get_arr = np.array([getattr(pop, var_name) for var_name in pop.variables]) - var_arr[time_idx, :] = get_arr[:, 0] - - ### reset network after simulation - net.reset() - - ### create a sampler with the data samples from the21000 ms 
simulation - sampler = ArrSampler(arr=var_arr, var_name_list=var_name_list) - return sampler + return is_presynaptic, is_postsynaptic, ampa, gaba - def _adjust_neuron_model( - self, - neuron_model_init_parameter_dict: dict, - neuron_model_attributes_name_list: list[str], - mode: str, + def adjust_neuron_model( + self, neuron_model_init_parameter_dict, ampa=True, gaba=True ): """ - Adjust the parameters and equations of the neuron model for the given mode. + Add the new synaptic input coming from the auxillary population to the neuron + model. Args: neuron_model_init_parameter_dict (dict): - Dict with the parameters and equations of the neuron model - neuron_model_attributes_name_list (list[str]): - List of the names of the attributes of the neuron model - mode (str): - Mode for which the neuron model should be adjusted + Dictionary with the parameters of the __init__ function of the Neuron + ampa (bool): + True if the population gets ampa input and therefore the ampa conductance + needs to be adjusted, False otherwise + gaba (bool): + True if the population gets gaba input and therefore the gaba conductance + needs to be adjusted, False otherwise + + Returns: + neuron_model_init_parameter_dict (dict): + Dictionary with the parameters of the __init__ function of the Neuron + with DBS mechanisms added """ + ### 1st adjust the conductance equations ### get the equations of the neuron model as a list of strings equations_line_split_list = str( neuron_model_init_parameter_dict["equations"] ).splitlines() - ### get the parameters of the neuron model as a list of strings - parameters_line_split_list = str( - neuron_model_init_parameter_dict["parameters"] - ).splitlines() - - if mode == "normal": - ### add v_before_psp=v at the beginning of the equations - equations_line_split_list.insert(0, "v_before_psp = v") - ### add v_psp_thresh to the parameters - parameters_line_split_list.append("v_psp_thresh = 0 : population") - elif mode == "v_clamp": - ### get new equations for 
voltage clamp - equations_new_list = CreateVoltageClampEquations( - equations_line_split_list, neuron_model_attributes_name_list - ).eq_new - neuron_model_init_parameter_dict["equations"] = equations_new_list - ### add v_clamp_rec_thresh to the parameters - parameters_line_split_list.append("v_clamp_rec_thresh = 0 : population") - - ### join equations and parameters to a string and store them in the dict + ### search for equation with dg_ampa/dt and dg_gaba/dt + for line_idx, line in enumerate(equations_line_split_list): + if ( + self.get_line_is_dvardt(line, var_name="g_ampa", tau_name="tau_ampa") + and ampa + ): + ### add " + tau_ampa*g_incomingauxampa/dt" + ### TODO add model requirements somewhere, here requirements = tau_ampa * dg_ampa/dt = -g_ampa + equations_line_split_list[line_idx] = ( + "tau_ampa*dg_ampa/dt = -g_ampa + tau_ampa*g_incomingauxampa/dt" + ) + if ( + self.get_line_is_dvardt(line, var_name="g_gaba", tau_name="tau_gaba") + and gaba + ): + ### add " + tau_gaba*g_incomingauxgaba/dt" + ### TODO add model requirements somewhere, here requirements = tau_gaba * dg_gaba/dt = -g_gaba + equations_line_split_list[line_idx] = ( + "tau_gaba*dg_gaba/dt = -g_gaba + tau_gaba*g_incomingauxgaba/dt" + ) + ### join list to a string neuron_model_init_parameter_dict["equations"] = "\n".join( equations_line_split_list ) - neuron_model_init_parameter_dict["parameters"] = "\n".join( - parameters_line_split_list + + ### 2nd extend description + neuron_model_init_parameter_dict["description"] = ( + f"{neuron_model_init_parameter_dict['description']}\nWith incoming auxillary population input implemented." ) + return neuron_model_init_parameter_dict -class PreparePSP: + def get_line_is_dvardt(self, line: str, var_name: str, tau_name: str): + """ + Check if a equation string has the form "tau*dvar/dt = -var". 
+ + Args: + line (str): + Equation string + var_name (str): + Name of the variable + tau_name (str): + Name of the time constant + + Returns: + is_solution_correct (bool): + True if the equation is as expected, False otherwise + """ + if var_name not in line: + return False + + # Define the variables + var, _, _, _ = sp.symbols(f"{var_name} d{var_name} dt {tau_name}") + + # Given equation as a string + equation_str = line + + # Parse the equation string + lhs, rhs = equation_str.split("=") + lhs = sp.sympify(lhs) + rhs = sp.sympify(rhs) + + # Form the equation + equation = sp.Eq(lhs, rhs) + + # Solve the equation for var + try: + solution = sp.solve(equation, var) + except: + ### equation is not solvable with variables means it is not as expected + return False + + # Given solution to compare + expected_solution_str = f"-{tau_name}*d{var_name}/dt" + expected_solution = sp.sympify(expected_solution_str) + + # Check if the solution is as expected + is_solution_correct = solution[0] == expected_solution + + return is_solution_correct + + class SpikeProbCalcNeuron(Neuron): + """ + Neuron model to calculate the spike probabilities of the presynaptic neurons. + """ + + def __init__(self, reduced_size=1): + """ + Args: + reduced_size (int): + Reduced size of the associated presynaptic population + """ + parameters = f""" + reduced_size = {reduced_size} : population + tau= 1.0 : population + """ + equations = """ + tau*dr/dt = g_ampa/reduced_size - r + g_ampa = 0 + """ + super().__init__(parameters=parameters, equations=equations) + + class InputCalcNeuron(Neuron): + """ + This neurons gets the spike probabilities of the pre neurons and calculates the + incoming spikes for each projection. It accumulates the incoming spikes of all + projections (of the same target type) and calculates the conductance increase + for the post neuron. 
+ """ + + def __init__(self, projection_dict): + """ + Args: + projection_dict (dict): + keys: names of afferent projections (of the same target type) + values: dict with keys "weights", "pre_name" + """ + + ### create parameters + parameters = [ + f""" + number_synapses_{proj_name} = 0 + weights_{proj_name} = {vals['weights']} + """ + for proj_name, vals in projection_dict.items() + ] + parameters = "\n".join(parameters) + + ### create equations + equations = [ + f""" + incoming_spikes_{proj_name} = round(number_synapses_{proj_name} * sum(spikeprob_{vals['pre_name']}) + Normal(0, 1)*sqrt(number_synapses_{proj_name} * sum(spikeprob_{vals['pre_name']}) * (1 - sum(spikeprob_{vals['pre_name']})))) : min=0, max=number_synapses_{proj_name} + """ + for proj_name, vals in projection_dict.items() + ] + equations = "\n".join(equations) + sum_of_conductance_increase = ( + "r = " + + "".join( + [ + f"incoming_spikes_{proj_name} * weights_{proj_name} + " + for proj_name in projection_dict.keys() + ] + )[:-3] + ) + equations = equations + "\n" + sum_of_conductance_increase + + super().__init__(parameters=parameters, equations=equations) + + +class CreateSingleNeuronNetworks: """ - Find v_rest, corresponding I_hold (in case of self-active neurons) and an - init_sampler to initialize the neuron model for the PSP calculation for each - population. + Class to create single neuron networks for normal and voltage clamp mode. 
+ + Attributes: + single_net_dict (dict): + Nested dict containing the single neuron networks for normal and voltage + clamp mode + keys: mode (str) + normal or v_clamp + values: dict + keys: pop_name (str) + population name + values: dict + keys: net, population, monitor, init_sampler + values: Network, Population, Monitor, ArrSampler """ def __init__( self, model: CompNeuroModel, - single_nets: CreateSingleNeuronNetworks, + analyze_model: AnalyzeModel, do_not_config_list: list[str], - simulator: "Simulator", - do_plot: bool, - figure_folder: str, ): """ Args: model (CompNeuroModel): - Model to be prepared + Model to be analyzed + analyze_model (AnalyzeModel): + Analyzed model do_not_config_list (list[str]): - List of populations which should not be configured - do_plot (bool): - If True, plot the membrane potential + List of population names which should not be configured """ - self._single_nets = single_nets - self._prepare_psp_dict = {} - self._simulator = simulator - self._figure_folder = figure_folder - ### loop over all populations - for pop_name in model.populations: - ### skip populations which should not be configured - if pop_name in do_not_config_list: - continue - ### find initial v_rest using the voltage clamp network - sf.Logger().log( - f"[{pop_name}]: search v_rest with y(X) = delta_v_2000(v=X) using grid search" - ) - v_rest, delta_v_v_rest, variables_v_rest = self._find_v_rest_initial( - pop_name=pop_name, - do_plot=do_plot, - ) - sf.Logger().log( - f"[{pop_name}]: found v_rest={v_rest} with delta_v_2000(v=v_rest)={delta_v_v_rest}" - ) - ### check if v is constant after setting v to v_rest by simulating the normal - ### single neuron network for 2000 ms - v_rest_is_constant, v_rest_arr = self._get_v_rest_is_const( - pop_name=pop_name, - variables_v_rest=variables_v_rest, - do_plot=do_plot, - ) - - if v_rest_is_constant: - ### v_rest found (last v value of the previous simulation), no - ### I_app_hold needed - v_rest = v_rest_arr[-1] - I_app_hold 
= 0 - else: - ### there is no resting_state i.e. neuron is self-active --> find - ### smallest negative I_app to silence neuron - sf.Logger().log( - f"[{pop_name}]: neuron seems to be self-active --> find smallest I_app to silence the neuron" - ) - v_rest, I_app_hold = self._find_I_app_hold( - pop_name=pop_name, - variables_v_rest=variables_v_rest, - ) - sf.Logger().log( - f"[{pop_name}]: final values: I_app_hold = {I_app_hold}, v_rest = {v_rest}" - ) - - ### get the sampler for the initial variables - psp_init_sampler = self._get_init_neuron_variables_for_psp( - pop_name=pop_name, - v_rest=v_rest, - I_app_hold=I_app_hold, + sf.Logger().log( + "Creating single neuron networks for normal and voltage clamp mode.", + verbose=_model_configurator_verbose, + ) + self._single_net_dict = {} + ### create the single neuron networks for normal and voltage clamp mode + for mode in ["normal", "v_clamp"]: + self._single_net_dict[mode] = {} + self._create_single_neuron_networks( + model=model, + analyze_model=analyze_model, + do_not_config_list=do_not_config_list, + mode=mode, ) - ### store the prepare PSP information - self._prepare_psp_dict[pop_name] = {} - self._prepare_psp_dict[pop_name]["v_rest"] = v_rest - self._prepare_psp_dict[pop_name]["I_app_hold"] = I_app_hold - self._prepare_psp_dict[pop_name]["psp_init_sampler"] = psp_init_sampler - def get(self, pop_name: str): + def single_net(self, pop_name: str, mode: str): """ - Return the prepare PSP information for the given population. + Return the information of the single neuron network for the given population and + mode. 
Args: pop_name (str): Name of the population - - Returns: - ReturnPreparePSP: - Prepare PSP information for the given population with Attributes: v_rest, - I_app_hold, psp_init_sampler + mode (str): + Mode for which the single neuron network should be returned (normal or + v_clamp) + + Returns: + ReturnSingleNeuronNetworks: + Information of the single neuron network with Attributes: net, + population, monitor, init_sampler """ - return self.ReturnPreparePSP( - v_rest=self._prepare_psp_dict[pop_name]["v_rest"], - I_app_hold=self._prepare_psp_dict[pop_name]["I_app_hold"], - psp_init_sampler=self._prepare_psp_dict[pop_name]["psp_init_sampler"], - ) + return self.ReturnSingleNeuronNetworks(self._single_net_dict[mode][pop_name]) - class ReturnPreparePSP: - def __init__( - self, v_rest: float, I_app_hold: float, psp_init_sampler: ArrSampler - ): - self.v_rest = v_rest - self.I_app_hold = I_app_hold - self.psp_init_sampler = psp_init_sampler + class ReturnSingleNeuronNetworks: + def __init__(self, single_net_dict): + self.net: Network = single_net_dict["net"] + self.population: Population = single_net_dict["population"] + self.monitor: Monitor = single_net_dict["monitor"] + self.init_sampler: ArrSampler = single_net_dict["init_sampler"] - def _get_init_neuron_variables_for_psp( - self, pop_name: str, v_rest: float, I_app_hold: float - ): + def init_sampler(self, model: CompNeuroModel, do_not_config_list: list[str]): """ - Get the initial variables of the neuron model for the PSP calculation. + Return the init samplers for all populations of the normal mode. All samplers + are returned in an object with a get method to get the sampler for a specific + population. 
Args: - pop_name (str): - Name of the population - v_rest (float): - Resting membrane potential - I_app_hold (float): - Current which silences the neuron + model (CompNeuroModel): + Model to be analyzed + do_not_config_list (list[str]): + List of population names which should not be configured Returns: - sampler (ArrSampler): - Sampler with the initial variables of the neuron model + AllSampler: + Object with a get method to get the init sampler for a specific + population """ - ### get the names of the variables of the neuron model - var_name_list = self._single_nets.single_net( - pop_name=pop_name, mode="normal" - ).population.variables - ### get the variables of the neuron model after 5000 ms - var_arr = self._simulator.get_v_psp( - v_rest=v_rest, I_app_hold=I_app_hold, pop_name=pop_name - ) - ### create a sampler with this single data sample - sampler = ArrSampler(arr=var_arr, var_name_list=var_name_list) - return sampler + init_sampler_dict = {} + for pop_name in model.populations: + if pop_name in do_not_config_list: + continue + init_sampler_dict[pop_name] = self._single_net_dict["normal"][pop_name][ + "init_sampler" + ] + return self.AllSampler(init_sampler_dict) - def _find_I_app_hold( + class AllSampler: + def __init__(self, init_sampler_dict: dict[str, "ArrSampler"]): + self.init_sampler_dict = init_sampler_dict + + def get(self, pop_name: str): + """ + Get the init sampler for the given population. + + Args: + pop_name (str): + Name of the population + + Returns: + sampler (ArrSampler): + Init sampler for the given population + """ + sampler: ArrSampler = self.init_sampler_dict[pop_name] + return sampler + + def _create_single_neuron_networks( self, - pop_name: str, - variables_v_rest: dict, + model: CompNeuroModel, + analyze_model: AnalyzeModel, + do_not_config_list: list[str], + mode: str, ): """ - Find the current which silences the neuron. + Create the single neuron networks for the given mode. Sets the single_net_dict. 
+ + Args: + model (CompNeuroModel): + Model to be analyzed + analyze_model (AnalyzeModel): + Analyzed model + do_not_config_list (list[str]): + List of population names which should not be configured + mode (str): + Mode for which the single neuron networks should be created + """ + sf.Logger().log( + f"Creating single neuron networks for {mode} mode.", + verbose=_model_configurator_verbose, + ) + + ### loop over populations which should be configured + for pop_name in model.populations: + ### skip populations which should not be configured + if pop_name in do_not_config_list: + continue + ### store the dict containing the network etc + self._single_net_dict[mode][pop_name] = self._create_net_single( + pop_name=pop_name, analyze_model=analyze_model, mode=mode + ) + + def _create_net_single(self, pop_name: str, analyze_model: AnalyzeModel, mode: str): + """ + Creates a network with the neuron type of the population given by pop_name for + the given mode. The population size is set to 1. Args: pop_name (str): Name of the population - variables_v_rest (dict): - Stady state variables of the neuron during setting v_rest as membrane - potential + analyze_model (AnalyzeModel): + Analyzed model + mode (str): + Mode for which the network should be created Returns: - v_rest (float): - Resting membrane potential - I_app_hold (float): - Current which silences the neuron + net_single_dict (dict): + Dict containing the Network, Population, Monitor and ArrSampler objects """ - ### find I_app_hold with find_x_bound sf.Logger().log( - f"[{pop_name}]: search I_app_hold with y(X) = CHANGE_OF_V(I_app=X)" + f"[{pop_name}]: Create single neuron network in {mode} mode.", + verbose=_model_configurator_verbose, + ) + ### create the adjusted neuron model for the stop condition + sf.Logger().log( + f"[{pop_name}]: Create adjusted neuron model.", + verbose=_model_configurator_verbose, + ) + neuron_model_new = self._get_single_neuron_neuron_model( + pop_name=pop_name, 
analyze_model=analyze_model, mode=mode ) - I_app_hold = -ef.find_x_bound( - ### negative current initially reduces v then v climbs back up --> - ### get_v_change_after_v_rest checks how much v changes during second half of - ### 2000 ms simulation - y=lambda X_val: -self._get_v_change_after_v_rest( - pop_name=pop_name, - variables_v_rest=variables_v_rest, - ### find_x_bound only uses positive values for X and - ### increases them, expecting to increase y, therefore use -X for I_app - ### (increasing X will "increase" negative current) and negative sign for - ### the returned value (for no current input the change is positive, this - ### should decrease to zero, with negative sign: for no current input the - ### change is negative, this should increase above zero) - I_app=-X_val, - ), - ### y is initially negative and should increase above 0, therefore search for - ### y_bound=0 with bound_type="greater" - x0=0, - y_bound=0, - tolerance=0.01, - bound_type="greater", + ### create the single neuron population + sf.Logger().log( + f"[{pop_name}]: Create single neuron population.", + verbose=_model_configurator_verbose, ) - ### again simulate the neuron with the obtained I_app_hold to get the new v_rest - v_rest_arr = self._simulator.get_v_2000( + pop_single_neuron = self._get_single_neuron_population( pop_name=pop_name, - initial_variables=variables_v_rest, - I_app=I_app_hold, - do_plot=False, + neuron_model_new=neuron_model_new, + analyze_model=analyze_model, + mode=mode, ) - v_rest = v_rest_arr[-1] - return v_rest, I_app_hold - def _find_v_rest_initial( - self, - pop_name: str, - do_plot: bool, + ### create Monitor for single neuron + sf.Logger().log( + f"[{pop_name}]: Create Monitor for single neuron population.", + verbose=_model_configurator_verbose, + ) + if mode == "normal": + mon_single = Monitor(pop_single_neuron, ["spike", "v"]) + elif mode == "v_clamp": + mon_single = Monitor(pop_single_neuron, ["v_clamp_rec_sign"]) + + ### create network with single neuron 
and compile it + sf.Logger().log( + f"[{pop_name}]: Create Network with single neuron population and its monitor and compile it.", + verbose=_model_configurator_verbose, + ) + net_single = Network() + net_single.add([pop_single_neuron, mon_single]) + mf.compile_in_folder( + folder_name=f"single_net_{mode}_{pop_name}", silent=True, net=net_single + ) + + ### network dict + net_single_dict = { + "net": net_single, + "population": net_single.get(pop_single_neuron), + "monitor": net_single.get(mon_single), + "init_sampler": None, + } + + ### for v_clamp we are done here + if mode == "v_clamp": + return net_single_dict + + ### for normal neuron get the init sampler for the variables of the neuron model + ### (to initialize a population of the neuron model) + sf.Logger().log( + f"[{pop_name}]: Create init sampler for single neuron population.", + verbose=_model_configurator_verbose, + ) + init_sampler = self._get_neuron_model_init_sampler( + net=net_single, pop=net_single.get(pop_single_neuron) + ) + net_single_dict["init_sampler"] = init_sampler + + return net_single_dict + + def _get_single_neuron_neuron_model( + self, pop_name: str, analyze_model: AnalyzeModel, mode=str ): """ - Find the initial v_rest with the voltage clamp single neuron network for the - given population. Furthermore, get the change of v durign setting v_rest and the - stady state variables of the neuron (at the end of the simulation). + Create the adjusted neuron model for the given mode. 
Args: pop_name (str): Name of the population - do_plot (bool): - True if plots should be created, False otherwise + analyze_model (AnalyzeModel): + Analyzed model + mode (str): + Mode for which the neuron model should be created Returns: - v_rest (float): - Resting membrane potential - detla_v_v_rest (float): - Change of the membrane potential during setting v_rest as membrane - potential - variables_v_rest (dict): - Stady state variables of the neuron during setting v_rest as membrane - potential + neuron_model_new (Neuron): + Adjusted neuron model """ - ### find v where dv/dt is minimal with voltage clamp network (best = 0, it can - ### only be >= 0) - v_arr = np.linspace(-90, -20, 200) - v_clamp_arr = np.array( - [ - self._simulator.get_v_clamp_2000(pop_name=pop_name, v=v_val) - for v_val in v_arr - ] + ### get the stored parameters of the __init__ function of the Neuron + neuron_model_init_parameter_dict = ( + analyze_model.neuron_model_init_parameter_dict[pop_name].copy() ) - v_clamp_min_idx = argrelmin(v_clamp_arr)[0] - v_rest = np.min(v_arr[v_clamp_min_idx]) - if do_plot: - plt.figure() - plt.plot(v_arr, v_clamp_arr) - plt.axvline(v_rest, color="k") - plt.axhline(0, color="k", ls="dashed") - plt.savefig(f"{self._figure_folder}/v_clamp_{pop_name}.png") - plt.close("all") - - ### do again the simulation only with the obtained v_rest to get the detla_v for - ### v_rest - detla_v_v_rest = ( - self._simulator.get_v_clamp_2000(pop_name=pop_name, v=v_rest) * dt() + ### Define the attributes of the neuron model as sympy symbols + neuron_model_attributes_name_list = list( + analyze_model.neuron_model_attr_dict[pop_name].keys() ) - population = self._single_nets.single_net( - pop_name=pop_name, mode="v_clamp" - ).population - ### and the stady state variables of the neuron - variables_v_rest = { - var_name: getattr(population, var_name) for var_name in population.variables - } - return v_rest, detla_v_v_rest, variables_v_rest + ### add v_before_psp and v_psp_thresh to 
equations/parameters, for the stop + ### condition below + self._adjust_neuron_model( + neuron_model_init_parameter_dict, + neuron_model_attributes_name_list, + mode=mode, + ) + ### create the adjusted neuron model + neuron_model_new = Neuron(**neuron_model_init_parameter_dict) + return neuron_model_new - def _get_v_rest_is_const(self, pop_name: str, variables_v_rest: dict, do_plot=bool): + def _get_single_neuron_population( + self, + pop_name: str, + neuron_model_new: Neuron, + analyze_model: AnalyzeModel, + mode: str, + ): """ - Check if the membrane potential is constant after setting it to v_rest. + Create the single neuron population for the given mode. Args: pop_name (str): Name of the population - variables_v_rest (dict): - Stady state variables of the neuron during setting v_rest as membrane - potential, used as initial variables for the simulation - do_plot (bool): - True if plots should be created, False otherwise + neuron_model_new (Neuron): + Adjusted neuron model + analyze_model (AnalyzeModel): + Analyzed model + mode (str): + Mode for which the population should be created Returns: - v_rest_is_constant (bool): - True if the membrane potential is constant, False otherwise - v_rest_arr (np.array): - Membrane potential for the 2000 ms simulation with shape: (time_steps,) + pop_single_neuron (Population): + Single neuron population """ - ### check if the neuron stays at v_rest with normal neuron - v_rest_arr = self._simulator.get_v_2000( - pop_name=pop_name, - initial_variables=variables_v_rest, - I_app=0, - do_plot=do_plot, - ) - v_rest_arr_is_const = ( - np.std(v_rest_arr) <= np.mean(np.absolute(v_rest_arr)) / 1000 - ) - return v_rest_arr_is_const, v_rest_arr + if mode == "normal": + pop_single_neuron = Population( + 1, + neuron=neuron_model_new, + name=f"single_neuron_{pop_name}", + stop_condition="((abs(v-v_psp_thresh)<0.01) and (abs(v_before_psp-v_psp_thresh)>0.01)): any", + ) + elif mode == "v_clamp": + ### create the single neuron population + 
pop_single_neuron = Population( + 1, + neuron=neuron_model_new, + name=f"single_neuron_v_clamp_{pop_name}", + ) - def _get_v_change_after_v_rest( - self, pop_name: str, variables_v_rest: dict, I_app: float - ): + ### get the stored parameters and variables + neuron_model_attr_dict = analyze_model.neuron_model_attr_dict[pop_name] + ### set the parameters and variables + for attr_name, attr_val in neuron_model_attr_dict.items(): + setattr(pop_single_neuron, attr_name, attr_val) + return pop_single_neuron + + def _get_neuron_model_init_sampler(self, net: Network, pop: Population): """ - Check how much the membrane potential changes after setting it to v_rest. + Create a sampler for the initial values of the variables of the neuron model by + simulating the neuron for 10000 ms and afterwards simulating 2000 ms and + sampling the variables every dt. Args: - pop_name (str): - Name of the population - variables_v_rest (dict): - Stady state variables of the neuron during setting v_rest as membrane - potential, used as initial variables for the simulation - do_plot (bool): - True if plots should be created, False otherwise + net (Network): + Network with the single neuron population + pop (Population): + Single neuron population Returns: - change_after_v_rest (np.array): - Change of the membrane potential after setting it to v_rest + sampler (ArrSampler): + Sampler for the initial values of the variables of the neuron model """ - ### simulate 2000 ms after setting v_rest - v_rest_arr = self._simulator.get_v_2000( - pop_name=pop_name, - initial_variables=variables_v_rest, - I_app=I_app, - do_plot=False, - ) - ### check how much v changes during the second half - ### std(v) - mean(v)/1000 should be close to 0, the larger the value the more v - ### changes - change_after_v_rest = ( - np.std(v_rest_arr[len(v_rest_arr) // 2 :], axis=0) - - np.mean(np.absolute(v_rest_arr[len(v_rest_arr) // 2 :]), axis=0) / 1000 - ) - return change_after_v_rest + ### reset network and deactivate 
input + net.reset() + pop.I_app = 0 -class Simulator: - """ - Class with simulations for the single neuron networks. - """ + ### 10000 ms init duration + net.simulate(10000) - def __init__( + ### simulate 2000 ms and check every dt the variables of the neuron + time_steps = int(2000 / dt()) + var_name_list = list(pop.variables) + var_arr = np.zeros((time_steps, len(var_name_list))) + for time_idx in range(time_steps): + net.simulate(dt()) + get_arr = np.array([getattr(pop, var_name) for var_name in pop.variables]) + var_arr[time_idx, :] = get_arr[:, 0] + + ### reset network after simulation + net.reset() + + ### create a sampler with the data samples from the21000 ms simulation + sampler = ArrSampler(arr=var_arr, var_name_list=var_name_list) + return sampler + + def _adjust_neuron_model( self, - single_nets: CreateSingleNeuronNetworks, - figure_folder: str, - prepare_psp: PreparePSP | None = None, + neuron_model_init_parameter_dict: dict, + neuron_model_attributes_name_list: list[str], + mode: str, ): """ + Adjust the parameters and equations of the neuron model for the given mode. 
+ Args: - single_nets (CreateSingleNeuronNetworks): - Single neuron networks for normal and voltage clamp mode - figure_folder (str): - Folder where the figures should be saved - prepare_psp (PreparePSP): - Prepare PSP information + neuron_model_init_parameter_dict (dict): + Dict with the parameters and equations of the neuron model + neuron_model_attributes_name_list (list[str]): + List of the names of the attributes of the neuron model + mode (str): + Mode for which the neuron model should be adjusted """ - self._single_nets = single_nets - self._prepare_psp = prepare_psp - self._figure_folder = figure_folder + ### get the equations of the neuron model as a list of strings + equations_line_split_list = str( + neuron_model_init_parameter_dict["equations"] + ).splitlines() + ### get the parameters of the neuron model as a list of strings + parameters_line_split_list = str( + neuron_model_init_parameter_dict["parameters"] + ).splitlines() - def get_v_clamp_2000( - self, - pop_name: str, - v: float | None = None, - I_app: float | None = None, - ) -> float: - """ - Simulates the v_clamp single neuron network of the given pop_name for 2000 ms - and returns the v_clamp_rec value of the single neuron after 2000 ms. The - returned values is "dv/dt". Therefore, to get the hypothetical change of v for a - single time step multiply it with dt! 
+ if mode == "normal": + ### add v_before_psp=v at the beginning of the equations + equations_line_split_list.insert(0, "v_before_psp = v") + ### add v_psp_thresh to the parameters + parameters_line_split_list.append("v_psp_thresh = 0 : population") + elif mode == "v_clamp": + ### get new equations for voltage clamp + equations_new_list = self.CreateVoltageClampEquations( + equations_line_split_list, neuron_model_attributes_name_list + ).eq_new + neuron_model_init_parameter_dict["equations"] = equations_new_list + ### add v_clamp_rec_thresh to the parameters + parameters_line_split_list.append("v_clamp_rec_thresh = 0 : population") - Args: - pop_name (str): - Name of the population - v (float): - Membrane potential (does not change over time due to voltage clamp) - I_app (float): - Applied current + ### join equations and parameters to a string and store them in the dict + neuron_model_init_parameter_dict["equations"] = "\n".join( + equations_line_split_list + ) + neuron_model_init_parameter_dict["parameters"] = "\n".join( + parameters_line_split_list + ) - Returns: - v_clamp_rec (float): - v_clamp_rec value of the single neuron after 2000 ms + class CreateVoltageClampEquations: """ - ### get the network, population, init_sampler - net = self._single_nets.single_net(pop_name=pop_name, mode="v_clamp").net - population = self._single_nets.single_net( - pop_name=pop_name, mode="v_clamp" - ).population - init_sampler = self._single_nets.single_net( - pop_name=pop_name, mode="v_clamp" - ).init_sampler - ### reset network - net.reset() - net.set_seed(0) - ### set the initial variables of the neuron model - if init_sampler is not None: - init_sampler.set_init_variables(population) - ### set v and I_app - if v is not None: - population.v = v - if I_app is not None: - population.I_app = I_app - ### simulate 2000 ms - net.simulate(2000) - ### return the v_clamp_rec value of the single neuron after 2000 ms - return population.v_clamp_rec[0] + Class to create voltage clamp 
equations from the given equations of a neuron model. + The equations of the neuron model have to contain the voltage change equation in the + form of ... dv/dt ... = ... - def get_v_2000( - self, pop_name, initial_variables, I_app=None, do_plot=False - ) -> np.ndarray: + Attributes: + eq_new (list[str]) + new equations of the neuron model with the voltage clamp """ - Simulate normal single neuron 2000 ms and return v for this duration. - Args: - pop_name (str): - Name of the population - initial_variables (dict): - Initial variables of the neuron model - I_app (float): - Applied current - do_plot (bool): - If True, plot the membrane potential + def __init__(self, eq: list[str], neuron_model_attributes_name_list: list[str]): + """ + Args: + eq (list[str]) + equations of the neuron model + neuron_model_attributes_name_list (list[str]) + list of the names of the attributes of the neuron model + """ + ### get the dv/dt equation from equations + eq_v, eq_v_idx = self.get_eq_v(eq=eq) - Returns: - v_arr (np.array): - Membrane potential for the 2000 ms simulation with shape: (time_steps,) - """ - ### get the network, population, monitor - net = self._single_nets.single_net(pop_name=pop_name, mode="normal").net + ### prepare the equation string for solving + ### TODO replace random distributions and mathematical expressions which may be on the left side + eq_v, tags = self.prepare_eq_v(eq_v=eq_v) + + ### solve equation to delta_v (which is dv/dt) + result = self.solve_delta_v(eq_v, neuron_model_attributes_name_list) + + ### insert the new equation lines for v_clamp and remove the old dv/dt line + self.eq_new = self.replace_delta_v( + result=result, eq=eq, eq_v_idx=eq_v_idx, tags=tags + ) + + def replace_delta_v( + self, result: str, eq: list[str], eq_v_idx: int, tags: str = None + ): + """ + Replace the dv/dt line with the voltage clamp lines. 
+ + Args: + result (str) + right side of the dv/dt equation + eq (list[str]) + equations of the neuron model + eq_v_idx (int) + index of the dv/dt line + tags (str) + tags of the dv/dt line + + Returns: + eq (list[str]) + new equations of the neuron model with the voltage clamp + """ + ### create the line for recording voltage clamp (right side of dv/dt) + eq_new_0 = f"v_clamp_rec_sign = {result}" + ### create the line for the absolute voltage clamp + eq_new_1 = f"v_clamp_rec = fabs({result})" + ### create the line for the absolute voltage clamp from the previous time step + eq_new_2 = "v_clamp_rec_pre = v_clamp_rec" + ### create the voltage clamp line "dv/dt=0" with tags if they exist + if not isinstance(tags, type(None)): + eq_new_3 = f"dv/dt=0 : {tags}" + else: + eq_new_3 = "dv/dt=0" + ### remove old v line and insert new three lines, order is important + del eq[eq_v_idx] + eq.insert(eq_v_idx, eq_new_0) + eq.insert(eq_v_idx, eq_new_1) + eq.insert(eq_v_idx, eq_new_2) + eq.insert(eq_v_idx, eq_new_3) + ### return new neuron equations + return eq + + def get_line_is_v(self, line: str): + """ + Check if the line contains the definition of dv/dt. + + Args: + line (str) + line of the equations of the neuron model + + Returns: + line_is_v (bool) + True if the line contains the definition of dv/dt, False otherwise + """ + if "v" not in line: + return False + + ### remove whitespaces + line = line.replace(" ", "") + + ### check for dv/dt + if "dv/dt" in line: + return True + + return False + + def get_eq_v(self, eq: list[str]): + """ + Get the dv/dt equation from the equations of the neuron model. 
+ + Args: + eq (list[str]) + equations of the neuron model + + Returns: + eq_v (str) + dv/dt equation + eq_v_idx (int) + index of the dv/dt line + """ + ### get the dv/dt equation from equations + ### find the line with dv/dt= or v+= or v= + line_is_v_list = [False] * len(eq) + ### check in which lines v is defined + for line_idx, line in enumerate(eq): + line_is_v_list[line_idx] = self.get_line_is_v(line) + ### raise error if no v or multiple times v + if True not in line_is_v_list or sum(line_is_v_list) > 1: + raise ValueError( + "In the equations of the neurons has to be exactly a single line which defines dv/dt!" + ) + ### get the index of the line with dv/dt + eq_v_idx = line_is_v_list.index(True) + ### set the v equation + eq_v = eq.copy()[eq_v_idx] + return eq_v, eq_v_idx + + def prepare_eq_v(self, eq_v: str): + """ + Prepare the equation string for solving with sympy. + + Args: + eq_v (str) + dv/dt equation + + Returns: + eq_v (str) + dv/dt equation + tags (str) + tags of the dv/dt equation + """ + ### remove whitespaces + eq_v = eq_v.replace(" ", "") + ### replace dv/dt by delta_v + eq_v = eq_v.replace("dv/dt", "delta_v") + ### separate equation and tags + eq_tags_list = eq_v.split(":") + eq_v = eq_tags_list[0] + if len(eq_tags_list) > 1: + tags = eq_tags_list[1] + else: + tags = None + return eq_v, tags + + def solve_delta_v( + self, eq_v: str, neuron_model_attributes_name_list: list[str] + ): + """ + Solve the dv/dt equation for delta_v (which is dv/dt). 
+ + Args: + eq_v (str) + dv/dt equation + neuron_model_attributes_name_list (list[str]) + list of the names of the attributes of the neuron model + + Returns: + solution_str (str) + right side of the dv/dt equation + """ + ### Define the attributes of the neuron model as sympy symbols + sp.symbols(",".join(neuron_model_attributes_name_list)) + ### Define delta_v and right_side as sympy symbols + delta_v, _ = sp.symbols("delta_v right_side") + + ### Parse the equation string + lhs, rhs_string = eq_v.split("=") + lhs = sp.sympify(lhs) + rhs = sp.sympify(rhs_string) + + ### Form the equation + equation = sp.Eq(lhs, rhs) + + ### Solve the equation for delta_v + try: + solution = sp.solve(equation, delta_v)[0] + except: + raise ValueError("Could not find solution for dv/dt!") + + ### Get the solution as a string + solution_str = str(solution) + + ### replace right_side by the actual right side string + solution_str = solution_str.replace("right_side", f"({rhs_string})") + + return solution_str + + +class ArrSampler: + """ + Class to store an array and sample from it. + """ + + def __init__(self, arr: np.ndarray, var_name_list: list[str]) -> None: + """ + Args: + arr (np.ndarray) + array with shape (n_samples, n_variables) + var_name_list (list[str]) + list of variable names + """ + self.arr_shape = arr.shape + self.var_name_list = var_name_list + ### check values of any variable are constant + self.is_const = np.std(arr, axis=0) <= np.mean(np.absolute(arr), axis=0) / 1000 + ### for the constant variables only the first value is used + self.constant_arr = arr[0, self.is_const] + ### array without the constant variables + self.not_constant_val_arr = arr[:, np.logical_not(self.is_const)] + + def sample(self, n=1, seed=0): + """ + Sample n samples from the array. 
+ + Args: + n (int) + number of samples to be drawn + seed (int) + seed for the random number generator + + Returns: + ret_arr (np.ndarray) + array with shape (n, n_variables) + """ + ### get n random indices along the n_samples axis + rng = np.random.default_rng(seed=seed) + random_idx_arr = rng.integers(low=0, high=self.arr_shape[0], size=n) + ### sample with random idx + sample_arr = self.not_constant_val_arr[random_idx_arr] + ### create return array + ret_arr = np.zeros((n,) + self.arr_shape[1:]) + ### add samples to return array + ret_arr[:, np.logical_not(self.is_const)] = sample_arr + ### add constant values to return array + ret_arr[:, self.is_const] = self.constant_arr + + return ret_arr + + def set_init_variables(self, population: Population): + """ + Set the initial variables of the given population to the given values. + """ + variable_init_arr = self.sample(len(population), seed=0) + var_name_list = self.var_name_list + for var_name in population.variables: + if var_name in var_name_list: + set_val = variable_init_arr[:, var_name_list.index(var_name)] + setattr(population, var_name, set_val) + + +class Simulator: + """ + Class with simulations for the single neuron networks. 
+ """ + + def __init__( + self, + single_nets: CreateSingleNeuronNetworks, + prepare_psp: "PreparePSP" = None, + ): + """ + Args: + single_nets (CreateSingleNeuronNetworks): + Single neuron networks for normal and voltage clamp mode + prepare_psp (PreparePSP): + Prepare PSP information + """ + if prepare_psp is None: + sf.Logger().log( + "Initialize Simulator for single neuron networks without prepared calculate-PSP simulations.", + verbose=_model_configurator_verbose, + ) + else: + sf.Logger().log( + "Initialize Simulator for single neuron networks with prepared calculate-PSP simulations.", + verbose=_model_configurator_verbose, + ) + self._single_nets = single_nets + self._prepare_psp = prepare_psp + + def get_v_clamp_2000( + self, + pop_name: str, + v: float | None = None, + I_app: float | None = None, + ) -> float: + """ + Simulates the v_clamp single neuron network of the given pop_name for 2000 ms + and returns the v_clamp_rec value of the single neuron after 2000 ms. The + returned values is "dv/dt". Therefore, to get the hypothetical change of v for a + single time step multiply it with dt! 
+ + Args: + pop_name (str): + Name of the population + v (float): + Membrane potential (does not change over time due to voltage clamp) + I_app (float): + Applied current + + Returns: + v_clamp_rec (float): + v_clamp_rec value of the single neuron after 2000 ms + """ + ### get the network, population, init_sampler + net = self._single_nets.single_net(pop_name=pop_name, mode="v_clamp").net + population = self._single_nets.single_net( + pop_name=pop_name, mode="v_clamp" + ).population + init_sampler = self._single_nets.single_net( + pop_name=pop_name, mode="v_clamp" + ).init_sampler + ### reset network + net.reset() + net.set_seed(0) + ### set the initial variables of the neuron model + if init_sampler is not None: + init_sampler.set_init_variables(population) + ### set v and I_app + if v is not None: + population.v = v + if I_app is not None: + population.I_app = I_app + ### simulate 2000 ms + net.simulate(2000) + ### return the v_clamp_rec value of the single neuron after 2000 ms + return population.v_clamp_rec[0] + + def get_v_2000(self, pop_name, initial_variables, I_app=None) -> np.ndarray: + """ + Simulate normal single neuron 2000 ms and return v for this duration. 
+ + Args: + pop_name (str): + Name of the population + initial_variables (dict): + Initial variables of the neuron model + I_app (float): + Applied current + + Returns: + v_arr (np.array): + Membrane potential for the 2000 ms simulation with shape: (time_steps,) + """ + ### get the network, population, monitor + net = self._single_nets.single_net(pop_name=pop_name, mode="normal").net population = self._single_nets.single_net( pop_name=pop_name, mode="normal" ).population @@ -1161,11 +1980,14 @@ def get_v_2000( net.simulate(2000) v_arr = monitor.get("v")[:, 0] - if do_plot: + if _model_configurator_do_plot: + plt.close("all") plt.figure() - plt.title(f"{population.I_app}") + plt.title(f"I_app = {population.I_app}") plt.plot(v_arr) - plt.savefig(f"tmp_v_rest_{pop_name}.png") + plt.savefig( + f"{_model_configurator_figure_folder}/v_rest_{pop_name}_{int(I_app*1000)}.png" + ) plt.close("all") return v_arr @@ -1215,7 +2037,6 @@ def get_ipsp( pop_name: str, g_ampa: float = 0, g_gaba: float = 0, - do_plot: bool = False, ): """ Simulate the single neuron network of the given pop_name for max 5000 ms. 
The @@ -1231,8 +2052,6 @@ def get_ipsp( Conductance of the ampa synapse g_gaba (float): Conductance of the gaba synapse - do_plot (bool): - If True, plot the membrane potential Returns: psp (float): @@ -1287,7 +2106,8 @@ def get_ipsp( ### multiply with -1 to get the positive value of the ipsp psp = -1 * psp - if do_plot: + if _model_configurator_do_plot: + plt.close("all") plt.figure() plt.title( f"g_ampa={g_ampa}\ng_gaba={g_gaba}\nv_rec_rest={v_rec_rest}\npsp={psp}" @@ -1297,7 +2117,7 @@ def get_ipsp( plt.xlim(0, end_timestep) plt.tight_layout() plt.savefig( - f"{self._figure_folder}/tmp_psp_{population.name}_{int(g_ampa*1000)}_{int(g_gaba*1000)}.png" + f"{_model_configurator_figure_folder}/psp_{population.name}_{int(g_ampa*1000)}_{int(g_gaba*1000)}.png" ) plt.close("all") @@ -1360,1088 +2180,339 @@ def get_firing_rate( return rate -class ModelConfigurator: +class PreparePSP: + """ + Find v_rest, corresponding I_hold (in case of self-active neurons) and an + init_sampler to initialize the neuron model for the PSP calculation for each + population. 
+ """ + def __init__( self, model: CompNeuroModel, - target_firing_rate_dict: dict, - max_psp: float = 10.0, - do_not_config_list: list[str] = [], - print_guide: bool = False, - I_app_variable: str = "I_app", - cache: bool = False, - clear_cache: bool = False, - log_file: str | None = None, + single_nets: CreateSingleNeuronNetworks, + do_not_config_list: list[str], + simulator: "Simulator", ): - ### store the given variables - self._model = model - self._do_not_config_list = do_not_config_list - self._target_firing_rate_dict = target_firing_rate_dict - self._base_dict = None - ### TODO add this to figures - self._figure_folder = "model_conf_figures" - ### create the figure folder - sf.create_dir(self._figure_folder) - ### initialize logger - sf.Logger(log_file=log_file) - ### analyze the given model, create model before analyzing, then clear ANNarchy - self._analyze_model = AnalyzeModel(model=self._model) - ### create the CompNeuroModel object for the reduced model (the model itself is - ### not created yet) - self._model_reduced = CreateReducedModel( - model=self._model, - analyze_model=self._analyze_model, - reduced_size=100, - do_create=False, - do_compile=False, - verbose=True, + """ + Args: + model (CompNeuroModel): + Model to be prepared + do_not_config_list (list[str]): + List of populations which should not be configured + """ + sf.Logger().log( + "Prepare PSP calculation simulations for each population, i.e. 
find v_rest and I_app_hold.", + verbose=_model_configurator_verbose, ) - - ### try to load the cached variables - if clear_cache: - sf.clear_dir(".model_config_cache") - cache_worked = False - if cache: - try: - ### load the cached variables - cache_loaded = sf.load_variables( - name_list=["init_sampler", "max_syn"], - path=".model_config_cache", - ) - cache_worked = True - except FileNotFoundError: - pass - ### create the single neuron networks (networks are compiled and ready to be - ### simulated), normal model for searching for max conductances, max input - ### current, resting firing rate; voltage clamp model for preparing the PSP - ### simulationssearching, i.e., for resting potential and corresponding input - ### current I_hold (for self-active neurons) - if not cache_worked: - self._single_nets = CreateSingleNeuronNetworks( - model=self._model, - analyze_model=self._analyze_model, - do_not_config_list=do_not_config_list, - ) - ### get the init sampler for the populations - self._init_sampler = self._single_nets.init_sampler( - model=self._model, do_not_config_list=do_not_config_list + self._single_nets = single_nets + self._prepare_psp_dict = {} + self._simulator = simulator + ### loop over all populations + for pop_name in model.populations: + ### skip populations which should not be configured + if pop_name in do_not_config_list: + continue + ### find initial v_rest using the voltage clamp network + sf.Logger().log( + f"[{pop_name}]: search v_rest with y(X) = delta_v_2000(v=X) using grid search", + verbose=_model_configurator_verbose, ) - ### create simulator with single_nets - self._simulator = Simulator( - single_nets=self._single_nets, - figure_folder=self._figure_folder, - prepare_psp=None, + v_rest, delta_v_v_rest, variables_v_rest = self._find_v_rest_initial( + pop_name=pop_name, ) - else: - self._init_sampler: CreateSingleNeuronNetworks.AllSampler = cache_loaded[ - "init_sampler" - ] - ### get the resting potential and corresponding I_hold for each 
population using - ### the voltage clamp networks - if not cache_worked: - self._prepare_psp = PreparePSP( - model=self._model, - single_nets=self._single_nets, - do_not_config_list=do_not_config_list, - simulator=self._simulator, - do_plot=False, - figure_folder=self._figure_folder, + sf.Logger().log( + f"[{pop_name}]: found v_rest={v_rest} with delta_v_2000(v=v_rest)={delta_v_v_rest}", + verbose=_model_configurator_verbose, ) - self._simulator = Simulator( - single_nets=self._single_nets, - figure_folder=self._figure_folder, - prepare_psp=self._prepare_psp, + ### check if v is constant after setting v to v_rest by simulating the normal + ### single neuron network for 2000 ms + sf.Logger().log( + f"[{pop_name}]: check if v stays constant after setting v_rest as membrane potential", + verbose=_model_configurator_verbose, ) - ### get the maximum synaptic conductances and input currents for each population - if not cache_worked: - self._max_syn = GetMaxSyn( - model=self._model, - simulator=self._simulator, - do_not_config_list=do_not_config_list, - max_psp=max_psp, - target_firing_rate_dict=target_firing_rate_dict, - ).max_syn_getter - else: - self._max_syn = cache_loaded["max_syn"] - ### cache single_nets, prepare_psp, max_syn - if cache and not cache_worked: - sf.save_variables( - variable_list=[ - self._init_sampler, - self._max_syn, - ], - name_list=["init_sampler", "max_syn"], - path=".model_config_cache", + v_rest_is_constant, v_rest_arr = self._get_v_rest_is_const( + pop_name=pop_name, + variables_v_rest=variables_v_rest, ) - ### get the weights dictionaries - self._weight_dicts = GetWeights( - model=self._model, - do_not_config_list=do_not_config_list, - analyze_model=self._analyze_model, - max_syn=self._max_syn, - ) - - def set_weights(self, weight_dict: dict[str, float]): - """ - Set the weights of the model. 
- - Args: - weight_dict (dict[str, float]): - Dict with the weights for each projection - """ - self._weight_dicts.weight_dict = weight_dict - self._check_if_not_config_pops_have_correct_rates() - - def set_syn_load( - self, - syn_load_dict: dict[str, float], - syn_contribution_dict: dict[str, dict[str, float]], - ): - """ - Set the synaptic load of the model. - - Args: - syn_load_dict (dict[str, float]): - Dict with ampa and gaba synaptic load for each population - syn_contribution_dict (dict[str, dict[str, float]]): - Dict with the contribution of the afferent projections to the ampa and - gaba synaptic load of each population - """ - self._weight_dicts.syn_load_dict = syn_load_dict - self._weight_dicts.syn_contribution_dict = syn_contribution_dict - self._check_if_not_config_pops_have_correct_rates() - - def _check_if_not_config_pops_have_correct_rates(self): - """ - Check if the populations which should not be configured have the correct firing - rates. - """ - ### initialize the normal model + compile the model - self._init_model_with_fitted_base() - - ### record spikes of the do_not_config populations - mon = CompNeuroMonitors( - mon_dict={ - pop_name: ["spike"] for pop_name in self._do_not_config_list - } # _model.populations # tmp test - ) - mon.start() - ### simulate the model for 5000 ms - # get_population("stn").I_app = 8 # tmp test - simulate(5000) - ### get the firing rates - recordings = mon.get_recordings() - for pop_name in self._do_not_config_list: - spike_dict = recordings[0][f"{pop_name};spike"] - t, _ = raster_plot(spike_dict) - spike_count = len(t) - pop_size = len(get_population(pop_name)) - firing_rate = spike_count / (5 * pop_size) - if np.abs(firing_rate - self._target_firing_rate_dict[pop_name]) > 1: + if v_rest_is_constant: + ### v_rest found (last v value of the previous simulation), no + ### I_app_hold needed sf.Logger().log( - f"Warning: Population {pop_name} has a firing rate of {firing_rate} instead of 
{self._target_firing_rate_dict[pop_name]}" + f"[{pop_name}]: v_rest stays constant, no I_app_hold needed", + verbose=_model_configurator_verbose, ) - print( - f"Warning: Population {pop_name} has a firing rate of {firing_rate} instead of {self._target_firing_rate_dict[pop_name]}" + v_rest = v_rest_arr[-1] + I_app_hold = 0 + else: + ### there is no resting_state i.e. neuron is self-active --> find + ### smallest negative I_app to silence neuron + sf.Logger().log( + f"[{pop_name}]: neuron seems to be self-active --> find smallest I_app to silence the neuron", + verbose=_model_configurator_verbose, ) + v_rest, I_app_hold = self._find_I_app_hold( + pop_name=pop_name, + variables_v_rest=variables_v_rest, + ) + sf.Logger().log( + f"[{pop_name}]: final values: I_app_hold = {I_app_hold}, v_rest = {v_rest}" + ) - # ### tmp plot - # recording_times = mon.get_recording_times() - - # af.PlotRecordings( - # figname="tmp.png", - # recordings=recordings, - # recording_times=recording_times, - # shape=(len(self._model.populations), 1), - # plan={ - # "position": list(range(1, len(self._model.populations) + 1)), - # "compartment": self._model.populations, - # "variable": ["spike"] * len(self._model.populations), - # "format": ["hybrid"] * len(self._model.populations), - # }, - # ) - # quit() + ### get the sampler for the initial variables + sf.Logger().log( + f"[{pop_name}]: create init sampler for single neuron population to be used for the calculate PSP simulations", + verbose=_model_configurator_verbose, + ) + psp_init_sampler = self._get_init_neuron_variables_for_psp( + pop_name=pop_name, + v_rest=v_rest, + I_app_hold=I_app_hold, + ) + ### store the prepare PSP information + self._prepare_psp_dict[pop_name] = {} + self._prepare_psp_dict[pop_name]["v_rest"] = v_rest + self._prepare_psp_dict[pop_name]["I_app_hold"] = I_app_hold + self._prepare_psp_dict[pop_name]["psp_init_sampler"] = psp_init_sampler - def set_base(self): - """ - Set the baseline currents of the model, found for 
the current weights to reach - the target firing rates. The model is compiled after setting the baselines. + def get(self, pop_name: str): """ - ### get the base dict - if self._base_dict is None: - self.get_base() - - ### initialize the normal model + set the baselines with the base dict - self._init_model_with_fitted_base(base_dict=self._base_dict) + Return the prepare PSP information for the given population. - def get_base(self): - """ - Get the baseline currents of the model. + Args: + pop_name (str): + Name of the population Returns: - base_dict (dict[str, float]): - Dict with the baseline currents for each population - """ - ### get the base dict - self._base_dict = GetBase( - model_normal=self._model, - model_reduced=self._model_reduced.model_reduced, - target_firing_rate_dict=self._target_firing_rate_dict, - weight_dicts=self._weight_dicts, - do_not_config_list=self._do_not_config_list, - init_sampler=self._init_sampler, - max_syn=self._max_syn, - ).base_dict - return self._base_dict - - def _init_model_with_fitted_base(self, base_dict: dict[str, float] | None = None): - """ - Initialize the neurons of the model using the init_sampler, set the baseline - currents of the model from the base dict (containing fitted baselines) and the - weights from the weight dicts and compile the model. 
- """ - ### clear ANNarchy and create the normal model - mf.cnp_clear(functions=False, constants=False) - self._model.create(do_compile=False) - ### set the initial variables of the neurons #TODO small problem = init sampler - ### initializes the neurons in resting-state, but here they get an input current - for pop_name, init_sampler in self._init_sampler.init_sampler_dict.items(): - init_sampler.set_init_variables(get_population(pop_name)) - ### set the baseline currents - if base_dict is not None: - for pop_name, I_app in base_dict.items(): - setattr(get_population(pop_name), "I_app", I_app) - ### compile the model - self._model.compile() - ### set the weights - for proj_name, weight in self._weight_dicts.weight_dict.items(): - setattr(get_projection(proj_name), "w", weight) - - -class Minimize: - def __init__( - self, func, yt, x0, lb, ub, tol_error, tol_convergence, max_it - ) -> None: + ReturnPreparePSP: + Prepare PSP information for the given population with Attributes: v_rest, + I_app_hold, psp_init_sampler """ - Args: - func (Callable): - Function which takes a vector as input and returns a vector as output - target_values (np.array): - Target output vector of the function - x0 (np.array): - Initial input vector - lb (np.array): - Lower bounds of the input vector - ub (np.array): - Upper bounds of the input vector - tol_error (float): - If the error is below this value the optimization stops - tol_convergence (float): - If the change of the error stays below this value the optimization stops - max_it (int): - Maximum number of iterations - """ - ### TODO continue here, I think it works but neuron models explode - x = x0 - x_old = x0 - y = yt - error = np.ones(x0.shape) * 20 - error_old = np.ones(x0.shape) * 20 - it = 0 - search_gradient_diff = np.ones(x0.shape) - alpha = np.ones(x0.shape) - error_list = [] - dx_list = [] - dy_list = [] - x_list = [] - y_list = [] - it_list = [] - - def error_changed(error_list, tol, n=3): - if len(error_list) < 2: - return 
True - return (np.max(error_list[-n:]) - np.min(error_list[-n:])) > tol - - ### run until the error does not change anymore or the maximum number of - ### iterations is reached, also break if the error is small enough - while it < max_it and error_changed(error_list, tol_convergence): - print("\n\nnext iteration") - y_old = y - y = func(x) - dx_list.append(x - x_old) - dy_list.append(y - y_old) - ### TODO if x did not change much, use the previous gradient again, but maybe not a good idea, or at least not easy to implement, sicne gradient depends on all inputs - print(f"x: {x}") - print(f"y: {y}") - x_list.append(x) - y_list.append(y) - it_list.append(it) - ### here we know the new y(x) - ### check if the error sign changed - error_old = error - error = yt - y - ### if error is small enough stop the optimization - if np.all(np.abs(error) < tol_error): - break - error_sign_changed = np.sign(error) != np.sign(error_old) - print(f"error_sign_changed: {error_sign_changed}") - ### get how much the error (in total, not for individual inputs) changed - error_list.append(np.mean(np.abs(error))) - print(f"error_list: {error_list}\n") - ### if the error sign changed: - ### - check if error is larger as before - ### - if yes -> check if error is also larger than tolerance - ### - if yes -> use again the previous x and compute current y again - ### - we calculate (as usual) a new gradient - ### - we reduce alpha, so this time the step is smaller - error_increased = np.abs(error) > np.abs(error_old) - error_is_large = np.abs(error) > tol_error - change_x = error_sign_changed & error_increased & error_is_large - x[change_x] = x_old[change_x] - if np.any(change_x): - y = func(x) - print( - "some errors changed sign, increased, and are larger than tolerance" - ) - print(f"x: {x}") - print(f"y: {y}\n") - x_list.append(x) - y_list.append(y) - it_list.append(it) - ### reduce alpha for the inputs where the error sign changed - ### for the others alpha reaches 1 - 
alpha[error_sign_changed] /= 2 - alpha[~error_sign_changed] += (1 - alpha[~error_sign_changed]) / 5 - ### calculate the gradient i.e. change of the output values for each input - grad = np.zeros((yt.shape[0], x0.shape[0])) - for i in range(len(x0)): - ### search for the gradient of the i-th input, increase the stepwidth - ### which is used to calculate the gradient if the gradient for the - ### associated output value is below 1 - while grad[i, i] < 1: - x_plus = x.copy() - ### change only the currently selected input whose gradient should be - ### calculated - x_plus[i] += search_gradient_diff[i] - y_plus = func(x_plus) - print(f"x_plus: {x_plus}") - print(f"y_plus: {y_plus}\n") - grad[:, i] = y_plus - y - ### if gradient is above 10 reduce the search gradient diff - if grad[i, i] >= 10: - search_gradient_diff[i] /= 1.5 - ### if gradient is below 1 increase the search gradient diff - elif grad[i, i] < 1: - search_gradient_diff[i] *= 2 - ### calculate the wanted change of the output values - delta_y = yt - y - print(f"delta_y: {delta_y}") - print(f"grad:\n{grad}") - - # Example coefficient matrix A (m x n matrix) - A = grad - - # Right-hand side vector b (m-dimensional vector) - b = delta_y - - # Solve the system using least squares method - solution, residuals, rank, s = np.linalg.lstsq(A, b, rcond=None) - - # Output the results - print("Solution vector x:", solution) - - # Calculate the matrix-vector product Ax - Ax = np.dot(A, solution) - - # Output the matrix-vector product and compare with b - print("delta_y from solution:", Ax) - - ### solution contains the info how much each input should change (how many - ### times the change of gradient is needed to reach the target values) - x_old = x - x = x + solution * search_gradient_diff * alpha - it += 1 - - self.x = x - self.success = np.all(np.abs(error) < tol_error) - - x_arr = np.array(x_list) - y_arr = np.array(y_list) - it_arr = np.array(it_list) - - ### TODO remove or make this optimal (for debugging), also 
the prints above - plt.close("all") - plt.figure() - for idx in range(4): - ax = plt.subplot(4, 1, idx + 1) - ### plot the x values - plt.plot(it_arr, x_arr[:, idx]) - plt.ylabel(f"x{idx}") - ### second y axis on the right for the y values - ax2 = ax.twinx() - ax2.plot(it_arr, y_arr[:, idx], color="red") - ax2.set_ylabel(f"y{idx}", color="red") - plt.xlabel("iteration") - plt.tight_layout() - plt.savefig("optimization.png") - - -class GetBase: - def __init__( - self, - model_normal: CompNeuroModel, - model_reduced: CompNeuroModel, - target_firing_rate_dict: dict, - weight_dicts: "GetWeights", - do_not_config_list: list[str], - init_sampler: CreateSingleNeuronNetworks.AllSampler, - max_syn: "GetMaxSyn.MaxSynGetter", - ): - self._model_normal = model_normal - self._model_reduced = model_reduced - self._weight_dicts = weight_dicts - self._do_not_config_list = do_not_config_list - self._init_sampler = init_sampler - self._max_syn = max_syn - ### get the populations names of the configured populations - self._pop_names_config = [ - pop_name - for pop_name in model_normal.populations - if pop_name not in do_not_config_list - ] - ### convert the target firing rate dict to an array - self._target_firing_rate_arr = [] - print(self._pop_names_config) - for pop_name in self._pop_names_config: - self._target_firing_rate_arr.append(target_firing_rate_dict[pop_name]) - self._target_firing_rate_arr = np.array(self._target_firing_rate_arr) - ### get the base currents - self._prepare_get_base() - self._base_dict = self._get_base() - - @property - def base_dict(self): - return self._base_dict - - def _set_model_weights(self): - ### loop over all populations which should be configured - for pop_name in self._pop_names_config: - ### loop over all target types - for target_type in ["ampa", "gaba"]: - ### get afferent projections of the corresponding target type - afferent_projection_list = self._weight_dicts._get_afferent_proj_names( - pop_name=pop_name, target=target_type - ) - ### 
loop over all afferent projections - for proj_name in afferent_projection_list: - ### set weight of the projection in the conductance-calculating - ### input current population - proj_weight = self._weight_dicts.weight_dict[proj_name] - setattr( - get_population(f"{pop_name}_{target_type}_aux"), - f"weights_{proj_name}", - proj_weight, - ) - - def _prepare_get_base(self): - ### clear ANNarchy - mf.cnp_clear(functions=False, constants=False) - ### create and compile the model - self._model_reduced.create() - ### create monitors for recording the spikes of all populations - ### for CompNeuroMonitors we need the "_reduced" suffix - mon = CompNeuroMonitors( - mon_dict={ - f"{pop_name}_reduced": ["spike"] - for pop_name in self._model_normal.populations - } - ) - ### create the experiment - self._exp = self.MyExperiment(monitors=mon) - ### initialize all populations with the init sampler - for pop_name in self._pop_names_config: - ### for get_population we need the "_reduced" suffix - self._init_sampler.get(pop_name=pop_name).set_init_variables( - get_population(f"{pop_name}_reduced") - ) - ### set the model weights - self._set_model_weights() - ### store the model state for all populations - self._exp.store_model_state(compartment_list=self._model_reduced.populations) - ### set lower and upper bounds and initial guess - self._lb = [] - self._ub = [] - self._x0 = [] - for pop_name in self._pop_names_config: - self._lb.append(-self._max_syn.get(pop_name=pop_name).I_app) - self._ub.append(self._max_syn.get(pop_name=pop_name).I_app) - self._x0.append(0.0) - - def _get_base(self): - """ - Perform the optimization to find the base currents for the target firing rates. 
- - Returns: - base_dict (dict): - Dict with the base currents for each population - """ - - ### Perform the optimization using Minimize class - result = Minimize( - func=self._get_firing_rate, - yt=self._target_firing_rate_arr, - x0=np.array(self._x0), - lb=np.array(self._lb), - ub=np.array(self._ub), - tol_error=1, - tol_convergence=0.01, - max_it=20, - ) - - optimized_inputs = result.x - if not result.success: - sf.Logger().log("Optimization failed, target firing rates not reached!") - print("Optimization failed, target firing rates not reached!") - base_dict = { - pop_name: optimized_inputs[idx] - for idx, pop_name in enumerate(self._pop_names_config) - } - return base_dict - - def _get_firing_rate(self, I_app_list: list[float]): - ### convert the I_app_list to a dict - I_app_dict = {} - counter = 0 - for pop_name in self._pop_names_config: - ### for the I_app_dict we need the "_reduced" suffix - I_app_dict[f"{pop_name}_reduced"] = I_app_list[counter] - counter += 1 - ### run the experiment - results = self._exp.run(I_app_dict) - ### get the firing rates from the recorded spikes - rate_list = [] - rate_dict = {} - for pop_name in self._pop_names_config: - ### for the spike dict we need the "_reduced" suffix - spike_dict = results.recordings[0][f"{pop_name}_reduced;spike"] - t, _ = raster_plot(spike_dict) - ### only take spikes after the first 500 ms, because the neurons are - ### initialized in resting-state and with an input current there can be - ### drastic activity changes at the beginning - t = t[t > 500] - nbr_spikes = len(t) - ### divide number of spikes by the number of neurons and the duration in s - rate = nbr_spikes / (4.5 * get_population(f"{pop_name}_reduced").size) - rate_list.append(rate) - rate_dict[pop_name] = rate - # sf.Logger().log(f"I_app_dict: {I_app_dict}") - # sf.Logger().log(f"Firing rates: {rate_dict}") - - # af.PlotRecordings( - # figname="firing_rates.png", - # recordings=results.recordings, - # 
recording_times=results.recording_times, - # shape=(len(self._model_normal.populations), 1), - # plan={ - # "position": list(range(1, len(self._model_normal.populations) + 1)), - # "compartment": [ - # f"{pop_name}_reduced" for pop_name in self._model_normal.populations - # ], - # "variable": ["spike"] * len(self._model_normal.populations), - # "format": ["hybrid"] * len(self._model_normal.populations), - # }, - # ) - return np.array(rate_list) - - class MyExperiment(CompNeuroExp): - def run(self, I_app_dict: dict[str, float]): - """ - Simulate the model for 5000 ms with the given input currents. - - Args: - I_app_dict (dict[str, float]): - Dict with the input currents for each population - - Returns: - results (CompNeuroResults): - Results of the simulation - """ - ### reset to initial state - self.reset() - set_seed(0) - ### activate monitor - self.monitors.start() - ### set the input currents - for pop_name, I_app in I_app_dict.items(): - get_population(pop_name).I_app = I_app - ### simulate 5000 ms - simulate(5000) - ### return results - return self.results() - - -class CreateReducedModel: - """ - Class to create a reduced model from the original model. It is accessable via the - attribute model_reduced. - - Attributes: - model_reduced (CompNeuroModel): - Reduced model, created but not compiled - """ - - def __init__( - self, - model: CompNeuroModel, - analyze_model: AnalyzeModel, - reduced_size: int, - do_create: bool = False, - do_compile: bool = False, - verbose: bool = False, - ) -> None: - """ - Prepare model for reduction. 
- - Args: - model (CompNeuroModel): - Model to be reduced - reduced_size (int): - Size of the reduced populations - """ - ### set the attributes - self._model = model - self._analyze_model = analyze_model - self._reduced_size = reduced_size - self._verbose = verbose - ### recreate model with reduced populations and projections - self.model_reduced = CompNeuroModel( - model_creation_function=self.recreate_model, - name=f"{model.name}_reduced", - description=f"{model.description}\nWith reduced populations and projections.", - do_create=do_create, - do_compile=do_compile, - compile_folder_name=f"{model.compile_folder_name}_reduced", - ) - - def recreate_model(self): - """ - Recreates the model with reduced populations and projections. - """ - ### 1st for each population create a reduced population - for pop_name in self._model.populations: - self.create_reduced_pop(pop_name) - ### 2nd for each population which is a presynaptic population, create a spikes collecting aux population - for pop_name in self._model.populations: - self.create_spike_collecting_aux_pop(pop_name) - ## 3rd for each population which has afferents create a population for incoming spikes for each target type - for pop_name in self._model.populations: - self.create_conductance_aux_pop(pop_name, target="ampa") - self.create_conductance_aux_pop(pop_name, target="gaba") - - def create_reduced_pop(self, pop_name: str): - """ - Create a reduced population from the given population. 
- - Args: - pop_name (str): - Name of the population to be reduced - """ - if self._verbose: - print(f"create_reduced_pop for {pop_name}") - ### 1st check how the population is connected - _, is_postsynaptic, ampa, gaba = self.how_pop_is_connected(pop_name) - - ### 2nd recreate neuron model - ### get the stored parameters of the __init__ function of the Neuron - neuron_model_init_parameter_dict = ( - self._analyze_model.neuron_model_init_parameter_dict[pop_name].copy() - ) - ### if the population is a postsynaptic population adjust the synaptic - ### conductance equations - if is_postsynaptic: - neuron_model_init_parameter_dict = self.adjust_neuron_model( - neuron_model_init_parameter_dict, ampa=ampa, gaba=gaba - ) - ### create the new neuron model - neuron_model_new = Neuron(**neuron_model_init_parameter_dict) - - ### 3rd recreate the population - ### get the stored parameters of the __init__ function of the Population - pop_init_parameter_dict = self._analyze_model.pop_init_parameter_dict[ - pop_name - ].copy() - ### replace the neuron model with the new neuron model - pop_init_parameter_dict["neuron"] = neuron_model_new - ### replace the size with the reduced size (if reduced size is smaller than the - ### original size) - ### TODO add model requirements somewhere, here requirements = geometry = int - pop_init_parameter_dict["geometry"] = min( - [pop_init_parameter_dict["geometry"][0], self._reduced_size] - ) - ### append _reduce to the name - pop_init_parameter_dict["name"] = f"{pop_name}_reduced" - ### create the new population - pop_new = Population(**pop_init_parameter_dict) - - ### 4th set the parameters and variables of the population's neurons - ### get the stored parameters and variables - neuron_model_attr_dict = self._analyze_model.neuron_model_attr_dict[pop_name] - ### set the parameters and variables - for attr_name, attr_val in neuron_model_attr_dict.items(): - setattr(pop_new, attr_name, attr_val) - - def create_spike_collecting_aux_pop(self, 
pop_name: str): - """ - Create a spike collecting population for the given population. - - Args: - pop_name (str): - Name of the population for which the spike collecting population should be created - """ - ### get all efferent projections - efferent_projection_list = [ - proj_name - for proj_name, pre_post_pop_name_dict in self._analyze_model.pre_post_pop_name_dict.items() - if pre_post_pop_name_dict[0] == pop_name - ] - ### check if pop has efferent projections - if len(efferent_projection_list) == 0: - return - if self._verbose: - print(f"create_spike_collecting_aux_pop for {pop_name}") - ### create the spike collecting population - pop_aux = Population( - 1, - neuron=self.SpikeProbCalcNeuron( - reduced_size=min( - [ - self._analyze_model.pop_init_parameter_dict[pop_name][ - "geometry" - ][0], - self._reduced_size, - ] - ) - ), - name=f"{pop_name}_spike_collecting_aux", - ) - ### create the projection from reduced pop to spike collecting aux pop - proj = Projection( - pre=get_population(pop_name + "_reduced"), - post=pop_aux, - target="ampa", - name=f"proj_{pop_name}_spike_collecting_aux", - ) - proj.connect_all_to_all(weights=1) - - def create_conductance_aux_pop(self, pop_name: str, target: str): - """ - Create a conductance calculating population for the given population and target. 
- - Args: - pop_name (str): - Name of the population for which the conductance calculating population should be created - target (str): - Target type of the conductance calculating population - """ - ### get all afferent projections - afferent_projection_list = [ - proj_name - for proj_name, pre_post_pop_name_dict in self._analyze_model.pre_post_pop_name_dict.items() - if pre_post_pop_name_dict[1] == pop_name - ] - ### check if pop has afferent projections - if len(afferent_projection_list) == 0: - return - ### get all afferent projections with target type - afferent_target_projection_list = [ - proj_name - for proj_name in afferent_projection_list - if self._analyze_model.proj_init_parameter_dict[proj_name]["target"] - == target - ] - ### check if there are afferent projections with target type - if len(afferent_target_projection_list) == 0: - return - if self._verbose: - print(f"create_conductance_aux_pop for {pop_name} target {target}") - ### get projection informations - ### TODO somewhere add model requirements, here requirements = geometry = int and connection = fixed_probability i.e. 
random (with weights and probability) - projection_dict = { - proj_name: { - "pre_size": self._analyze_model.pop_init_parameter_dict[ - self._analyze_model.pre_post_pop_name_dict[proj_name][0] - ]["geometry"][0], - "connection_prob": self._analyze_model.connector_function_parameter_dict[ - proj_name - ][ - "probability" - ], - "weights": self._analyze_model.connector_function_parameter_dict[ - proj_name - ]["weights"], - "pre_name": self._analyze_model.pre_post_pop_name_dict[proj_name][0], - } - for proj_name in afferent_target_projection_list - } - ### create the conductance calculating population - pop_aux = Population( - self._analyze_model.pop_init_parameter_dict[pop_name]["geometry"][0], - neuron=self.InputCalcNeuron(projection_dict=projection_dict), - name=f"{pop_name}_{target}_aux", - ) - ### set number of synapses parameter for each projection - for proj_name, vals in projection_dict.items(): - number_synapses = Binomial( - n=vals["pre_size"], p=vals["connection_prob"] - ).get_values( - self._analyze_model.pop_init_parameter_dict[pop_name]["geometry"][0] - ) - setattr(pop_aux, f"number_synapses_{proj_name}", number_synapses) - ### create the "current injection" projection from conductance calculating - ### population to the reduced post population - proj = CurrentInjection( - pre=pop_aux, - post=get_population(f"{pop_name}_reduced"), - target=f"incomingaux{target}", - name=f"proj_{pop_name}_{target}_aux", - ) - proj.connect_current() - ### create projection from spike_prob calculating aux neurons of presynaptic - ### populations to conductance calculating aux population - for proj_name, vals in projection_dict.items(): - pre_pop_name = vals["pre_name"] - pre_pop_spike_collecting_aux = get_population( - f"{pre_pop_name}_spike_collecting_aux" - ) - proj = Projection( - pre=pre_pop_spike_collecting_aux, - post=pop_aux, - target=f"spikeprob_{pre_pop_name}", - name=f"{proj_name}_spike_collecting_to_conductance", - ) - proj.connect_all_to_all(weights=1) - - def 
how_pop_is_connected(self, pop_name): - """ - Check how a population is connected. If the population is a postsynaptic and/or - presynaptic population, check if it gets ampa and/or gaba input. - - Args: - pop_name (str): - Name of the population to check - - Returns: - is_presynaptic (bool): - True if the population is a presynaptic population, False otherwise - is_postsynaptic (bool): - True if the population is a postsynaptic population, False otherwise - ampa (bool): - True if the population gets ampa input, False otherwise - gaba (bool): - True if the population gets gaba input, False otherwise - """ - is_presynaptic = False - is_postsynaptic = False - ampa = False - gaba = False - ### loop over all projections - for proj_name in self._model.projections: - ### check if the population is a presynaptic population in any projection - if self._analyze_model.pre_post_pop_name_dict[proj_name][0] == pop_name: - is_presynaptic = True - ### check if the population is a postsynaptic population in any projection - if self._analyze_model.pre_post_pop_name_dict[proj_name][1] == pop_name: - is_postsynaptic = True - ### check if the projection target is ampa or gaba - if ( - self._analyze_model.proj_init_parameter_dict[proj_name]["target"] - == "ampa" - ): - ampa = True - elif ( - self._analyze_model.proj_init_parameter_dict[proj_name]["target"] - == "gaba" - ): - gaba = True - - return is_presynaptic, is_postsynaptic, ampa, gaba - - def adjust_neuron_model( - self, neuron_model_init_parameter_dict, ampa=True, gaba=True - ): - """ - Add the new synaptic input coming from the auxillary population to the neuron - model. 
- - Args: - neuron_model_init_parameter_dict (dict): - Dictionary with the parameters of the __init__ function of the Neuron - ampa (bool): - True if the population gets ampa input and therefore the ampa conductance - needs to be adjusted, False otherwise - gaba (bool): - True if the population gets gaba input and therefore the gaba conductance - needs to be adjusted, False otherwise - - Returns: - neuron_model_init_parameter_dict (dict): - Dictionary with the parameters of the __init__ function of the Neuron - with DBS mechanisms added - """ - ### 1st adjust the conductance equations - ### get the equations of the neuron model as a list of strings - equations_line_split_list = str( - neuron_model_init_parameter_dict["equations"] - ).splitlines() - ### search for equation with dg_ampa/dt and dg_gaba/dt - for line_idx, line in enumerate(equations_line_split_list): - if ( - self.get_line_is_dvardt(line, var_name="g_ampa", tau_name="tau_ampa") - and ampa - ): - ### add " + tau_ampa*g_incomingauxampa/dt" - ### TODO add model requirements somewhere, here requirements = tau_ampa * dg_ampa/dt = -g_ampa - equations_line_split_list[line_idx] = ( - "tau_ampa*dg_ampa/dt = -g_ampa + tau_ampa*g_incomingauxampa/dt" - ) - if ( - self.get_line_is_dvardt(line, var_name="g_gaba", tau_name="tau_gaba") - and gaba - ): - ### add " + tau_gaba*g_incomingauxgaba/dt" - ### TODO add model requirements somewhere, here requirements = tau_gaba * dg_gaba/dt = -g_gaba - equations_line_split_list[line_idx] = ( - "tau_gaba*dg_gaba/dt = -g_gaba + tau_gaba*g_incomingauxgaba/dt" - ) - ### join list to a string - neuron_model_init_parameter_dict["equations"] = "\n".join( - equations_line_split_list - ) - - ### 2nd extend description - neuron_model_init_parameter_dict["description"] = ( - f"{neuron_model_init_parameter_dict['description']}\nWith incoming auxillary population input implemented." 
+ return self.ReturnPreparePSP( + v_rest=self._prepare_psp_dict[pop_name]["v_rest"], + I_app_hold=self._prepare_psp_dict[pop_name]["I_app_hold"], + psp_init_sampler=self._prepare_psp_dict[pop_name]["psp_init_sampler"], ) - return neuron_model_init_parameter_dict + class ReturnPreparePSP: + def __init__( + self, v_rest: float, I_app_hold: float, psp_init_sampler: ArrSampler + ): + self.v_rest = v_rest + self.I_app_hold = I_app_hold + self.psp_init_sampler = psp_init_sampler - def get_line_is_dvardt(self, line: str, var_name: str, tau_name: str): + def _get_init_neuron_variables_for_psp( + self, pop_name: str, v_rest: float, I_app_hold: float + ): """ - Check if a equation string has the form "tau*dvar/dt = -var". + Get the initial variables of the neuron model for the PSP calculation. Args: - line (str): - Equation string - var_name (str): - Name of the variable - tau_name (str): - Name of the time constant + pop_name (str): + Name of the population + v_rest (float): + Resting membrane potential + I_app_hold (float): + Current which silences the neuron Returns: - is_solution_correct (bool): - True if the equation is as expected, False otherwise + sampler (ArrSampler): + Sampler with the initial variables of the neuron model """ - if var_name not in line: - return False - - # Define the variables - var, _, _, _ = sp.symbols(f"{var_name} d{var_name} dt {tau_name}") - - # Given equation as a string - equation_str = line + ### get the names of the variables of the neuron model + var_name_list = self._single_nets.single_net( + pop_name=pop_name, mode="normal" + ).population.variables + ### get the variables of the neuron model after 5000 ms + var_arr = self._simulator.get_v_psp( + v_rest=v_rest, I_app_hold=I_app_hold, pop_name=pop_name + ) + ### create a sampler with this single data sample + sampler = ArrSampler(arr=var_arr, var_name_list=var_name_list) + return sampler - # Parse the equation string - lhs, rhs = equation_str.split("=") - lhs = sp.sympify(lhs) - rhs = 
sp.sympify(rhs) + def _find_I_app_hold( + self, + pop_name: str, + variables_v_rest: dict, + ): + """ + Find the current which silences the neuron. - # Form the equation - equation = sp.Eq(lhs, rhs) + Args: + pop_name (str): + Name of the population + variables_v_rest (dict): + Stady state variables of the neuron during setting v_rest as membrane + potential - # Solve the equation for var - try: - solution = sp.solve(equation, var) - except: - ### equation is not solvable with variables means it is not as expected - return False + Returns: + v_rest (float): + Resting membrane potential + I_app_hold (float): + Current which silences the neuron + """ + ### find I_app_hold with find_x_bound + sf.Logger().log( + f"[{pop_name}]: search I_app_hold with y(X) = CHANGE_OF_V(I_app=X), y(X_bound) should be 0", + verbose=_model_configurator_verbose, + ) - # Given solution to compare - expected_solution_str = f"-{tau_name}*d{var_name}/dt" - expected_solution = sp.sympify(expected_solution_str) + I_app_hold = -ef.find_x_bound( + ### negative current initially reduces v then v climbs back up --> + ### get_v_change_after_v_rest checks how much v changes during second half of + ### 2000 ms simulation + y=lambda X_val: -self._get_v_change_after_v_rest( + pop_name=pop_name, + variables_v_rest=variables_v_rest, + ### find_x_bound only uses positive values for X and + ### increases them, expecting to increase y, therefore use -X for I_app + ### (increasing X will "increase" negative current) and negative sign for + ### the returned value (for no current input the change is positive, this + ### should decrease to zero, with negative sign: for no current input the + ### change is negative, this should increase above zero) + I_app=-X_val, + ), + ### y is initially negative and should increase above 0, therefore search for + ### y_bound=0 with bound_type="greater" + x0=0, + y_bound=0, + tolerance=0.01, + bound_type="greater", + ) + sf.Logger().log( + f"[{pop_name}]: found 
I_app_hold={I_app_hold} with y(X_bound)={self._get_v_change_after_v_rest(pop_name=pop_name, variables_v_rest=variables_v_rest, I_app=I_app_hold)}", + verbose=_model_configurator_verbose, + ) + ### again simulate the neuron with the obtained I_app_hold to get the new v_rest + v_rest_arr = self._simulator.get_v_2000( + pop_name=pop_name, + initial_variables=variables_v_rest, + I_app=I_app_hold, + ) + v_rest = v_rest_arr[-1] + return v_rest, I_app_hold - # Check if the solution is as expected - is_solution_correct = solution[0] == expected_solution + def _find_v_rest_initial( + self, + pop_name: str, + ): + """ + Find the initial v_rest with the voltage clamp single neuron network for the + given population. Furthermore, get the change of v durign setting v_rest and the + stady state variables of the neuron (at the end of the simulation). - return is_solution_correct + Args: + pop_name (str): + Name of the population - class SpikeProbCalcNeuron(Neuron): - """ - Neuron model to calculate the spike probabilities of the presynaptic neurons. 
+ Returns: + v_rest (float): + Resting membrane potential + detla_v_v_rest (float): + Change of the membrane potential during setting v_rest as membrane + potential + variables_v_rest (dict): + Stady state variables of the neuron during setting v_rest as membrane + potential """ + ### find v where dv/dt is minimal with voltage clamp network (best = 0, it can + ### only be >= 0) + v_arr = np.linspace(-90, -20, 200) + v_clamp_arr = np.array( + [ + self._simulator.get_v_clamp_2000(pop_name=pop_name, v=v_val) + for v_val in v_arr + ] + ) + v_clamp_min_idx = argrelmin(v_clamp_arr)[0] + v_rest = np.min(v_arr[v_clamp_min_idx]) + if _model_configurator_do_plot: + plt.close("all") + plt.figure() + plt.plot(v_arr, v_clamp_arr) + plt.xlabel("v in mV") + plt.ylabel("v_clamp_rec") + plt.axvline(v_rest, color="k") + plt.axhline(0, color="k", ls="dashed") + plt.savefig(f"{_model_configurator_figure_folder}/v_clamp_{pop_name}.png") + plt.close("all") - def __init__(self, reduced_size=1): - """ - Args: - reduced_size (int): - Reduced size of the associated presynaptic population - """ - parameters = f""" - reduced_size = {reduced_size} : population - tau= 1.0 : population - """ - equations = """ - tau*dr/dt = g_ampa/reduced_size - r - g_ampa = 0 - """ - super().__init__(parameters=parameters, equations=equations) + ### do again the simulation only with the obtained v_rest to get the detla_v for + ### v_rest + detla_v_v_rest = ( + self._simulator.get_v_clamp_2000(pop_name=pop_name, v=v_rest) * dt() + ) + population = self._single_nets.single_net( + pop_name=pop_name, mode="v_clamp" + ).population + ### and the stady state variables of the neuron + variables_v_rest = { + var_name: getattr(population, var_name) for var_name in population.variables + } + return v_rest, detla_v_v_rest, variables_v_rest - class InputCalcNeuron(Neuron): - """ - This neurons gets the spike probabilities of the pre neurons and calculates the - incoming spikes for each projection. 
It accumulates the incoming spikes of all - projections (of the same target type) and calculates the conductance increase - for the post neuron. + def _get_v_rest_is_const(self, pop_name: str, variables_v_rest: dict): """ + Check if the membrane potential is constant after setting it to v_rest. - def __init__(self, projection_dict): - """ - Args: - projection_dict (dict): - keys: names of afferent projections (of the same target type) - values: dict with keys "weights", "pre_name" - """ + Args: + pop_name (str): + Name of the population + variables_v_rest (dict): + Stady state variables of the neuron during setting v_rest as membrane + potential, used as initial variables for the simulation - ### create parameters - parameters = [ - f""" - number_synapses_{proj_name} = 0 - weights_{proj_name} = {vals['weights']} - """ - for proj_name, vals in projection_dict.items() - ] - parameters = "\n".join(parameters) + Returns: + v_rest_is_constant (bool): + True if the membrane potential is constant, False otherwise + v_rest_arr (np.array): + Membrane potential for the 2000 ms simulation with shape: (time_steps,) + """ + ### check if the neuron stays at v_rest with normal neuron + v_rest_arr = self._simulator.get_v_2000( + pop_name=pop_name, + initial_variables=variables_v_rest, + I_app=0, + ) + v_rest_arr_is_const = ( + np.std(v_rest_arr) <= np.mean(np.absolute(v_rest_arr)) / 1000 + ) + return v_rest_arr_is_const, v_rest_arr - ### create equations - equations = [ - f""" - incoming_spikes_{proj_name} = round(number_synapses_{proj_name} * sum(spikeprob_{vals['pre_name']}) + Normal(0, 1)*sqrt(number_synapses_{proj_name} * sum(spikeprob_{vals['pre_name']}) * (1 - sum(spikeprob_{vals['pre_name']})))) : min=0, max=number_synapses_{proj_name} - """ - for proj_name, vals in projection_dict.items() - ] - equations = "\n".join(equations) - sum_of_conductance_increase = ( - "r = " - + "".join( - [ - f"incoming_spikes_{proj_name} * weights_{proj_name} + " - for proj_name in 
projection_dict.keys() - ] - )[:-3] - ) - equations = equations + "\n" + sum_of_conductance_increase + def _get_v_change_after_v_rest( + self, pop_name: str, variables_v_rest: dict, I_app: float + ): + """ + Check how much the membrane potential changes after setting it to v_rest. - super().__init__(parameters=parameters, equations=equations) + Args: + pop_name (str): + Name of the population + variables_v_rest (dict): + Stady state variables of the neuron during setting v_rest as membrane + potential, used as initial variables for the simulation + + Returns: + change_after_v_rest (np.array): + Change of the membrane potential after setting it to v_rest + """ + ### simulate 2000 ms after setting v_rest + v_rest_arr = self._simulator.get_v_2000( + pop_name=pop_name, + initial_variables=variables_v_rest, + I_app=I_app, + ) + ### check how much v changes during the second half + ### std(v) - mean(v)/1000 should be close to 0, the larger the value the more v + ### changes + change_after_v_rest = ( + np.std(v_rest_arr[len(v_rest_arr) // 2 :], axis=0) + - np.mean(np.absolute(v_rest_arr[len(v_rest_arr) // 2 :]), axis=0) / 1000 + ) + return change_after_v_rest class GetMaxSyn: @@ -2470,6 +2541,10 @@ def __init__( target_firing_rate_dict (dict[str, float]): Target firing rate for each population """ + sf.Logger().log( + "Find maximal synaptic input for each population.", + verbose=_model_configurator_verbose, + ) self._simulator = simulator self._max_syn_dict = {} ### loop over all populations @@ -2545,9 +2620,10 @@ def _get_max_g_gaba(self, pop_name: str, max_psp: float): """ ### find g_gaba max using max IPSP sf.Logger().log( - f"[{pop_name}]: search g_gaba_max with y(X) = PSP(g_ampa=0, g_gaba=X)" + f"[{pop_name}]: search g_gaba_max with y(X) = PSP(g_ampa=0, g_gaba=X), y(X_bound) should be {max_psp}", + verbose=_model_configurator_verbose, ) - return ef.find_x_bound( + x_bound = ef.find_x_bound( y=lambda X_val: self._simulator.get_ipsp( pop_name=pop_name, g_gaba=X_val, @@ 
-2556,6 +2632,11 @@ def _get_max_g_gaba(self, pop_name: str, max_psp: float): y_bound=max_psp, tolerance=0.005, ) + sf.Logger().log( + f"[{pop_name}]: found g_gaba_max={x_bound} with y(X_bound)={self._simulator.get_ipsp(pop_name=pop_name, g_gaba=x_bound)}", + verbose=_model_configurator_verbose, + ) + return x_bound def _get_max_g_ampa(self, pop_name: str, g_gaba_max: float): """ @@ -2574,7 +2655,8 @@ def _get_max_g_ampa(self, pop_name: str, g_gaba_max: float): """ ### find g_ampa max by "overriding" IPSP of g_gaba max sf.Logger().log( - f"[{pop_name}]: search g_ampa_max with y(X) = PSP(g_ampa=X, g_gaba=g_gaba_max={g_gaba_max})" + f"[{pop_name}]: search g_ampa_max with y(X) = PSP(g_ampa=X, g_gaba=g_gaba_max={g_gaba_max}), y(X_bound) should be {0}", + verbose=_model_configurator_verbose, ) def func(x): @@ -2594,12 +2676,17 @@ def func(x): else: return y - return ef.find_x_bound( + x_bound = ef.find_x_bound( y=func, x0=0, y_bound=0, tolerance=0.005, ) + sf.Logger().log( + f"[{pop_name}]: found g_ampa_max={x_bound} with y(X_bound)={self._simulator.get_ipsp(pop_name=pop_name, g_ampa=x_bound, g_gaba=g_gaba_max)}", + verbose=_model_configurator_verbose, + ) + return x_bound def _get_max_I_app(self, pop_name: str, target_firing_rate_dict: dict[str, float]): """ @@ -2622,9 +2709,10 @@ def _get_max_I_app(self, pop_name: str, target_firing_rate_dict: dict[str, float ### find I_max with f_0, and f_max using find_x_bound sf.Logger().log( - f"[{pop_name}]: search I_app_max with y(X) = f(I_app=X, g_ampa=0, g_gaba=0)" + f"[{pop_name}]: search I_app_max with y(X) = f(I_app=X, g_ampa=0, g_gaba=0), y(X_bound) should be {f_max}", + verbose=_model_configurator_verbose, ) - I_max = ef.find_x_bound( + x_bound = ef.find_x_bound( y=lambda X_val: self._simulator.get_firing_rate( pop_name=pop_name, I_app=X_val, @@ -2633,8 +2721,11 @@ def _get_max_I_app(self, pop_name: str, target_firing_rate_dict: dict[str, float y_bound=f_max, tolerance=1, ) - - return I_max + sf.Logger().log( + 
f"[{pop_name}]: found I_app_max={x_bound} with y(X_bound)={self._simulator.get_firing_rate(pop_name=pop_name, I_app=x_bound)}", + verbose=_model_configurator_verbose, + ) + return x_bound class GetWeights: @@ -2645,6 +2736,10 @@ def __init__( analyze_model: AnalyzeModel, max_syn: GetMaxSyn.MaxSynGetter, ): + sf.Logger().log( + "Initialize the weight dictionary with the maximal weights.", + verbose=_model_configurator_verbose, + ) self._model = model self._do_not_config_list = do_not_config_list self._analyze_model = analyze_model @@ -2850,199 +2945,372 @@ def _get_afferent_proj_names(self, pop_name: str, target: str): return proj_name_list -class CreateVoltageClampEquations: - """ - Class to create voltage clamp equations from the given equations of a neuron model. - The equations of the neuron model have to contain the voltage change equation in the - form of ... dv/dt ... = ... - - Attributes: - eq_new (list[str]) - new equations of the neuron model with the voltage clamp - """ - - def __init__(self, eq: list[str], neuron_model_attributes_name_list: list[str]): - """ - Args: - eq (list[str]) - equations of the neuron model - neuron_model_attributes_name_list (list[str]) - list of the names of the attributes of the neuron model - """ - ### get the dv/dt equation from equations - eq_v, eq_v_idx = self.get_eq_v(eq=eq) - - ### prepare the equation string for solving - ### TODO replace random distributions and mathematical expressions which may be on the left side - eq_v, tags = self.prepare_eq_v(eq_v=eq_v) - - ### solve equation to delta_v (which is dv/dt) - result = self.solve_delta_v(eq_v, neuron_model_attributes_name_list) - - ### insert the new equation lines for v_clamp and remove the old dv/dt line - self.eq_new = self.replace_delta_v( - result=result, eq=eq, eq_v_idx=eq_v_idx, tags=tags - ) - - def replace_delta_v( - self, result: str, eq: list[str], eq_v_idx: int, tags: str = None +class GetBase: + def __init__( + self, + model_normal: CompNeuroModel, + 
model_reduced: CreateReducedModel, + target_firing_rate_dict: dict, + weight_dicts: "GetWeights", + do_not_config_list: list[str], + init_sampler: CreateSingleNeuronNetworks.AllSampler, + max_syn: "GetMaxSyn.MaxSynGetter", + v_tol: float, ): - """ - Replace the dv/dt line with the voltage clamp lines. - - Args: - result (str) - right side of the dv/dt equation - eq (list[str]) - equations of the neuron model - eq_v_idx (int) - index of the dv/dt line - tags (str) - tags of the dv/dt line - - Returns: - eq (list[str]) - new equations of the neuron model with the voltage clamp - """ - ### create the line for recording voltage clamp (right side of dv/dt) - eq_new_0 = f"v_clamp_rec_sign = {result}" - ### create the line for the absolute voltage clamp - eq_new_1 = f"v_clamp_rec = fabs({result})" - ### create the line for the absolute voltage clamp from the previous time step - eq_new_2 = "v_clamp_rec_pre = v_clamp_rec" - ### create the voltage clamp line "dv/dt=0" with tags if they exist - if not isinstance(tags, type(None)): - eq_new_3 = f"dv/dt=0 : {tags}" - else: - eq_new_3 = "dv/dt=0" - ### remove old v line and insert new three lines, order is important - del eq[eq_v_idx] - eq.insert(eq_v_idx, eq_new_0) - eq.insert(eq_v_idx, eq_new_1) - eq.insert(eq_v_idx, eq_new_2) - eq.insert(eq_v_idx, eq_new_3) - ### return new neuron equations - return eq - - def get_line_is_v(self, line: str): - """ - Check if the line contains the definition of dv/dt. - - Args: - line (str) - line of the equations of the neuron model - - Returns: - line_is_v (bool) - True if the line contains the definition of dv/dt, False otherwise - """ - if "v" not in line: - return False - - ### remove whitespaces - line = line.replace(" ", "") - - ### check for dv/dt - if "dv/dt" in line: - return True - - return False - - def get_eq_v(self, eq: list[str]): - """ - Get the dv/dt equation from the equations of the neuron model. 
+ ### TODO if the weights are "too strong" crazy I_app values are needed --> I + ### have to use lower and upper bounds! + ### but then TODO: find a better way to get I_app_max ... this f_0+f_target+100 is not so good + ### maybe something with to voltage clamp + ### an input current which causes this change of v, maybe I find a good heuristic a la max change of v caused by input current within 1 ms should be 10 mV + ### TODO another idea: for the optimization not only consider the firing rate but also the membrane potential if the firing rate is zero + ### the problem with "only firng rate" is that it has no gradient information for below zero firing rates + sf.Logger().log( + "Find the base currents for the target firing rates.", + verbose=_model_configurator_verbose, + ) + self._model_normal = model_normal + self._create_reduce_model = model_reduced + self._weight_dicts = weight_dicts + self._do_not_config_list = do_not_config_list + self._init_sampler = init_sampler + self._max_syn = max_syn + self._v_tol = v_tol + ### get the populations names of the configured populations + self._pop_names_config = [ + pop_name + for pop_name in model_normal.populations + if pop_name not in do_not_config_list + ] + ### convert the target firing rate dict to an array + self._target_firing_rate_arr = [] + print(self._pop_names_config) + for pop_name in self._pop_names_config: + self._target_firing_rate_arr.append(target_firing_rate_dict[pop_name]) + self._target_firing_rate_arr = np.array(self._target_firing_rate_arr) + ### get the base currents + self._prepare_get_base() + self._base_dict = self._get_base() - Args: - eq (list[str]) - equations of the neuron model + @property + def base_dict(self): + return self._base_dict - Returns: - eq_v (str) - dv/dt equation - eq_v_idx (int) - index of the dv/dt line - """ - ### get the dv/dt equation from equations - ### find the line with dv/dt= or v+= or v= - line_is_v_list = [False] * len(eq) - ### check in which lines v is defined - 
for line_idx, line in enumerate(eq): - line_is_v_list[line_idx] = self.get_line_is_v(line) - ### raise error if no v or multiple times v - if True not in line_is_v_list or sum(line_is_v_list) > 1: - raise ValueError( - "In the equations of the neurons has to be exactly a single line which defines dv/dt!" + def _prepare_get_base(self): + sf.Logger().log( + "Prepare the model and experiment for the optimization.", + verbose=_model_configurator_verbose, + ) + ### clear ANNarchy + mf.cnp_clear(functions=False, constants=False) + ### create and compile the model + self._create_reduce_model.model_reduced.create() + ### create monitors for recording the spikes of all populations + ### for CompNeuroMonitors we need the "_reduced" suffix + mon = CompNeuroMonitors( + mon_dict={ + f"{pop_name}_reduced": ["spike"] + for pop_name in self._model_normal.populations + } + ) + ### initialize all populations with the init sampler + for pop_name in self._pop_names_config: + ### for get_population we need the "_reduced" suffix + self._init_sampler.get(pop_name=pop_name).set_init_variables( + get_population(f"{pop_name}_reduced") ) - ### get the index of the line with dv/dt - eq_v_idx = line_is_v_list.index(True) - ### set the v equation - eq_v = eq.copy()[eq_v_idx] - return eq_v, eq_v_idx + ### set the model weights + self._create_reduce_model.set_weights( + weight_dict=self._weight_dicts.weight_dict + ) + ### create the experiment + self._exp = self.MyExperiment(monitors=mon) + ### store the model state for all populations + self._exp.store_model_state( + compartment_list=self._create_reduce_model.model_reduced.populations + ) + ### set lower and upper bounds and initial guess + self._lb = [] + self._ub = [] + self._x0 = [] + for pop_name in self._pop_names_config: + self._lb.append(-self._max_syn.get(pop_name=pop_name).I_app) + self._ub.append(self._max_syn.get(pop_name=pop_name).I_app) + self._x0.append(0.0) - def prepare_eq_v(self, eq_v: str): + def _get_base(self): """ - Prepare 
the equation string for solving with sympy. - - Args: - eq_v (str) - dv/dt equation + Perform the optimization to find the base currents for the target firing rates. Returns: - eq_v (str) - dv/dt equation - tags (str) - tags of the dv/dt equation - """ - ### remove whitespaces - eq_v = eq_v.replace(" ", "") - ### replace dv/dt by delta_v - eq_v = eq_v.replace("dv/dt", "delta_v") - ### separate equation and tags - eq_tags_list = eq_v.split(":") - eq_v = eq_tags_list[0] - if len(eq_tags_list) > 1: - tags = eq_tags_list[1] - else: - tags = None - return eq_v, tags - - def solve_delta_v(self, eq_v: str, neuron_model_attributes_name_list: list[str]): + base_dict (dict): + Dict with the base currents for each population """ - Solve the dv/dt equation for delta_v (which is dv/dt). - Args: - eq_v (str) - dv/dt equation - neuron_model_attributes_name_list (list[str]) - list of the names of the attributes of the neuron model + ### Perform the optimization using Minimize class + result = self.Minimize( + func=self._get_firing_rate, + yt=self._target_firing_rate_arr, + x0=np.array(self._x0), + lb=np.array(self._lb), + ub=np.array(self._ub), + tol_error=self._v_tol, + tol_convergence=0.01, + max_it=20, + ) - Returns: - solution_str (str) - right side of the dv/dt equation - """ - ### Define the attributes of the neuron model as sympy symbols - sp.symbols(",".join(neuron_model_attributes_name_list)) - ### Define delta_v and right_side as sympy symbols - delta_v, _ = sp.symbols("delta_v right_side") + optimized_inputs = result.x + if not result.success: + sf.Logger().log("Optimization failed, target firing rates not reached!") + print("Optimization failed, target firing rates not reached!") + base_dict = { + pop_name: optimized_inputs[idx] + for idx, pop_name in enumerate(self._pop_names_config) + } + return base_dict - ### Parse the equation string - lhs, rhs_string = eq_v.split("=") - lhs = sp.sympify(lhs) - rhs = sp.sympify(rhs_string) + def _get_firing_rate(self, I_app_list: 
list[float]): + ### convert the I_app_list to a dict + I_app_dict = {} + counter = 0 + for pop_name in self._pop_names_config: + ### for the I_app_dict we need the "_reduced" suffix + I_app_dict[f"{pop_name}_reduced"] = I_app_list[counter] + counter += 1 + ### run the experiment + results = self._exp.run(I_app_dict) + ### get the firing rates from the recorded spikes + rate_list = [] + rate_dict = {} + for pop_name in self._pop_names_config: + ### for the spike dict we need the "_reduced" suffix + spike_dict = results.recordings[0][f"{pop_name}_reduced;spike"] + t, _ = raster_plot(spike_dict) + ### only take spikes after the first 500 ms, because the neurons are + ### initialized in resting-state and with an input current there can be + ### drastic activity changes at the beginning + t = t[t > 500] + nbr_spikes = len(t) + ### divide number of spikes by the number of neurons and the duration in s + if nbr_spikes > 0: + rate = nbr_spikes / (4.5 * get_population(f"{pop_name}_reduced").size) + else: + ### for rate 0 return the membrane potential instead, so that <0 still contains gradient information + rate = np.mean(get_population(f"{pop_name}_reduced").v) + rate_list.append(rate) + rate_dict[pop_name] = rate + # sf.Logger().log(f"I_app_dict: {I_app_dict}") + # sf.Logger().log(f"Firing rates: {rate_dict}") - ### Form the equation - equation = sp.Eq(lhs, rhs) + # af.PlotRecordings( + # figname="firing_rates.png", + # recordings=results.recordings, + # recording_times=results.recording_times, + # shape=(len(self._model_normal.populations), 1), + # plan={ + # "position": list(range(1, len(self._model_normal.populations) + 1)), + # "compartment": [ + # f"{pop_name}_reduced" for pop_name in self._model_normal.populations + # ], + # "variable": ["spike"] * len(self._model_normal.populations), + # "format": ["hybrid"] * len(self._model_normal.populations), + # }, + # ) + return np.array(rate_list) - ### Solve the equation for delta_v - try: - solution = sp.solve(equation, 
delta_v)[0] - except: - raise ValueError("Could not find solution for dv/dt!") + class MyExperiment(CompNeuroExp): + def run(self, I_app_dict: dict[str, float]): + """ + Simulate the model for 5000 ms with the given input currents. - ### Get the solution as a string - solution_str = str(solution) + Args: + I_app_dict (dict[str, float]): + Dict with the input currents for each population - ### replace right_side by the actual right side string - solution_str = solution_str.replace("right_side", f"({rhs_string})") + Returns: + results (CompNeuroResults): + Results of the simulation + """ + ### reset to initial state + self.reset() + set_seed(0) + ### activate monitor + self.monitors.start() + ### set the input currents + for pop_name, I_app in I_app_dict.items(): + get_population(pop_name).I_app = I_app + ### simulate 5000 ms + simulate(5000) + ### return results + return self.results() - return solution_str + class Minimize: + def __init__( + self, func, yt, x0, lb, ub, tol_error, tol_convergence, max_it + ) -> None: + """ + Args: + func (Callable): + Function which takes a vector as input and returns a vector as output + target_values (np.array): + Target output vector of the function + x0 (np.array): + Initial input vector + lb (np.array): + Lower bounds of the input vector + ub (np.array): + Upper bounds of the input vector + tol_error (float): + If the error is below this value the optimization stops + tol_convergence (float): + If the change of the error stays below this value the optimization stops + max_it (int): + Maximum number of iterations + """ + x = x0 + x_old = x0 + y = yt + error = np.ones(x0.shape) * 20 + error_old = np.ones(x0.shape) * 20 + it = 0 + search_gradient_diff = np.ones(x0.shape) + alpha = np.ones(x0.shape) + error_list = [] + dx_list = [] + dy_list = [] + x_list = [] + y_list = [] + it_list = [] + + def error_changed(error_list, tol, n=3): + if len(error_list) < 2: + return True + return (np.max(error_list[-n:]) - np.min(error_list[-n:])) 
> tol + + ### run until the error does not change anymore or the maximum number of + ### iterations is reached, also break if the error is small enough + while it < max_it and error_changed(error_list, tol_convergence): + sf.Logger().log(f"iteration: {it}", verbose=_model_configurator_verbose) + y_old = y + y = func(x) + dx_list.append(x - x_old) + dy_list.append(y - y_old) + ### TODO if x did not change much, use the previous gradient again, but maybe not a good idea, or at least not easy to implement, sicne gradient depends on all inputs + sf.Logger().log(f"x: {x}", verbose=_model_configurator_verbose) + sf.Logger().log(f"y: {y}", verbose=_model_configurator_verbose) + x_list.append(x) + y_list.append(y) + it_list.append(it) + ### here we know the new y(x) + ### check if the error sign changed + error_old = error + error = yt - y + ### if error is small enough stop the optimization + if np.all(np.abs(error) < tol_error): + break + error_sign_changed = np.sign(error) != np.sign(error_old) + sf.Logger().log( + f"error_sign_changed: {error_sign_changed}", + verbose=_model_configurator_verbose, + ) + ### get how much the error (in total, not for individual inputs) changed + error_list.append(np.mean(np.abs(error))) + sf.Logger().log(f"error_list: {error_list}") + ### if the error sign changed: + ### - check if error is larger as before + ### - if yes -> check if error is also larger than tolerance + ### - if yes -> use again the previous x and compute current y again + ### - we calculate (as usual) a new gradient + ### - we reduce alpha, so this time the step is smaller + error_increased = np.abs(error) > np.abs(error_old) + error_is_large = np.abs(error) > tol_error + change_x = error_sign_changed & error_increased & error_is_large + x[change_x] = x_old[change_x] + if np.any(change_x): + y = func(x) + sf.Logger().log( + "some errors changed sign, increased, and are larger than tolerance", + verbose=_model_configurator_verbose, + ) + sf.Logger().log(f"x: {x}", 
verbose=_model_configurator_verbose) + sf.Logger().log(f"y: {y}", verbose=_model_configurator_verbose) + x_list.append(x) + y_list.append(y) + it_list.append(it) + ### reduce alpha for the inputs where the error sign changed + ### for the others alpha reaches 1 + alpha[error_sign_changed] /= 2 + alpha[~error_sign_changed] += (1 - alpha[~error_sign_changed]) / 5 + ### calculate the gradient i.e. change of the output values for each input + grad = np.zeros((yt.shape[0], x0.shape[0])) + for i in range(len(x0)): + ### search for the gradient of the i-th input, increase the stepwidth + ### which is used to calculate the gradient if the gradient for the + ### associated output value is below 1 + while grad[i, i] < 1: + x_plus = x.copy() + ### change only the currently selected input whose gradient should be + ### calculated + x_plus[i] += search_gradient_diff[i] + y_plus = func(x_plus) + sf.Logger().log(f"x_plus: {x_plus}", verbose=False) + sf.Logger().log(f"y_plus: {y_plus}", verbose=False) + grad[:, i] = y_plus - y + ### if gradient is above 10 reduce the search gradient diff + if grad[i, i] >= 10: + search_gradient_diff[i] /= 1.5 + ### if gradient is below 1 increase the search gradient diff + elif grad[i, i] < 1: + search_gradient_diff[i] *= 2 + ### calculate the wanted change of the output values + delta_y = yt - y + sf.Logger().log(f"delta_y: {delta_y}", verbose=False) + sf.Logger().log(f"grad:\n{grad}", verbose=False) + + # Example coefficient matrix A (m x n matrix) + A = grad + + # Right-hand side vector b (m-dimensional vector) + b = delta_y + + # Solve the system using least squares method + solution, residuals, rank, s = np.linalg.lstsq(A, b, rcond=None) + + # Output the results + sf.Logger().log(f"Solution vector x: {solution}", verbose=False) + + # Calculate the matrix-vector product Ax + Ax = np.dot(A, solution) + + # Output the matrix-vector product and compare with b + sf.Logger().log(f"delta_y from solution: {Ax}", verbose=False) + + ### solution 
contains the info how much each input should change (how many + ### times the change of gradient is needed to reach the target values) + x_old = x + x = x + solution * search_gradient_diff * alpha + it += 1 + + self.x = x + self.success = np.all(np.abs(error) < tol_error) + + x_arr = np.array(x_list) + y_arr = np.array(y_list) + it_arr = np.array(it_list) + + ### TODO remove or make this optimal (for debugging), also the prints above + if _model_configurator_do_plot: + plt.close("all") + plt.figure() + for idx in range(4): + ax = plt.subplot(4, 1, idx + 1) + ### plot the x values + plt.plot(it_arr, x_arr[:, idx]) + plt.ylabel(f"x{idx}") + ### second y axis on the right for the y values + ax2 = ax.twinx() + ax2.plot(it_arr, y_arr[:, idx], color="red") + ax2.set_ylabel(f"y{idx}", color="red") + plt.xlabel("iteration") + plt.tight_layout() + plt.savefig(f"{_model_configurator_figure_folder}/optimization.png") + plt.close("all") diff --git a/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py b/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py index d1ff13a..7f836b9 100644 --- a/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py +++ b/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py @@ -15,6 +15,7 @@ CompNeuroMonitors, PlotRecordings, my_raster_plot, + cnp_clear, ) from CompNeuroPy.examples.model_configurator.model_configurator_cnp import ( ModelConfigurator, @@ -56,7 +57,7 @@ def BGM_part_function(params): tau_gaba=params["stn.tau_gaba"], E_ampa=params["stn.E_ampa"], E_gaba=params["stn.E_gaba"], - noise=0, + noise=1, tau_power=10, snr_target=4, rate_noise=100, @@ -77,7 +78,7 @@ def BGM_part_function(params): tau_gaba=params["snr.tau_gaba"], E_ampa=params["snr.E_ampa"], E_gaba=params["snr.E_gaba"], - noise=0, + noise=1, tau_power=10, snr_target=4, rate_noise=100, @@ -98,7 +99,7 @@ def BGM_part_function(params): tau_gaba=params["gpe.tau_gaba"], E_ampa=params["gpe.E_ampa"], 
E_gaba=params["gpe.E_gaba"], - noise=0, + noise=1, tau_power=10, snr_target=4, rate_noise=100, @@ -119,7 +120,7 @@ def BGM_part_function(params): tau_gaba=params["thal.tau_gaba"], E_ampa=params["thal.E_ampa"], E_gaba=params["thal.E_gaba"], - noise=0, + noise=1, tau_power=10, snr_target=4, rate_noise=100, @@ -215,16 +216,16 @@ def BGM_part_function(params): params = {} ####### POPULATIONS PARAMETERS ###### ### cortex / input populations - params["cor_exc.size"] = 100 + params["cor_exc.size"] = 1000 params["cor_exc.tau_up"] = 10 params["cor_exc.tau_down"] = 30 params["cor_exc.rates"] = 15 - params["cor_inh.size"] = 100 + params["cor_inh.size"] = 1000 params["cor_inh.tau_up"] = 10 params["cor_inh.tau_down"] = 30 params["cor_inh.rates"] = 30 ### BG Populations - params["snr.size"] = 100 + params["snr.size"] = 1000 params["snr.a"] = 0.005 params["snr.b"] = 0.585 params["snr.c"] = -65 @@ -236,7 +237,7 @@ def BGM_part_function(params): params["snr.tau_gaba"] = 10 params["snr.E_ampa"] = 0 params["snr.E_gaba"] = -90 - params["stn.size"] = 50 + params["stn.size"] = 1000 params["stn.a"] = 0.005 params["stn.b"] = 0.265 params["stn.c"] = -65 @@ -248,7 +249,7 @@ def BGM_part_function(params): params["stn.tau_gaba"] = 10 params["stn.E_ampa"] = 0 params["stn.E_gaba"] = -90 - params["gpe.size"] = 100 + params["gpe.size"] = 1000 params["gpe.a"] = params["snr.a"] # 0.039191890241715294 params["gpe.b"] = params["snr.b"] # 0.000548238111291427 params["gpe.c"] = params["snr.c"] # -49.88014418530518 @@ -260,7 +261,7 @@ def BGM_part_function(params): params["gpe.tau_gaba"] = 10 params["gpe.E_ampa"] = 0 params["gpe.E_gaba"] = -90 - params["thal.size"] = 100 + params["thal.size"] = 1000 params["thal.a"] = 0.02 params["thal.b"] = 0.2 params["thal.c"] = -65 @@ -294,56 +295,6 @@ def BGM_part_function(params): do_create=False, ) - # model.create() - # mon = CompNeuroMonitors( - # { - # pop_name: [ - # "I_noise", - # "I_signal", - # "I", - # "power_I_signal", - # "spike", - # ] - # for pop_name 
in ["stn"] - # } - # ) - # mon.start() - - # simulate(500) - # get_population("stn").I_app = 10 - # simulate(500) - - # recordings = mon.get_recordings() - # recording_times = mon.get_recording_times() - - # PlotRecordings( - # recordings=recordings, - # recording_times=recording_times, - # chunk=0, - # shape=(5, 1), - # plan={ - # "position": list(range(1, 5 + 1)), - # "compartment": ["stn"] * 5, - # "variable": [ - # "I_noise", - # "I_signal", - # "I", - # "power_I_signal", - # "spike", - # ], - # "format": [ - # "line", - # "line", - # "line", - # "line", - # "hybrid", - # ], - # }, - # figname="model_recordings_noise.png", - # # time_lim=(495, 505), - # ) - # quit() - ### model configurator should get target resting-state firing rates for the ### model populations one wants to configure and their afferents as input ### TODO allow for target range @@ -361,22 +312,23 @@ def BGM_part_function(params): model_conf = ModelConfigurator( model=model, target_firing_rate_dict=target_firing_rate_dict, - max_psp=0.7, + max_psp=5.0, + v_tol=1.0, do_not_config_list=do_not_config_list, - print_guide=True, - I_app_variable="I_app", cache=True, - clear_cache=True, + clear_cache=False, log_file="model_configurator.log", + verbose=True, + do_plot=False, ) ### set syn load model_conf.set_syn_load( syn_load_dict={ - "stn": {"ampa": 1.0, "gaba": 1.0}, - "snr": {"ampa": 1.0, "gaba": 1.0}, - "gpe": {"ampa": 1.0}, - "thal": {"gaba": 1.0}, + "stn": {"ampa": 1.0, "gaba": 0.0}, + "snr": {"ampa": 1.0, "gaba": 0.0}, + "gpe": {"ampa": 0.0}, + "thal": {"gaba": 0.0}, }, syn_contribution_dict={ "stn": {"ampa": {"cor_exc__stn": 1.0}, "gaba": {"cor_inh__stn": 1.0}}, @@ -402,23 +354,49 @@ def BGM_part_function(params): # } # ) - I_base_dict = model_conf.get_base() + I_base_dict = { + "stn": -67.26137607678399, + "snr": -159.42154403510528, + "gpe": 20.259671890303633, + "thal": 7.695960857828326, + } # model_conf.get_base() print("I_base:") print(I_base_dict) - model_conf.set_base() + 
model_conf.set_base(base_dict=I_base_dict) ### do a test simulation - mon = CompNeuroMonitors( - { - "cor_exc": ["spike"], - "cor_inh": ["spike"], - "stn": ["spike"], - "gpe": ["spike"], - "snr": ["spike"], - "thal": ["spike"], - } - ) + mon_dict = { + "cor_exc": ["spike"], + "cor_inh": ["spike"], + "stn": ["spike", "g_ampa"], + "gpe": ["spike"], + "snr": ["spike", "g_ampa"], + "thal": ["spike"], + } + mon = CompNeuroMonitors(mon_dict) + position_list = [] + compartment_list = [] + variable_list = [] + format_list = [] + for idx, pop_name in enumerate(model.populations): + for key, val in mon_dict.items(): + if pop_name in key: + if "spike" in val: + position_list.append(3 * model.populations.index(pop_name) + 1) + compartment_list.append(key) + variable_list.append("spike") + format_list.append("hybrid") + if "g_ampa" in val: + position_list.append(3 * model.populations.index(pop_name) + 2) + compartment_list.append(key) + variable_list.append("g_ampa") + format_list.append("line") + if "r" in val: + position_list.append(3 * model.populations.index(pop_name) + 3) + compartment_list.append(key) + variable_list.append("r") + format_list.append("line") ### initial simulation simulate(1000) mon.start() @@ -440,8 +418,8 @@ def BGM_part_function(params): (t > int(round(1000 / dt()))) * (t < int(round(5000 / dt()))) ) nr_spikes_2nd = np.sum((t > int(round(5000 / dt())))) - rate_1st = nr_spikes_1st / (4 * params[f"{pop_name}.size"]) - rate_2nd = nr_spikes_2nd / (2 * params[f"{pop_name}.size"]) + rate_1st = nr_spikes_1st / (4 * get_population(pop_name).size) + rate_2nd = nr_spikes_2nd / (2 * get_population(pop_name).size) print(f"pop_name: {pop_name}, rate_1st: {rate_1st}, rate_2nd: {rate_2nd}") ### plot recordings @@ -450,11 +428,94 @@ def BGM_part_function(params): recordings=recordings, recording_times=recording_times, chunk=0, - shape=(len(model.populations), 1), + shape=(len(model.populations), 3), + plan={ + "position": position_list, + "compartment": 
compartment_list, + "variable": variable_list, + "format": format_list, + }, + ) + + ### clear ANNarchy, create the reduced model, set the weights for the reduced model + ### and the baselines and then compare the results + cnp_clear() + create_reduced_model = model_conf._model_reduced + create_reduced_model.model_reduced.create() + create_reduced_model.set_weights(weight_dict=model_conf._weight_dicts.weight_dict) + for pop_name, I_app in I_base_dict.items(): + get_population(f"{pop_name}_reduced").I_app = I_app + + mon_dict = { + "cor_exc_reduced": ["spike"], + "cor_exc_spike_collecting_aux": ["r"], + "cor_inh_reduced": ["spike"], + "stn_reduced": ["spike", "g_ampa"], + "gpe_reduced": ["spike"], + "snr_reduced": ["spike", "g_ampa"], + "thal_reduced": ["spike"], + } + mon = CompNeuroMonitors(mon_dict) + position_list = [] + compartment_list = [] + variable_list = [] + format_list = [] + counter = 0 + for idx, pop_name in enumerate(model.populations): + for key, val in mon_dict.items(): + if pop_name in key: + if "spike" in val: + position_list.append(3 * model.populations.index(pop_name) + 1) + compartment_list.append(key) + variable_list.append("spike") + format_list.append("hybrid") + if "g_ampa" in val: + position_list.append(3 * model.populations.index(pop_name) + 2) + compartment_list.append(key) + variable_list.append("g_ampa") + format_list.append("line") + if "r" in val: + position_list.append(3 * model.populations.index(pop_name) + 3) + compartment_list.append(key) + variable_list.append("r") + format_list.append("line") + ### initial simulation + simulate(1000) + mon.start() + ### first simulation with default inputs + simulate(4000) + get_population("cor_exc_reduced").rates = 0 + ### second simulation with changed inputs + simulate(2000) + + ### get recordings + recordings = mon.get_recordings() + recording_times = mon.get_recording_times() + + ### print rates + population_list = [f"{pop_name}_reduced" for pop_name in model.populations] + for pop_name in 
population_list: + spike_dict = recordings[0][f"{pop_name};spike"] + t, n = my_raster_plot(spike_dict) + nr_spikes_1st = np.sum( + (t > int(round(1000 / dt()))) * (t < int(round(5000 / dt()))) + ) + nr_spikes_2nd = np.sum((t > int(round(5000 / dt())))) + rate_1st = nr_spikes_1st / (4 * get_population(pop_name).size) + rate_2nd = nr_spikes_2nd / (2 * get_population(pop_name).size) + print(f"pop_name: {pop_name}, rate_1st: {rate_1st}, rate_2nd: {rate_2nd}") + + ### plot recordings + PlotRecordings( + figname="model_recordings_reduced.png", + recordings=recordings, + recording_times=recording_times, + chunk=0, + shape=(len(population_list), 3), plan={ - "position": list(range(1, len(model.populations) + 1)), - "compartment": model.populations, - "variable": ["spike"] * len(model.populations), - "format": ["hybrid"] * len(model.populations), + "position": position_list, + "compartment": compartment_list, + "variable": variable_list, + "format": format_list, }, ) diff --git a/src/CompNeuroPy/examples/model_configurator/test2.py b/src/CompNeuroPy/examples/model_configurator/test2.py index 2c259f6..57113ca 100644 --- a/src/CompNeuroPy/examples/model_configurator/test2.py +++ b/src/CompNeuroPy/examples/model_configurator/test2.py @@ -2,31 +2,44 @@ import matplotlib.pyplot as plt import scipy.stats as stats -# Parameters -n = 10 # number of trials -p = 0.01 # probability of success -N = 10000 # number of samples - -# Generate data samples -binomial_sample = np.random.binomial(n, p, N) -mean = n * p -std_dev = np.sqrt(n * p * (1 - p)) -normal_sample = np.random.normal(mean, std_dev, N) - -# ### scale normal sample above mean and below mean -# normal_sample_original = normal_sample.copy() -# normal_sample[normal_sample_original >= mean] = ( -# normal_sample_original[normal_sample_original >= mean] * 1.1 -# ) -# normal_sample[normal_sample_original < mean] = ( -# normal_sample_original[normal_sample_original < mean] * 0.9 -# ) - -### round and clip the normal sample 
-normal_sample = np.round(normal_sample) -normal_sample[normal_sample < 0] = 0 -normal_sample[normal_sample > n] = n - +n_arr = np.arange(10, 1000, 10) +p_arr = np.array([0.001, 0.01, 0.1, 0.5, 0.9, 0.99, 0.999]) + +### TODO I have the problem that for very small p the normal distribution is not a good +### approximation of the binomial distribution. +### I think one can shift the mean and scale the standard deviation depending on the p +### and n values. I will try to optimize the shift and scale for each n and p value. +shift_mean_bounds = [-1, 1] +scale_std_bounds = [0.5, 2] + +for n in n_arr: + for p in p_arr: + # set n and p + # then optimize shift of mean + # Parameters + # number of samples + N = 10000 + + # Generate data samples + binomial_sample = np.random.binomial(n, p, N) + mean = n * p + std_dev = np.sqrt(n * p * (1 - p)) + normal_sample = np.random.normal(mean, std_dev, N) + + ### round and clip the normal sample + normal_sample = np.round(normal_sample) + normal_sample[normal_sample < 0] = 0 + normal_sample[normal_sample > n] = n + + print(np.histogram(binomial_sample, bins=n + 1, range=(-0.5, n + 0.5))[0]) + print(np.histogram(normal_sample, bins=n + 1, range=(-0.5, n + 0.5))[0]) + diff = ( + np.histogram(binomial_sample, bins=n + 1, range=(-0.5, n + 0.5))[0] + - np.histogram(normal_sample, bins=n + 1, range=(-0.5, n + 0.5))[0] + ) + error = np.sum(np.abs(diff)) + +quit() # Statistical comparison # Calculate descriptive statistics @@ -57,55 +70,35 @@ # Visual comparison -plt.figure(figsize=(12, 6)) +plt.figure(figsize=(12, 10)) # Histogram of binomial sample -plt.subplot(1, 2, 1) plt.hist( binomial_sample, - bins=n + 1, - range=(-0.5, n + 0.5), + bins=plot_max + 1, + range=(-0.5, plot_max + 0.5), density=True, alpha=0.5, color="b", label="Binomial", ) -plt.hist( - binomial_sample, - bins=n * 50, - range=(-0.5, n + 0.5), - density=True, - alpha=0.5, - color="b", - label="Binomial", -) -plt.xlim(-0.5, n + 0.5) -plt.title("Binomial Distribution") 
-plt.xlabel("Value") -plt.ylabel("Frequency") - -# Histogram of normal sample -plt.subplot(1, 2, 2) -plt.hist( - normal_sample, - bins=n + 1, - range=(-0.5, n + 0.5), - density=True, - alpha=0.5, - color="r", - label="Normal", -) plt.hist( normal_sample, - bins=n * 50, - range=(-0.5, n + 0.5), + bins=plot_max + 1, + range=(-0.5, plot_max + 0.5), density=True, alpha=0.5, color="r", label="Normal", ) -plt.xlim(-0.5, n + 0.5) -plt.title("Normal Distribution") +# set the y ticks every 0.1 +plt.yticks(np.arange(0, 1.1, 0.1)) +plt.grid() + +plt.legend() +plt.ylim(0, 1) +plt.xlim(-0.5, plot_max + 0.5) +plt.title("Binomial Distribution") plt.xlabel("Value") plt.ylabel("Frequency") diff --git a/src/CompNeuroPy/system_functions.py b/src/CompNeuroPy/system_functions.py index 125147d..e72cc10 100644 --- a/src/CompNeuroPy/system_functions.py +++ b/src/CompNeuroPy/system_functions.py @@ -562,7 +562,7 @@ def __new__(cls, log_file: str | None = None): print("Logger file:", file=f) return cls._instance - def log(self, txt): + def log(self, txt, verbose=False): """ Log the given text to the log file. Only if the log file was given during the first initialization. @@ -570,7 +570,11 @@ def log(self, txt): Args: txt (str): Text to be logged + verbose (bool, optional): + Whether to print the text. Default: False. 
""" + if verbose: + print(txt) if self._log_file is None: return From 1247e5f1b62f3f9ceef506175a8b8177bf1ed619 Mon Sep 17 00:00:00 2001 From: olmai Date: Mon, 24 Jun 2024 17:06:51 +0200 Subject: [PATCH 04/21] changed run_script_parallel --- .../examples/model_configurator/test2.py | 205 +++++++++++++++--- src/CompNeuroPy/system_functions.py | 82 ++++++- 2 files changed, 255 insertions(+), 32 deletions(-) diff --git a/src/CompNeuroPy/examples/model_configurator/test2.py b/src/CompNeuroPy/examples/model_configurator/test2.py index 57113ca..a5b73eb 100644 --- a/src/CompNeuroPy/examples/model_configurator/test2.py +++ b/src/CompNeuroPy/examples/model_configurator/test2.py @@ -1,43 +1,194 @@ import numpy as np import matplotlib.pyplot as plt import scipy.stats as stats +from CompNeuroPy import DeapCma, save_variables, load_variables -n_arr = np.arange(10, 1000, 10) + +def generate_samples(n, p, m, mean_shift=0, std_scale=1): + # Generate data samples + binomial_sample = np.random.binomial(n, p, m) + mean = n * p + std_dev = np.sqrt(n * p * (1 - p)) + normal_sample = np.random.normal(mean + mean_shift, std_dev * std_scale, m) + + ### round and clip the normal sample + normal_sample = np.round(normal_sample) + normal_sample[normal_sample < 0] = 0 + normal_sample[normal_sample > n] = n + + return binomial_sample, normal_sample + + +def get_error_of_samples(binomial_sample, normal_sample, m): + diff = ( + np.histogram(binomial_sample, bins=n + 1, range=(-0.5, n + 0.5))[0] + - np.histogram(normal_sample, bins=n + 1, range=(-0.5, n + 0.5))[0] + ) + return np.sum(np.abs(diff)) / (2 * m) + + +def objective_function(mean_shift, std_scale): + # Generate data samples + binomial_sample, normal_sample = generate_samples( + n=N, p=P, m=M, mean_shift=mean_shift, std_scale=std_scale + ) + + # Calculate error + error = get_error_of_samples(binomial_sample, normal_sample, m=M) + return error + + +def evaluate_function(population): + loss_list = [] + ### the population is a list of 
individuals which are lists of parameters + for individual in population: + loss_of_individual = objective_function( + mean_shift=individual[0], std_scale=individual[1] + ) + loss_list.append((loss_of_individual,)) + return loss_list + + +n_arr = np.linspace(10, 1000, 20, dtype=int) p_arr = np.array([0.001, 0.01, 0.1, 0.5, 0.9, 0.99, 0.999]) ### TODO I have the problem that for very small p the normal distribution is not a good ### approximation of the binomial distribution. ### I think one can shift the mean and scale the standard deviation depending on the p ### and n values. I will try to optimize the shift and scale for each n and p value. + +### bounds for optimized parameters shift_mean_bounds = [-1, 1] scale_std_bounds = [0.5, 2] - -for n in n_arr: +lb = np.array([shift_mean_bounds[0], scale_std_bounds[0]]) +ub = np.array([shift_mean_bounds[1], scale_std_bounds[1]]) +# number of samples +M = 10000 +OPTIMIZE = False +if OPTIMIZE: + p_list = [] + n_list = [] + mean_shift_list = [] + std_scale_list = [] + error_list = [] + error_improved_list = [] for p in p_arr: - # set n and p - # then optimize shift of mean - # Parameters - # number of samples - N = 10000 - - # Generate data samples - binomial_sample = np.random.binomial(n, p, N) - mean = n * p - std_dev = np.sqrt(n * p * (1 - p)) - normal_sample = np.random.normal(mean, std_dev, N) - - ### round and clip the normal sample - normal_sample = np.round(normal_sample) - normal_sample[normal_sample < 0] = 0 - normal_sample[normal_sample > n] = n - - print(np.histogram(binomial_sample, bins=n + 1, range=(-0.5, n + 0.5))[0]) - print(np.histogram(normal_sample, bins=n + 1, range=(-0.5, n + 0.5))[0]) - diff = ( - np.histogram(binomial_sample, bins=n + 1, range=(-0.5, n + 0.5))[0] - - np.histogram(normal_sample, bins=n + 1, range=(-0.5, n + 0.5))[0] - ) - error = np.sum(np.abs(diff)) + for n in n_arr: + ### set the global variables probability of success and number of trials + P = p + N = n + + ### get the error 
without optimization + error = objective_function(mean_shift=0, std_scale=1) + error_list.append(error) + + ### create an instance of the DeapCma class + deap_cma = DeapCma( + lower=lb, + upper=ub, + evaluate_function=evaluate_function, + param_names=["mean_shift", "std_scale"], + hard_bounds=True, + ) + + ### run the optimization + deap_cma_result = deap_cma.run(max_evals=1000) + + ### get the optimized parameters and best error + mean_shift = deap_cma_result["mean_shift"] + std_scale = deap_cma_result["std_scale"] + error = deap_cma_result["best_fitness"] + + ### store the results + p_list.append(p) + n_list.append(n) + error_improved_list.append(error) + mean_shift_list.append(mean_shift) + std_scale_list.append(std_scale) + + ### save variables + save_variables( + variable_list=[ + p_list, + n_list, + mean_shift_list, + std_scale_list, + error_list, + error_improved_list, + ], + name_list=[ + "p_list", + "n_list", + "mean_shift_list", + "std_scale_list", + "error_list", + "error_improved_list", + ], + path="data_optimize_binomial_normal", + ) +else: + loaded_variables = load_variables( + name_list=[ + "p_list", + "n_list", + "mean_shift_list", + "std_scale_list", + "error_list", + "error_improved_list", + ], + path="data_optimize_binomial_normal", + ) + p_list = loaded_variables["p_list"] + n_list = loaded_variables["n_list"] + mean_shift_list = loaded_variables["mean_shift_list"] + std_scale_list = loaded_variables["std_scale_list"] + error_list = loaded_variables["error_list"] + error_improved_list = loaded_variables["error_improved_list"] + + +# Plot the error as a function of p and n as a heatmap +plt.figure(figsize=(12, 10)) +### scatter plot with max from error_list and error_improved_list +plt.subplot(2, 2, 1) +plt.scatter( + n_list, + p_list, + c=error_list, + cmap="viridis", + vmin=0, + vmax=np.max([error_list, error_improved_list]), +) +plt.colorbar() +plt.xlabel("n") +plt.ylabel("p") +plt.title("Error original") +plt.subplot(2, 2, 2) +plt.scatter( + 
n_list, + p_list, + c=error_improved_list, + cmap="viridis", + vmin=0, + vmax=np.max([error_list, error_improved_list]), +) +plt.colorbar() +plt.xlabel("n") +plt.ylabel("p") +plt.title("Error optimized") +plt.subplot(2, 2, 3) +plt.scatter(n_list, p_list, c=mean_shift_list, cmap="viridis") +plt.colorbar() +plt.xlabel("n") +plt.ylabel("p") +plt.title("Mean shift") +plt.subplot(2, 2, 4) +plt.scatter(n_list, p_list, c=std_scale_list, cmap="viridis") +plt.colorbar() +plt.xlabel("n") +plt.ylabel("p") +plt.title("Standard deviation scale") +plt.tight_layout() +plt.savefig("error_heatmap.png") quit() diff --git a/src/CompNeuroPy/system_functions.py b/src/CompNeuroPy/system_functions.py index e72cc10..35d6055 100644 --- a/src/CompNeuroPy/system_functions.py +++ b/src/CompNeuroPy/system_functions.py @@ -1,13 +1,16 @@ import os import traceback import shutil -from time import time +from time import time, sleep import pickle from functools import wraps -from joblib import Parallel, delayed import inspect import subprocess import textwrap +import concurrent.futures +import signal +from typing import List +import threading def clear_dir(path): @@ -245,10 +248,79 @@ def run_script_parallel( n_jobs = min(n_jobs, len(args_list)) ### run the script in parallel - Parallel(n_jobs=n_jobs)( - delayed(os.system)(f"python {script_path} {' '.join(args)}") - for args in args_list + runner = _ScriptRunner( + script_path=script_path, num_workers=n_jobs, args_list=args_list ) + runner.run() + + +class _ScriptRunner: + def __init__(self, script_path: str, num_workers: int, args_list: List[List[str]]): + self.script_path = script_path + self.args_list = args_list + self.num_workers = num_workers + self.processes = [] + self.executor = None + self.error_flag = threading.Event() + + def run_script(self, args: List[str]): + """ + Run the script with the given arguments. + + Args: + args (List[str]): + List of arguments to pass to the script. 
+ """ + process = subprocess.Popen( + ["python", self.script_path] + args, + ) + self.processes.append(process) + + process.wait() + # Check if the process returned an error + if process.returncode != 0: + self.error_flag.set() + return -1 + + def signal_handler(self, sig, frame): + """ + Signal handler to terminate all running processes and shutdown the executor. + """ + # need a small sleep here, otherwise a single new process is started, don't know why + sleep(0.01) + # Terminate all running processes + for process in self.processes: + if process.poll() is None: + process.terminate() + # Shutdown the executor + if self.executor: + self.executor.shutdown(wait=False, cancel_futures=True) + # Exit the program + exit(1) + + def run(self): + """ + Run the script with the given arguments in parallel. + """ + # Register the signal handler for SIGINT (Ctrl+C) + signal.signal(signal.SIGINT, self.signal_handler) + # Create a thread pool executor with the specified number of workers + self.executor = concurrent.futures.ThreadPoolExecutor( + max_workers=self.num_workers + ) + # Submit the tasks to the executor + try: + futures = [ + self.executor.submit(self.run_script, args) for args in self.args_list + ] + # Wait for all futures to complete + while any(f.running() for f in futures): + # Check if an error occurred in any of the threads + if self.error_flag.is_set(): + self.signal_handler(None, None) + break + finally: + self.executor.shutdown(wait=True) def _is_git_repo(): From 0e32ca42fcd4eaa219531dda98f66653db82dd5d Mon Sep 17 00:00:00 2001 From: olimaol Date: Tue, 25 Jun 2024 15:38:12 +0200 Subject: [PATCH 05/21] model_configurator: tried to regress mean shift and std scaling in test2 --- .../examples/model_configurator/test2.py | 808 ++++++++++++++++-- src/CompNeuroPy/extra_functions.py | 19 +- 2 files changed, 767 insertions(+), 60 deletions(-) diff --git a/src/CompNeuroPy/examples/model_configurator/test2.py b/src/CompNeuroPy/examples/model_configurator/test2.py 
index a5b73eb..d2d27f6 100644 --- a/src/CompNeuroPy/examples/model_configurator/test2.py +++ b/src/CompNeuroPy/examples/model_configurator/test2.py @@ -2,6 +2,271 @@ import matplotlib.pyplot as plt import scipy.stats as stats from CompNeuroPy import DeapCma, save_variables, load_variables +from scipy.optimize import minimize, Bounds +from scipy.interpolate import griddata +from sklearn.preprocessing import PolynomialFeatures +from sklearn.linear_model import LinearRegression +from scipy.optimize import curve_fit + + +def mean_shift_regression(n, p): + x0 = n + x1 = p + return ( + -0.122 + + -0.000 * 1 + + -0.001 * x0 + + 0.257 * x1 + + 0.000 * x0**2 + + 0.003 * x0 * x1 + + 0.847 * x1**2 + + 0.000 * x0**3 + + -0.000 * x0**2 * x1 + + -0.001 * x0 * x1**2 + + -3.413 * x1**3 + + -0.000 * x0**4 + + 0.000 * x0**3 * x1 + + -0.000 * x0**2 * x1**2 + + 0.001 * x0 * x1**3 + + 2.644 * x1**4 + ) + + +def std_scale_regression(n, p): + x0 = n + x1 = p + return ( + 1.218 + + -0.000 * 1 + + 0.001 * x0 + + -4.365 * x1 + + -0.000 * x0**2 + + -0.001 * x0 * x1 + + 16.631 * x1**2 + + 0.000 * x0**3 + + 0.000 * x0**2 * x1 + + 0.001 * x0 * x1**2 + + -25.130 * x1**3 + + -0.000 * x0**4 + + -0.000 * x0**3 * x1 + + -0.000 * x0**2 * x1**2 + + -0.001 * x0 * x1**3 + + 12.968 * x1**4 + ) + + +def gauss_1d(x, amp, mean, sig): + return amp * np.exp(-((x - mean) ** 2) / (2 * sig**2)) + + +def plot_2d_curve_fit_regression( + x, y, z, sample_weight=None, vmin=None, vmax=None, grid_size=100 +): + """ + Plots a 2D color-coded image of the data with curve_fit regression and prints the regression equation. 
+ + Parameters: + - x: list or array of x coordinates + - y: list or array of y coordinates + - z: list or array of z values corresponding to the (x, y) coordinates + - grid_size: size of the grid for plotting (default: 100) + """ + # Check if sample_weight is provided and does not contain zeros + if sample_weight is not None and 0 in sample_weight: + raise ValueError("Sample weight cannot contain zeros.") + + # Normalize x and y and keep the transformation for later + x_mean = np.mean(x) + x_std = np.std(x) + y_mean = np.mean(y) + y_std = np.std(y) + x = (x - x_mean) / x_std + y = (y - y_mean) / y_std + + # Fit the curve_fit regression model + def curve_fit_func(X, p0, p1, p2, p3, p4, p5, p6, p7): + x, y = X + return ( + p0 * np.exp(p1 + p2 * x + p3 * y + p4 * x * y + p5 * x**2 + p6 * y**2) + p7 + ) + + def curve_fit_evaluate_function(population): + loss_list = [] + ### the population is a list of individuals which are lists of parameters + for individual in population: + loss_of_individual = curve_fit_objective_function(individual) + loss_list.append((loss_of_individual,)) + return loss_list + + def curve_fit_objective_function(individual): + is_data = curve_fit_func((x, y), *individual) + target_data = z + return np.sum((is_data - target_data) ** 2) + + # popt, pcov = curve_fit( + # curve_fit_func, + # (x, y), + # z, + # p0=[-0.3, 0, -1, -1, -1], + # sigma=1 / sample_weight if sample_weight is not None else None, + # absolute_sigma=False, + # ) + param_names = ["p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7"] + deap_cma = DeapCma( + lower=np.array([-10] * len(param_names)), + upper=np.array([10] * len(param_names)), + evaluate_function=curve_fit_evaluate_function, + param_names=param_names, + hard_bounds=False, + display_progress_bar=False, + ) + + ### run the optimization + best_fitness = 1e9 + for _ in range(100): + deap_cma_result = deap_cma.run(max_evals=1000) + if deap_cma_result["best_fitness"] < best_fitness: + best_fitness = 
deap_cma_result["best_fitness"] + popt = [deap_cma_result[param_name] for param_name in param_names] + + # Create grid for plotting + xi = np.linspace(min(x), max(x), grid_size) + yi = np.linspace(min(y), max(y), grid_size) + xi, yi = np.meshgrid(xi, yi) + zi = curve_fit_func((xi, yi), *popt) + + # Unnormalize the grid + xi = xi * x_std + x_mean + yi = yi * y_std + y_mean + + # Unnormalize the original data + x = x * x_std + x_mean + y = y * y_std + y_mean + + # Plot the regression surface + if vmin is None: + vmin = np.min(z) + if vmax is None: + vmax = np.max(z) + plt.contourf(xi, yi, zi, levels=100, cmap="viridis") + plt.scatter( + x, + y, + c=z, + cmap="viridis", + vmin=vmin, + vmax=vmax, + edgecolor="k", + marker="o", + s=( + 40 * np.array(sample_weight) / np.max(sample_weight) + if sample_weight + else None + ), + ) + + +def plot_2d_regression_image( + x, y, z, sample_weight=None, vmin=None, vmax=None, degree=2, grid_size=100 +): + """ + Plots a 2D color-coded image of the data with polynomial regression and plots the regression equation. 
+ + Parameters: + - x: list or array of x coordinates + - y: list or array of y coordinates + - z: list or array of z values corresponding to the (x, y) coordinates + - degree: degree of the polynomial regression (default: 2) + - grid_size: size of the grid for plotting (default: 100) + """ + # Prepare the data for polynomial regression + X = np.array([x, y]).T + poly = PolynomialFeatures(degree) + X_poly = poly.fit_transform(X) + + # Perform the polynomial regression + model = LinearRegression() + model.fit( + X_poly, + z, + sample_weight=sample_weight if sample_weight is not None else None, + ) + + # Create a grid for plotting the regression surface + xi = np.linspace(min(x), max(x), grid_size) + yi = np.linspace(min(y), max(y), grid_size) + xi, yi = np.meshgrid(xi, yi) + X_grid = np.c_[xi.ravel(), yi.ravel()] + X_grid_poly = poly.transform(X_grid) + zi = model.predict(X_grid_poly).reshape(xi.shape) + + # Plot the regression surface + if vmin is None: + vmin = np.min(z) + if vmax is None: + vmax = np.max(z) + plt.contourf(xi, yi, zi, levels=100, cmap="viridis", vmin=vmin, vmax=vmax) + + # Plot the original data points, scaled by the sample weight + plt.scatter( + x, + y, + c=z, + cmap="viridis", + vmin=vmin, + vmax=vmax, + edgecolor="k", + marker="o", + s=( + 40 * np.array(sample_weight) / np.max(sample_weight) + if sample_weight + else None + ), + ) + + # Print the regression equation + coefs = model.coef_ + intercept = model.intercept_ + terms = poly.get_feature_names_out() + equation = " + ".join([f"{coefs[i]:.3f}*{terms[i]}" for i in range(len(coefs))]) + equation = f"{intercept:.3f} + " + equation + print(f"Regression equation:\n{equation}") + + +def plot_2d_interpolated_image( + x, y, z, vmin=None, vmax=None, grid_size=100, method="linear" +): + """ + Plots a 2D color-coded image of the data with interpolation and extrapolation. 
+ + Parameters: + - x: list or array of x coordinates + - y: list or array of y coordinates + - z: list or array of z values corresponding to the (x, y) coordinates + - grid_size: size of the interpolation grid (default: 100) + - method: interpolation method, options are 'linear', 'nearest', 'cubic' (default: 'linear') + """ + # Define the grid for interpolation + xi = np.linspace(min(x), max(x), grid_size) + yi = np.linspace(min(y), max(y), grid_size) + xi, yi = np.meshgrid(xi, yi) + + # Perform the interpolation + zi = griddata((x, y), z, (xi, yi), method=method) + + print(f"max interpolated: {np.max(zi)}") + + # Plot the interpolated data + if vmin is None: + vmin = np.min(z) + if vmax is None: + vmax = np.max(z) + plt.contourf(xi, yi, zi, levels=100, cmap="viridis", vmin=vmin, vmax=vmax) + + # plot scatter plot of original data + plt.scatter( + x, y, c=z, cmap="viridis", vmin=vmin, vmax=vmax, edgecolor="k", marker="o" + ) def generate_samples(n, p, m, mean_shift=0, std_scale=1): @@ -38,6 +303,11 @@ def objective_function(mean_shift, std_scale): return error +def objective_function_for_minimize(x): + # print(f"P: {P}, N: {N}, mean_shift: {x[0]}, std_scale: {x[1]}") + return objective_function(mean_shift=x[0], std_scale=x[1]) + + def evaluate_function(population): loss_list = [] ### the population is a list of individuals which are lists of parameters @@ -49,14 +319,46 @@ def evaluate_function(population): return loss_list -n_arr = np.linspace(10, 1000, 20, dtype=int) -p_arr = np.array([0.001, 0.01, 0.1, 0.5, 0.9, 0.99, 0.999]) +def logarithmic_distribution(start, end, num_points): + """ + Generate a list of logarithmically spaced points between a start and end point. + + Parameters: + start (float): The starting point of the distribution. + end (float): The ending point of the distribution. + num_points (int): The number of points to generate. + + Returns: + list: A list of logarithmically spaced points. 
+ """ + if start <= 0 or end <= 0: + raise ValueError("Start and end points must be positive numbers.") + if num_points < 2: + raise ValueError("Number of points must be at least 2.") + + # Create an array of logarithmically spaced points + log_start = np.log10(start) + log_end = np.log10(end) + log_points = np.linspace(log_start, log_end, num_points) + points = np.power(10, log_points) + + return points + ### TODO I have the problem that for very small p the normal distribution is not a good ### approximation of the binomial distribution. ### I think one can shift the mean and scale the standard deviation depending on the p ### and n values. I will try to optimize the shift and scale for each n and p value. +OPTIMIZE = False +USE_REGRESSION = False +PLOT_OPTIMIZED = True +PLOT_REGRESSION = False + +### 1st optimize mean shift and std scale for each n and p value +n_arr = logarithmic_distribution(10, 1000, 20).astype(int) +p_arr = logarithmic_distribution(0.001, 0.1, 10) + ### bounds for optimized parameters shift_mean_bounds = [-1, 1] scale_std_bounds = [0.5, 2] @@ -64,125 +366,519 @@ def evaluate_function(population): ub = np.array([shift_mean_bounds[1], scale_std_bounds[1]]) # number of samples M = 10000 -OPTIMIZE = False + +### 1st get errors for all n and p values without optimization +p_list = [] +n_list = [] +error_list = [] +for p in p_arr: + for n in n_arr: + ### set the global variables probability of success and number of trials + P = p + N = n + + ### get the error without optimization + error = objective_function(mean_shift=0, std_scale=1) + error_list.append(error) + + ### store the results + p_list.append(p) + n_list.append(n) + +### save variables +save_variables( + variable_list=[ + p_list, + n_list, + error_list, + ], + name_list=[ + "p_list", + "n_list", + "error_list", + ], + path="data_optimize_binomial_normal", +) + +### 2nd optimize mean shift and std scale for each n and p value and get improved error if OPTIMIZE: - p_list = [] - n_list = [] 
+ loaded_variables = load_variables( + name_list=[ + "p_list", + "n_list", + "error_list", + ], + path="data_optimize_binomial_normal", + ) + p_list = loaded_variables["p_list"] + n_list = loaded_variables["n_list"] + error_list = loaded_variables["error_list"] mean_shift_list = [] std_scale_list = [] - error_list = [] error_improved_list = [] - for p in p_arr: - for n in n_arr: - ### set the global variables probability of success and number of trials - P = p - N = n - - ### get the error without optimization - error = objective_function(mean_shift=0, std_scale=1) - error_list.append(error) - - ### create an instance of the DeapCma class - deap_cma = DeapCma( - lower=lb, - upper=ub, - evaluate_function=evaluate_function, - param_names=["mean_shift", "std_scale"], - hard_bounds=True, - ) + for p, n in zip(p_list, n_list): + ### set the global variables probability of success and number of trials + P = p + N = n + + # ### optimize the mean shift and standard deviation scale using scipy minimize + # result = minimize( + # objective_function_for_minimize, + # x0=[0, 1], + # bounds=Bounds(lb, ub), + # method="Nelder-Mead", + # ) + # mean_shift = result.x[0] + # std_scale = result.x[1] + # error_improved = result.fun + + ### create an instance of the DeapCma class + deap_cma = DeapCma( + lower=lb, + upper=ub, + evaluate_function=evaluate_function, + param_names=["mean_shift", "std_scale"], + hard_bounds=True, + ) - ### run the optimization - deap_cma_result = deap_cma.run(max_evals=1000) + ### run the optimization + deap_cma_result = deap_cma.run(max_evals=1000) - ### get the optimized parameters and best error - mean_shift = deap_cma_result["mean_shift"] - std_scale = deap_cma_result["std_scale"] - error = deap_cma_result["best_fitness"] + ### get the optimized parameters and best error + mean_shift = deap_cma_result["mean_shift"] + std_scale = deap_cma_result["std_scale"] + error_improved = deap_cma_result["best_fitness"] - ### store the results - p_list.append(p) - 
n_list.append(n) - error_improved_list.append(error) - mean_shift_list.append(mean_shift) - std_scale_list.append(std_scale) + ### store the results + error_improved_list.append(error_improved) + mean_shift_list.append(mean_shift) + std_scale_list.append(std_scale) ### save variables save_variables( variable_list=[ - p_list, - n_list, mean_shift_list, std_scale_list, - error_list, error_improved_list, ], name_list=[ - "p_list", - "n_list", "mean_shift_list", "std_scale_list", - "error_list", "error_improved_list", ], path="data_optimize_binomial_normal", ) -else: + + +### 3rd use regression for mean shift and std scale and recalculate the improved error +if USE_REGRESSION: + ### load the optimized parameters and corresponding original and optimized errors loaded_variables = load_variables( name_list=[ "p_list", "n_list", - "mean_shift_list", - "std_scale_list", - "error_list", - "error_improved_list", ], path="data_optimize_binomial_normal", ) p_list = loaded_variables["p_list"] n_list = loaded_variables["n_list"] + + ### use regression equations to recalculate mean shift and std scale + mean_shift_reg_list = [] + std_scale_reg_list = [] + error_improved_reg_list = [] + for p, n in zip(p_list, n_list): + ### set the global variables probability of success and number of trials + P = p + N = n + + ### get the optimized parameters and best error + mean_shift = mean_shift_regression(n, p) + std_scale = std_scale_regression(n, p) + error_improved = objective_function(mean_shift=mean_shift, std_scale=std_scale) + + ### store the results + error_improved_reg_list.append(error_improved) + mean_shift_reg_list.append(mean_shift) + std_scale_reg_list.append(std_scale) + + ### save variables + save_variables( + variable_list=[ + mean_shift_reg_list, + std_scale_reg_list, + error_improved_reg_list, + ], + name_list=[ + "mean_shift_reg_list", + "std_scale_reg_list", + "error_improved_reg_list", + ], + path="data_optimize_binomial_normal", + ) + +### 4th plot the original error 
+# original error -> interpolation plot +loaded_variables = load_variables( + name_list=[ + "p_list", + "n_list", + "error_list", + ], + path="data_optimize_binomial_normal", +) +p_list = loaded_variables["p_list"] +n_list = loaded_variables["n_list"] +error_list = loaded_variables["error_list"] +plt.figure(figsize=(6.4 * 2, 4.8 * 2)) +plt.subplot(1, 1, 1) +plot_2d_interpolated_image( + x=n_list, y=p_list, z=error_list, vmin=0, vmax=np.max(error_list) +) +plt.colorbar() +plt.xlabel("n") +plt.ylabel("p") +plt.title(f"Error original\n(max: {np.max(error_list)})") +plt.tight_layout() +plt.savefig("test2_01_error_original.png", dpi=300) + +### 5th plot the optimized error with optimized mean shift and std scale +if PLOT_OPTIMIZED: + # fitting improved error -> interpolation plot + # fitting improvement -> interpolation plot + # fitting mean shift -> regression plot + # fitting std scale -> regression plot + loaded_variables = load_variables( + name_list=[ + "error_improved_list", + "mean_shift_list", + "std_scale_list", + ], + path="data_optimize_binomial_normal", + ) + error_improved_list = loaded_variables["error_improved_list"] mean_shift_list = loaded_variables["mean_shift_list"] std_scale_list = loaded_variables["std_scale_list"] - error_list = loaded_variables["error_list"] - error_improved_list = loaded_variables["error_improved_list"] + error_change_arr = np.array(error_improved_list) - np.array(error_list) + improvement_arr = -np.clip(error_change_arr, None, 0) + improvement_arr_norm = improvement_arr / np.max(improvement_arr) + + ### scale the mean shift and std scale by the error improvement + mean_shift_list = np.array(mean_shift_list) * improvement_arr_norm + std_scale_list = ( + np.array(std_scale_list) * improvement_arr_norm + (1 - improvement_arr_norm) * 1 + ) + plt.figure(figsize=(6.4 * 2, 4.8 * 2 * 4)) + plt.subplot(4, 1, 1) + plot_2d_interpolated_image( + x=n_list, + y=p_list, + z=error_improved_list, + vmin=0, + vmax=np.max(error_improved_list), + ) 
+ plt.colorbar() + plt.xlabel("n") + plt.ylabel("p") + plt.title(f"Error optimized\n(max: {np.max(error_improved_list)})") + plt.subplot(4, 1, 2) + plot_2d_interpolated_image( + x=n_list, + y=p_list, + z=error_change_arr, + vmin=-np.max(np.abs(error_change_arr)), + vmax=np.max(np.abs(error_change_arr)), + ) + plt.colorbar() + plt.xlabel("n") + plt.ylabel("p") + plt.title("Error improvement") + plt.subplot(4, 1, 3) + plot_2d_curve_fit_regression( + x=n_list, + y=p_list, + z=mean_shift_list, + vmin=-np.max(np.abs(mean_shift_list)), + vmax=np.max(np.abs(mean_shift_list)), + # sample_weight=-np.clip(error_change_arr, None, 0) + # + 0.01 * np.max(improvement_arr), + # degree=5, + ) + plt.colorbar() + plt.xlabel("n") + plt.ylabel("p") + plt.title("Mean shift") + plt.subplot(4, 1, 4) + plot_2d_curve_fit_regression( + x=n_list, + y=p_list, + z=std_scale_list, + vmin=1 - np.max(1 - np.array(std_scale_list)), + vmax=1 + np.max(np.array(std_scale_list) - 1), + # sample_weight=-np.clip(error_change_arr, None, 0) + # + 0.01 * np.max(improvement_arr), + # degree=5, + ) + plt.colorbar() + plt.xlabel("n") + plt.ylabel("p") + plt.title("Standard deviation scale") + plt.tight_layout() + plt.savefig("test2_02_error_optimized.png", dpi=300) + +### 6th plot the regression error with regressed mean shift and std scale and compare it +### with the optimized error +if PLOT_REGRESSION: + # regression improved error -> interpolation plot + # regression improvement -> interpolation plot + # regression mean shift -> regression plot + # regression std scale -> regression plot + loaded_variables = load_variables( + name_list=[ + "error_improved_list", + "mean_shift_list", + "std_scale_list", + "error_improved_reg_list", + "mean_shift_reg_list", + "std_scale_reg_list", + ], + path="data_optimize_binomial_normal", + ) + error_improved_list = loaded_variables["error_improved_list"] + mean_shift_list = loaded_variables["mean_shift_list"] + std_scale_list = loaded_variables["std_scale_list"] + 
error_improved_reg_list = loaded_variables["error_improved_reg_list"] + mean_shift_reg_list = loaded_variables["mean_shift_reg_list"] + std_scale_reg_list = loaded_variables["std_scale_reg_list"] + + plt.figure(figsize=(6.4 * 2, 4.8 * 2 * 4)) + plt.subplot(4, 1, 1) + plot_2d_interpolated_image( + x=n_list, + y=p_list, + z=error_improved_reg_list, + vmin=0, + vmax=np.max(error_improved_reg_list), + ) + plt.colorbar() + plt.xlabel("n") + plt.ylabel("p") + plt.title(f"Error optimized\n(max: {np.max(error_improved_reg_list)})") + plt.subplot(4, 1, 2) + plot_2d_interpolated_image( + x=n_list, + y=p_list, + z=np.array(error_improved_reg_list) - np.array(error_list), + vmin=-np.max(np.abs(np.array(error_improved_reg_list) - np.array(error_list))), + vmax=np.max(np.abs(np.array(error_improved_reg_list) - np.array(error_list))), + ) + plt.colorbar() + plt.xlabel("n") + plt.ylabel("p") + plt.title("Error improvement") + plt.subplot(4, 1, 3) + plot_2d_regression_image( + x=n_list, + y=p_list, + z=mean_shift_reg_list, + sample_weight=error_list, + vmin=-np.max(np.abs(mean_shift_reg_list)), + vmax=np.max(np.abs(mean_shift_reg_list)), + degree=4, + ) + plt.colorbar() + plt.xlabel("n") + plt.ylabel("p") + plt.title("Mean shift") + plt.subplot(4, 1, 4) + plot_2d_regression_image( + x=n_list, + y=p_list, + z=std_scale_reg_list, + sample_weight=error_list, + vmin=1 - np.max(1 - np.array(std_scale_reg_list)), + vmax=1 + np.max(np.array(std_scale_reg_list) - 1), + degree=4, + ) + plt.colorbar() + plt.xlabel("n") + plt.ylabel("p") + plt.title("Standard deviation scale") + plt.tight_layout() + plt.savefig("test2_03_error_regression.png", dpi=300) + + # difference fitting/regression improved error -> interpolation plot + # difference fitting/regression improvement -> interpolation plot + # difference fitting/regression mean shift -> interpolation plot + # difference fitting/regression std scale -> interpolation plot + plt.figure(figsize=(6.4 * 2, 4.8 * 2 * 4)) + plt.subplot(4, 1, 1) + 
plot_2d_interpolated_image( + x=n_list, + y=p_list, + z=np.array(error_improved_list) - np.array(error_improved_reg_list), + vmin=-np.max( + np.abs(np.array(error_improved_list) - np.array(error_improved_reg_list)) + ), + vmax=np.max( + np.abs(np.array(error_improved_list) - np.array(error_improved_reg_list)) + ), + ) + plt.colorbar() + plt.xlabel("n") + plt.ylabel("p") + plt.title("Error difference between optimized and regression") + plt.subplot(4, 1, 2) + plot_2d_interpolated_image( + x=n_list, + y=p_list, + z=np.array(error_improved_list) + - np.array(error_list) + - np.array(error_improved_reg_list) + + np.array(error_list), + vmin=-np.max( + np.abs( + np.array(error_improved_list) + - np.array(error_list) + - np.array(error_improved_reg_list) + + np.array(error_list) + ) + ), + vmax=np.max( + np.abs( + np.array(error_improved_list) + - np.array(error_list) + - np.array(error_improved_reg_list) + + np.array(error_list) + ) + ), + ) + plt.colorbar() + plt.xlabel("n") + plt.ylabel("p") + plt.title("Error improvement difference between optimized and regression") + plt.subplot(4, 1, 3) + plot_2d_interpolated_image( + x=n_list, + y=p_list, + z=np.array(mean_shift_list) - np.array(mean_shift_reg_list), + vmin=-np.max(np.abs(np.array(mean_shift_list) - np.array(mean_shift_reg_list))), + vmax=np.max(np.abs(np.array(mean_shift_list) - np.array(mean_shift_reg_list))), + ) + plt.colorbar() + plt.xlabel("n") + plt.ylabel("p") + plt.title("Mean shift difference between optimized and regression") + plt.subplot(4, 1, 4) + plot_2d_interpolated_image( + x=n_list, + y=p_list, + z=np.array(std_scale_list) - np.array(std_scale_reg_list), + vmin=-np.max(np.abs(np.array(std_scale_list) - np.array(std_scale_reg_list))), + vmax=np.max(np.abs(np.array(std_scale_list) - np.array(std_scale_reg_list))), + ) + plt.colorbar() + plt.xlabel("n") + plt.ylabel("p") + plt.title("Standard deviation scale difference between optimized and regression") + plt.tight_layout() + 
plt.savefig("test2_04_error_difference.png", dpi=300) +quit() # Plot the error as a function of p and n as a heatmap -plt.figure(figsize=(12, 10)) +plt.figure(figsize=(20, 15)) ### scatter plot with max from error_list and error_improved_list plt.subplot(2, 2, 1) + +plot_2d_interpolated_image( + x=n_list, + y=p_list, + z=error_list, + vmin=0, + vmax=np.max(error_list + error_improved_list), +) +print(np.max(error_list + error_improved_list)) plt.scatter( n_list, p_list, c=error_list, cmap="viridis", vmin=0, - vmax=np.max([error_list, error_improved_list]), + vmax=np.max(error_list + error_improved_list), + edgecolor="k", + marker="o", ) plt.colorbar() plt.xlabel("n") plt.ylabel("p") -plt.title("Error original") +plt.title(f"Error original\n(max: {np.max(error_list)})") plt.subplot(2, 2, 2) +plot_2d_interpolated_image( + x=n_list, + y=p_list, + z=error_improved_list, + vmin=0, + vmax=np.max(error_list + error_improved_list), +) plt.scatter( n_list, p_list, c=error_improved_list, cmap="viridis", vmin=0, - vmax=np.max([error_list, error_improved_list]), + vmax=np.max(error_list + error_improved_list), + edgecolor="k", + marker="o", ) plt.colorbar() plt.xlabel("n") plt.ylabel("p") -plt.title("Error optimized") +plt.title(f"Error optimized\n(max: {np.max(error_improved_list)})") plt.subplot(2, 2, 3) -plt.scatter(n_list, p_list, c=mean_shift_list, cmap="viridis") +plot_2d_regression_image( + x=n_list, + y=p_list, + z=mean_shift_list, + sample_weight=error_list, + vmin=-np.max(np.abs(mean_shift_list)), + vmax=np.max(np.abs(mean_shift_list)), + degree=4, +) +plt.scatter( + n_list, + p_list, + c=mean_shift_list, + cmap="viridis", + vmin=-np.max(np.abs(mean_shift_list)), + vmax=np.max(np.abs(mean_shift_list)), + edgecolor="k", + marker="o", +) plt.colorbar() plt.xlabel("n") plt.ylabel("p") plt.title("Mean shift") plt.subplot(2, 2, 4) -plt.scatter(n_list, p_list, c=std_scale_list, cmap="viridis") +plot_2d_regression_image( + x=n_list, + y=p_list, + z=std_scale_list, + 
sample_weight=error_list, + vmin=1 - np.max(1 - np.array(std_scale_list)), + vmax=1 + np.max(np.array(std_scale_list) - 1), + degree=4, +) +plt.scatter( + n_list, + p_list, + c=std_scale_list, + cmap="viridis", + vmin=1 - np.max(1 - np.array(std_scale_list)), + vmax=1 + np.max(np.array(std_scale_list) - 1), + edgecolor="k", + marker="o", +) plt.colorbar() plt.xlabel("n") plt.ylabel("p") diff --git a/src/CompNeuroPy/extra_functions.py b/src/CompNeuroPy/extra_functions.py index 269b8b4..ae455d2 100644 --- a/src/CompNeuroPy/extra_functions.py +++ b/src/CompNeuroPy/extra_functions.py @@ -727,10 +727,11 @@ def __init__( learn_rate_factor: float = 1, damping_factor: float = 1, verbose: bool = False, - plot_file: None | str = "logbook.png", + plot_file: None | str = None, cma_params_dict: dict = {}, source_solutions: list[tuple[np.ndarray, float]] = [], hard_bounds: bool = False, + display_progress_bar: bool = True, ): """ @@ -763,7 +764,7 @@ def __init__( Whether or not to print details. By default False. plot_file (None | str, optional): File to save the deap plot to. If not given here, it has to be given in - the run function. By default "logbook.png". + the run function. By default None. cma_params_dict (dict, optional): Parameters for the deap cma strategy (deap.cma.Strategy). See [here](https://deap.readthedocs.io/en/master/api/algo.html#deap.cma.Strategy) for more details @@ -776,6 +777,8 @@ def __init__( hard_bounds (bool, optional): Whether or not to use hard bounds (parmeters are clipped to lower and upper bounds). By default False. + display_progress_bar (bool, optional): + Whether or not to display a progress bar. By default True. 
""" ### store attributes self.max_evals = max_evals @@ -792,6 +795,7 @@ def __init__( self.cma_params_dict = cma_params_dict self.source_solutions = source_solutions self.hard_bounds = hard_bounds + self.display_progress_bar = display_progress_bar ### prepare the optimization self.deap_dict = self._prepare() @@ -994,6 +998,10 @@ def run( best["deap_pop"] = pop best["best_fitness"] = best_fitness + ### skip plotting if plot_file is None + if plot_file is None: + return best + ### plot logbook with logaritmic y-axis fig, ax = plt.subplots(figsize=(10, 5)) ax.set_yscale("log") @@ -1006,6 +1014,7 @@ def run( fig.tight_layout() sf.create_dir("/".join(plot_file.split("/")[:-1])) fig.savefig(plot_file, dpi=300) + plt.close(fig) return best @@ -1059,8 +1068,10 @@ def _deap_ea_generate_update( ### define progress bar if verbose: progress_bar = range(ngen) - else: + elif self.display_progress_bar: progress_bar = tqdm(range(ngen), total=ngen, unit="gen") + else: + progress_bar = range(ngen) early_stop = False ### loop over generations @@ -1115,7 +1126,7 @@ def _deap_ea_generate_update( print("") ### update progress bar with current best loss - if not verbose: + if not verbose and self.display_progress_bar: progress_bar.set_postfix_str( f"best loss: {halloffame[0].fitness.values[0]:.5f}" ) From 5291e209398c7139163d4b906067bfaa95615b4d Mon Sep 17 00:00:00 2001 From: olmai Date: Wed, 26 Jun 2024 12:34:58 +0200 Subject: [PATCH 06/21] model_configurator: continue regress mean shift std scale --- .../examples/model_configurator/test2.py | 258 +++++++++++++----- 1 file changed, 194 insertions(+), 64 deletions(-) diff --git a/src/CompNeuroPy/examples/model_configurator/test2.py b/src/CompNeuroPy/examples/model_configurator/test2.py index d2d27f6..e889940 100644 --- a/src/CompNeuroPy/examples/model_configurator/test2.py +++ b/src/CompNeuroPy/examples/model_configurator/test2.py @@ -8,6 +8,10 @@ from sklearn.linear_model import LinearRegression from scipy.optimize import curve_fit 
+from sklearn.pipeline import make_pipeline +from sklearn.metrics import mean_squared_error +from tqdm import tqdm + def mean_shift_regression(n, p): x0 = n @@ -75,19 +79,59 @@ def plot_2d_curve_fit_regression( if sample_weight is not None and 0 in sample_weight: raise ValueError("Sample weight cannot contain zeros.") - # Normalize x and y and keep the transformation for later - x_mean = np.mean(x) - x_std = np.std(x) - y_mean = np.mean(y) - y_std = np.std(y) - x = (x - x_mean) / x_std - y = (y - y_mean) / y_std + # Normalize x, y, and z and keep the transformation for later + x_max = np.max(x) + x_min = np.min(x) + y_max = np.max(y) + y_min = np.min(y) + z_max = np.max(z) + z_min = np.min(z) + x = (x - x_min) / (x_max - x_min) + y = (y - y_min) / (y_max - y_min) + z = (z - z_min) / (z_max - z_min) # Fit the curve_fit regression model - def curve_fit_func(X, p0, p1, p2, p3, p4, p5, p6, p7): - x, y = X - return ( - p0 * np.exp(p1 + p2 * x + p3 * y + p4 * x * y + p5 * x**2 + p6 * y**2) + p7 + def curve_fit_func( + X, + p0, + p1, + p2, + p3, + p4, + p5, + # p6, + # p7, + # p8, + # p9, + # p10, + # p11, + # p12, + # p13, + # p14, + ): + x0, x1 = X + ### 2D polynomial with certain degree + return np.clip( + ( + p0 + + p1 * x0 + + p2 * x1 + + p3 * x0**2 + + p4 * x0 * x1 + + p5 * x1**2 + # + p6 * x0**3 + # + p7 * x0**2 * x1 + # + p8 * x0 * x1**2 + # + p9 * x1**3 + # + p10 * x0**4 + # + p11 * x0**3 * x1 + # + p12 * x0**2 * x1**2 + # + p13 * x0 * x1**3 + # + p14 * x1**4 + ) + ** 3, + np.min(z), + np.max(z), ) def curve_fit_evaluate_function(population): @@ -103,31 +147,79 @@ def curve_fit_objective_function(individual): target_data = z return np.sum((is_data - target_data) ** 2) + # ### do opt with scipy curve_fit # popt, pcov = curve_fit( # curve_fit_func, # (x, y), # z, - # p0=[-0.3, 0, -1, -1, -1], # sigma=1 / sample_weight if sample_weight is not None else None, # absolute_sigma=False, # ) - param_names = ["p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7"] - deap_cma = 
DeapCma( - lower=np.array([-10] * len(param_names)), - upper=np.array([10] * len(param_names)), - evaluate_function=curve_fit_evaluate_function, - param_names=param_names, - hard_bounds=False, - display_progress_bar=False, - ) - ### run the optimization + ### do opt with deap cma + param_names = [ + "p0", + "p1", + "p2", + "p3", + "p4", + "p5", + # "p6", + # "p7", + # "p8", + # "p9", + # "p10", + # "p11", + # "p12", + # "p13", + # "p14", + ] + ### run the optimization, get popt best_fitness = 1e9 - for _ in range(100): - deap_cma_result = deap_cma.run(max_evals=1000) + ### create progress bar showing the best fitness + progress_bar = tqdm(range(100), total=100) + for _ in progress_bar: + deap_cma = DeapCma( + lower=np.array([-1] * len(param_names)), + upper=np.array([1] * len(param_names)), + evaluate_function=curve_fit_evaluate_function, + param_names=param_names, + hard_bounds=False, + display_progress_bar=False, + ) + deap_cma_result = deap_cma.run(max_evals=2000) if deap_cma_result["best_fitness"] < best_fitness: best_fitness = deap_cma_result["best_fitness"] popt = [deap_cma_result[param_name] for param_name in param_names] + progress_bar.set_description(f"Best Fitness: {best_fitness}") + + # ### solve polynomial using linalg + # x0 = x + # x1 = y + # n_samples = len(x0) + # X = np.column_stack( + # [ + # np.ones(n_samples), + # x0, + # x1, + # x0**2, + # x0 * x1, + # x1**2, + # x0**3, + # x0**2 * x1, + # x0 * x1**2, + # x1**3, + # x0**4, + # x0**3 * x1, + # x0**2 * x1**2, + # x0 * x1**3, + # x1**4, + # ] + # ) + + # # Solve the least squares problem + # coefficients, residuals, rank, s = np.linalg.lstsq(X, z, rcond=None) + # popt = coefficients # Create grid for plotting xi = np.linspace(min(x), max(x), grid_size) @@ -135,20 +227,20 @@ def curve_fit_objective_function(individual): xi, yi = np.meshgrid(xi, yi) zi = curve_fit_func((xi, yi), *popt) - # Unnormalize the grid - xi = xi * x_std + x_mean - yi = yi * y_std + y_mean - - # Unnormalize the original data 
- x = x * x_std + x_mean - y = y * y_std + y_mean + # Unnormalize the data + xi = xi * (x_max - x_min) + x_min + yi = yi * (y_max - y_min) + y_min + zi = zi * (z_max - z_min) + z_min + x = x * (x_max - x_min) + x_min + y = y * (y_max - y_min) + y_min + z = z * (z_max - z_min) + z_min # Plot the regression surface if vmin is None: vmin = np.min(z) if vmax is None: vmax = np.max(z) - plt.contourf(xi, yi, zi, levels=100, cmap="viridis") + plt.contourf(xi, yi, zi, levels=100, cmap="viridis", vmin=vmin, vmax=vmax) plt.scatter( x, y, @@ -179,26 +271,45 @@ def plot_2d_regression_image( - degree: degree of the polynomial regression (default: 2) - grid_size: size of the grid for plotting (default: 100) """ + # Normalize x and y and keep the transformation for later + x_mean = np.mean(x) + x_std = np.std(x) + x_max = np.max(x) + x_min = np.min(x) + y_mean = np.mean(y) + y_std = np.std(y) + y_max = np.max(y) + y_min = np.min(y) + x = (x - x_min) / (x_max - x_min) + y = (y - y_min) / (y_max - y_min) + # Prepare the data for polynomial regression - X = np.array([x, y]).T - poly = PolynomialFeatures(degree) - X_poly = poly.fit_transform(X) - - # Perform the polynomial regression - model = LinearRegression() - model.fit( - X_poly, + X = np.column_stack((x, y)) + # Create a polynomial regression pipeline + polynomial_model = make_pipeline(PolynomialFeatures(degree), LinearRegression()) + + # Fit the model + polynomial_model.fit( + X, z, - sample_weight=sample_weight if sample_weight is not None else None, + linearregression__sample_weight=( + sample_weight if sample_weight is not None else None + ), ) - # Create a grid for plotting the regression surface + # Predict new values for the surface plot xi = np.linspace(min(x), max(x), grid_size) yi = np.linspace(min(y), max(y), grid_size) xi, yi = np.meshgrid(xi, yi) - X_grid = np.c_[xi.ravel(), yi.ravel()] - X_grid_poly = poly.transform(X_grid) - zi = model.predict(X_grid_poly).reshape(xi.shape) + Xi = np.column_stack((xi.ravel(), 
yi.ravel())) + zi = polynomial_model.predict(Xi) + zi = zi.reshape(xi.shape) + + # Unnormalize the x,y values + xi = xi * (x_max - x_min) + x_min + yi = yi * (y_max - y_min) + y_min + x = x * (x_max - x_min) + x_min + y = y * (y_max - y_min) + y_min # Plot the regression surface if vmin is None: @@ -225,12 +336,7 @@ def plot_2d_regression_image( ) # Print the regression equation - coefs = model.coef_ - intercept = model.intercept_ - terms = poly.get_feature_names_out() - equation = " + ".join([f"{coefs[i]:.3f}*{terms[i]}" for i in range(len(coefs))]) - equation = f"{intercept:.3f} + " + equation - print(f"Regression equation:\n{equation}") + # TODO def plot_2d_interpolated_image( @@ -564,13 +670,13 @@ def logarithmic_distribution(start, end, num_points): improvement_arr_norm = improvement_arr / np.max(improvement_arr) ### scale the mean shift and std scale by the error improvement - mean_shift_list = np.array(mean_shift_list) * improvement_arr_norm - std_scale_list = ( - np.array(std_scale_list) * improvement_arr_norm + (1 - improvement_arr_norm) * 1 - ) + ### --> only keep the transformations which improve the error + alpha = improvement_arr_norm + mean_shift_list = alpha * np.array(mean_shift_list) + (1 - alpha) * 0 + std_scale_list = alpha * np.array(std_scale_list) + (1 - alpha) * 1 - plt.figure(figsize=(6.4 * 2, 4.8 * 2 * 4)) - plt.subplot(4, 1, 1) + plt.figure(figsize=(6.4 * 2 * 2, 4.8 * 2 * 4)) + plt.subplot(4, 2, 1) plot_2d_interpolated_image( x=n_list, y=p_list, @@ -582,7 +688,7 @@ def logarithmic_distribution(start, end, num_points): plt.xlabel("n") plt.ylabel("p") plt.title(f"Error optimized\n(max: {np.max(error_improved_list)})") - plt.subplot(4, 1, 2) + plt.subplot(4, 2, 3) plot_2d_interpolated_image( x=n_list, y=p_list, @@ -594,7 +700,31 @@ def logarithmic_distribution(start, end, num_points): plt.xlabel("n") plt.ylabel("p") plt.title("Error improvement") - plt.subplot(4, 1, 3) + plt.subplot(4, 2, 5) + plot_2d_interpolated_image( + x=n_list, + 
y=p_list, + z=mean_shift_list, + vmin=-np.max(np.abs(mean_shift_list)), + vmax=np.max(np.abs(mean_shift_list)), + ) + plt.colorbar() + plt.xlabel("n") + plt.ylabel("p") + plt.title("Mean shift") + plt.subplot(4, 2, 7) + plot_2d_interpolated_image( + x=n_list, + y=p_list, + z=std_scale_list, + vmin=1 - np.max(1 - np.array(std_scale_list)), + vmax=1 + np.max(np.array(std_scale_list) - 1), + ) + plt.colorbar() + plt.xlabel("n") + plt.ylabel("p") + plt.title("Standard deviation scale") + plt.subplot(4, 2, 6) plot_2d_curve_fit_regression( x=n_list, y=p_list, @@ -603,13 +733,13 @@ def logarithmic_distribution(start, end, num_points): vmax=np.max(np.abs(mean_shift_list)), # sample_weight=-np.clip(error_change_arr, None, 0) # + 0.01 * np.max(improvement_arr), - # degree=5, + # degree=3, ) plt.colorbar() plt.xlabel("n") plt.ylabel("p") - plt.title("Mean shift") - plt.subplot(4, 1, 4) + plt.title("Mean shift regression") + plt.subplot(4, 2, 8) plot_2d_curve_fit_regression( x=n_list, y=p_list, @@ -618,12 +748,12 @@ def logarithmic_distribution(start, end, num_points): vmax=1 + np.max(np.array(std_scale_list) - 1), # sample_weight=-np.clip(error_change_arr, None, 0) # + 0.01 * np.max(improvement_arr), - # degree=5, + # degree=3, ) plt.colorbar() plt.xlabel("n") plt.ylabel("p") - plt.title("Standard deviation scale") + plt.title("Standard deviation scale regression") plt.tight_layout() plt.savefig("test2_02_error_optimized.png", dpi=300) From f5828dd9dc8dd0fdb4f009ae2bbe2f37b46b693c Mon Sep 17 00:00:00 2001 From: olmai Date: Wed, 26 Jun 2024 17:19:13 +0200 Subject: [PATCH 07/21] . 
--- .../examples/model_configurator/test2.py | 230 ++++++++++++------ 1 file changed, 161 insertions(+), 69 deletions(-) diff --git a/src/CompNeuroPy/examples/model_configurator/test2.py b/src/CompNeuroPy/examples/model_configurator/test2.py index e889940..6c60a59 100644 --- a/src/CompNeuroPy/examples/model_configurator/test2.py +++ b/src/CompNeuroPy/examples/model_configurator/test2.py @@ -16,48 +16,114 @@ def mean_shift_regression(n, p): x0 = n x1 = p - return ( - -0.122 - + -0.000 * 1 - + -0.001 * x0 - + 0.257 * x1 - + 0.000 * x0**2 - + 0.003 * x0 * x1 - + 0.847 * x1**2 - + 0.000 * x0**3 - + -0.000 * x0**2 * x1 - + -0.001 * x0 * x1**2 - + -3.413 * x1**3 - + -0.000 * x0**4 - + 0.000 * x0**3 * x1 - + -0.000 * x0**2 * x1**2 - + 0.001 * x0 * x1**3 - + 2.644 * x1**4 + + p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13, p14 = ( + 1.176321040012159, + -6.429595249324671, + -6.804798904871692, + 21.915210556787986, + 61.64443550309026, + 27.70993009301549, + -27.95654152965883, + 44.87058003864243, + -63.817886336670654, + -40.65337691430986, + 12.246014608429185, + -101.39842049962134, + 55.18658444426345, + 3.6975636800782237, + 19.531732368721627, + ) + + x0_min = 10 + x0_max = 1000 + x0_norm = (x0 - x0_min) / (x0_max - x0_min) + x1_min = 0.001 + x1_max = 0.1 + x1_norm = (x1 - x1_min) / (x1_max - x1_min) + z_min = -0.34886991656269 + z_max = 0.03020699313695153 + + z_norm = np.clip( + ( + p0 + + p1 * x0_norm + + p2 * x1_norm + + p3 * x0_norm**2 + + p4 * x0_norm * x1_norm + + p5 * x1_norm**2 + + p6 * x0_norm**3 + + p7 * x0_norm**2 * x1_norm + + p8 * x0_norm * x1_norm**2 + + p9 * x1_norm**3 + + p10 * x0_norm**4 + + p11 * x0_norm**3 * x1_norm + + p12 * x0_norm**2 * x1_norm**2 + + p13 * x0_norm * x1_norm**3 + + p14 * x1_norm**4 + ) + ** 3, + 0, + 1, ) + return z_norm * (z_max - z_min) + z_min def std_scale_regression(n, p): x0 = n x1 = p - return ( - 1.218 - + -0.000 * 1 - + 0.001 * x0 - + -4.365 * x1 - + -0.000 * x0**2 - + -0.001 * x0 * x1 - + 16.631 * x1**2 - 
+ 0.000 * x0**3 - + 0.000 * x0**2 * x1 - + 0.001 * x0 * x1**2 - + -25.130 * x1**3 - + -0.000 * x0**4 - + -0.000 * x0**3 * x1 - + -0.000 * x0**2 * x1**2 - + -0.001 * x0 * x1**3 - + 12.968 * x1**4 + + p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13, p14 = ( + 0.320502224444166, + 6.699870528297452, + 6.935907486422781, + -22.52956657500412, + 108.421673456727, + -30.088834608939973, + 29.237548128991747, + -1172.785284855411, + -1027.3975831574996, + 58.67858836080429, + -14.043681478309374, + 1174.4693590015047, + -3413.5816408185233, + 1307.2105954815152, + -44.03158341787127, ) + x0_min = 10 + x0_max = 1000 + x0_norm = (x0 - x0_min) / (x0_max - x0_min) + x1_min = 0.001 + x1_max = 0.1 + x1_norm = (x1 - x1_min) / (x1_max - x1_min) + z_min = 0.9710804893692282 + z_max = 1.6265889931274558 + + z_norm = np.clip( + ( + p0 + + p1 * x0_norm + + p2 * x1_norm + + p3 * x0_norm**2 + + p4 * x0_norm * x1_norm + + p5 * x1_norm**2 + + p6 * x0_norm**3 + + p7 * x0_norm**2 * x1_norm + + p8 * x0_norm * x1_norm**2 + + p9 * x1_norm**3 + + p10 * x0_norm**4 + + p11 * x0_norm**3 * x1_norm + + p12 * x0_norm**2 * x1_norm**2 + + p13 * x0_norm * x1_norm**3 + + p14 * x1_norm**4 + ) + ** 3, + 0, + 1, + ) + return z_norm * (z_max - z_min) + z_min + def gauss_1d(x, amp, mean, sig): return amp * np.exp(-((x - mean) ** 2) / (2 * sig**2)) @@ -99,15 +165,21 @@ def curve_fit_func( p3, p4, p5, - # p6, - # p7, - # p8, - # p9, - # p10, - # p11, - # p12, - # p13, - # p14, + p6, + p7, + p8, + p9, + p10, + p11, + p12, + p13, + p14, + p15, + p16, + p17, + p18, + p19, + p20, ): x0, x1 = X ### 2D polynomial with certain degree @@ -119,15 +191,21 @@ def curve_fit_func( + p3 * x0**2 + p4 * x0 * x1 + p5 * x1**2 - # + p6 * x0**3 - # + p7 * x0**2 * x1 - # + p8 * x0 * x1**2 - # + p9 * x1**3 - # + p10 * x0**4 - # + p11 * x0**3 * x1 - # + p12 * x0**2 * x1**2 - # + p13 * x0 * x1**3 - # + p14 * x1**4 + + p6 * x0**3 + + p7 * x0**2 * x1 + + p8 * x0 * x1**2 + + p9 * x1**3 + + p10 * x0**4 + + p11 * x0**3 * x1 + + 
p12 * x0**2 * x1**2 + + p13 * x0 * x1**3 + + p14 * x1**4 + + p15 * x0**5 + + p16 * x0**4 * x1 + + p17 * x0**3 * x1**2 + + p18 * x0**2 * x1**3 + + p19 * x0 * x1**4 + + p20 * x1**5 ) ** 3, np.min(z), @@ -164,20 +242,26 @@ def curve_fit_objective_function(individual): "p3", "p4", "p5", - # "p6", - # "p7", - # "p8", - # "p9", - # "p10", - # "p11", - # "p12", - # "p13", - # "p14", + "p6", + "p7", + "p8", + "p9", + "p10", + "p11", + "p12", + "p13", + "p14", + "p15", + "p16", + "p17", + "p18", + "p19", + "p20", ] ### run the optimization, get popt best_fitness = 1e9 ### create progress bar showing the best fitness - progress_bar = tqdm(range(100), total=100) + progress_bar = tqdm(range(10), total=10) for _ in progress_bar: deap_cma = DeapCma( lower=np.array([-1] * len(param_names)), @@ -257,6 +341,12 @@ def curve_fit_objective_function(individual): ), ) + ### print the regression equation and data normalization + print(f"popt: {popt}") + print(f"x_max: {x_max}, x_min: {x_min}") + print(f"y_max: {y_max}, y_min: {y_min}") + print(f"z_max: {z_max}, z_min: {z_min}") + def plot_2d_regression_image( x, y, z, sample_weight=None, vmin=None, vmax=None, degree=2, grid_size=100 @@ -374,6 +464,12 @@ def plot_2d_interpolated_image( x, y, c=z, cmap="viridis", vmin=vmin, vmax=vmax, edgecolor="k", marker="o" ) + # find local extrema of the surface using scipy + from scipy.signal import argrelextrema + + # find local maxima + maxima = argrelextrema(zi, np.greater, axis=None) + def generate_samples(n, p, m, mean_shift=0, std_scale=1): # Generate data samples @@ -457,8 +553,8 @@ def logarithmic_distribution(start, end, num_points): ### and n values. I will try to optimize the shift and scale for each n and p value. 
OPTIMIZE = False +PLOT_OPTIMIZED = False USE_REGRESSION = False -PLOT_OPTIMIZED = True PLOT_REGRESSION = False ### 1st optimize mean shift and std scale for each n and p value @@ -808,28 +904,24 @@ def logarithmic_distribution(start, end, num_points): plt.ylabel("p") plt.title("Error improvement") plt.subplot(4, 1, 3) - plot_2d_regression_image( + plot_2d_interpolated_image( x=n_list, y=p_list, z=mean_shift_reg_list, - sample_weight=error_list, vmin=-np.max(np.abs(mean_shift_reg_list)), vmax=np.max(np.abs(mean_shift_reg_list)), - degree=4, ) plt.colorbar() plt.xlabel("n") plt.ylabel("p") plt.title("Mean shift") plt.subplot(4, 1, 4) - plot_2d_regression_image( + plot_2d_interpolated_image( x=n_list, y=p_list, z=std_scale_reg_list, - sample_weight=error_list, vmin=1 - np.max(1 - np.array(std_scale_reg_list)), vmax=1 + np.max(np.array(std_scale_reg_list) - 1), - degree=4, ) plt.colorbar() plt.xlabel("n") From 594ab37f58ac2d06b91bc229fd0bbe826b5820ca Mon Sep 17 00:00:00 2001 From: olmai Date: Thu, 27 Jun 2024 09:42:20 +0200 Subject: [PATCH 08/21] . 
--- .../examples/model_configurator/test2.py | 1312 +++++++---------- .../model_configurator/test2_deap_opt.py | 91 ++ src/CompNeuroPy/extra_functions.py | 7 +- 3 files changed, 662 insertions(+), 748 deletions(-) create mode 100644 src/CompNeuroPy/examples/model_configurator/test2_deap_opt.py diff --git a/src/CompNeuroPy/examples/model_configurator/test2.py b/src/CompNeuroPy/examples/model_configurator/test2.py index 6c60a59..ea3c7e3 100644 --- a/src/CompNeuroPy/examples/model_configurator/test2.py +++ b/src/CompNeuroPy/examples/model_configurator/test2.py @@ -1,7 +1,7 @@ import numpy as np import matplotlib.pyplot as plt import scipy.stats as stats -from CompNeuroPy import DeapCma, save_variables, load_variables +from CompNeuroPy import DeapCma, save_variables, load_variables, run_script_parallel from scipy.optimize import minimize, Bounds from scipy.interpolate import griddata from sklearn.preprocessing import PolynomialFeatures @@ -129,6 +129,95 @@ def gauss_1d(x, amp, mean, sig): return amp * np.exp(-((x - mean) ** 2) / (2 * sig**2)) +def log_normal_1d(x, amp, mean, sig): + return (amp / x) * np.exp(-((np.log(x) - mean) ** 2) / (2 * sig**2)) + + +deap_opt_path = "test2_deap_opt/" + + +def curve_fit_func( + X, + g0, + g1, + g2, + g3, + g4, + g5, + g6, + g7, + g8, + g9, + g10, + g11, + g12, + g13, + g14, + p0, + # p1, + # p2, + # p3, + # p4, + # p5, + # p6, + # p7, + # p8, + # p9, + # p10, + # p11, + # p12, + # p13, + # p14, + # p15, + # p16, + # p17, + # p18, + # p19, + # p20, +): + x0, x1 = X + + ### 2D polynomial with certain degree + return np.clip( + p0 + + gauss_1d( + ( + x0 + # p0 + # + p1 * x0 + # + p2 * x1 + # + p3 * x0**2 + # + p4 * x0 * x1 + # + p5 * x1**2 + # + p6 * x0**3 + # + p7 * x0**2 * x1 + # + p8 * x0 * x1**2 + # + p9 * x1**3 + # + p10 * x0**4 + # + p11 * x0**3 * x1 + # + p12 * x0**2 * x1**2 + # + p13 * x0 * x1**3 + # + p14 * x1**4 + # + p15 * x0**5 + # + p16 * x0**4 * x1 + # + p17 * x0**3 * x1**2 + # + p18 * x0**2 * x1**3 + # + p19 * x0 * 
x1**4 + # + p20 * x1**5 + ), + amp=g0, + mean=g1, + sig=g2, + ) + + gauss_1d(x1, amp=g3, mean=g4, sig=g5) + + gauss_1d(x0 * x1, amp=g6, mean=g7, sig=g8) + + gauss_1d(x0**2 * x1, amp=g9, mean=g10, sig=g11) + + gauss_1d(x0 * x1**2, amp=g12, mean=g13, sig=g14), + -1e20, + 1e20, + ) + + def plot_2d_curve_fit_regression( x, y, z, sample_weight=None, vmin=None, vmax=None, grid_size=100 ): @@ -157,153 +246,32 @@ def plot_2d_curve_fit_regression( z = (z - z_min) / (z_max - z_min) # Fit the curve_fit regression model - def curve_fit_func( - X, - p0, - p1, - p2, - p3, - p4, - p5, - p6, - p7, - p8, - p9, - p10, - p11, - p12, - p13, - p14, - p15, - p16, - p17, - p18, - p19, - p20, - ): - x0, x1 = X - ### 2D polynomial with certain degree - return np.clip( - ( - p0 - + p1 * x0 - + p2 * x1 - + p3 * x0**2 - + p4 * x0 * x1 - + p5 * x1**2 - + p6 * x0**3 - + p7 * x0**2 * x1 - + p8 * x0 * x1**2 - + p9 * x1**3 - + p10 * x0**4 - + p11 * x0**3 * x1 - + p12 * x0**2 * x1**2 - + p13 * x0 * x1**3 - + p14 * x1**4 - + p15 * x0**5 - + p16 * x0**4 * x1 - + p17 * x0**3 * x1**2 - + p18 * x0**2 * x1**3 - + p19 * x0 * x1**4 - + p20 * x1**5 - ) - ** 3, - np.min(z), - np.max(z), - ) - - def curve_fit_evaluate_function(population): - loss_list = [] - ### the population is a list of individuals which are lists of parameters - for individual in population: - loss_of_individual = curve_fit_objective_function(individual) - loss_list.append((loss_of_individual,)) - return loss_list - - def curve_fit_objective_function(individual): - is_data = curve_fit_func((x, y), *individual) - target_data = z - return np.sum((is_data - target_data) ** 2) - - # ### do opt with scipy curve_fit - # popt, pcov = curve_fit( - # curve_fit_func, - # (x, y), - # z, - # sigma=1 / sample_weight if sample_weight is not None else None, - # absolute_sigma=False, - # ) - - ### do opt with deap cma - param_names = [ - "p0", - "p1", - "p2", - "p3", - "p4", - "p5", - "p6", - "p7", - "p8", - "p9", - "p10", - "p11", - "p12", - "p13", - 
"p14", - "p15", - "p16", - "p17", - "p18", - "p19", - "p20", - ] - ### run the optimization, get popt - best_fitness = 1e9 - ### create progress bar showing the best fitness - progress_bar = tqdm(range(10), total=10) - for _ in progress_bar: - deap_cma = DeapCma( - lower=np.array([-1] * len(param_names)), - upper=np.array([1] * len(param_names)), - evaluate_function=curve_fit_evaluate_function, - param_names=param_names, - hard_bounds=False, - display_progress_bar=False, + ### do opt with deap cma in other script + save_variables( + name_list=["x", "y", "z"], variable_list=[x, y, z], path=deap_opt_path + ) + n_jobs = 4 + n_runs = 5 * n_jobs + args_list = [[f"{parallel_id}"] for parallel_id in range(n_runs)] + run_script_parallel( + script_path="test2_deap_opt.py", + n_jobs=4, + args_list=args_list, + ) + ### get best parameters + best_fitness = 1e6 + best_parallel_id = 0 + for parallel_id in range(n_runs): + loaded_variables = load_variables( + name_list=[f"best_fitness_{parallel_id}"], path=deap_opt_path ) - deap_cma_result = deap_cma.run(max_evals=2000) - if deap_cma_result["best_fitness"] < best_fitness: - best_fitness = deap_cma_result["best_fitness"] - popt = [deap_cma_result[param_name] for param_name in param_names] - progress_bar.set_description(f"Best Fitness: {best_fitness}") - - # ### solve polynomial using linalg - # x0 = x - # x1 = y - # n_samples = len(x0) - # X = np.column_stack( - # [ - # np.ones(n_samples), - # x0, - # x1, - # x0**2, - # x0 * x1, - # x1**2, - # x0**3, - # x0**2 * x1, - # x0 * x1**2, - # x1**3, - # x0**4, - # x0**3 * x1, - # x0**2 * x1**2, - # x0 * x1**3, - # x1**4, - # ] - # ) - - # # Solve the least squares problem - # coefficients, residuals, rank, s = np.linalg.lstsq(X, z, rcond=None) - # popt = coefficients + if loaded_variables[f"best_fitness_{parallel_id}"] < best_fitness: + best_fitness = loaded_variables[f"best_fitness_{parallel_id}"] + best_parallel_id = parallel_id + loaded_variables = load_variables( + 
name_list=[f"popt_{best_parallel_id}"], path=deap_opt_path + ) + popt = loaded_variables[f"popt_{best_parallel_id}"] # Create grid for plotting xi = np.linspace(min(x), max(x), grid_size) @@ -342,6 +310,7 @@ def curve_fit_objective_function(individual): ) ### print the regression equation and data normalization + print(f"best_fitness: {best_fitness}") print(f"popt: {popt}") print(f"x_max: {x_max}, x_min: {x_min}") print(f"y_max: {y_max}, y_min: {y_min}") @@ -464,12 +433,6 @@ def plot_2d_interpolated_image( x, y, c=z, cmap="viridis", vmin=vmin, vmax=vmax, edgecolor="k", marker="o" ) - # find local extrema of the surface using scipy - from scipy.signal import argrelextrema - - # find local maxima - maxima = argrelextrema(zi, np.greater, axis=None) - def generate_samples(n, p, m, mean_shift=0, std_scale=1): # Generate data samples @@ -551,60 +514,49 @@ def logarithmic_distribution(start, end, num_points): ### approximation of the binomial distribution. ### I think one can shift the mean and scale the standard deviation depending on the p ### and n values. I will try to optimize the shift and scale for each n and p value. 
+if __name__ == "__main__": + OPTIMIZE = False + PLOT_OPTIMIZED = True + USE_REGRESSION = False + PLOT_REGRESSION = False + + ### 1st optimize mean shift and std scale for each n and p value + n_arr = logarithmic_distribution(10, 1000, 20).astype(int) + p_arr = logarithmic_distribution(0.001, 0.1, 10) + + ### bounds for optimized parameters + shift_mean_bounds = [-1, 1] + scale_std_bounds = [0.5, 2] + lb = np.array([shift_mean_bounds[0], scale_std_bounds[0]]) + ub = np.array([shift_mean_bounds[1], scale_std_bounds[1]]) + # number of samples + M = 10000 + + ### 1st get errors for all n and p values without optimization + p_list = [] + n_list = [] + error_list = [] + for p in p_arr: + for n in n_arr: + ### set the global variables probability of success and number of trials + P = p + N = n + + ### get the error without optimization + error = objective_function(mean_shift=0, std_scale=1) + error_list.append(error) + + ### store the results + p_list.append(p) + n_list.append(n) -OPTIMIZE = False -PLOT_OPTIMIZED = False -USE_REGRESSION = False -PLOT_REGRESSION = False - -### 1st optimize mean shift and std scale for each n and p value -n_arr = logarithmic_distribution(10, 1000, 20).astype(int) -p_arr = logarithmic_distribution(0.001, 0.1, 10) - -### bounds for optimized parameters -shift_mean_bounds = [-1, 1] -scale_std_bounds = [0.5, 2] -lb = np.array([shift_mean_bounds[0], scale_std_bounds[0]]) -ub = np.array([shift_mean_bounds[1], scale_std_bounds[1]]) -# number of samples -M = 10000 - -### 1st get errors for all n and p values without optimization -p_list = [] -n_list = [] -error_list = [] -for p in p_arr: - for n in n_arr: - ### set the global variables probability of success and number of trials - P = p - N = n - - ### get the error without optimization - error = objective_function(mean_shift=0, std_scale=1) - error_list.append(error) - - ### store the results - p_list.append(p) - n_list.append(n) - -### save variables -save_variables( - variable_list=[ - p_list, 
- n_list, - error_list, - ], - name_list=[ - "p_list", - "n_list", - "error_list", - ], - path="data_optimize_binomial_normal", -) - -### 2nd optimize mean shift and std scale for each n and p value and get improved error -if OPTIMIZE: - loaded_variables = load_variables( + ### save variables + save_variables( + variable_list=[ + p_list, + n_list, + error_list, + ], name_list=[ "p_list", "n_list", @@ -612,564 +564,430 @@ def logarithmic_distribution(start, end, num_points): ], path="data_optimize_binomial_normal", ) - p_list = loaded_variables["p_list"] - n_list = loaded_variables["n_list"] - error_list = loaded_variables["error_list"] - mean_shift_list = [] - std_scale_list = [] - error_improved_list = [] - for p, n in zip(p_list, n_list): - ### set the global variables probability of success and number of trials - P = p - N = n - - # ### optimize the mean shift and standard deviation scale using scipy minimize - # result = minimize( - # objective_function_for_minimize, - # x0=[0, 1], - # bounds=Bounds(lb, ub), - # method="Nelder-Mead", - # ) - # mean_shift = result.x[0] - # std_scale = result.x[1] - # error_improved = result.fun - - ### create an instance of the DeapCma class - deap_cma = DeapCma( - lower=lb, - upper=ub, - evaluate_function=evaluate_function, - param_names=["mean_shift", "std_scale"], - hard_bounds=True, - ) - ### run the optimization - deap_cma_result = deap_cma.run(max_evals=1000) - - ### get the optimized parameters and best error - mean_shift = deap_cma_result["mean_shift"] - std_scale = deap_cma_result["std_scale"] - error_improved = deap_cma_result["best_fitness"] + ### 2nd optimize mean shift and std scale for each n and p value and get improved error + if OPTIMIZE: + loaded_variables = load_variables( + name_list=[ + "p_list", + "n_list", + "error_list", + ], + path="data_optimize_binomial_normal", + ) + p_list = loaded_variables["p_list"] + n_list = loaded_variables["n_list"] + error_list = loaded_variables["error_list"] + 
mean_shift_list = [] + std_scale_list = [] + error_improved_list = [] + for p, n in zip(p_list, n_list): + ### set the global variables probability of success and number of trials + P = p + N = n + + # ### optimize the mean shift and standard deviation scale using scipy minimize + # result = minimize( + # objective_function_for_minimize, + # x0=[0, 1], + # bounds=Bounds(lb, ub), + # method="Nelder-Mead", + # ) + # mean_shift = result.x[0] + # std_scale = result.x[1] + # error_improved = result.fun + + ### create an instance of the DeapCma class + deap_cma = DeapCma( + lower=lb, + upper=ub, + evaluate_function=evaluate_function, + param_names=["mean_shift", "std_scale"], + hard_bounds=True, + ) - ### store the results - error_improved_list.append(error_improved) - mean_shift_list.append(mean_shift) - std_scale_list.append(std_scale) + ### run the optimization + deap_cma_result = deap_cma.run(max_evals=1000) + + ### get the optimized parameters and best error + mean_shift = deap_cma_result["mean_shift"] + std_scale = deap_cma_result["std_scale"] + error_improved = deap_cma_result["best_fitness"] + + ### store the results + error_improved_list.append(error_improved) + mean_shift_list.append(mean_shift) + std_scale_list.append(std_scale) + + ### save variables + save_variables( + variable_list=[ + mean_shift_list, + std_scale_list, + error_improved_list, + ], + name_list=[ + "mean_shift_list", + "std_scale_list", + "error_improved_list", + ], + path="data_optimize_binomial_normal", + ) - ### save variables - save_variables( - variable_list=[ - mean_shift_list, - std_scale_list, - error_improved_list, - ], - name_list=[ - "mean_shift_list", - "std_scale_list", - "error_improved_list", - ], - path="data_optimize_binomial_normal", - ) + ### 3rd use regression for mean shift and std scale and recalculate the improved error + if USE_REGRESSION: + ### load the optimized parameters and corresponding original and optimized errors + loaded_variables = load_variables( + 
name_list=[ + "p_list", + "n_list", + ], + path="data_optimize_binomial_normal", + ) + p_list = loaded_variables["p_list"] + n_list = loaded_variables["n_list"] + + ### use regression equations to recalculate mean shift and std scale + mean_shift_reg_list = [] + std_scale_reg_list = [] + error_improved_reg_list = [] + for p, n in zip(p_list, n_list): + ### set the global variables probability of success and number of trials + P = p + N = n + + ### get the optimized parameters and best error + mean_shift = mean_shift_regression(n, p) + std_scale = std_scale_regression(n, p) + error_improved = objective_function( + mean_shift=mean_shift, std_scale=std_scale + ) + ### store the results + error_improved_reg_list.append(error_improved) + mean_shift_reg_list.append(mean_shift) + std_scale_reg_list.append(std_scale) + + ### save variables + save_variables( + variable_list=[ + mean_shift_reg_list, + std_scale_reg_list, + error_improved_reg_list, + ], + name_list=[ + "mean_shift_reg_list", + "std_scale_reg_list", + "error_improved_reg_list", + ], + path="data_optimize_binomial_normal", + ) -### 3rd use regression for mean shift and std scale and recalculate the improved error -if USE_REGRESSION: - ### load the optimized parameters and corresponding original and optimized errors + ### 4th plot the original error + # original error -> interpolation plot loaded_variables = load_variables( name_list=[ "p_list", "n_list", + "error_list", ], path="data_optimize_binomial_normal", ) p_list = loaded_variables["p_list"] n_list = loaded_variables["n_list"] - - ### use regression equations to recalculate mean shift and std scale - mean_shift_reg_list = [] - std_scale_reg_list = [] - error_improved_reg_list = [] - for p, n in zip(p_list, n_list): - ### set the global variables probability of success and number of trials - P = p - N = n - - ### get the optimized parameters and best error - mean_shift = mean_shift_regression(n, p) - std_scale = std_scale_regression(n, p) - error_improved 
= objective_function(mean_shift=mean_shift, std_scale=std_scale) - - ### store the results - error_improved_reg_list.append(error_improved) - mean_shift_reg_list.append(mean_shift) - std_scale_reg_list.append(std_scale) - - ### save variables - save_variables( - variable_list=[ - mean_shift_reg_list, - std_scale_reg_list, - error_improved_reg_list, - ], - name_list=[ - "mean_shift_reg_list", - "std_scale_reg_list", - "error_improved_reg_list", - ], - path="data_optimize_binomial_normal", - ) - -### 4th plot the original error -# original error -> interpolation plot -loaded_variables = load_variables( - name_list=[ - "p_list", - "n_list", - "error_list", - ], - path="data_optimize_binomial_normal", -) -p_list = loaded_variables["p_list"] -n_list = loaded_variables["n_list"] -error_list = loaded_variables["error_list"] -plt.figure(figsize=(6.4 * 2, 4.8 * 2)) -plt.subplot(1, 1, 1) -plot_2d_interpolated_image( - x=n_list, y=p_list, z=error_list, vmin=0, vmax=np.max(error_list) -) -plt.colorbar() -plt.xlabel("n") -plt.ylabel("p") -plt.title(f"Error original\n(max: {np.max(error_list)})") -plt.tight_layout() -plt.savefig("test2_01_error_original.png", dpi=300) - -### 5th plot the optimized error with optimized mean shift and std scale -if PLOT_OPTIMIZED: - # fitting improved error -> interpolation plot - # fitting improvement -> interpolation plot - # fitting mean shift -> regression plot - # fitting std scale -> regression plot - loaded_variables = load_variables( - name_list=[ - "error_improved_list", - "mean_shift_list", - "std_scale_list", - ], - path="data_optimize_binomial_normal", - ) - error_improved_list = loaded_variables["error_improved_list"] - mean_shift_list = loaded_variables["mean_shift_list"] - std_scale_list = loaded_variables["std_scale_list"] - error_change_arr = np.array(error_improved_list) - np.array(error_list) - improvement_arr = -np.clip(error_change_arr, None, 0) - improvement_arr_norm = improvement_arr / np.max(improvement_arr) - - ### scale 
the mean shift and std scale by the error improvement - ### --> only keep the transformations which improve the error - alpha = improvement_arr_norm - mean_shift_list = alpha * np.array(mean_shift_list) + (1 - alpha) * 0 - std_scale_list = alpha * np.array(std_scale_list) + (1 - alpha) * 1 - - plt.figure(figsize=(6.4 * 2 * 2, 4.8 * 2 * 4)) - plt.subplot(4, 2, 1) - plot_2d_interpolated_image( - x=n_list, - y=p_list, - z=error_improved_list, - vmin=0, - vmax=np.max(error_improved_list), - ) - plt.colorbar() - plt.xlabel("n") - plt.ylabel("p") - plt.title(f"Error optimized\n(max: {np.max(error_improved_list)})") - plt.subplot(4, 2, 3) - plot_2d_interpolated_image( - x=n_list, - y=p_list, - z=error_change_arr, - vmin=-np.max(np.abs(error_change_arr)), - vmax=np.max(np.abs(error_change_arr)), - ) - plt.colorbar() - plt.xlabel("n") - plt.ylabel("p") - plt.title("Error improvement") - plt.subplot(4, 2, 5) - plot_2d_interpolated_image( - x=n_list, - y=p_list, - z=mean_shift_list, - vmin=-np.max(np.abs(mean_shift_list)), - vmax=np.max(np.abs(mean_shift_list)), - ) - plt.colorbar() - plt.xlabel("n") - plt.ylabel("p") - plt.title("Mean shift") - plt.subplot(4, 2, 7) - plot_2d_interpolated_image( - x=n_list, - y=p_list, - z=std_scale_list, - vmin=1 - np.max(1 - np.array(std_scale_list)), - vmax=1 + np.max(np.array(std_scale_list) - 1), - ) - plt.colorbar() - plt.xlabel("n") - plt.ylabel("p") - plt.title("Standard deviation scale") - plt.subplot(4, 2, 6) - plot_2d_curve_fit_regression( - x=n_list, - y=p_list, - z=mean_shift_list, - vmin=-np.max(np.abs(mean_shift_list)), - vmax=np.max(np.abs(mean_shift_list)), - # sample_weight=-np.clip(error_change_arr, None, 0) - # + 0.01 * np.max(improvement_arr), - # degree=3, - ) - plt.colorbar() - plt.xlabel("n") - plt.ylabel("p") - plt.title("Mean shift regression") - plt.subplot(4, 2, 8) - plot_2d_curve_fit_regression( - x=n_list, - y=p_list, - z=std_scale_list, - vmin=1 - np.max(1 - np.array(std_scale_list)), - vmax=1 + 
np.max(np.array(std_scale_list) - 1), - # sample_weight=-np.clip(error_change_arr, None, 0) - # + 0.01 * np.max(improvement_arr), - # degree=3, - ) - plt.colorbar() - plt.xlabel("n") - plt.ylabel("p") - plt.title("Standard deviation scale regression") - plt.tight_layout() - plt.savefig("test2_02_error_optimized.png", dpi=300) - -### 6th plot the regression error with regressed mean shift and std scale and compare it -### with the optimized error -if PLOT_REGRESSION: - # regression improved error -> interpolation plot - # regression improvement -> interpolation plot - # regression mean shift -> regression plot - # regression std scale -> regression plot - loaded_variables = load_variables( - name_list=[ - "error_improved_list", - "mean_shift_list", - "std_scale_list", - "error_improved_reg_list", - "mean_shift_reg_list", - "std_scale_reg_list", - ], - path="data_optimize_binomial_normal", - ) - error_improved_list = loaded_variables["error_improved_list"] - mean_shift_list = loaded_variables["mean_shift_list"] - std_scale_list = loaded_variables["std_scale_list"] - error_improved_reg_list = loaded_variables["error_improved_reg_list"] - mean_shift_reg_list = loaded_variables["mean_shift_reg_list"] - std_scale_reg_list = loaded_variables["std_scale_reg_list"] - - plt.figure(figsize=(6.4 * 2, 4.8 * 2 * 4)) - plt.subplot(4, 1, 1) - plot_2d_interpolated_image( - x=n_list, - y=p_list, - z=error_improved_reg_list, - vmin=0, - vmax=np.max(error_improved_reg_list), - ) - plt.colorbar() - plt.xlabel("n") - plt.ylabel("p") - plt.title(f"Error optimized\n(max: {np.max(error_improved_reg_list)})") - plt.subplot(4, 1, 2) - plot_2d_interpolated_image( - x=n_list, - y=p_list, - z=np.array(error_improved_reg_list) - np.array(error_list), - vmin=-np.max(np.abs(np.array(error_improved_reg_list) - np.array(error_list))), - vmax=np.max(np.abs(np.array(error_improved_reg_list) - np.array(error_list))), - ) - plt.colorbar() - plt.xlabel("n") - plt.ylabel("p") - plt.title("Error 
improvement") - plt.subplot(4, 1, 3) - plot_2d_interpolated_image( - x=n_list, - y=p_list, - z=mean_shift_reg_list, - vmin=-np.max(np.abs(mean_shift_reg_list)), - vmax=np.max(np.abs(mean_shift_reg_list)), - ) - plt.colorbar() - plt.xlabel("n") - plt.ylabel("p") - plt.title("Mean shift") - plt.subplot(4, 1, 4) - plot_2d_interpolated_image( - x=n_list, - y=p_list, - z=std_scale_reg_list, - vmin=1 - np.max(1 - np.array(std_scale_reg_list)), - vmax=1 + np.max(np.array(std_scale_reg_list) - 1), - ) - plt.colorbar() - plt.xlabel("n") - plt.ylabel("p") - plt.title("Standard deviation scale") - plt.tight_layout() - plt.savefig("test2_03_error_regression.png", dpi=300) - - # difference fitting/regression improved error -> interpolation plot - # difference fitting/regression improvement -> interpolation plot - # difference fitting/regression mean shift -> interpolation plot - # difference fitting/regression std scale -> interpolation plot - plt.figure(figsize=(6.4 * 2, 4.8 * 2 * 4)) - plt.subplot(4, 1, 1) - plot_2d_interpolated_image( - x=n_list, - y=p_list, - z=np.array(error_improved_list) - np.array(error_improved_reg_list), - vmin=-np.max( - np.abs(np.array(error_improved_list) - np.array(error_improved_reg_list)) - ), - vmax=np.max( - np.abs(np.array(error_improved_list) - np.array(error_improved_reg_list)) - ), - ) - plt.colorbar() - plt.xlabel("n") - plt.ylabel("p") - plt.title("Error difference between optimized and regression") - plt.subplot(4, 1, 2) - plot_2d_interpolated_image( - x=n_list, - y=p_list, - z=np.array(error_improved_list) - - np.array(error_list) - - np.array(error_improved_reg_list) - + np.array(error_list), - vmin=-np.max( - np.abs( - np.array(error_improved_list) - - np.array(error_list) - - np.array(error_improved_reg_list) - + np.array(error_list) - ) - ), - vmax=np.max( - np.abs( - np.array(error_improved_list) - - np.array(error_list) - - np.array(error_improved_reg_list) - + np.array(error_list) - ) - ), - ) - plt.colorbar() - plt.xlabel("n") 
- plt.ylabel("p") - plt.title("Error improvement difference between optimized and regression") - plt.subplot(4, 1, 3) - plot_2d_interpolated_image( - x=n_list, - y=p_list, - z=np.array(mean_shift_list) - np.array(mean_shift_reg_list), - vmin=-np.max(np.abs(np.array(mean_shift_list) - np.array(mean_shift_reg_list))), - vmax=np.max(np.abs(np.array(mean_shift_list) - np.array(mean_shift_reg_list))), - ) - plt.colorbar() - plt.xlabel("n") - plt.ylabel("p") - plt.title("Mean shift difference between optimized and regression") - plt.subplot(4, 1, 4) + error_list = loaded_variables["error_list"] + plt.figure(figsize=(6.4 * 2, 4.8 * 2)) + plt.subplot(1, 1, 1) plot_2d_interpolated_image( - x=n_list, - y=p_list, - z=np.array(std_scale_list) - np.array(std_scale_reg_list), - vmin=-np.max(np.abs(np.array(std_scale_list) - np.array(std_scale_reg_list))), - vmax=np.max(np.abs(np.array(std_scale_list) - np.array(std_scale_reg_list))), + x=n_list, y=p_list, z=error_list, vmin=0, vmax=np.max(error_list) ) plt.colorbar() plt.xlabel("n") plt.ylabel("p") - plt.title("Standard deviation scale difference between optimized and regression") + plt.title(f"Error original\n(max: {np.max(error_list)})") plt.tight_layout() - plt.savefig("test2_04_error_difference.png", dpi=300) - -quit() -# Plot the error as a function of p and n as a heatmap -plt.figure(figsize=(20, 15)) -### scatter plot with max from error_list and error_improved_list -plt.subplot(2, 2, 1) - -plot_2d_interpolated_image( - x=n_list, - y=p_list, - z=error_list, - vmin=0, - vmax=np.max(error_list + error_improved_list), -) -print(np.max(error_list + error_improved_list)) -plt.scatter( - n_list, - p_list, - c=error_list, - cmap="viridis", - vmin=0, - vmax=np.max(error_list + error_improved_list), - edgecolor="k", - marker="o", -) -plt.colorbar() -plt.xlabel("n") -plt.ylabel("p") -plt.title(f"Error original\n(max: {np.max(error_list)})") -plt.subplot(2, 2, 2) -plot_2d_interpolated_image( - x=n_list, - y=p_list, - 
z=error_improved_list, - vmin=0, - vmax=np.max(error_list + error_improved_list), -) -plt.scatter( - n_list, - p_list, - c=error_improved_list, - cmap="viridis", - vmin=0, - vmax=np.max(error_list + error_improved_list), - edgecolor="k", - marker="o", -) -plt.colorbar() -plt.xlabel("n") -plt.ylabel("p") -plt.title(f"Error optimized\n(max: {np.max(error_improved_list)})") -plt.subplot(2, 2, 3) -plot_2d_regression_image( - x=n_list, - y=p_list, - z=mean_shift_list, - sample_weight=error_list, - vmin=-np.max(np.abs(mean_shift_list)), - vmax=np.max(np.abs(mean_shift_list)), - degree=4, -) -plt.scatter( - n_list, - p_list, - c=mean_shift_list, - cmap="viridis", - vmin=-np.max(np.abs(mean_shift_list)), - vmax=np.max(np.abs(mean_shift_list)), - edgecolor="k", - marker="o", -) -plt.colorbar() -plt.xlabel("n") -plt.ylabel("p") -plt.title("Mean shift") -plt.subplot(2, 2, 4) -plot_2d_regression_image( - x=n_list, - y=p_list, - z=std_scale_list, - sample_weight=error_list, - vmin=1 - np.max(1 - np.array(std_scale_list)), - vmax=1 + np.max(np.array(std_scale_list) - 1), - degree=4, -) -plt.scatter( - n_list, - p_list, - c=std_scale_list, - cmap="viridis", - vmin=1 - np.max(1 - np.array(std_scale_list)), - vmax=1 + np.max(np.array(std_scale_list) - 1), - edgecolor="k", - marker="o", -) -plt.colorbar() -plt.xlabel("n") -plt.ylabel("p") -plt.title("Standard deviation scale") -plt.tight_layout() -plt.savefig("error_heatmap.png") - -quit() - -# Statistical comparison -# Calculate descriptive statistics -binomial_mean = np.mean(binomial_sample) -binomial_std = np.std(binomial_sample) -normal_mean = np.mean(normal_sample) -normal_std = np.std(normal_sample) - -print(f"Binomial Sample Mean: {binomial_mean}, Standard Deviation: {binomial_std}") -print(f"Normal Sample Mean: {normal_mean}, Standard Deviation: {normal_std}") - -# Perform a Kolmogorov-Smirnov test -ks_statistic, p_value = stats.ks_2samp(binomial_sample, normal_sample) -print(f"KS Statistic: {ks_statistic}, P-value: 
{p_value}") - -# Interpretation -if p_value > 0.05: - print("The two samples are similar (fail to reject H0).") -else: - print("The two samples are different (reject H0).") - - -# sort both samples and calculate the root mean squared difference -binomial_sample.sort() -normal_sample.sort() -rmsd = np.sqrt(np.mean((binomial_sample - normal_sample) ** 2)) -print(f"Root Mean Squared Difference: {rmsd}") - - -# Visual comparison -plt.figure(figsize=(12, 10)) - -# Histogram of binomial sample -plt.hist( - binomial_sample, - bins=plot_max + 1, - range=(-0.5, plot_max + 0.5), - density=True, - alpha=0.5, - color="b", - label="Binomial", -) -plt.hist( - normal_sample, - bins=plot_max + 1, - range=(-0.5, plot_max + 0.5), - density=True, - alpha=0.5, - color="r", - label="Normal", -) -# set the y ticks every 0.1 -plt.yticks(np.arange(0, 1.1, 0.1)) -plt.grid() - -plt.legend() -plt.ylim(0, 1) -plt.xlim(-0.5, plot_max + 0.5) -plt.title("Binomial Distribution") -plt.xlabel("Value") -plt.ylabel("Frequency") - -plt.tight_layout() -plt.show() + plt.savefig("test2_01_error_original.png", dpi=300) + + ### 5th plot the optimized error with optimized mean shift and std scale + if PLOT_OPTIMIZED: + # fitting improved error -> interpolation plot + # fitting improvement -> interpolation plot + # fitting mean shift -> regression plot + # fitting std scale -> regression plot + loaded_variables = load_variables( + name_list=[ + "error_improved_list", + "mean_shift_list", + "std_scale_list", + ], + path="data_optimize_binomial_normal", + ) + error_improved_list = loaded_variables["error_improved_list"] + mean_shift_list = loaded_variables["mean_shift_list"] + std_scale_list = loaded_variables["std_scale_list"] + error_change_arr = np.array(error_improved_list) - np.array(error_list) + improvement_arr = -np.clip(error_change_arr, None, 0) + improvement_arr_norm = improvement_arr / np.max(improvement_arr) + + ### scale the mean shift and std scale by the error improvement + ### --> only keep 
the transformations which improve the error + alpha = improvement_arr_norm + mean_shift_list = alpha * np.array(mean_shift_list) + (1 - alpha) * 0 + std_scale_list = alpha * np.array(std_scale_list) + (1 - alpha) * 1 + + ### the mean shift is mostly 0 and at some positions negative, multiply it by -1 + mean_shift_list = -mean_shift_list + + plt.figure(figsize=(6.4 * 2 * 2, 4.8 * 2 * 4)) + plt.subplot(4, 2, 1) + plot_2d_interpolated_image( + x=n_list, + y=p_list, + z=error_improved_list, + vmin=0, + vmax=np.max(error_improved_list), + ) + plt.colorbar() + plt.xlabel("n") + plt.ylabel("p") + plt.title(f"Error optimized\n(max: {np.max(error_improved_list)})") + plt.subplot(4, 2, 3) + plot_2d_interpolated_image( + x=n_list, + y=p_list, + z=error_change_arr, + vmin=-np.max(np.abs(error_change_arr)), + vmax=np.max(np.abs(error_change_arr)), + ) + plt.colorbar() + plt.xlabel("n") + plt.ylabel("p") + plt.title("Error improvement") + plt.subplot(4, 2, 5) + plot_2d_interpolated_image( + x=n_list, + y=p_list, + z=mean_shift_list, + vmin=-np.max(np.abs(mean_shift_list)), + vmax=np.max(np.abs(mean_shift_list)), + ) + plt.colorbar() + plt.xlabel("n") + plt.ylabel("p") + plt.title("Mean shift") + plt.subplot(4, 2, 7) + plot_2d_interpolated_image( + x=n_list, + y=p_list, + z=std_scale_list, + vmin=1 - np.max(1 - np.array(std_scale_list)), + vmax=1 + np.max(np.array(std_scale_list) - 1), + ) + plt.colorbar() + plt.xlabel("n") + plt.ylabel("p") + plt.title("Standard deviation scale") + plt.subplot(4, 2, 6) + plot_2d_curve_fit_regression( + x=n_list, + y=p_list, + z=mean_shift_list, + vmin=-np.max(np.abs(mean_shift_list)), + vmax=np.max(np.abs(mean_shift_list)), + # sample_weight=-np.clip(error_change_arr, None, 0) + # + 0.01 * np.max(improvement_arr), + # degree=3, + ) + plt.colorbar() + plt.xlabel("n") + plt.ylabel("p") + plt.title("Mean shift regression") + plt.subplot(4, 2, 8) + plot_2d_curve_fit_regression( + x=n_list, + y=p_list, + z=std_scale_list, + vmin=1 - np.max(1 - 
np.array(std_scale_list)), + vmax=1 + np.max(np.array(std_scale_list) - 1), + # sample_weight=-np.clip(error_change_arr, None, 0) + # + 0.01 * np.max(improvement_arr), + # degree=3, + ) + plt.colorbar() + plt.xlabel("n") + plt.ylabel("p") + plt.title("Standard deviation scale regression") + plt.tight_layout() + plt.savefig("test2_02_error_optimized.png", dpi=300) + + ### 6th plot the regression error with regressed mean shift and std scale and compare it + ### with the optimized error + if PLOT_REGRESSION: + # regression improved error -> interpolation plot + # regression improvement -> interpolation plot + # regression mean shift -> regression plot + # regression std scale -> regression plot + loaded_variables = load_variables( + name_list=[ + "error_improved_list", + "mean_shift_list", + "std_scale_list", + "error_improved_reg_list", + "mean_shift_reg_list", + "std_scale_reg_list", + ], + path="data_optimize_binomial_normal", + ) + error_improved_list = loaded_variables["error_improved_list"] + mean_shift_list = loaded_variables["mean_shift_list"] + std_scale_list = loaded_variables["std_scale_list"] + error_improved_reg_list = loaded_variables["error_improved_reg_list"] + mean_shift_reg_list = loaded_variables["mean_shift_reg_list"] + std_scale_reg_list = loaded_variables["std_scale_reg_list"] + + plt.figure(figsize=(6.4 * 2, 4.8 * 2 * 4)) + plt.subplot(4, 1, 1) + plot_2d_interpolated_image( + x=n_list, + y=p_list, + z=error_improved_reg_list, + vmin=0, + vmax=np.max(error_improved_reg_list), + ) + plt.colorbar() + plt.xlabel("n") + plt.ylabel("p") + plt.title(f"Error optimized\n(max: {np.max(error_improved_reg_list)})") + plt.subplot(4, 1, 2) + plot_2d_interpolated_image( + x=n_list, + y=p_list, + z=np.array(error_improved_reg_list) - np.array(error_list), + vmin=-np.max( + np.abs(np.array(error_improved_reg_list) - np.array(error_list)) + ), + vmax=np.max( + np.abs(np.array(error_improved_reg_list) - np.array(error_list)) + ), + ) + plt.colorbar() + 
plt.xlabel("n") + plt.ylabel("p") + plt.title("Error improvement") + plt.subplot(4, 1, 3) + plot_2d_interpolated_image( + x=n_list, + y=p_list, + z=mean_shift_reg_list, + vmin=-np.max(np.abs(mean_shift_reg_list)), + vmax=np.max(np.abs(mean_shift_reg_list)), + ) + plt.colorbar() + plt.xlabel("n") + plt.ylabel("p") + plt.title("Mean shift") + plt.subplot(4, 1, 4) + plot_2d_interpolated_image( + x=n_list, + y=p_list, + z=std_scale_reg_list, + vmin=1 - np.max(1 - np.array(std_scale_reg_list)), + vmax=1 + np.max(np.array(std_scale_reg_list) - 1), + ) + plt.colorbar() + plt.xlabel("n") + plt.ylabel("p") + plt.title("Standard deviation scale") + plt.tight_layout() + plt.savefig("test2_03_error_regression.png", dpi=300) + + # difference fitting/regression improved error -> interpolation plot + # difference fitting/regression improvement -> interpolation plot + # difference fitting/regression mean shift -> interpolation plot + # difference fitting/regression std scale -> interpolation plot + plt.figure(figsize=(6.4 * 2, 4.8 * 2 * 4)) + plt.subplot(4, 1, 1) + plot_2d_interpolated_image( + x=n_list, + y=p_list, + z=np.array(error_improved_list) - np.array(error_improved_reg_list), + vmin=-np.max( + np.abs( + np.array(error_improved_list) - np.array(error_improved_reg_list) + ) + ), + vmax=np.max( + np.abs( + np.array(error_improved_list) - np.array(error_improved_reg_list) + ) + ), + ) + plt.colorbar() + plt.xlabel("n") + plt.ylabel("p") + plt.title("Error difference between optimized and regression") + plt.subplot(4, 1, 2) + plot_2d_interpolated_image( + x=n_list, + y=p_list, + z=np.array(error_improved_list) + - np.array(error_list) + - np.array(error_improved_reg_list) + + np.array(error_list), + vmin=-np.max( + np.abs( + np.array(error_improved_list) + - np.array(error_list) + - np.array(error_improved_reg_list) + + np.array(error_list) + ) + ), + vmax=np.max( + np.abs( + np.array(error_improved_list) + - np.array(error_list) + - np.array(error_improved_reg_list) + + 
np.array(error_list) + ) + ), + ) + plt.colorbar() + plt.xlabel("n") + plt.ylabel("p") + plt.title("Error improvement difference between optimized and regression") + plt.subplot(4, 1, 3) + plot_2d_interpolated_image( + x=n_list, + y=p_list, + z=np.array(mean_shift_list) - np.array(mean_shift_reg_list), + vmin=-np.max( + np.abs(np.array(mean_shift_list) - np.array(mean_shift_reg_list)) + ), + vmax=np.max( + np.abs(np.array(mean_shift_list) - np.array(mean_shift_reg_list)) + ), + ) + plt.colorbar() + plt.xlabel("n") + plt.ylabel("p") + plt.title("Mean shift difference between optimized and regression") + plt.subplot(4, 1, 4) + plot_2d_interpolated_image( + x=n_list, + y=p_list, + z=np.array(std_scale_list) - np.array(std_scale_reg_list), + vmin=-np.max( + np.abs(np.array(std_scale_list) - np.array(std_scale_reg_list)) + ), + vmax=np.max( + np.abs(np.array(std_scale_list) - np.array(std_scale_reg_list)) + ), + ) + plt.colorbar() + plt.xlabel("n") + plt.ylabel("p") + plt.title( + "Standard deviation scale difference between optimized and regression" + ) + plt.tight_layout() + plt.savefig("test2_04_error_difference.png", dpi=300) diff --git a/src/CompNeuroPy/examples/model_configurator/test2_deap_opt.py b/src/CompNeuroPy/examples/model_configurator/test2_deap_opt.py new file mode 100644 index 0000000..9cdf4dd --- /dev/null +++ b/src/CompNeuroPy/examples/model_configurator/test2_deap_opt.py @@ -0,0 +1,91 @@ +from CompNeuroPy import DeapCma, load_variables, save_variables +import numpy as np +from test2 import deap_opt_path, curve_fit_func +import sys + +# Load the variables +variables = load_variables( + name_list=[ + "x", + "y", + "z", + ], + path=deap_opt_path, +) +x = variables["x"] +y = variables["y"] +z = variables["z"] + + +param_names = [ + "g0", + "g1", + "g2", + "g3", + "g4", + "g5", + "g6", + "g7", + "g8", + "g9", + "g10", + "g11", + "g12", + "g13", + "g14", + "p0", + # "p1", + # "p2", + # "p3", + # "p4", + # "p5", + # "p6", + # "p7", + # "p8", + # "p9", + # 
"p10", + # "p11", + # "p12", + # "p13", + # "p14", + # "p15", + # "p16", + # "p17", + # "p18", + # "p19", + # "p20", +] + + +def curve_fit_evaluate_function(population): + loss_list = [] + ### the population is a list of individuals which are lists of parameters + for individual in population: + loss_of_individual = curve_fit_objective_function(individual) + loss_list.append((loss_of_individual,)) + return loss_list + + +def curve_fit_objective_function(individual): + is_data = curve_fit_func((x, y), *individual) + target_data = z + return np.sum((is_data - target_data) ** 2) + + +deap_cma = DeapCma( + lower=np.array([-1] * len(param_names)), + upper=np.array([1] * len(param_names)), + evaluate_function=curve_fit_evaluate_function, + param_names=param_names, + hard_bounds=False, + display_progress_bar=False, +) +deap_cma_result = deap_cma.run(max_evals=2000) +popt = [deap_cma_result[param_name] for param_name in param_names] + +# Save the variables +save_variables( + name_list=[f"popt_{sys.argv[1]}", f"best_fitness_{sys.argv[1]}"], + variable_list=[popt, deap_cma_result["best_fitness"]], + path=deap_opt_path, +) diff --git a/src/CompNeuroPy/extra_functions.py b/src/CompNeuroPy/extra_functions.py index ae455d2..4526d69 100644 --- a/src/CompNeuroPy/extra_functions.py +++ b/src/CompNeuroPy/extra_functions.py @@ -1102,7 +1102,12 @@ def _deap_ea_generate_update( halloffame.update(population) ### Update the strategy with the evaluated individuals - toolbox.update(population) + try: + toolbox.update(population) + except: + ### stop if update fails + early_stop = True + break ### Stop if diagD is too small if np.min(strategy.diagD) < 1e-5: From ef0dc175a7151f31882ae0eabde98469fe924782 Mon Sep 17 00:00:00 2001 From: olmai Date: Thu, 27 Jun 2024 11:12:38 +0200 Subject: [PATCH 09/21] model_configurator: oreoared opt of mean shift/std scale for hinton --- .../examples/model_configurator/test2.py | 141 ++++++++---------- ..._deap_opt.py => test2_deap_opt_regress.py} | 6 +- 
.../test2_deap_opt_transform.py | 64 ++++++++ 3 files changed, 131 insertions(+), 80 deletions(-) rename src/CompNeuroPy/examples/model_configurator/{test2_deap_opt.py => test2_deap_opt_regress.py} (93%) create mode 100644 src/CompNeuroPy/examples/model_configurator/test2_deap_opt_transform.py diff --git a/src/CompNeuroPy/examples/model_configurator/test2.py b/src/CompNeuroPy/examples/model_configurator/test2.py index ea3c7e3..6804fa0 100644 --- a/src/CompNeuroPy/examples/model_configurator/test2.py +++ b/src/CompNeuroPy/examples/model_configurator/test2.py @@ -133,7 +133,8 @@ def log_normal_1d(x, amp, mean, sig): return (amp / x) * np.exp(-((np.log(x) - mean) ** 2) / (2 * sig**2)) -deap_opt_path = "test2_deap_opt/" +deap_opt_regress_path = "test2_deap_opt_regress/" +deap_opt_transform_path = "test2_deap_opt_transform/" def curve_fit_func( @@ -248,14 +249,14 @@ def plot_2d_curve_fit_regression( # Fit the curve_fit regression model ### do opt with deap cma in other script save_variables( - name_list=["x", "y", "z"], variable_list=[x, y, z], path=deap_opt_path + name_list=["x", "y", "z"], variable_list=[x, y, z], path=deap_opt_regress_path ) - n_jobs = 4 - n_runs = 5 * n_jobs + n_jobs = 15 + n_runs = 100 * n_jobs args_list = [[f"{parallel_id}"] for parallel_id in range(n_runs)] run_script_parallel( - script_path="test2_deap_opt.py", - n_jobs=4, + script_path="test2_deap_opt_regress.py", + n_jobs=n_jobs, args_list=args_list, ) ### get best parameters @@ -263,13 +264,13 @@ def plot_2d_curve_fit_regression( best_parallel_id = 0 for parallel_id in range(n_runs): loaded_variables = load_variables( - name_list=[f"best_fitness_{parallel_id}"], path=deap_opt_path + name_list=[f"best_fitness_{parallel_id}"], path=deap_opt_regress_path ) if loaded_variables[f"best_fitness_{parallel_id}"] < best_fitness: best_fitness = loaded_variables[f"best_fitness_{parallel_id}"] best_parallel_id = parallel_id loaded_variables = load_variables( - name_list=[f"popt_{best_parallel_id}"], 
path=deap_opt_path + name_list=[f"popt_{best_parallel_id}"], path=deap_opt_regress_path ) popt = loaded_variables[f"popt_{best_parallel_id}"] @@ -449,7 +450,7 @@ def generate_samples(n, p, m, mean_shift=0, std_scale=1): return binomial_sample, normal_sample -def get_error_of_samples(binomial_sample, normal_sample, m): +def get_error_of_samples(binomial_sample, normal_sample, n, m): diff = ( np.histogram(binomial_sample, bins=n + 1, range=(-0.5, n + 0.5))[0] - np.histogram(normal_sample, bins=n + 1, range=(-0.5, n + 0.5))[0] @@ -457,33 +458,17 @@ def get_error_of_samples(binomial_sample, normal_sample, m): return np.sum(np.abs(diff)) / (2 * m) -def objective_function(mean_shift, std_scale): +def objective_function(mean_shift, std_scale, n, p, m): # Generate data samples binomial_sample, normal_sample = generate_samples( - n=N, p=P, m=M, mean_shift=mean_shift, std_scale=std_scale + n=n, p=p, m=m, mean_shift=mean_shift, std_scale=std_scale ) # Calculate error - error = get_error_of_samples(binomial_sample, normal_sample, m=M) + error = get_error_of_samples(binomial_sample, normal_sample, n=n, m=M) return error -def objective_function_for_minimize(x): - # print(f"P: {P}, N: {N}, mean_shift: {x[0]}, std_scale: {x[1]}") - return objective_function(mean_shift=x[0], std_scale=x[1]) - - -def evaluate_function(population): - loss_list = [] - ### the population is a list of individuals which are lists of parameters - for individual in population: - loss_of_individual = objective_function( - mean_shift=individual[0], std_scale=individual[1] - ) - loss_list.append((loss_of_individual,)) - return loss_list - - def logarithmic_distribution(start, end, num_points): """ Generate a list of logarithmically spaced points between a start and end point. @@ -514,8 +499,11 @@ def logarithmic_distribution(start, end, num_points): ### approximation of the binomial distribution. ### I think one can shift the mean and scale the standard deviation depending on the p ### and n values. 
I will try to optimize the shift and scale for each n and p value. + +# number of samples +M = 10000 if __name__ == "__main__": - OPTIMIZE = False + OPTIMIZE = True PLOT_OPTIMIZED = True USE_REGRESSION = False PLOT_REGRESSION = False @@ -524,26 +512,14 @@ def logarithmic_distribution(start, end, num_points): n_arr = logarithmic_distribution(10, 1000, 20).astype(int) p_arr = logarithmic_distribution(0.001, 0.1, 10) - ### bounds for optimized parameters - shift_mean_bounds = [-1, 1] - scale_std_bounds = [0.5, 2] - lb = np.array([shift_mean_bounds[0], scale_std_bounds[0]]) - ub = np.array([shift_mean_bounds[1], scale_std_bounds[1]]) - # number of samples - M = 10000 - ### 1st get errors for all n and p values without optimization p_list = [] n_list = [] error_list = [] for p in p_arr: for n in n_arr: - ### set the global variables probability of success and number of trials - P = p - N = n - ### get the error without optimization - error = objective_function(mean_shift=0, std_scale=1) + error = objective_function(mean_shift=0, std_scale=1, n=n, p=p, m=M) error_list.append(error) ### store the results @@ -582,37 +558,47 @@ def logarithmic_distribution(start, end, num_points): std_scale_list = [] error_improved_list = [] for p, n in zip(p_list, n_list): - ### set the global variables probability of success and number of trials - P = p - N = n - - # ### optimize the mean shift and standard deviation scale using scipy minimize - # result = minimize( - # objective_function_for_minimize, - # x0=[0, 1], - # bounds=Bounds(lb, ub), - # method="Nelder-Mead", - # ) - # mean_shift = result.x[0] - # std_scale = result.x[1] - # error_improved = result.fun - - ### create an instance of the DeapCma class - deap_cma = DeapCma( - lower=lb, - upper=ub, - evaluate_function=evaluate_function, - param_names=["mean_shift", "std_scale"], - hard_bounds=True, + ### save p and n to be availyble in optimization script + save_variables( + variable_list=[p, n], + name_list=["p", "n"], + 
path=deap_opt_transform_path, ) - - ### run the optimization - deap_cma_result = deap_cma.run(max_evals=1000) - - ### get the optimized parameters and best error - mean_shift = deap_cma_result["mean_shift"] - std_scale = deap_cma_result["std_scale"] - error_improved = deap_cma_result["best_fitness"] + ### run optimization + n_jobs = 15 + n_runs = 100 * n_jobs + args_list = [[f"{parallel_id}"] for parallel_id in range(n_runs)] + run_script_parallel( + script_path="test2_deap_opt_transform.py", + n_jobs=n_jobs, + args_list=args_list, + ) + ### get best parameters + best_fitness = 1e6 + best_parallel_id = 0 + for parallel_id in range(n_runs): + loaded_variables = load_variables( + name_list=[ + f"error_improved_{parallel_id}", + ], + path=deap_opt_transform_path, + ) + error_improved = loaded_variables[f"error_improved_{parallel_id}"] + + if error_improved < best_fitness: + best_fitness = error_improved + best_parallel_id = parallel_id + loaded_variables = load_variables( + name_list=[ + f"mean_shift_{best_parallel_id}", + f"std_scale_{best_parallel_id}", + f"error_improved_{best_parallel_id}", + ], + path=deap_opt_transform_path, + ) + mean_shift = loaded_variables[f"mean_shift_{best_parallel_id}"] + std_scale = loaded_variables[f"std_scale_{best_parallel_id}"] + error_improved = loaded_variables[f"error_improved_{best_parallel_id}"] ### store the results error_improved_list.append(error_improved) @@ -652,15 +638,11 @@ def logarithmic_distribution(start, end, num_points): std_scale_reg_list = [] error_improved_reg_list = [] for p, n in zip(p_list, n_list): - ### set the global variables probability of success and number of trials - P = p - N = n - ### get the optimized parameters and best error mean_shift = mean_shift_regression(n, p) std_scale = std_scale_regression(n, p) error_improved = objective_function( - mean_shift=mean_shift, std_scale=std_scale + mean_shift=mean_shift, std_scale=std_scale, n=n, p=p, m=M ) ### store the results @@ -725,6 +707,11 @@ def 
logarithmic_distribution(start, end, num_points): error_improved_list = loaded_variables["error_improved_list"] mean_shift_list = loaded_variables["mean_shift_list"] std_scale_list = loaded_variables["std_scale_list"] + ### TODO tmp + error_improved_list = np.random.rand(len(error_improved_list)) + mean_shift_list = np.random.rand(len(mean_shift_list)) + std_scale_list = np.random.rand(len(std_scale_list)) + ### TODO tmp error_change_arr = np.array(error_improved_list) - np.array(error_list) improvement_arr = -np.clip(error_change_arr, None, 0) improvement_arr_norm = improvement_arr / np.max(improvement_arr) diff --git a/src/CompNeuroPy/examples/model_configurator/test2_deap_opt.py b/src/CompNeuroPy/examples/model_configurator/test2_deap_opt_regress.py similarity index 93% rename from src/CompNeuroPy/examples/model_configurator/test2_deap_opt.py rename to src/CompNeuroPy/examples/model_configurator/test2_deap_opt_regress.py index 9cdf4dd..ac589ce 100644 --- a/src/CompNeuroPy/examples/model_configurator/test2_deap_opt.py +++ b/src/CompNeuroPy/examples/model_configurator/test2_deap_opt_regress.py @@ -1,6 +1,6 @@ from CompNeuroPy import DeapCma, load_variables, save_variables import numpy as np -from test2 import deap_opt_path, curve_fit_func +from test2 import deap_opt_regress_path, curve_fit_func import sys # Load the variables @@ -10,7 +10,7 @@ "y", "z", ], - path=deap_opt_path, + path=deap_opt_regress_path, ) x = variables["x"] y = variables["y"] @@ -87,5 +87,5 @@ def curve_fit_objective_function(individual): save_variables( name_list=[f"popt_{sys.argv[1]}", f"best_fitness_{sys.argv[1]}"], variable_list=[popt, deap_cma_result["best_fitness"]], - path=deap_opt_path, + path=deap_opt_regress_path, ) diff --git a/src/CompNeuroPy/examples/model_configurator/test2_deap_opt_transform.py b/src/CompNeuroPy/examples/model_configurator/test2_deap_opt_transform.py new file mode 100644 index 0000000..e5c685a --- /dev/null +++ 
b/src/CompNeuroPy/examples/model_configurator/test2_deap_opt_transform.py @@ -0,0 +1,64 @@ +from CompNeuroPy import DeapCma, load_variables, save_variables +import numpy as np +from test2 import deap_opt_transform_path, objective_function, M +import sys + +# # Load the variables +# variables = load_variables( +# name_list=[ +# "n", +# "p", +# ], +# path=deap_opt_transform_path, +# ) +# n = variables["n"] +# p = variables["p"] + + +# def evaluate_function(population): +# loss_list = [] +# ### the population is a list of individuals which are lists of parameters +# for individual in population: +# loss_of_individual = objective_function( +# mean_shift=individual[0], std_scale=individual[1], n=n, p=p, m=M +# ) +# loss_list.append((loss_of_individual,)) +# return loss_list + + +# ### bounds for optimized parameters +# shift_mean_bounds = [-1, 1] +# scale_std_bounds = [0.5, 2] +# lb = np.array([shift_mean_bounds[0], scale_std_bounds[0]]) +# ub = np.array([shift_mean_bounds[1], scale_std_bounds[1]]) + +# ### create an instance of the DeapCma class +# deap_cma = DeapCma( +# lower=lb, +# upper=ub, +# evaluate_function=evaluate_function, +# param_names=["mean_shift", "std_scale"], +# hard_bounds=True, +# ) + +# ### run the optimization +# deap_cma_result = deap_cma.run(max_evals=1000) + +# ### get the optimized parameters and best error +# mean_shift = deap_cma_result["mean_shift"] +# std_scale = deap_cma_result["std_scale"] +# error_improved = deap_cma_result["best_fitness"] +mean_shift = 0.0 +std_scale = 1.0 +error_improved = 0.0 + +# Save the variables +save_variables( + name_list=[ + f"mean_shift_{sys.argv[1]}", + f"std_scale_{sys.argv[1]}", + f"error_improved_{sys.argv[1]}", + ], + variable_list=[mean_shift, std_scale, error_improved], + path=deap_opt_transform_path, +) From 11bd9e624f75015cda8fd6b5ff441c5f4358c1f0 Mon Sep 17 00:00:00 2001 From: olmai Date: Thu, 27 Jun 2024 11:15:26 +0200 Subject: [PATCH 10/21] . 
--- .../test2_deap_opt_transform.py | 89 +++++++++---------- 1 file changed, 43 insertions(+), 46 deletions(-) diff --git a/src/CompNeuroPy/examples/model_configurator/test2_deap_opt_transform.py b/src/CompNeuroPy/examples/model_configurator/test2_deap_opt_transform.py index e5c685a..5f7d6ef 100644 --- a/src/CompNeuroPy/examples/model_configurator/test2_deap_opt_transform.py +++ b/src/CompNeuroPy/examples/model_configurator/test2_deap_opt_transform.py @@ -3,54 +3,51 @@ from test2 import deap_opt_transform_path, objective_function, M import sys -# # Load the variables -# variables = load_variables( -# name_list=[ -# "n", -# "p", -# ], -# path=deap_opt_transform_path, -# ) -# n = variables["n"] -# p = variables["p"] - - -# def evaluate_function(population): -# loss_list = [] -# ### the population is a list of individuals which are lists of parameters -# for individual in population: -# loss_of_individual = objective_function( -# mean_shift=individual[0], std_scale=individual[1], n=n, p=p, m=M -# ) -# loss_list.append((loss_of_individual,)) -# return loss_list - - -# ### bounds for optimized parameters -# shift_mean_bounds = [-1, 1] -# scale_std_bounds = [0.5, 2] -# lb = np.array([shift_mean_bounds[0], scale_std_bounds[0]]) -# ub = np.array([shift_mean_bounds[1], scale_std_bounds[1]]) - -# ### create an instance of the DeapCma class -# deap_cma = DeapCma( -# lower=lb, -# upper=ub, -# evaluate_function=evaluate_function, -# param_names=["mean_shift", "std_scale"], -# hard_bounds=True, -# ) +# Load the variables +variables = load_variables( + name_list=[ + "n", + "p", + ], + path=deap_opt_transform_path, +) +n = variables["n"] +p = variables["p"] + + +def evaluate_function(population): + loss_list = [] + ### the population is a list of individuals which are lists of parameters + for individual in population: + loss_of_individual = objective_function( + mean_shift=individual[0], std_scale=individual[1], n=n, p=p, m=M + ) + loss_list.append((loss_of_individual,)) + return 
loss_list + + +### bounds for optimized parameters +shift_mean_bounds = [-1, 1] +scale_std_bounds = [0.5, 2] +lb = np.array([shift_mean_bounds[0], scale_std_bounds[0]]) +ub = np.array([shift_mean_bounds[1], scale_std_bounds[1]]) + +### create an instance of the DeapCma class +deap_cma = DeapCma( + lower=lb, + upper=ub, + evaluate_function=evaluate_function, + param_names=["mean_shift", "std_scale"], + hard_bounds=True, +) -# ### run the optimization -# deap_cma_result = deap_cma.run(max_evals=1000) +### run the optimization +deap_cma_result = deap_cma.run(max_evals=1000) -# ### get the optimized parameters and best error -# mean_shift = deap_cma_result["mean_shift"] -# std_scale = deap_cma_result["std_scale"] -# error_improved = deap_cma_result["best_fitness"] -mean_shift = 0.0 -std_scale = 1.0 -error_improved = 0.0 +### get the optimized parameters and best error +mean_shift = deap_cma_result["mean_shift"] +std_scale = deap_cma_result["std_scale"] +error_improved = deap_cma_result["best_fitness"] # Save the variables save_variables( From f0b558f20060f7501b8dc8f93b36a97800f64955 Mon Sep 17 00:00:00 2001 From: olimaol Date: Fri, 9 Aug 2024 06:59:03 +0200 Subject: [PATCH 11/21] system_functions: create_data_raw_folder - ask whether to delete the existing folder --- src/CompNeuroPy/system_functions.py | 31 ++++++++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/src/CompNeuroPy/system_functions.py b/src/CompNeuroPy/system_functions.py index 35d6055..1af6d5b 100644 --- a/src/CompNeuroPy/system_functions.py +++ b/src/CompNeuroPy/system_functions.py @@ -11,6 +11,7 @@ import signal from typing import List import threading +import sys def clear_dir(path): @@ -334,6 +335,11 @@ def _is_git_repo(): return False +def _timeout_handler(signum, frame): + print("\nTime's up! No response received. 
Exiting the program.") + sys.exit() + + def create_data_raw_folder( folder_name: str, **kwargs, @@ -385,7 +391,30 @@ def create_data_raw_folder( """ ### check if folder already exists if os.path.isdir(folder_name): - raise FileExistsError(f"{folder_name} already exists") + print(f"'{folder_name}' already exists.") + + # Set the signal for timeout + signal.signal(signal.SIGALRM, _timeout_handler) + while True: + + signal.alarm(60) + user_input = input( + "Do you want to (q)uit or (d)elete the folder and continue? " + ).lower() + signal.alarm(0) + + if user_input == "q": + print("Exiting the program.") + raise FileExistsError(f"'{folder_name}' already exists") + elif user_input == "d": + print(f"Deleting '{folder_name}' and continuing.") + shutil.rmtree(folder_name) + break + else: + print( + "Invalid input. Please enter 'q' to quit or 'd' to delete and continue." + ) + ### create folder create_dir(folder_name) From bedb58e711ca171f13ba2d71538c61f7aeca210e Mon Sep 17 00:00:00 2001 From: olmai Date: Mon, 12 Aug 2024 16:47:34 +0200 Subject: [PATCH 12/21] model_configurator: started restructure fit mean shift and std scale --- .gitignore | 1 + .../examples/model_configurator/test2.py | 347 +++++++++++++----- src/CompNeuroPy/system_functions.py | 5 + 3 files changed, 253 insertions(+), 100 deletions(-) diff --git a/.gitignore b/.gitignore index 69bc5cd..a9c60ed 100644 --- a/.gitignore +++ b/.gitignore @@ -17,3 +17,4 @@ dist/ *.pkl *json *.log +*test2_data*/ diff --git a/src/CompNeuroPy/examples/model_configurator/test2.py b/src/CompNeuroPy/examples/model_configurator/test2.py index 6804fa0..8cb1215 100644 --- a/src/CompNeuroPy/examples/model_configurator/test2.py +++ b/src/CompNeuroPy/examples/model_configurator/test2.py @@ -1,7 +1,14 @@ import numpy as np import matplotlib.pyplot as plt import scipy.stats as stats -from CompNeuroPy import DeapCma, save_variables, load_variables, run_script_parallel +from CompNeuroPy import ( + DeapCma, + save_variables, + 
load_variables, + run_script_parallel, + create_data_raw_folder, + create_dir, +) from scipy.optimize import minimize, Bounds from scipy.interpolate import griddata from sklearn.preprocessing import PolynomialFeatures @@ -435,12 +442,29 @@ def plot_2d_interpolated_image( ) -def generate_samples(n, p, m, mean_shift=0, std_scale=1): +def generate_samples(n, p, s, mean_shift=0, std_scale=1): + """ + Generate samples of a binomial and a normal distribution. The normal distribution is + generated to best approximate the binomial distribution. Further the normal + distribution is shifted by mean_shift and scaled by std_scale. + + Args: + n (int): + The number of trials of the binomial distribution. + p (float): + The probability of success of the binomial distribution. + s (int): + The sample size for both distributions. + mean_shift (float): + The shift of the mean of the normal distribution. + std_scale (float): + The scaling of the standard deviation of the normal distribution. + """ # Generate data samples - binomial_sample = np.random.binomial(n, p, m) + binomial_sample = np.random.binomial(n, p, s) mean = n * p std_dev = np.sqrt(n * p * (1 - p)) - normal_sample = np.random.normal(mean + mean_shift, std_dev * std_scale, m) + normal_sample = np.random.normal(mean + mean_shift, std_dev * std_scale, s) ### round and clip the normal sample normal_sample = np.round(normal_sample) @@ -450,36 +474,70 @@ def generate_samples(n, p, m, mean_shift=0, std_scale=1): return binomial_sample, normal_sample -def get_error_of_samples(binomial_sample, normal_sample, n, m): +def get_difference_of_samples(binomial_sample, normal_sample, n): + """ + Calculate the difference between samples of a binomial and a normal distribution. + + Args: + binomial_sample (array): + The sample of the binomial distribution. + normal_sample (array): + The sample of the normal distribution. + n (int): + The number of trials of the binomial distribution. 
+ """ diff = ( np.histogram(binomial_sample, bins=n + 1, range=(-0.5, n + 0.5))[0] - np.histogram(normal_sample, bins=n + 1, range=(-0.5, n + 0.5))[0] ) - return np.sum(np.abs(diff)) / (2 * m) + return np.sum(np.abs(diff)) / (2 * len(binomial_sample)) -def objective_function(mean_shift, std_scale, n, p, m): +def difference_binomial_normal(mean_shift, std_scale, n, p, s): + """ + Calculate the difference between samples of a binomial and a normal distribution. + The binomial distribution is generated with parameters n and p. + The normal distribution is generated to best approximate the binomial distribution. + Further the normal distribution is shifted by mean_shift and scaled by std_scale. + + Args: + mean_shift (float): + The shift of the mean of the normal distribution. + std_scale (float): + The scaling of the standard deviation of the normal distribution. + n (int): + The number of trials of the binomial distribution. + p (float): + The probability of success of the binomial distribution. + s (int): + The sample size for both distributions. + """ # Generate data samples binomial_sample, normal_sample = generate_samples( - n=n, p=p, m=m, mean_shift=mean_shift, std_scale=std_scale + n=n, p=p, s=s, mean_shift=mean_shift, std_scale=std_scale ) - # Calculate error - error = get_error_of_samples(binomial_sample, normal_sample, n=n, m=M) - return error + # Calculate difference + diff = get_difference_of_samples(binomial_sample, normal_sample, n=n) + return diff -def logarithmic_distribution(start, end, num_points): +def logarithmic_arange(start, end, num_points): """ - Generate a list of logarithmically spaced points between a start and end point. + Generate a list of logarithmically spaced points between a start and end point. The + smaller side of the range is denser with points. - Parameters: - start (float): The starting point of the distribution. - end (float): The ending point of the distribution. - num_points (int): The number of points to generate. 
+ Args: + start (float): + The starting point of the distribution. + end (float): + The ending point of the distribution. + num_points (int): + The number of points to generate. Returns: - list: A list of logarithmically spaced points. + points (list): + A list of logarithmically spaced points. """ if start <= 0 or end <= 0: raise ValueError("Start and end points must be positive numbers.") @@ -500,17 +558,116 @@ def logarithmic_distribution(start, end, num_points): ### I think one can shift the mean and scale the standard deviation depending on the p ### and n values. I will try to optimize the shift and scale for each n and p value. -# number of samples -M = 10000 +### global paramters +COMPARE_ORIGINAL = True +OPTIMIZE = True +REGRESS = True +PLOT_COMPARE_ORIGINAL = True +PLOT_OPTIMIZE = True +PLOT_REGRESS = True +COMPARE_ORIGINAL_FOLDER = "test2_data_compare_original" +PLOTS_FOLDER = "test2_plots" +S = 10000 + if __name__ == "__main__": - OPTIMIZE = True + + ### TODO: restructure this thing + + # 1st compare binomial and normal samples for various n and p values, save: p_list, n_list and diff_list + # 2nd optimize mean shift and std scale for each n and p value and get improved error, save: mean_shift_list, std_scale_list and error_improved_list + # 3rd make a 2D regression for the optimized mean shift and std scale, get mean_shift_regress(n, p) and std_scale_regress(n, p), save: the optimized parameters of the regression equations + # 4th plot: (1) error depending on n and p, (2) optimized mean shift and std scale depending on n and p and corresponding error improvement, (3) regressed mean shift and std scale depending on n and p and corresponding error improvement + + if COMPARE_ORIGINAL: # TODO implement seed for rng + ### create the save folder + folder_name = create_data_raw_folder( + COMPARE_ORIGINAL_FOLDER, + COMPARE_ORIGINAL=COMPARE_ORIGINAL, + OPTIMIZE=OPTIMIZE, + REGRESS=REGRESS, + PLOT_COMPARE_ORIGINAL=PLOT_COMPARE_ORIGINAL, + 
PLOT_OPTIMIZE=PLOT_OPTIMIZE, + PLOT_REGRESS=PLOT_REGRESS, + COMPARE_ORIGINAL_FOLDER=COMPARE_ORIGINAL_FOLDER, + PLOTS_FOLDER=PLOTS_FOLDER, + S=S, + ) + + ### create the n/p pairs + n_arr = logarithmic_arange(10, 1000, 20).astype(int) + p_arr = logarithmic_arange(0.001, 0.1, 10) + + ### get difference between binomial and normal distribution for each n/p pair + p_list = [] + n_list = [] + diff_list = [] + for p in p_arr: + for n in n_arr: + ### get the error without optimization + error = difference_binomial_normal( + mean_shift=0, std_scale=1, n=n, p=p, s=S + ) + diff_list.append(error) + + ### store the results + p_list.append(p) + n_list.append(n) + + ### save variables + save_variables( + variable_list=[ + p_list, + n_list, + diff_list, + ], + name_list=[ + "p_list", + "n_list", + "diff_list", + ], + path=folder_name, + ) + + if PLOT_COMPARE_ORIGINAL: + ### load the data + loaded_variables = load_variables( + name_list=[ + "p_list", + "n_list", + "diff_list", + ], + path=COMPARE_ORIGINAL_FOLDER, + ) + ### plot the data + plt.figure(figsize=(6.4 * 2, 4.8 * 2)) + plt.subplot(1, 1, 1) + plot_2d_interpolated_image( + x=loaded_variables["n_list"], + y=loaded_variables["p_list"], + z=loaded_variables["diff_list"], + vmin=0, + vmax=np.max(loaded_variables["diff_list"]), + ) + plt.colorbar() + plt.xlabel("n") + plt.ylabel("p") + plt.title( + f"Difference original\n(max: {np.max(loaded_variables['diff_list'])})" + ) + plt.tight_layout() + create_dir(PLOTS_FOLDER) + plt.savefig(f"{PLOTS_FOLDER}/difference_original.png", dpi=300) + + quit() + + OPTIMIZE = False PLOT_OPTIMIZED = True USE_REGRESSION = False PLOT_REGRESSION = False ### 1st optimize mean shift and std scale for each n and p value - n_arr = logarithmic_distribution(10, 1000, 20).astype(int) - p_arr = logarithmic_distribution(0.001, 0.1, 10) + n_arr = logarithmic_arange(10, 1000, 20).astype(int) + p_arr = logarithmic_arange(0.001, 0.1, 10) ### 1st get errors for all n and p values without optimization p_list = [] 
@@ -541,6 +698,20 @@ def logarithmic_distribution(start, end, num_points): path="data_optimize_binomial_normal", ) + ### plot the original error + # original error -> interpolation plot + plt.figure(figsize=(6.4 * 2, 4.8 * 2)) + plt.subplot(1, 1, 1) + plot_2d_interpolated_image( + x=n_list, y=p_list, z=error_list, vmin=0, vmax=np.max(error_list) + ) + plt.colorbar() + plt.xlabel("n") + plt.ylabel("p") + plt.title(f"Error original\n(max: {np.max(error_list)})") + plt.tight_layout() + plt.savefig("test2_01_error_original.png", dpi=300) + ### 2nd optimize mean shift and std scale for each n and p value and get improved error if OPTIMIZE: loaded_variables = load_variables( @@ -620,77 +791,8 @@ def logarithmic_distribution(start, end, num_points): path="data_optimize_binomial_normal", ) - ### 3rd use regression for mean shift and std scale and recalculate the improved error - if USE_REGRESSION: - ### load the optimized parameters and corresponding original and optimized errors - loaded_variables = load_variables( - name_list=[ - "p_list", - "n_list", - ], - path="data_optimize_binomial_normal", - ) - p_list = loaded_variables["p_list"] - n_list = loaded_variables["n_list"] - - ### use regression equations to recalculate mean shift and std scale - mean_shift_reg_list = [] - std_scale_reg_list = [] - error_improved_reg_list = [] - for p, n in zip(p_list, n_list): - ### get the optimized parameters and best error - mean_shift = mean_shift_regression(n, p) - std_scale = std_scale_regression(n, p) - error_improved = objective_function( - mean_shift=mean_shift, std_scale=std_scale, n=n, p=p, m=M - ) - - ### store the results - error_improved_reg_list.append(error_improved) - mean_shift_reg_list.append(mean_shift) - std_scale_reg_list.append(std_scale) - - ### save variables - save_variables( - variable_list=[ - mean_shift_reg_list, - std_scale_reg_list, - error_improved_reg_list, - ], - name_list=[ - "mean_shift_reg_list", - "std_scale_reg_list", - "error_improved_reg_list", 
- ], - path="data_optimize_binomial_normal", - ) - - ### 4th plot the original error - # original error -> interpolation plot - loaded_variables = load_variables( - name_list=[ - "p_list", - "n_list", - "error_list", - ], - path="data_optimize_binomial_normal", - ) - p_list = loaded_variables["p_list"] - n_list = loaded_variables["n_list"] - error_list = loaded_variables["error_list"] - plt.figure(figsize=(6.4 * 2, 4.8 * 2)) - plt.subplot(1, 1, 1) - plot_2d_interpolated_image( - x=n_list, y=p_list, z=error_list, vmin=0, vmax=np.max(error_list) - ) - plt.colorbar() - plt.xlabel("n") - plt.ylabel("p") - plt.title(f"Error original\n(max: {np.max(error_list)})") - plt.tight_layout() - plt.savefig("test2_01_error_original.png", dpi=300) - - ### 5th plot the optimized error with optimized mean shift and std scale + ### 4th plot the optimized error with optimized mean shift and std scale + ### also calculate the ŕegression for the optimized mean shift and std scale if PLOT_OPTIMIZED: # fitting improved error -> interpolation plot # fitting improvement -> interpolation plot @@ -707,11 +809,11 @@ def logarithmic_distribution(start, end, num_points): error_improved_list = loaded_variables["error_improved_list"] mean_shift_list = loaded_variables["mean_shift_list"] std_scale_list = loaded_variables["std_scale_list"] - ### TODO tmp - error_improved_list = np.random.rand(len(error_improved_list)) - mean_shift_list = np.random.rand(len(mean_shift_list)) - std_scale_list = np.random.rand(len(std_scale_list)) - ### TODO tmp + # ### TODO tmp + # error_improved_list = np.random.rand(len(error_improved_list)) + # mean_shift_list = np.random.rand(len(mean_shift_list)) + # std_scale_list = np.random.rand(len(std_scale_list)) + # ### TODO tmp error_change_arr = np.array(error_improved_list) - np.array(error_list) improvement_arr = -np.clip(error_change_arr, None, 0) improvement_arr_norm = improvement_arr / np.max(improvement_arr) @@ -807,7 +909,52 @@ def logarithmic_distribution(start, 
end, num_points): plt.tight_layout() plt.savefig("test2_02_error_optimized.png", dpi=300) - ### 6th plot the regression error with regressed mean shift and std scale and compare it + ### 3rd use regression for mean shift and std scale and recalculate the improved error + if USE_REGRESSION: + ### load the optimized parameters and corresponding original and optimized errors + loaded_variables = load_variables( + name_list=[ + "p_list", + "n_list", + ], + path="data_optimize_binomial_normal", + ) + p_list = loaded_variables["p_list"] + n_list = loaded_variables["n_list"] + + ### use regression equations to recalculate mean shift and std scale + mean_shift_reg_list = [] + std_scale_reg_list = [] + error_improved_reg_list = [] + for p, n in zip(p_list, n_list): + ### get the optimized parameters and best error + mean_shift = mean_shift_regression(n, p) + std_scale = std_scale_regression(n, p) + error_improved = objective_function( + mean_shift=mean_shift, std_scale=std_scale, n=n, p=p, m=M + ) + + ### store the results + error_improved_reg_list.append(error_improved) + mean_shift_reg_list.append(mean_shift) + std_scale_reg_list.append(std_scale) + + ### save variables + save_variables( + variable_list=[ + mean_shift_reg_list, + std_scale_reg_list, + error_improved_reg_list, + ], + name_list=[ + "mean_shift_reg_list", + "std_scale_reg_list", + "error_improved_reg_list", + ], + path="data_optimize_binomial_normal", + ) + + ### 5th plot the regression error with regressed mean shift and std scale and compare it ### with the optimized error if PLOT_REGRESSION: # regression improved error -> interpolation plot diff --git a/src/CompNeuroPy/system_functions.py b/src/CompNeuroPy/system_functions.py index 1af6d5b..f51bd2c 100644 --- a/src/CompNeuroPy/system_functions.py +++ b/src/CompNeuroPy/system_functions.py @@ -371,6 +371,10 @@ def create_data_raw_folder( **kwargs (Any, optional): Global variables of the caller script. 
+ Returns: + folder_name (str): + Name of the created folder. + Example: ```python from CompNeuroPy import create_data_raw_folder @@ -609,6 +613,7 @@ def create_data_raw_folder( f.write("# CompNeuroPy was installed locally with commit:\n") compneuropy_commit = compneuropy_git_log[0].replace("\n", "") f.write(f"# {compneuropy_commit}") + return folder_name def _find_folder_with_prefix(base_path, prefix): From 748d05235c0c9ded28a89a8ff8878cb6995e7f4c Mon Sep 17 00:00:00 2001 From: olimaol Date: Tue, 13 Aug 2024 07:31:35 +0200 Subject: [PATCH 13/21] CompNeuroExp: only set model_state if populations adn/or projections are True create_data_raw_folder: improved question for deleting existing folder --- src/CompNeuroPy/experiment.py | 18 +++++++++++++++++- src/CompNeuroPy/system_functions.py | 8 ++++---- 2 files changed, 21 insertions(+), 5 deletions(-) diff --git a/src/CompNeuroPy/experiment.py b/src/CompNeuroPy/experiment.py index 952c0ce..5d8df91 100644 --- a/src/CompNeuroPy/experiment.py +++ b/src/CompNeuroPy/experiment.py @@ -119,6 +119,12 @@ def reset( reset_kwargs["synapses"] = synapses reset_kwargs["monitors"] = True + if synapses is True and projections is False: + print( + "Warning: synapses=True and projections=False, projections are automatically set to True!" 
+ ) + reset_kwargs["projections"] = True + ### reset CompNeuroMonitors and ANNarchy model if self.monitors is not None: ### there are monitors, therefore use theri reset function @@ -126,7 +132,17 @@ def reset( ### after reset, set the state of the model to the stored state if model_state and self._model_state is not None and model is True: ### if parameters=False, they are not set - mf._set_all_attributes(self._model_state, parameters=parameters) + mf._set_all_attributes( + { + "populations": ( + self._model_state["populations"] if populations else {} + ), + "projections": ( + self._model_state["projections"] if projections else {} + ), + }, + parameters=parameters, + ) elif model is True: if parameters is False: ### if parameters=False, get parameters before reset and set them after diff --git a/src/CompNeuroPy/system_functions.py b/src/CompNeuroPy/system_functions.py index 1af6d5b..d931649 100644 --- a/src/CompNeuroPy/system_functions.py +++ b/src/CompNeuroPy/system_functions.py @@ -399,20 +399,20 @@ def create_data_raw_folder( signal.alarm(60) user_input = input( - "Do you want to (q)uit or (d)elete the folder and continue? " + "You want to delete the folder and continue? (y/n)" ).lower() signal.alarm(0) - if user_input == "q": + if user_input == "n": print("Exiting the program.") raise FileExistsError(f"'{folder_name}' already exists") - elif user_input == "d": + elif user_input == "y": print(f"Deleting '{folder_name}' and continuing.") shutil.rmtree(folder_name) break else: print( - "Invalid input. Please enter 'q' to quit or 'd' to delete and continue." + "Invalid input. Please enter 'y' to delete the folder or 'n' to exit." 
) ### create folder From 28ea91b15a8bdc5582fcf4a43bcc11c18d3ef3d6 Mon Sep 17 00:00:00 2001 From: olimaol Date: Tue, 13 Aug 2024 09:40:14 +0200 Subject: [PATCH 14/21] model_configurator: continued restructure opt mean shift, std scale --- .../examples/model_configurator/test2.py | 338 +++++++++++++----- .../test2_deap_opt_transform.py | 28 +- 2 files changed, 257 insertions(+), 109 deletions(-) diff --git a/src/CompNeuroPy/examples/model_configurator/test2.py b/src/CompNeuroPy/examples/model_configurator/test2.py index 8cb1215..811c5fa 100644 --- a/src/CompNeuroPy/examples/model_configurator/test2.py +++ b/src/CompNeuroPy/examples/model_configurator/test2.py @@ -8,6 +8,7 @@ run_script_parallel, create_data_raw_folder, create_dir, + RNG, ) from scipy.optimize import minimize, Bounds from scipy.interpolate import griddata @@ -141,7 +142,6 @@ def log_normal_1d(x, amp, mean, sig): deap_opt_regress_path = "test2_deap_opt_regress/" -deap_opt_transform_path = "test2_deap_opt_transform/" def curve_fit_func( @@ -410,14 +410,25 @@ def plot_2d_interpolated_image( x, y, z, vmin=None, vmax=None, grid_size=100, method="linear" ): """ - Plots a 2D color-coded image of the data with interpolation and extrapolation. + Plots a 2D color-coded image of "D grid data. - Parameters: - - x: list or array of x coordinates - - y: list or array of y coordinates - - z: list or array of z values corresponding to the (x, y) coordinates - - grid_size: size of the interpolation grid (default: 100) - - method: interpolation method, options are 'linear', 'nearest', 'cubic' (default: 'linear') + Args: + x (array): + The x-coordinates of the data points. + y (array): + The y-coordinates of the data points. + z (array): + The z-values corresponding to the (x, y) coordinates. + vmin (float, optional): + The minimum value for the color scale. If not provided, the minimum value of + z is used. + vmax (float): + The maximum value for the color scale. If not provided, the maximum value of + z is used. 
+ grid_size (int): + The size of the grid for plotting (default: 100). + method (str): + The interpolation method to use (default: "linear"). """ # Define the grid for interpolation xi = np.linspace(min(x), max(x), grid_size) @@ -442,7 +453,7 @@ def plot_2d_interpolated_image( ) -def generate_samples(n, p, s, mean_shift=0, std_scale=1): +def generate_samples(n, p, mean_shift=0, std_scale=1): """ Generate samples of a binomial and a normal distribution. The normal distribution is generated to best approximate the binomial distribution. Further the normal @@ -453,18 +464,16 @@ def generate_samples(n, p, s, mean_shift=0, std_scale=1): The number of trials of the binomial distribution. p (float): The probability of success of the binomial distribution. - s (int): - The sample size for both distributions. mean_shift (float): The shift of the mean of the normal distribution. std_scale (float): The scaling of the standard deviation of the normal distribution. """ # Generate data samples - binomial_sample = np.random.binomial(n, p, s) + binomial_sample = RNG(seed=SEED).rng.binomial(n, p, S) mean = n * p std_dev = np.sqrt(n * p * (1 - p)) - normal_sample = np.random.normal(mean + mean_shift, std_dev * std_scale, s) + normal_sample = RNG(seed=SEED).rng.normal(mean + mean_shift, std_dev * std_scale, S) ### round and clip the normal sample normal_sample = np.round(normal_sample) @@ -493,7 +502,54 @@ def get_difference_of_samples(binomial_sample, normal_sample, n): return np.sum(np.abs(diff)) / (2 * len(binomial_sample)) -def difference_binomial_normal(mean_shift, std_scale, n, p, s): +def difference_binomial_normal_optimize(n, p): + print(f"Optimize for n={n}, p={p}") + ### save p and n to be availyble in optimization script + save_variables( + variable_list=[p, n], + name_list=["p", "n"], + path=OPTIMIZE_FOLDER, + ) + ### run optimization + args_list = [[f"{parallel_id}"] for parallel_id in range(N_RUNS)] + run_script_parallel( + script_path="test2_deap_opt_transform.py", + 
n_jobs=N_JOBS, + args_list=args_list, + ) + ### get best mean_shift and std_scale by loading all optimizations and check best + ### fitness + best_fitness = 1e6 + best_parallel_id = 0 + for parallel_id in range(N_RUNS): + loaded_variables = load_variables( + name_list=[ + f"error_opt_{parallel_id}", + ], + path=OPTIMIZE_FOLDER, + ) + error_opt = loaded_variables[f"error_opt_{parallel_id}"] + print(f"n={n}, p={p}, error_opt_{parallel_id}: {error_opt}") + if error_opt < best_fitness: + best_fitness = error_opt + best_parallel_id = parallel_id + + loaded_variables = load_variables( + name_list=[ + f"mean_shift_opt_{best_parallel_id}", + f"std_scale_opt_{best_parallel_id}", + f"error_opt_{best_parallel_id}", + ], + path=OPTIMIZE_FOLDER, + ) + mean_shift_opt = loaded_variables[f"mean_shift_opt_{best_parallel_id}"] + std_scale_opt = loaded_variables[f"std_scale_opt_{best_parallel_id}"] + error_opt = loaded_variables[f"error_opt_{best_parallel_id}"] + + return mean_shift_opt, std_scale_opt, error_opt + + +def difference_binomial_normal(mean_shift, std_scale, n, p): """ Calculate the difference between samples of a binomial and a normal distribution. The binomial distribution is generated with parameters n and p. @@ -509,12 +565,10 @@ def difference_binomial_normal(mean_shift, std_scale, n, p, s): The number of trials of the binomial distribution. p (float): The probability of success of the binomial distribution. - s (int): - The sample size for both distributions. """ # Generate data samples binomial_sample, normal_sample = generate_samples( - n=n, p=p, s=s, mean_shift=mean_shift, std_scale=std_scale + n=n, p=p, mean_shift=mean_shift, std_scale=std_scale ) # Calculate difference @@ -553,110 +607,200 @@ def logarithmic_arange(start, end, num_points): return points -### TODO I have the problem that for very small p the normal distribution is not a good -### approximation of the binomial distribution. 
-### I think one can shift the mean and scale the standard deviation depending on the p -### and n values. I will try to optimize the shift and scale for each n and p value. - -### global paramters -COMPARE_ORIGINAL = True -OPTIMIZE = True -REGRESS = True -PLOT_COMPARE_ORIGINAL = True -PLOT_OPTIMIZE = True -PLOT_REGRESS = True -COMPARE_ORIGINAL_FOLDER = "test2_data_compare_original" -PLOTS_FOLDER = "test2_plots" -S = 10000 +def plot_optimize(): + """ + Plot the difference between the binomial and normal distribution for various n and + p values. Further plots the optimized mean_shift and std_scale values. Load the data + from the OPTIMIZE_FOLDER and save the plot to the PLOTS_FOLDER. + """ + ### load the data + loaded_variables = load_variables( + name_list=[ + "p_list", + "n_list", + "mean_shift_opt_list", + "std_scale_opt_list", + "diff_opt_list", + ], + path=OPTIMIZE_FOLDER, + ) + ### plot the data + plt.figure(figsize=(6.4 * 2, 4.8 * 2 * 3)) + for idx, title, key in [ + (1, "Mean Shift optimized", "mean_shift_opt_list"), + (2, "Std Scale optimized", "std_scale_opt_list"), + (3, "Difference optimized", "diff_opt_list"), + ]: + plt.subplot(3, 1, idx) + plot_2d_interpolated_image( + x=loaded_variables["n_list"], + y=loaded_variables["p_list"], + z=loaded_variables[key], + vmin=0, + vmax=np.max(loaded_variables[key]), + ) + plt.colorbar() + plt.xlabel("n") + plt.ylabel("p") + plt.title(f"{title}\n(max: {np.max(loaded_variables[key])})") + plt.tight_layout() + create_dir(PLOTS_FOLDER) + plt.savefig(f"{PLOTS_FOLDER}/difference_optimized.png", dpi=300) -if __name__ == "__main__": - ### TODO: restructure this thing +def plot_compare_original(): + """ + Plot the difference between the binomial and normal distribution for various n and + p values. Load the data from the COMPARE_ORIGINAL_FOLDER and save the plot to the + PLOTS_FOLDER. 
+ """ + ### load the data + loaded_variables = load_variables( + name_list=[ + "p_list", + "n_list", + "diff_list", + ], + path=COMPARE_ORIGINAL_FOLDER, + ) + ### plot the data + plt.figure(figsize=(6.4 * 2, 4.8 * 2)) + plt.subplot(1, 1, 1) + plot_2d_interpolated_image( + x=loaded_variables["n_list"], + y=loaded_variables["p_list"], + z=loaded_variables["diff_list"], + vmin=0, + vmax=np.max(loaded_variables["diff_list"]), + ) + plt.colorbar() + plt.xlabel("n") + plt.ylabel("p") + plt.title(f"Difference original\n(max: {np.max(loaded_variables['diff_list'])})") + plt.tight_layout() + create_dir(PLOTS_FOLDER) + plt.savefig(f"{PLOTS_FOLDER}/difference_original.png", dpi=300) - # 1st compare binomial and normal samples for various n and p values, save: p_list, n_list and diff_list - # 2nd optimize mean shift and std scale for each n and p value and get improved error, save: mean_shift_list, std_scale_list and error_improved_list - # 3rd make a 2D regression for the optimized mean shift and std scale, get mean_shift_regress(n, p) and std_scale_regress(n, p), save: the optimized parameters of the regression equations - # 4th plot: (1) error depending on n and p, (2) optimized mean shift and std scale depending on n and p and corresponding error improvement, (3) regressed mean shift and std scale depending on n and p and corresponding error improvement - if COMPARE_ORIGINAL: # TODO implement seed for rng - ### create the save folder - folder_name = create_data_raw_folder( +def compare_with_or_without_optimization(): + """ + Compare the difference between the binomial and normal distribution for various n and + p values with and without optimization. Save the data to the COMPARE_ORIGINAL_FOLDER + and OPTIMIZE_FOLDER. 
+ """ + ### create the save folder(s) + if COMPARE_ORIGINAL: + create_data_raw_folder( COMPARE_ORIGINAL_FOLDER, - COMPARE_ORIGINAL=COMPARE_ORIGINAL, - OPTIMIZE=OPTIMIZE, - REGRESS=REGRESS, - PLOT_COMPARE_ORIGINAL=PLOT_COMPARE_ORIGINAL, - PLOT_OPTIMIZE=PLOT_OPTIMIZE, - PLOT_REGRESS=PLOT_REGRESS, - COMPARE_ORIGINAL_FOLDER=COMPARE_ORIGINAL_FOLDER, - PLOTS_FOLDER=PLOTS_FOLDER, - S=S, + ) + if OPTIMIZE: + create_data_raw_folder( + OPTIMIZE_FOLDER, ) - ### create the n/p pairs - n_arr = logarithmic_arange(10, 1000, 20).astype(int) - p_arr = logarithmic_arange(0.001, 0.1, 10) + ### create the n/p pairs + n_arr = logarithmic_arange(*N_VALUES).astype(int) + p_arr = logarithmic_arange(*P_VALUES) - ### get difference between binomial and normal distribution for each n/p pair - p_list = [] - n_list = [] - diff_list = [] - for p in p_arr: - for n in n_arr: + ### get difference between binomial and normal distribution for each n/p pair + p_list = [] + n_list = [] + diff_original_list = [] + mean_shift_opt_list = [] + std_scale_opt_list = [] + diff_opt_list = [] + for p in p_arr: + for n in n_arr: + p_list.append(p) + n_list.append(n) + if COMPARE_ORIGINAL: ### get the error without optimization - error = difference_binomial_normal( - mean_shift=0, std_scale=1, n=n, p=p, s=S + error = difference_binomial_normal(mean_shift=0, std_scale=1, n=n, p=p) + diff_original_list.append(error) + if OPTIMIZE: + ### get the error with optimization + mean_shift_opt, std_scale_opt, error_opt = ( + difference_binomial_normal_optimize(n=n, p=p) ) - diff_list.append(error) - - ### store the results - p_list.append(p) - n_list.append(n) + mean_shift_opt_list.append(mean_shift_opt) + std_scale_opt_list.append(std_scale_opt) + diff_opt_list.append(error_opt) - ### save variables + ### save variables + if COMPARE_ORIGINAL: save_variables( variable_list=[ p_list, n_list, - diff_list, + diff_original_list, ], name_list=[ "p_list", "n_list", "diff_list", ], - path=folder_name, + 
path=COMPARE_ORIGINAL_FOLDER, ) - - if PLOT_COMPARE_ORIGINAL: - ### load the data - loaded_variables = load_variables( + if OPTIMIZE: + save_variables( + variable_list=[ + p_list, + n_list, + mean_shift_opt_list, + std_scale_opt_list, + diff_opt_list, + ], name_list=[ "p_list", "n_list", - "diff_list", + "mean_shift_opt_list", + "std_scale_opt_list", + "diff_opt_list", ], - path=COMPARE_ORIGINAL_FOLDER, + path=OPTIMIZE_FOLDER, ) - ### plot the data - plt.figure(figsize=(6.4 * 2, 4.8 * 2)) - plt.subplot(1, 1, 1) - plot_2d_interpolated_image( - x=loaded_variables["n_list"], - y=loaded_variables["p_list"], - z=loaded_variables["diff_list"], - vmin=0, - vmax=np.max(loaded_variables["diff_list"]), - ) - plt.colorbar() - plt.xlabel("n") - plt.ylabel("p") - plt.title( - f"Difference original\n(max: {np.max(loaded_variables['diff_list'])})" - ) - plt.tight_layout() - create_dir(PLOTS_FOLDER) - plt.savefig(f"{PLOTS_FOLDER}/difference_original.png", dpi=300) + + +### TODO I have the problem that for very small p the normal distribution is not a good +### approximation of the binomial distribution. +### I think one can shift the mean and scale the standard deviation depending on the p +### and n values. I will try to optimize the shift and scale for each n and p value. 
+### global parameters
path=OPTIMIZE_FOLDER, ) mean_shift = loaded_variables[f"mean_shift_{best_parallel_id}"] std_scale = loaded_variables[f"std_scale_{best_parallel_id}"] diff --git a/src/CompNeuroPy/examples/model_configurator/test2_deap_opt_transform.py b/src/CompNeuroPy/examples/model_configurator/test2_deap_opt_transform.py index 5f7d6ef..9ae7104 100644 --- a/src/CompNeuroPy/examples/model_configurator/test2_deap_opt_transform.py +++ b/src/CompNeuroPy/examples/model_configurator/test2_deap_opt_transform.py @@ -1,6 +1,6 @@ +from test2 import OPTIMIZE_FOLDER, difference_binomial_normal from CompNeuroPy import DeapCma, load_variables, save_variables import numpy as np -from test2 import deap_opt_transform_path, objective_function, M import sys # Load the variables @@ -9,7 +9,7 @@ "n", "p", ], - path=deap_opt_transform_path, + path=OPTIMIZE_FOLDER, ) n = variables["n"] p = variables["p"] @@ -19,8 +19,8 @@ def evaluate_function(population): loss_list = [] ### the population is a list of individuals which are lists of parameters for individual in population: - loss_of_individual = objective_function( - mean_shift=individual[0], std_scale=individual[1], n=n, p=p, m=M + loss_of_individual = difference_binomial_normal( + mean_shift=individual[0], std_scale=individual[1], n=n, p=p ) loss_list.append((loss_of_individual,)) return loss_list @@ -31,6 +31,9 @@ def evaluate_function(population): scale_std_bounds = [0.5, 2] lb = np.array([shift_mean_bounds[0], scale_std_bounds[0]]) ub = np.array([shift_mean_bounds[1], scale_std_bounds[1]]) +p0 = np.random.default_rng().uniform( + low=lb + 0.25 * (ub - lb), high=ub - 0.25 * (ub - lb), size=2 +) ### create an instance of the DeapCma class deap_cma = DeapCma( @@ -39,23 +42,24 @@ def evaluate_function(population): evaluate_function=evaluate_function, param_names=["mean_shift", "std_scale"], hard_bounds=True, + display_progress_bar=False, ) ### run the optimization deap_cma_result = deap_cma.run(max_evals=1000) ### get the optimized parameters and best 
error -mean_shift = deap_cma_result["mean_shift"] -std_scale = deap_cma_result["std_scale"] -error_improved = deap_cma_result["best_fitness"] +mean_shift_opt = deap_cma_result["mean_shift"] +std_scale_opt = deap_cma_result["std_scale"] +error_opt = deap_cma_result["best_fitness"] # Save the variables save_variables( name_list=[ - f"mean_shift_{sys.argv[1]}", - f"std_scale_{sys.argv[1]}", - f"error_improved_{sys.argv[1]}", + f"mean_shift_opt_{sys.argv[1]}", + f"std_scale_opt_{sys.argv[1]}", + f"error_opt_{sys.argv[1]}", ], - variable_list=[mean_shift, std_scale, error_improved], - path=deap_opt_transform_path, + variable_list=[mean_shift_opt, std_scale_opt, error_opt], + path=OPTIMIZE_FOLDER, ) From a4f0401089993e5293f221686b5ec665f2dcf4b8 Mon Sep 17 00:00:00 2001 From: olimaol Date: Tue, 13 Aug 2024 11:32:07 +0200 Subject: [PATCH 15/21] replaced separate ANNarchy imports with a single silent ANNarchy import --- src/CompNeuroPy/__init__.py | 37 + src/CompNeuroPy/analysis_functions.py | 12 +- src/CompNeuroPy/dbs.py | 158 ++-- src/CompNeuroPy/experiment.py | 4 +- src/CompNeuroPy/extra_functions.py | 36 +- src/CompNeuroPy/full_models/bgm_22/bgm.py | 52 +- .../bgm_22/model_creation_functions.py | 886 +++++++++--------- .../full_models/hodgkin_huxley_single_pop.py | 16 +- src/CompNeuroPy/generate_model.py | 20 +- src/CompNeuroPy/generate_simulation.py | 6 +- src/CompNeuroPy/model_functions.py | 42 +- src/CompNeuroPy/monitors.py | 47 +- .../experimental_models/fit_Bogacz_nm.py | 6 +- .../experimental_models/fit_Corbit_nm.py | 28 +- .../experimental_models/fit_Hjorth_nm.py | 22 +- .../final_models/H_and_H_like_nm.py | 4 +- .../final_models/artificial_nm.py | 13 +- .../final_models/izhikevich_2003_like_nm.py | 16 +- .../final_models/izhikevich_2007_like_nm.py | 24 +- src/CompNeuroPy/opt_neuron.py | 22 +- src/CompNeuroPy/simulation_functions.py | 56 +- src/CompNeuroPy/simulation_requirements.py | 4 +- .../synapse_models/synapse_models.py | 4 +- 23 files changed, 774 
insertions(+), 741 deletions(-) diff --git a/src/CompNeuroPy/__init__.py b/src/CompNeuroPy/__init__.py index d704cac..dbf4dfc 100644 --- a/src/CompNeuroPy/__init__.py +++ b/src/CompNeuroPy/__init__.py @@ -1,3 +1,40 @@ +### ANNarchy +import os +import sys +from contextlib import contextmanager + + +@contextmanager +def suppress_stdout(): + """ + Suppresses the print output of a function + + Example: + ```python + with suppress_stdout(): + print("this will not be printed") + ``` + """ + with open(os.devnull, "w") as devnull: + old_stdout = sys.stdout + sys.stdout = devnull + try: + yield + finally: + sys.stdout = old_stdout + + +with suppress_stdout(): + import ANNarchy as ann + from ANNarchy.core import ConnectorMethods as ann_ConnectorMethods + + if ann.__version__ >= "4.8": + from ANNarchy.intern.NetworkManager import NetworkManager as ann_NetworkManager + else: + from ANNarchy.core import Global as ann_Global + + from ANNarchy.core import Random as ann_Random + ### functions from CompNeuroPy.analysis_functions import ( my_raster_plot, diff --git a/src/CompNeuroPy/analysis_functions.py b/src/CompNeuroPy/analysis_functions.py index 9741955..9853578 100644 --- a/src/CompNeuroPy/analysis_functions.py +++ b/src/CompNeuroPy/analysis_functions.py @@ -1,7 +1,7 @@ import numpy as np import matplotlib.pyplot as plt from matplotlib.ticker import MaxNLocator -from ANNarchy import raster_plot, dt, inter_spike_interval, coefficient_of_variation +from CompNeuroPy import ann import warnings from CompNeuroPy import system_functions as sf from CompNeuroPy import extra_functions as ef @@ -27,9 +27,9 @@ def my_raster_plot(spikes: dict): n (array): ranks of the neurons """ - t, n = raster_plot(spikes) + t, n = ann.raster_plot(spikes) np.zeros(10) - t = np.round(t / dt(), 0).astype(int) + t = np.round(t / ann.dt(), 0).astype(int) return t, n @@ -357,7 +357,7 @@ def _get_pop_rate_old(spikes, duration, dt=1, t_start=0, t_smooth_ms=-1): duration_init = duration temp_duration = duration + 
t_start - t, n = raster_plot(spikes) + t, n = ann.raster_plot(spikes) if len(t) > 1: # check if there are spikes in population at all if t_smooth_ms == -1: ISIs = [] @@ -1562,7 +1562,7 @@ def _interspike_interval_plot(self, compartment, data): ### set title plt.title(f"Interspike interval histogram {compartment} ({len(data)})") ### get interspike intervals - interspike_intervals_list = inter_spike_interval(spikes=data) + interspike_intervals_list = ann.inter_spike_interval(spikes=data) ### plot histogram plt.hist( interspike_intervals_list, @@ -1590,7 +1590,7 @@ def _coefficient_of_variation_plot(self, compartment, data): ### set title plt.title(f"Coefficient of variation histogram {compartment} ({len(data)})") ### get coefficient of variation - coefficient_of_variation_dict = coefficient_of_variation( + coefficient_of_variation_dict = ann.coefficient_of_variation( spikes=data, per_neuron=True, ) diff --git a/src/CompNeuroPy/dbs.py b/src/CompNeuroPy/dbs.py index 0aed84a..0252b40 100644 --- a/src/CompNeuroPy/dbs.py +++ b/src/CompNeuroPy/dbs.py @@ -1,17 +1,4 @@ -from ANNarchy import ( - Neuron, - Population, - dt, - add_function, - Projection, - get_population, - Constant, - Synapse, - projections, - populations, - get_projection, -) -from ANNarchy.core import ConnectorMethods +from CompNeuroPy import ann, ann_ConnectorMethods import numpy as np from CompNeuroPy import model_functions as mf from CompNeuroPy.generate_model import CompNeuroModel @@ -19,18 +6,18 @@ import inspect _connector_methods_dict = { - "One-to-One": ConnectorMethods.connect_one_to_one, - "All-to-All": ConnectorMethods.connect_all_to_all, - "Gaussian": ConnectorMethods.connect_gaussian, - "Difference-of-Gaussian": ConnectorMethods.connect_dog, - "Random": ConnectorMethods.connect_fixed_probability, - "Random Convergent": ConnectorMethods.connect_fixed_number_pre, - "Random Divergent": ConnectorMethods.connect_fixed_number_post, - "User-defined": ConnectorMethods.connect_with_func, - 
"MatrixMarket": ConnectorMethods.connect_from_matrix_market, - "Connectivity matrix": ConnectorMethods.connect_from_matrix, - "Sparse connectivity matrix": ConnectorMethods.connect_from_sparse, - "From File": ConnectorMethods.connect_from_file, + "One-to-One": ann_ConnectorMethods.connect_one_to_one, + "All-to-All": ann_ConnectorMethods.connect_all_to_all, + "Gaussian": ann_ConnectorMethods.connect_gaussian, + "Difference-of-Gaussian": ann_ConnectorMethods.connect_dog, + "Random": ann_ConnectorMethods.connect_fixed_probability, + "Random Convergent": ann_ConnectorMethods.connect_fixed_number_pre, + "Random Divergent": ann_ConnectorMethods.connect_fixed_number_post, + "User-defined": ann_ConnectorMethods.connect_with_func, + "MatrixMarket": ann_ConnectorMethods.connect_from_matrix_market, + "Connectivity matrix": ann_ConnectorMethods.connect_from_matrix, + "Sparse connectivity matrix": ann_ConnectorMethods.connect_from_sparse, + "From File": ann_ConnectorMethods.connect_from_file, } @@ -54,10 +41,10 @@ class _CreateDBSmodel: def __init__( self, - stimulated_population: Population, - excluded_populations_list: list[Population], - passing_fibres_list: list[Projection], - axon_rate_amp: float | dict[Population | str, float], + stimulated_population: ann.Population, + excluded_populations_list: list[ann.Population], + passing_fibres_list: list[ann.Projection], + axon_rate_amp: float | dict[ann.Population | str, float], ) -> None: """ Prepare model for DBS stimulation @@ -89,21 +76,22 @@ def __init__( self.recreate_model() ### get variables containing Populations and Projections - self.stimulated_population: Population = get_population( + self.stimulated_population: ann.Population = ann.get_population( self.stimulated_population_name ) - self.excluded_populations_list: list[Population] = [ - get_population(pop_name) for pop_name in self.excluded_populations_name_list + self.excluded_populations_list: list[ann.Population] = [ + ann.get_population(pop_name) + for pop_name 
in self.excluded_populations_name_list ] - self.passing_fibres_list: list[Projection] = [ - get_projection(proj_name) for proj_name in self.passing_fibres_name_list + self.passing_fibres_list: list[ann.Projection] = [ + ann.get_projection(proj_name) for proj_name in self.passing_fibres_name_list ] if isinstance(self.axon_rate_amp, type(None)): ### self.axon_rate_amp is None --> use the axon_rate_amp_pop_name_list and axon_rate_amp_value_list to create the dict - self.axon_rate_amp: dict[Population | str, float] = { + self.axon_rate_amp: dict[ann.Population | str, float] = { ### key is either a Populaiton or the string "default" ( - get_population(pop_name[4:]) + ann.get_population(pop_name[4:]) if pop_name.startswith("pop;") else pop_name ): axon_rate_amp_val @@ -114,10 +102,10 @@ def __init__( def analyze_model( self, - stimulated_population: Population, - excluded_populations_list: list[Population], - passing_fibres_list: list[Projection], - axon_rate_amp: float | dict[Population | str, float], + stimulated_population: ann.Population, + excluded_populations_list: list[ann.Population], + passing_fibres_list: list[ann.Projection], + axon_rate_amp: float | dict[ann.Population | str, float], ): """ Analyze the model to be able to recreate it. 
@@ -152,7 +140,7 @@ def analyze_model( self.axon_rate_amp_pop_name_list = [ ( f"pop;{axon_rate_amp_key.name}" - if isinstance(axon_rate_amp_key, Population) + if isinstance(axon_rate_amp_key, ann.Population) else axon_rate_amp_key ) for axon_rate_amp_key in axon_rate_amp.keys() @@ -192,8 +180,8 @@ def get_all_population_and_projection_names(self): projection_name_list (list): List of all projection names """ - population_name_list: list[str] = [pop.name for pop in populations()] - projection_name_list: list[str] = [proj.name for proj in projections()] + population_name_list: list[str] = [pop.name for pop in ann.populations()] + projection_name_list: list[str] = [proj.name for proj in ann.projections()] return population_name_list, projection_name_list @@ -210,18 +198,18 @@ def analyze_populations(self): ### for loop over all populations for pop_name in self.population_name_list: - pop: Population = get_population(pop_name) + pop: ann.Population = ann.get_population(pop_name) ### get the neuron model attributes (parameters/variables) neuron_model_attr_dict[pop.name] = pop.init ### get a dict of all paramters of the __init__ function of the Neuron - init_params = inspect.signature(Neuron.__init__).parameters + init_params = inspect.signature(ann.Neuron.__init__).parameters neuron_model_init_parameter_dict[pop.name] = { param: getattr(pop.neuron_type, param) for param in init_params if param != "self" } ### get a dict of all paramters of the __init__ function of the Population - init_params = inspect.signature(Population.__init__).parameters + init_params = inspect.signature(ann.Population.__init__).parameters pop_init_parameter_dict[pop.name] = { param: getattr(pop, param) for param in init_params @@ -253,18 +241,18 @@ def analyze_projections(self): ### loop over all projections for proj_name in self.projection_name_list: - proj: Projection = get_projection(proj_name) + proj: ann.Projection = ann.get_projection(proj_name) ### get the synapse model attributes 
(parameters/variables) synapse_model_attr_dict[proj.name] = proj.init ### get a dict of all paramters of the __init__ function of the Synapse - init_params = inspect.signature(Synapse.__init__).parameters + init_params = inspect.signature(ann.Synapse.__init__).parameters synapse_init_parameter_dict[proj.name] = { param: getattr(proj.synapse_type, param) for param in init_params if param != "self" } ### get a dict of all paramters of the __init__ function of the Projection - init_params = inspect.signature(Projection.__init__).parameters + init_params = inspect.signature(ann.Projection.__init__).parameters proj_init_parameter_dict[proj_name] = { param: getattr(proj, param) for param in init_params @@ -304,7 +292,7 @@ def analyze_projections(self): pre_post_pop_name_dict, ) - def get_connector_parameters(self, proj: Projection): + def get_connector_parameters(self, proj: ann.Projection): """ Get the parameters of the given connector function. @@ -430,7 +418,7 @@ def recreate_population(self, pop_name): neuron_model_init_parameter_dict ) ### create the new neuron model - neuron_model_new = Neuron(**neuron_model_init_parameter_dict) + neuron_model_new = ann.Neuron(**neuron_model_init_parameter_dict) ### 2nd recreate the population ### get the stored parameters of the __init__ function of the Population @@ -438,7 +426,7 @@ def recreate_population(self, pop_name): ### replace the neuron model with the new neuron model pop_init_parameter_dict["neuron"] = neuron_model_new ### create the new population - pop_new = Population(**pop_init_parameter_dict) + pop_new = ann.Population(**pop_init_parameter_dict) ### 3rd set the parameters and variables of the population's neurons ### get the stored parameters and variables @@ -707,20 +695,20 @@ def recreate_projection(self, proj_name): synapse_init_parameter_dict, ) ### create the new synapse model - synapse_new = Synapse(**synapse_init_parameter_dict) + synapse_new = ann.Synapse(**synapse_init_parameter_dict) ### 2nd recreate 
projection ### replace the synapse model with the new synapse model proj_init_parameter_dict["synapse"] = synapse_new ### replace pre and post to new populations - proj_init_parameter_dict["pre"] = get_population( + proj_init_parameter_dict["pre"] = ann.get_population( self.pre_post_pop_name_dict[proj_name][0] ) - proj_init_parameter_dict["post"] = get_population( + proj_init_parameter_dict["post"] = ann.get_population( self.pre_post_pop_name_dict[proj_name][1] ) ### create the new projection - proj_new = Projection(**proj_init_parameter_dict) + proj_new = ann.Projection(**proj_init_parameter_dict) ### 3rd connect the projection with the connector function ### get the connector function @@ -884,10 +872,10 @@ class _CreateDBSmodelcnp(_CreateDBSmodel): def __init__( self, model: CompNeuroModel, - stimulated_population: Population, - excluded_populations_list: list[Population], - passing_fibres_list: list[Projection], - axon_rate_amp: float | dict[Population | str, float], + stimulated_population: ann.Population, + excluded_populations_list: list[ann.Population], + passing_fibres_list: list[ann.Projection], + axon_rate_amp: float | dict[ann.Population | str, float], ) -> None: """ Prepare model for DBS stimulation. 
@@ -998,22 +986,22 @@ class DBSstimulator: @check_types() def __init__( self, - stimulated_population: Population, + stimulated_population: ann.Population, population_proportion: float = 1.0, - excluded_populations_list: list[Population] = [], + excluded_populations_list: list[ann.Population] = [], dbs_depolarization: float = 0.0, orthodromic: bool = False, antidromic: bool = False, efferents: bool = False, afferents: bool = False, passing_fibres: bool = False, - passing_fibres_list: list[Projection] = [], + passing_fibres_list: list[ann.Projection] = [], passing_fibres_strength: float | list[float] = 1.0, sum_branches: bool = True, dbs_pulse_frequency_Hz: float = 130.0, dbs_pulse_width_us: float = 300.0, axon_spikes_per_pulse: float = 1.0, - axon_rate_amp: float | dict[Population | str, float] = 1.0, + axon_rate_amp: float | dict[ann.Population | str, float] = 1.0, seed: int | None = None, auto_implement: bool = False, model: CompNeuroModel | None = None, @@ -1206,13 +1194,13 @@ def _set_constants(self, dbs_pulse_frequency_Hz: float): Frequency of the DBS pulse in Hz """ # pulse frequency: - Constant("dbs_pulse_frequency_Hz", dbs_pulse_frequency_Hz) + ann.Constant("dbs_pulse_frequency_Hz", dbs_pulse_frequency_Hz) # pulse width: # Neumant et al.. 2023: 60us but Meier et al. 2022: 300us... 
60us = 0.06ms is very small for ANNarchy simulations - Constant("dbs_pulse_width_us", self.dbs_pulse_width_us) + ann.Constant("dbs_pulse_width_us", self.dbs_pulse_width_us) ### add global function for DBS pulse - add_function( + ann.add_function( "pulse(time_ms) = ite(modulo(time_ms*1000, 1000000./dbs_pulse_frequency_Hz) < dbs_pulse_width_us, 1., 0.)" ) @@ -1230,7 +1218,7 @@ def _axon_spikes_per_pulse_to_prob(self, axon_spikes_per_pulse: float): Probability of axon spikes per timestep during DBS pulse """ return np.clip( - (axon_spikes_per_pulse * 1000 * dt() / self.dbs_pulse_width_us), 0, 1 + (axon_spikes_per_pulse * 1000 * ann.dt() / self.dbs_pulse_width_us), 0, 1 ) def _set_depolarization(self, dbs_depolarization: float | None = None): @@ -1247,7 +1235,7 @@ def _set_depolarization(self, dbs_depolarization: float | None = None): dbs_depolarization = self.dbs_depolarization ### set depolarization of population - for pop in populations(): + for pop in ann.populations(): if pop == self.stimulated_population: pop.dbs_depolarization = dbs_depolarization else: @@ -1263,7 +1251,7 @@ def _set_axon_spikes( passing_fibres_strength: float | list[float] | None = None, sum_branches: bool | None = None, axon_spikes_per_pulse: float | None = None, - axon_rate_amp: float | dict[Population | str, float] | None = None, + axon_rate_amp: float | dict[ann.Population | str, float] | None = None, ): """ Set axon spikes forwarding orthodromic @@ -1386,7 +1374,7 @@ def _deactivate_axon_DBS(self): """ Deactivate axon spikes forwarding for both orthodromic and antidromic. 
""" - for pop in populations(): + for pop in ann.populations(): ### deactivate axon spike genearation for all populations pop.prob_axon_spike = 0 pop.axon_rate_amp = 0 @@ -1395,7 +1383,7 @@ def _deactivate_axon_DBS(self): pop.antidromic_prob = 0 ### deactivate orthodromic transmission for all projections - for proj in projections(): + for proj in ann.projections(): proj.axon_transmission = 0 proj.p_axon_spike_trans = 0 @@ -1406,7 +1394,7 @@ def _set_orthodromic( passing_fibres: bool, passing_fibres_strength: list[float], axon_spikes_per_pulse: float, - axon_rate_amp: dict[Population | str, float], + axon_rate_amp: dict[ann.Population | str, float], ): """ Set orthodromic axon spikes forwarding. @@ -1437,7 +1425,7 @@ def _set_orthodromic( """ if efferents: ### activate all efferent projections - projection_list = projections(pre=self.stimulated_population) + projection_list = ann.projections(pre=self.stimulated_population) for proj in projection_list: ### skip excluded populations if proj.post in self.excluded_populations_list: @@ -1459,7 +1447,7 @@ def _set_orthodromic( if afferents: ### activate all afferent projections - projection_list = projections(post=self.stimulated_population) + projection_list = ann.projections(post=self.stimulated_population) for proj in projection_list: ### skip excluded populations if proj.pre in self.excluded_populations_list: @@ -1542,7 +1530,7 @@ def _set_antidromic( if afferents: ### activate all afferent projections, i.e. 
all presynaptic populations of stimulated population ### get presynaptic projections - projection_list = projections(post=self.stimulated_population) + projection_list = ann.projections(post=self.stimulated_population) ### get presynaptic populations from projections presyn_pop_list = [] presyn_pop_name_list = [] @@ -1632,7 +1620,7 @@ def on( passing_fibres_strength: float | list[float] | None = None, sum_branches: bool | None = None, axon_spikes_per_pulse: float | None = None, - axon_rate_amp: float | dict[Population | str, float] | None = None, + axon_rate_amp: float | dict[ann.Population | str, float] | None = None, seed: int | None = None, ): """ @@ -1744,7 +1732,7 @@ def _set_dbs_on(self, population_proportion: float | None, seed: int | None): dbs_on_array = self._create_dbs_on_array(population_proportion, seed) ### set DBS on for all populations - for pop in populations(): + for pop in ann.populations(): ### of the stimulated population only the specified proportion is affected by DBS if pop == self.stimulated_population: pop.dbs_on = dbs_on_array @@ -1756,7 +1744,7 @@ def off(self): Deactivate DBS. 
""" ### set DBS off for all populations - for pop in populations(): + for pop in ann.populations(): pop.dbs_on = 0 pop.prob_axon_spike = 0 pop.axon_rate_amp = 0 @@ -1777,13 +1765,13 @@ def update_pointers(self, pointer_list): List of pointers to populations and projections of the new model """ ### update pointers - pointer_list_new: list[Population | Projection] = [] + pointer_list_new: list[ann.Population | ann.Projection] = [] for pointer in pointer_list: compartment_name = pointer.name - if isinstance(pointer, Population): - pointer_list_new.append(get_population(compartment_name)) - elif isinstance(pointer, Projection): - pointer_list_new.append(get_projection(compartment_name)) + if isinstance(pointer, ann.Population): + pointer_list_new.append(ann.get_population(compartment_name)) + elif isinstance(pointer, ann.Projection): + pointer_list_new.append(ann.get_projection(compartment_name)) else: raise TypeError( f"Pointer {pointer} is neither a Population nor a Projection" diff --git a/src/CompNeuroPy/experiment.py b/src/CompNeuroPy/experiment.py index 5d8df91..ee1b6e3 100644 --- a/src/CompNeuroPy/experiment.py +++ b/src/CompNeuroPy/experiment.py @@ -1,4 +1,4 @@ -from ANNarchy import reset +from CompNeuroPy import ann from CompNeuroPy.monitors import RecordingTimes from CompNeuroPy.monitors import CompNeuroMonitors from CompNeuroPy import model_functions as mf @@ -150,7 +150,7 @@ def reset( parameters_dict = mf._get_all_parameters() ### there are no monitors, but model should be resetted, therefore use ### ANNarchy's reset function - reset(**reset_kwargs) + ann.reset(**reset_kwargs) if parameters is False: ### if parameters=False, set parameters after reset mf._set_all_parameters(parameters_dict) diff --git a/src/CompNeuroPy/extra_functions.py b/src/CompNeuroPy/extra_functions.py index 4526d69..89e9c84 100644 --- a/src/CompNeuroPy/extra_functions.py +++ b/src/CompNeuroPy/extra_functions.py @@ -19,7 +19,7 @@ from deap import creator from deap import tools from 
deap import cma -from ANNarchy import Neuron, Population, simulate, setup, get_population +from CompNeuroPy import ann from sympy import symbols, Symbol, solve, sympify, Eq, lambdify, factor from scipy.interpolate import griddata from scipy.optimize import brentq @@ -1158,7 +1158,7 @@ class VClampParamSearch: @check_types() def __init__( self, - neuron_model: Neuron, + neuron_model: ann.Neuron, equations: str = """ C*dv/dt = k*(v - v_r)*(v - v_t) - u + I du/dt = a*(b*(v - v_r) - u) @@ -1380,7 +1380,7 @@ def _create_neuron_model_with_tuned_parameters(self): ) ### also add the external current variable parameters = parameters + "\n" + f"{self.external_current_var} = 0" - neuron_mondel = Neuron( + neuron_mondel = ann.Neuron( parameters=parameters, equations=self.equations + "\nr=0", ) @@ -1700,14 +1700,14 @@ def _simulations(self): ### simulate both models at the same time ### for pop_normal nothing happens (resting state) ### for pop_clamp the voltage is set to v_0 and then to v_step for each neuron - get_population("pop_clamp").v = self._v_0_arr - simulate(duration) - get_population("pop_clamp").v = self._v_step_arr - simulate(self._timestep) - I_clamp_inst_arr = get_population("pop_clamp").I_clamp - simulate(duration - self._timestep) - I_clamp_hold_arr = get_population("pop_clamp").I_clamp - v_rest = get_population("pop_normal").v[0] + ann.get_population("pop_clamp").v = self._v_0_arr + ann.simulate(duration) + ann.get_population("pop_clamp").v = self._v_step_arr + ann.simulate(self._timestep) + I_clamp_inst_arr = ann.get_population("pop_clamp").I_clamp + ann.simulate(duration - self._timestep) + I_clamp_hold_arr = ann.get_population("pop_clamp").I_clamp + v_rest = ann.get_population("pop_normal").v[0] ### get unique values of v_step and their indices v_step_unique, v_step_unique_idx = np.unique( @@ -1859,10 +1859,10 @@ def _create_model(self): model containing the population with the voltage clamped neuron model """ ### setup ANNarchy - setup(dt=self._timestep, 
seed=1234) + ann.setup(dt=self._timestep, seed=1234) ### create a population with the normal neuron model model_normal = CompNeuroModel( - model_creation_function=lambda: Population( + model_creation_function=lambda: ann.Population( 1, self._neuron_model, name="pop_normal" ), name="model_normal", @@ -1870,7 +1870,7 @@ def _create_model(self): ) ### create a population with the voltage clamped neuron model model_clamp = CompNeuroModel( - model_creation_function=lambda: Population( + model_creation_function=lambda: ann.Population( len(self._v_0_arr), self._neuron_model_clamp, name="pop_clamp" ), name="model_clamp", @@ -1879,7 +1879,7 @@ def _create_model(self): return model_normal, model_clamp - def _get_neuron_model_attributes(self, neuron_model: Neuron): + def _get_neuron_model_attributes(self, neuron_model: ann.Neuron): """ Get a list of the attributes (parameters and variables) of the given neuron model. @@ -1896,7 +1896,7 @@ def _get_neuron_model_attributes(self, neuron_model: Neuron): attributes.append(var["name"]) return attributes - def _get_neuron_model_arguments(self, neuron_model: Neuron): + def _get_neuron_model_arguments(self, neuron_model: ann.Neuron): """ Get a dictionary of the initial arguments of the given neuron model. 
@@ -1909,7 +1909,7 @@ def _get_neuron_model_arguments(self, neuron_model: Neuron): dictionary of the initial arguments of the given neuron model """ ### get the names of the arguments of a Neuron class - init_arguments_name_list = list(Neuron.__init__.__code__.co_varnames) + init_arguments_name_list = list(ann.Neuron.__init__.__code__.co_varnames) init_arguments_name_list.remove("self") init_arguments_name_list.remove("name") init_arguments_name_list.remove("description") @@ -1942,7 +1942,7 @@ def _get_neuron_model_clamp(self): init_arguments_dict["equations"] = "\n".join(equations_line_split_list) ### create neuron model with new equations - neuron_model_clamp = Neuron(**init_arguments_dict) + neuron_model_clamp = ann.Neuron(**init_arguments_dict) if self.verbose: print(f"Neuron model with voltage clamp equations:\n{neuron_model_clamp}") diff --git a/src/CompNeuroPy/full_models/bgm_22/bgm.py b/src/CompNeuroPy/full_models/bgm_22/bgm.py index d798ad1..6e907ae 100644 --- a/src/CompNeuroPy/full_models/bgm_22/bgm.py +++ b/src/CompNeuroPy/full_models/bgm_22/bgm.py @@ -1,14 +1,6 @@ import numpy as np -from ANNarchy import get_population, get_projection -from ANNarchy.core.Random import ( - Uniform, - DiscreteUniform, - Normal, - LogNormal, - Exponential, - Gamma, -) -from CompNeuroPy import CompNeuroModel +from CompNeuroPy import ann, ann_Random +from CompNeuroPy.generate_model import CompNeuroModel import csv import os import importlib @@ -146,12 +138,12 @@ def _add_name_appendix(self): populations_new = [] for pop_name in self.populations: populations_new.append(pop_name + self._name_appendix_to_add) - get_population(pop_name).name = pop_name + self._name_appendix_to_add + ann.get_population(pop_name).name = pop_name + self._name_appendix_to_add self.populations = populations_new projections_new = [] for proj_name in self.projections: projections_new.append(proj_name + self._name_appendix_to_add) - get_projection(proj_name).name = proj_name + self._name_appendix_to_add 
+ ann.get_projection(proj_name).name = proj_name + self._name_appendix_to_add self.projections = projections_new ### rename parameter keys except general params_new = {} @@ -229,7 +221,7 @@ def _set_params(self): ### if param_object is a pop in network if param_object in self.populations: ### and the param_name is an attribute of the pop --> set param of pop - if param_name in vars(get_population(param_object))["attributes"]: + if param_name in vars(ann.get_population(param_object))["attributes"]: ### if parameter values are given as distribution --> get numpy array if isinstance(param_val, str): if ( @@ -240,9 +232,21 @@ def _set_params(self): or "Exponential" in param_val or "Gamma" in param_val ): - distribution = eval(param_val) + param_val_tmp = param_val + for distribution in [ + "Uniform", + "DiscreteUniform", + "Normal", + "LogNormal", + "Exponential", + "Gamma", + ]: + param_val_tmp = param_val_tmp.replace( + distribution, "ann_Random." + distribution + ) + distribution = eval(param_val_tmp) param_val = distribution.get_values( - shape=get_population(param_object).geometry + shape=ann.get_population(param_object).geometry ) self.set_param( compartment=param_object, @@ -287,7 +291,7 @@ def _set_noise_values(self): compartment=param_object, parameter_name="rates_noise", parameter_value=self._rng.normal( - mean, sd, get_population(param_object).size + mean, sd, ann.get_population(param_object).size ), ) else: @@ -296,7 +300,7 @@ def _set_noise_values(self): parameter_name="rates_noise", parameter_value=mean, ) - elif param_name in vars(get_population(param_object))["attributes"]: + elif param_name in vars(ann.get_population(param_object))["attributes"]: ### noise parameters which are actual attributes of the pop are simply set self.set_param( compartment=param_object, @@ -395,7 +399,7 @@ def _set_connections(self): if ( param_object in self.projections and not (param_name in already_set_params[param_object]) - and param_name in 
vars(get_projection(param_object))["attributes"] + and param_name in vars(ann.get_projection(param_object))["attributes"] ): self.set_param( compartment=param_object, @@ -468,12 +472,12 @@ def _get_params(self, name): def _needed_imports(self): for import_val in [ - Uniform, - DiscreteUniform, - Normal, - LogNormal, - Exponential, - Gamma, + ann_Random.Uniform, + ann_Random.DiscreteUniform, + ann_Random.Normal, + ann_Random.LogNormal, + ann_Random.Exponential, + ann_Random.Gamma, importlib, ]: print(import_val) diff --git a/src/CompNeuroPy/full_models/bgm_22/model_creation_functions.py b/src/CompNeuroPy/full_models/bgm_22/model_creation_functions.py index 500575d..f59b4b9 100644 --- a/src/CompNeuroPy/full_models/bgm_22/model_creation_functions.py +++ b/src/CompNeuroPy/full_models/bgm_22/model_creation_functions.py @@ -1,4 +1,4 @@ -from ANNarchy import Population, Projection +from CompNeuroPy import ann from CompNeuroPy.neuron_models import ( poisson_neuron_up_down, Izhikevich2007_noisy_AMPA, @@ -24,50 +24,52 @@ def BGM_v01(self): """ ####### POPULATIONS ###### ### cortex / input populations - cor_go = Population( + cor_go = ann.Population( self.params["cor_go.size"], poisson_neuron_up_down, name="cor_go" ) - cor_pause = Population( + cor_pause = ann.Population( self.params["cor_pause.size"], poisson_neuron_up_down, name="cor_pause" ) - cor_stop = Population( + cor_stop = ann.Population( self.params["cor_stop.size"], poisson_neuron_up_down, name="cor_stop" ) ### Str Populations - str_d1 = Population( + str_d1 = ann.Population( self.params["str_d1.size"], Izhikevich2007_noisy_AMPA, name="str_d1" ) - str_d2 = Population( + str_d2 = ann.Population( self.params["str_d2.size"], Izhikevich2007_noisy_AMPA, name="str_d2" ) - str_fsi = Population( + str_fsi = ann.Population( self.params["str_fsi.size"], Izhikevich2007_fsi_noisy_AMPA, name="str_fsi" ) ### BG Populations - stn = Population(self.params["stn.size"], Izhikevich2003_noisy_AMPA, name="stn") - snr = 
Population(self.params["snr.size"], Izhikevich2003_noisy_AMPA, name="snr") - gpe_proto = Population( + stn = ann.Population(self.params["stn.size"], Izhikevich2003_noisy_AMPA, name="stn") + snr = ann.Population(self.params["snr.size"], Izhikevich2003_noisy_AMPA, name="snr") + gpe_proto = ann.Population( self.params["gpe_proto.size"], Izhikevich2003_flexible_noisy_AMPA, name="gpe_proto", ) - gpe_arky = Population( + gpe_arky = ann.Population( self.params["gpe_arky.size"], Izhikevich2003_flexible_noisy_AMPA, name="gpe_arky", ) - gpe_cp = Population( + gpe_cp = ann.Population( self.params["gpe_cp.size"], Izhikevich2003_flexible_noisy_AMPA, name="gpe_cp" ) - thal = Population(self.params["thal.size"], Izhikevich2003_noisy_AMPA, name="thal") + thal = ann.Population( + self.params["thal.size"], Izhikevich2003_noisy_AMPA, name="thal" + ) ### integrator Neurons - integrator_go = Population( + integrator_go = ann.Population( self.params["integrator_go.size"], integrator_neuron, stop_condition="decision>=0 : any", name="integrator_go", ) - integrator_stop = Population( + integrator_stop = ann.Population( self.params["integrator_stop.size"], integrator_neuron, stop_condition="decision>=0 : any", @@ -76,28 +78,28 @@ def BGM_v01(self): ###### PROJECTIONS ###### ### cortex go output - cor_go__str_d1 = Projection( + cor_go__str_d1 = ann.Projection( pre=cor_go, post=str_d1, target="ampa", synapse=factor_synapse, name="cor_go__str_d1", ) - cor_go__str_d2 = Projection( + cor_go__str_d2 = ann.Projection( pre=cor_go, post=str_d2, target="ampa", synapse=factor_synapse, name="cor_go__str_d2", ) - cor_go__str_fsi = Projection( + cor_go__str_fsi = ann.Projection( pre=cor_go, post=str_fsi, target="ampa", synapse=factor_synapse, name="cor_go__str_fsi", ) - cor_go__thal = Projection( + cor_go__thal = ann.Projection( pre=cor_go, post=thal, target="ampa", @@ -105,14 +107,14 @@ def BGM_v01(self): name="cor_go__thal", ) ### cortex stop output - cor_stop__gpe_arky = Projection( + 
cor_stop__gpe_arky = ann.Projection( pre=cor_stop, post=gpe_arky, target="ampa", synapse=factor_synapse, name="cor_stop__gpe_arky", ) - cor_stop__gpe_cp = Projection( + cor_stop__gpe_cp = ann.Projection( pre=cor_stop, post=gpe_cp, target="ampa", @@ -120,7 +122,7 @@ def BGM_v01(self): name="cor_stop__gpe_cp", ) ### cortex pause output - cor_pause__stn = Projection( + cor_pause__stn = ann.Projection( pre=cor_pause, post=stn, target="ampa", @@ -128,24 +130,24 @@ def BGM_v01(self): name="cor_pause__stn", ) ### str d1 output - str_d1__snr = Projection( + str_d1__snr = ann.Projection( pre=str_d1, post=snr, target="gaba", synapse=factor_synapse, name="str_d1__snr" ) - str_d1__gpe_cp = Projection( + str_d1__gpe_cp = ann.Projection( pre=str_d1, post=gpe_cp, target="gaba", synapse=factor_synapse, name="str_d1__gpe_cp", ) - str_d1__str_d1 = Projection( + str_d1__str_d1 = ann.Projection( pre=str_d1, post=str_d1, target="gaba", synapse=factor_synapse, name="str_d1__str_d1", ) - str_d1__str_d2 = Projection( + str_d1__str_d2 = ann.Projection( pre=str_d1, post=str_d2, target="gaba", @@ -153,35 +155,35 @@ def BGM_v01(self): name="str_d1__str_d2", ) ### str d2 output - str_d2__gpe_proto = Projection( + str_d2__gpe_proto = ann.Projection( pre=str_d2, post=gpe_proto, target="gaba", synapse=factor_synapse, name="str_d2__gpe_proto", ) - str_d2__gpe_arky = Projection( + str_d2__gpe_arky = ann.Projection( pre=str_d2, post=gpe_arky, target="gaba", synapse=factor_synapse, name="str_d2__gpe_arky", ) - str_d2__gpe_cp = Projection( + str_d2__gpe_cp = ann.Projection( pre=str_d2, post=gpe_cp, target="gaba", synapse=factor_synapse, name="str_d2__gpe_cp", ) - str_d2__str_d1 = Projection( + str_d2__str_d1 = ann.Projection( pre=str_d2, post=str_d1, target="gaba", synapse=factor_synapse, name="str_d2__str_d1", ) - str_d2__str_d2 = Projection( + str_d2__str_d2 = ann.Projection( pre=str_d2, post=str_d2, target="gaba", @@ -189,21 +191,21 @@ def BGM_v01(self): name="str_d2__str_d2", ) ### str fsi output 
- str_fsi__str_d1 = Projection( + str_fsi__str_d1 = ann.Projection( pre=str_fsi, post=str_d1, target="gaba", synapse=factor_synapse, name="str_fsi__str_d1", ) - str_fsi__str_d2 = Projection( + str_fsi__str_d2 = ann.Projection( pre=str_fsi, post=str_d2, target="gaba", synapse=factor_synapse, name="str_fsi__str_d2", ) - str_fsi__str_fsi = Projection( + str_fsi__str_fsi = ann.Projection( pre=str_fsi, post=str_fsi, target="gaba", @@ -211,56 +213,56 @@ def BGM_v01(self): name="str_fsi__str_fsi", ) ### stn output - stn__snr = Projection( + stn__snr = ann.Projection( pre=stn, post=snr, target="ampa", synapse=factor_synapse, name="stn__snr" ) - stn__gpe_proto = Projection( + stn__gpe_proto = ann.Projection( pre=stn, post=gpe_proto, target="ampa", synapse=factor_synapse, name="stn__gpe_proto", ) - stn__gpe_arky = Projection( + stn__gpe_arky = ann.Projection( pre=stn, post=gpe_arky, target="ampa", synapse=factor_synapse, name="stn__gpe_arky", ) - stn__gpe_cp = Projection( + stn__gpe_cp = ann.Projection( pre=stn, post=gpe_cp, target="ampa", synapse=factor_synapse, name="stn__gpe_cp" ) ### gpe proto output - gpe_proto__stn = Projection( + gpe_proto__stn = ann.Projection( pre=gpe_proto, post=stn, target="gaba", synapse=factor_synapse, name="gpe_proto__stn", ) - gpe_proto__snr = Projection( + gpe_proto__snr = ann.Projection( pre=gpe_proto, post=snr, target="gaba", synapse=factor_synapse, name="gpe_proto__snr", ) - gpe_proto__gpe_arky = Projection( + gpe_proto__gpe_arky = ann.Projection( pre=gpe_proto, post=gpe_arky, target="gaba", synapse=factor_synapse, name="gpe_proto__gpe_arky", ) - gpe_proto__gpe_cp = Projection( + gpe_proto__gpe_cp = ann.Projection( pre=gpe_proto, post=gpe_cp, target="gaba", synapse=factor_synapse, name="gpe_proto__gpe_cp", ) - gpe_proto__str_fsi = Projection( + gpe_proto__str_fsi = ann.Projection( pre=gpe_proto, post=str_fsi, target="gaba", @@ -268,35 +270,35 @@ def BGM_v01(self): name="gpe_proto__str_fsi", ) ### gpe arky output - gpe_arky__str_d1 = 
Projection( + gpe_arky__str_d1 = ann.Projection( pre=gpe_arky, post=str_d1, target="gaba", synapse=factor_synapse, name="gpe_arky__str_d1", ) - gpe_arky__str_d2 = Projection( + gpe_arky__str_d2 = ann.Projection( pre=gpe_arky, post=str_d2, target="gaba", synapse=factor_synapse, name="gpe_arky__str_d2", ) - gpe_arky__str_fsi = Projection( + gpe_arky__str_fsi = ann.Projection( pre=gpe_arky, post=str_fsi, target="gaba", synapse=factor_synapse, name="gpe_arky__str_fsi", ) - gpe_arky__gpe_proto = Projection( + gpe_arky__gpe_proto = ann.Projection( pre=gpe_arky, post=gpe_proto, target="gaba", synapse=factor_synapse, name="gpe_arky__gpe_proto", ) - gpe_arky__gpe_cp = Projection( + gpe_arky__gpe_cp = ann.Projection( pre=gpe_arky, post=gpe_cp, target="gaba", @@ -304,42 +306,42 @@ def BGM_v01(self): name="gpe_arky__gpe_cp", ) ### gpe cp output - gpe_cp__str_d1 = Projection( + gpe_cp__str_d1 = ann.Projection( pre=gpe_cp, post=str_d1, target="gaba", synapse=factor_synapse, name="gpe_cp__str_d1", ) - gpe_cp__str_d2 = Projection( + gpe_cp__str_d2 = ann.Projection( pre=gpe_cp, post=str_d2, target="gaba", synapse=factor_synapse, name="gpe_cp__str_d2", ) - gpe_cp__str_fsi = Projection( + gpe_cp__str_fsi = ann.Projection( pre=gpe_cp, post=str_fsi, target="gaba", synapse=factor_synapse, name="gpe_cp__str_fsi", ) - gpe_cp__gpe_proto = Projection( + gpe_cp__gpe_proto = ann.Projection( pre=gpe_cp, post=gpe_proto, target="gaba", synapse=factor_synapse, name="gpe_cp__gpe_proto", ) - gpe_cp__gpe_arky = Projection( + gpe_cp__gpe_arky = ann.Projection( pre=gpe_cp, post=gpe_arky, target="gaba", synapse=factor_synapse, name="gpe_cp__gpe_arky", ) - gpe_cp__integrator_stop = Projection( + gpe_cp__integrator_stop = ann.Projection( pre=gpe_cp, post=integrator_stop, target="ampa", @@ -347,32 +349,32 @@ def BGM_v01(self): name="gpe_cp__integrator_stop", ) ### snr output - snr__thal = Projection( + snr__thal = ann.Projection( pre=snr, post=thal, target="gaba", synapse=factor_synapse, name="snr__thal" 
) ### thal output - thal__integrator_go = Projection( + thal__integrator_go = ann.Projection( pre=thal, post=integrator_go, target="ampa", synapse=factor_synapse, name="thal__integrator_go", ) - thal__str_d1 = Projection( + thal__str_d1 = ann.Projection( pre=thal, post=str_d1, target="ampa", synapse=factor_synapse, name="thal__str_d1", ) - thal__str_d2 = Projection( + thal__str_d2 = ann.Projection( pre=thal, post=str_d2, target="ampa", synapse=factor_synapse, name="thal__str_d2", ) - thal__str_fsi = Projection( + thal__str_fsi = ann.Projection( pre=thal, post=str_fsi, target="ampa", @@ -390,52 +392,54 @@ def BGM_v02(self): """ ####### POPULATIONS ###### ### cortex / input populations - cor_go = Population( + cor_go = ann.Population( self.params["cor_go.size"], poisson_neuron_up_down, name="cor_go" ) - cor_pause = Population( + cor_pause = ann.Population( self.params["cor_pause.size"], poisson_neuron_up_down, name="cor_pause" ) - cor_stop = Population( + cor_stop = ann.Population( self.params["cor_stop.size"], poisson_neuron_up_down, name="cor_stop" ) ### Str Populations - str_d1 = Population( + str_d1 = ann.Population( self.params["str_d1.size"], Izhikevich2007_noisy_AMPA, name="str_d1" ) - str_d2 = Population( + str_d2 = ann.Population( self.params["str_d2.size"], Izhikevich2007_noisy_AMPA, name="str_d2" ) - str_fsi = Population( + str_fsi = ann.Population( self.params["str_fsi.size"], Izhikevich2007_Corbit_FSI_noisy_AMPA, name="str_fsi", ) ### BG Populations - stn = Population(self.params["stn.size"], Izhikevich2003_noisy_AMPA, name="stn") - snr = Population(self.params["snr.size"], Izhikevich2003_noisy_AMPA, name="snr") - gpe_proto = Population( + stn = ann.Population(self.params["stn.size"], Izhikevich2003_noisy_AMPA, name="stn") + snr = ann.Population(self.params["snr.size"], Izhikevich2003_noisy_AMPA, name="snr") + gpe_proto = ann.Population( self.params["gpe_proto.size"], Izhikevich2003_flexible_noisy_AMPA, name="gpe_proto", ) - gpe_arky = Population( + 
gpe_arky = ann.Population( self.params["gpe_arky.size"], Izhikevich2003_flexible_noisy_AMPA, name="gpe_arky", ) - gpe_cp = Population( + gpe_cp = ann.Population( self.params["gpe_cp.size"], Izhikevich2003_flexible_noisy_AMPA, name="gpe_cp" ) - thal = Population(self.params["thal.size"], Izhikevich2003_noisy_AMPA, name="thal") + thal = ann.Population( + self.params["thal.size"], Izhikevich2003_noisy_AMPA, name="thal" + ) ### integrator Neurons - integrator_go = Population( + integrator_go = ann.Population( self.params["integrator_go.size"], integrator_neuron, stop_condition="decision>=0 : any", name="integrator_go", ) - integrator_stop = Population( + integrator_stop = ann.Population( self.params["integrator_stop.size"], integrator_neuron, stop_condition="decision>=0 : any", @@ -444,28 +448,28 @@ def BGM_v02(self): ###### PROJECTIONS ###### ### cortex go output - cor_go__str_d1 = Projection( + cor_go__str_d1 = ann.Projection( pre=cor_go, post=str_d1, target="ampa", synapse=factor_synapse, name="cor_go__str_d1", ) - cor_go__str_d2 = Projection( + cor_go__str_d2 = ann.Projection( pre=cor_go, post=str_d2, target="ampa", synapse=factor_synapse, name="cor_go__str_d2", ) - cor_go__str_fsi = Projection( + cor_go__str_fsi = ann.Projection( pre=cor_go, post=str_fsi, target="ampa", synapse=factor_synapse, name="cor_go__str_fsi", ) - cor_go__thal = Projection( + cor_go__thal = ann.Projection( pre=cor_go, post=thal, target="ampa", @@ -473,14 +477,14 @@ def BGM_v02(self): name="cor_go__thal", ) ### cortex stop output - cor_stop__gpe_arky = Projection( + cor_stop__gpe_arky = ann.Projection( pre=cor_stop, post=gpe_arky, target="ampa", synapse=factor_synapse, name="cor_stop__gpe_arky", ) - cor_stop__gpe_cp = Projection( + cor_stop__gpe_cp = ann.Projection( pre=cor_stop, post=gpe_cp, target="ampa", @@ -488,7 +492,7 @@ def BGM_v02(self): name="cor_stop__gpe_cp", ) ### cortex pause output - cor_pause__stn = Projection( + cor_pause__stn = ann.Projection( pre=cor_pause, post=stn, 
target="ampa", @@ -496,24 +500,24 @@ def BGM_v02(self): name="cor_pause__stn", ) ### str d1 output - str_d1__snr = Projection( + str_d1__snr = ann.Projection( pre=str_d1, post=snr, target="gaba", synapse=factor_synapse, name="str_d1__snr" ) - str_d1__gpe_cp = Projection( + str_d1__gpe_cp = ann.Projection( pre=str_d1, post=gpe_cp, target="gaba", synapse=factor_synapse, name="str_d1__gpe_cp", ) - str_d1__str_d1 = Projection( + str_d1__str_d1 = ann.Projection( pre=str_d1, post=str_d1, target="gaba", synapse=factor_synapse, name="str_d1__str_d1", ) - str_d1__str_d2 = Projection( + str_d1__str_d2 = ann.Projection( pre=str_d1, post=str_d2, target="gaba", @@ -521,35 +525,35 @@ def BGM_v02(self): name="str_d1__str_d2", ) ### str d2 output - str_d2__gpe_proto = Projection( + str_d2__gpe_proto = ann.Projection( pre=str_d2, post=gpe_proto, target="gaba", synapse=factor_synapse, name="str_d2__gpe_proto", ) - str_d2__gpe_arky = Projection( + str_d2__gpe_arky = ann.Projection( pre=str_d2, post=gpe_arky, target="gaba", synapse=factor_synapse, name="str_d2__gpe_arky", ) - str_d2__gpe_cp = Projection( + str_d2__gpe_cp = ann.Projection( pre=str_d2, post=gpe_cp, target="gaba", synapse=factor_synapse, name="str_d2__gpe_cp", ) - str_d2__str_d1 = Projection( + str_d2__str_d1 = ann.Projection( pre=str_d2, post=str_d1, target="gaba", synapse=factor_synapse, name="str_d2__str_d1", ) - str_d2__str_d2 = Projection( + str_d2__str_d2 = ann.Projection( pre=str_d2, post=str_d2, target="gaba", @@ -557,21 +561,21 @@ def BGM_v02(self): name="str_d2__str_d2", ) ### str fsi output - str_fsi__str_d1 = Projection( + str_fsi__str_d1 = ann.Projection( pre=str_fsi, post=str_d1, target="gaba", synapse=factor_synapse, name="str_fsi__str_d1", ) - str_fsi__str_d2 = Projection( + str_fsi__str_d2 = ann.Projection( pre=str_fsi, post=str_d2, target="gaba", synapse=factor_synapse, name="str_fsi__str_d2", ) - str_fsi__str_fsi = Projection( + str_fsi__str_fsi = ann.Projection( pre=str_fsi, post=str_fsi, 
target="gaba", @@ -579,56 +583,56 @@ def BGM_v02(self): name="str_fsi__str_fsi", ) ### stn output - stn__snr = Projection( + stn__snr = ann.Projection( pre=stn, post=snr, target="ampa", synapse=factor_synapse, name="stn__snr" ) - stn__gpe_proto = Projection( + stn__gpe_proto = ann.Projection( pre=stn, post=gpe_proto, target="ampa", synapse=factor_synapse, name="stn__gpe_proto", ) - stn__gpe_arky = Projection( + stn__gpe_arky = ann.Projection( pre=stn, post=gpe_arky, target="ampa", synapse=factor_synapse, name="stn__gpe_arky", ) - stn__gpe_cp = Projection( + stn__gpe_cp = ann.Projection( pre=stn, post=gpe_cp, target="ampa", synapse=factor_synapse, name="stn__gpe_cp" ) ### gpe proto output - gpe_proto__stn = Projection( + gpe_proto__stn = ann.Projection( pre=gpe_proto, post=stn, target="gaba", synapse=factor_synapse, name="gpe_proto__stn", ) - gpe_proto__snr = Projection( + gpe_proto__snr = ann.Projection( pre=gpe_proto, post=snr, target="gaba", synapse=factor_synapse, name="gpe_proto__snr", ) - gpe_proto__gpe_arky = Projection( + gpe_proto__gpe_arky = ann.Projection( pre=gpe_proto, post=gpe_arky, target="gaba", synapse=factor_synapse, name="gpe_proto__gpe_arky", ) - gpe_proto__gpe_cp = Projection( + gpe_proto__gpe_cp = ann.Projection( pre=gpe_proto, post=gpe_cp, target="gaba", synapse=factor_synapse, name="gpe_proto__gpe_cp", ) - gpe_proto__str_fsi = Projection( + gpe_proto__str_fsi = ann.Projection( pre=gpe_proto, post=str_fsi, target="gaba", @@ -636,35 +640,35 @@ def BGM_v02(self): name="gpe_proto__str_fsi", ) ### gpe arky output - gpe_arky__str_d1 = Projection( + gpe_arky__str_d1 = ann.Projection( pre=gpe_arky, post=str_d1, target="gaba", synapse=factor_synapse, name="gpe_arky__str_d1", ) - gpe_arky__str_d2 = Projection( + gpe_arky__str_d2 = ann.Projection( pre=gpe_arky, post=str_d2, target="gaba", synapse=factor_synapse, name="gpe_arky__str_d2", ) - gpe_arky__str_fsi = Projection( + gpe_arky__str_fsi = ann.Projection( pre=gpe_arky, post=str_fsi, target="gaba", 
synapse=factor_synapse, name="gpe_arky__str_fsi", ) - gpe_arky__gpe_proto = Projection( + gpe_arky__gpe_proto = ann.Projection( pre=gpe_arky, post=gpe_proto, target="gaba", synapse=factor_synapse, name="gpe_arky__gpe_proto", ) - gpe_arky__gpe_cp = Projection( + gpe_arky__gpe_cp = ann.Projection( pre=gpe_arky, post=gpe_cp, target="gaba", @@ -672,42 +676,42 @@ def BGM_v02(self): name="gpe_arky__gpe_cp", ) ### gpe cp output - gpe_cp__str_d1 = Projection( + gpe_cp__str_d1 = ann.Projection( pre=gpe_cp, post=str_d1, target="gaba", synapse=factor_synapse, name="gpe_cp__str_d1", ) - gpe_cp__str_d2 = Projection( + gpe_cp__str_d2 = ann.Projection( pre=gpe_cp, post=str_d2, target="gaba", synapse=factor_synapse, name="gpe_cp__str_d2", ) - gpe_cp__str_fsi = Projection( + gpe_cp__str_fsi = ann.Projection( pre=gpe_cp, post=str_fsi, target="gaba", synapse=factor_synapse, name="gpe_cp__str_fsi", ) - gpe_cp__gpe_proto = Projection( + gpe_cp__gpe_proto = ann.Projection( pre=gpe_cp, post=gpe_proto, target="gaba", synapse=factor_synapse, name="gpe_cp__gpe_proto", ) - gpe_cp__gpe_arky = Projection( + gpe_cp__gpe_arky = ann.Projection( pre=gpe_cp, post=gpe_arky, target="gaba", synapse=factor_synapse, name="gpe_cp__gpe_arky", ) - gpe_cp__integrator_stop = Projection( + gpe_cp__integrator_stop = ann.Projection( pre=gpe_cp, post=integrator_stop, target="ampa", @@ -715,32 +719,32 @@ def BGM_v02(self): name="gpe_cp__integrator_stop", ) ### snr output - snr__thal = Projection( + snr__thal = ann.Projection( pre=snr, post=thal, target="gaba", synapse=factor_synapse, name="snr__thal" ) ### thal output - thal__integrator_go = Projection( + thal__integrator_go = ann.Projection( pre=thal, post=integrator_go, target="ampa", synapse=factor_synapse, name="thal__integrator_go", ) - thal__str_d1 = Projection( + thal__str_d1 = ann.Projection( pre=thal, post=str_d1, target="ampa", synapse=factor_synapse, name="thal__str_d1", ) - thal__str_d2 = Projection( + thal__str_d2 = ann.Projection( pre=thal, 
post=str_d2, target="ampa", synapse=factor_synapse, name="thal__str_d2", ) - thal__str_fsi = Projection( + thal__str_fsi = ann.Projection( pre=thal, post=str_fsi, target="ampa", @@ -758,48 +762,52 @@ def BGM_vTEST(self): """ ####### POPULATIONS ###### ### cortex / input populations - cor_go = Population(self.params["cor_go.size"], poisson_neuron_sin, name="cor_go") - cor_pause = Population( + cor_go = ann.Population( + self.params["cor_go.size"], poisson_neuron_sin, name="cor_go" + ) + cor_pause = ann.Population( self.params["cor_pause.size"], poisson_neuron_up_down, name="cor_pause" ) - cor_stop = Population( + cor_stop = ann.Population( self.params["cor_stop.size"], poisson_neuron_up_down, name="cor_stop" ) ### Str Populations - str_d1 = Population( + str_d1 = ann.Population( self.params["str_d1.size"], Izhikevich2007_noisy_AMPA, name="str_d1" ) - str_d2 = Population( + str_d2 = ann.Population( self.params["str_d2.size"], Izhikevich2007_noisy_AMPA, name="str_d2" ) - str_fsi = Population( + str_fsi = ann.Population( self.params["str_fsi.size"], Izhikevich2007_fsi_noisy_AMPA, name="str_fsi" ) ### BG Populations - stn = Population(self.params["stn.size"], Izhikevich2003_noisy_AMPA, name="stn") - snr = Population(self.params["snr.size"], Izhikevich2003_noisy_AMPA, name="snr") - gpe_proto = Population( + stn = ann.Population(self.params["stn.size"], Izhikevich2003_noisy_AMPA, name="stn") + snr = ann.Population(self.params["snr.size"], Izhikevich2003_noisy_AMPA, name="snr") + gpe_proto = ann.Population( self.params["gpe_proto.size"], Izhikevich2003_flexible_noisy_AMPA, name="gpe_proto", ) - gpe_arky = Population( + gpe_arky = ann.Population( self.params["gpe_arky.size"], Izhikevich2003_flexible_noisy_AMPA, name="gpe_arky", ) - gpe_cp = Population( + gpe_cp = ann.Population( self.params["gpe_cp.size"], Izhikevich2003_flexible_noisy_AMPA, name="gpe_cp" ) - thal = Population(self.params["thal.size"], Izhikevich2003_noisy_AMPA, name="thal") + thal = ann.Population( + 
self.params["thal.size"], Izhikevich2003_noisy_AMPA, name="thal" + ) ### integrator Neurons - integrator_go = Population( + integrator_go = ann.Population( self.params["integrator_go.size"], integrator_neuron, stop_condition="decision>=0 : any", name="integrator_go", ) - integrator_stop = Population( + integrator_stop = ann.Population( self.params["integrator_stop.size"], integrator_neuron, stop_condition="decision>=0 : any", @@ -808,28 +816,28 @@ def BGM_vTEST(self): ###### PROJECTIONS ###### ### cortex go output - cor_go__str_d1 = Projection( + cor_go__str_d1 = ann.Projection( pre=cor_go, post=str_d1, target="ampa", synapse=factor_synapse, name="cor_go__str_d1", ) - cor_go__str_d2 = Projection( + cor_go__str_d2 = ann.Projection( pre=cor_go, post=str_d2, target="ampa", synapse=factor_synapse, name="cor_go__str_d2", ) - cor_go__str_fsi = Projection( + cor_go__str_fsi = ann.Projection( pre=cor_go, post=str_fsi, target="ampa", synapse=factor_synapse, name="cor_go__str_fsi", ) - cor_go__thal = Projection( + cor_go__thal = ann.Projection( pre=cor_go, post=thal, target="ampa", @@ -837,14 +845,14 @@ def BGM_vTEST(self): name="cor_go__thal", ) ### cortex stop output - cor_stop__gpe_arky = Projection( + cor_stop__gpe_arky = ann.Projection( pre=cor_stop, post=gpe_arky, target="ampa", synapse=factor_synapse, name="cor_stop__gpe_arky", ) - cor_stop__gpe_cp = Projection( + cor_stop__gpe_cp = ann.Projection( pre=cor_stop, post=gpe_cp, target="ampa", @@ -852,7 +860,7 @@ def BGM_vTEST(self): name="cor_stop__gpe_cp", ) ### cortex pause output - cor_pause__stn = Projection( + cor_pause__stn = ann.Projection( pre=cor_pause, post=stn, target="ampa", @@ -860,24 +868,24 @@ def BGM_vTEST(self): name="cor_pause__stn", ) ### str d1 output - str_d1__snr = Projection( + str_d1__snr = ann.Projection( pre=str_d1, post=snr, target="gaba", synapse=factor_synapse, name="str_d1__snr" ) - str_d1__gpe_cp = Projection( + str_d1__gpe_cp = ann.Projection( pre=str_d1, post=gpe_cp, target="gaba", 
synapse=factor_synapse, name="str_d1__gpe_cp", ) - str_d1__str_d1 = Projection( + str_d1__str_d1 = ann.Projection( pre=str_d1, post=str_d1, target="gaba", synapse=factor_synapse, name="str_d1__str_d1", ) - str_d1__str_d2 = Projection( + str_d1__str_d2 = ann.Projection( pre=str_d1, post=str_d2, target="gaba", @@ -885,35 +893,35 @@ def BGM_vTEST(self): name="str_d1__str_d2", ) ### str d2 output - str_d2__gpe_proto = Projection( + str_d2__gpe_proto = ann.Projection( pre=str_d2, post=gpe_proto, target="gaba", synapse=factor_synapse, name="str_d2__gpe_proto", ) - str_d2__gpe_arky = Projection( + str_d2__gpe_arky = ann.Projection( pre=str_d2, post=gpe_arky, target="gaba", synapse=factor_synapse, name="str_d2__gpe_arky", ) - str_d2__gpe_cp = Projection( + str_d2__gpe_cp = ann.Projection( pre=str_d2, post=gpe_cp, target="gaba", synapse=factor_synapse, name="str_d2__gpe_cp", ) - str_d2__str_d1 = Projection( + str_d2__str_d1 = ann.Projection( pre=str_d2, post=str_d1, target="gaba", synapse=factor_synapse, name="str_d2__str_d1", ) - str_d2__str_d2 = Projection( + str_d2__str_d2 = ann.Projection( pre=str_d2, post=str_d2, target="gaba", @@ -921,21 +929,21 @@ def BGM_vTEST(self): name="str_d2__str_d2", ) ### str fsi output - str_fsi__str_d1 = Projection( + str_fsi__str_d1 = ann.Projection( pre=str_fsi, post=str_d1, target="gaba", synapse=factor_synapse, name="str_fsi__str_d1", ) - str_fsi__str_d2 = Projection( + str_fsi__str_d2 = ann.Projection( pre=str_fsi, post=str_d2, target="gaba", synapse=factor_synapse, name="str_fsi__str_d2", ) - str_fsi__str_fsi = Projection( + str_fsi__str_fsi = ann.Projection( pre=str_fsi, post=str_fsi, target="gaba", @@ -943,56 +951,56 @@ def BGM_vTEST(self): name="str_fsi__str_fsi", ) ### stn output - stn__snr = Projection( + stn__snr = ann.Projection( pre=stn, post=snr, target="ampa", synapse=factor_synapse, name="stn__snr" ) - stn__gpe_proto = Projection( + stn__gpe_proto = ann.Projection( pre=stn, post=gpe_proto, target="ampa", 
synapse=factor_synapse, name="stn__gpe_proto", ) - stn__gpe_arky = Projection( + stn__gpe_arky = ann.Projection( pre=stn, post=gpe_arky, target="ampa", synapse=factor_synapse, name="stn__gpe_arky", ) - stn__gpe_cp = Projection( + stn__gpe_cp = ann.Projection( pre=stn, post=gpe_cp, target="ampa", synapse=factor_synapse, name="stn__gpe_cp" ) ### gpe proto output - gpe_proto__stn = Projection( + gpe_proto__stn = ann.Projection( pre=gpe_proto, post=stn, target="gaba", synapse=factor_synapse, name="gpe_proto__stn", ) - gpe_proto__snr = Projection( + gpe_proto__snr = ann.Projection( pre=gpe_proto, post=snr, target="gaba", synapse=factor_synapse, name="gpe_proto__snr", ) - gpe_proto__gpe_arky = Projection( + gpe_proto__gpe_arky = ann.Projection( pre=gpe_proto, post=gpe_arky, target="gaba", synapse=factor_synapse, name="gpe_proto__gpe_arky", ) - gpe_proto__gpe_cp = Projection( + gpe_proto__gpe_cp = ann.Projection( pre=gpe_proto, post=gpe_cp, target="gaba", synapse=factor_synapse, name="gpe_proto__gpe_cp", ) - gpe_proto__str_fsi = Projection( + gpe_proto__str_fsi = ann.Projection( pre=gpe_proto, post=str_fsi, target="gaba", @@ -1000,35 +1008,35 @@ def BGM_vTEST(self): name="gpe_proto__str_fsi", ) ### gpe arky output - gpe_arky__str_d1 = Projection( + gpe_arky__str_d1 = ann.Projection( pre=gpe_arky, post=str_d1, target="gaba", synapse=factor_synapse, name="gpe_arky__str_d1", ) - gpe_arky__str_d2 = Projection( + gpe_arky__str_d2 = ann.Projection( pre=gpe_arky, post=str_d2, target="gaba", synapse=factor_synapse, name="gpe_arky__str_d2", ) - gpe_arky__str_fsi = Projection( + gpe_arky__str_fsi = ann.Projection( pre=gpe_arky, post=str_fsi, target="gaba", synapse=factor_synapse, name="gpe_arky__str_fsi", ) - gpe_arky__gpe_proto = Projection( + gpe_arky__gpe_proto = ann.Projection( pre=gpe_arky, post=gpe_proto, target="gaba", synapse=factor_synapse, name="gpe_arky__gpe_proto", ) - gpe_arky__gpe_cp = Projection( + gpe_arky__gpe_cp = ann.Projection( pre=gpe_arky, post=gpe_cp, 
target="gaba", @@ -1036,42 +1044,42 @@ def BGM_vTEST(self): name="gpe_arky__gpe_cp", ) ### gpe cp output - gpe_cp__str_d1 = Projection( + gpe_cp__str_d1 = ann.Projection( pre=gpe_cp, post=str_d1, target="gaba", synapse=factor_synapse, name="gpe_cp__str_d1", ) - gpe_cp__str_d2 = Projection( + gpe_cp__str_d2 = ann.Projection( pre=gpe_cp, post=str_d2, target="gaba", synapse=factor_synapse, name="gpe_cp__str_d2", ) - gpe_cp__str_fsi = Projection( + gpe_cp__str_fsi = ann.Projection( pre=gpe_cp, post=str_fsi, target="gaba", synapse=factor_synapse, name="gpe_cp__str_fsi", ) - gpe_cp__gpe_proto = Projection( + gpe_cp__gpe_proto = ann.Projection( pre=gpe_cp, post=gpe_proto, target="gaba", synapse=factor_synapse, name="gpe_cp__gpe_proto", ) - gpe_cp__gpe_arky = Projection( + gpe_cp__gpe_arky = ann.Projection( pre=gpe_cp, post=gpe_arky, target="gaba", synapse=factor_synapse, name="gpe_cp__gpe_arky", ) - gpe_cp__integrator_stop = Projection( + gpe_cp__integrator_stop = ann.Projection( pre=gpe_cp, post=integrator_stop, target="ampa", @@ -1079,32 +1087,32 @@ def BGM_vTEST(self): name="gpe_cp__integrator_stop", ) ### snr output - snr__thal = Projection( + snr__thal = ann.Projection( pre=snr, post=thal, target="gaba", synapse=factor_synapse, name="snr__thal" ) ### thal output - thal__integrator_go = Projection( + thal__integrator_go = ann.Projection( pre=thal, post=integrator_go, target="ampa", synapse=factor_synapse, name="thal__integrator_go", ) - thal__str_d1 = Projection( + thal__str_d1 = ann.Projection( pre=thal, post=str_d1, target="ampa", synapse=factor_synapse, name="thal__str_d1", ) - thal__str_d2 = Projection( + thal__str_d2 = ann.Projection( pre=thal, post=str_d2, target="ampa", synapse=factor_synapse, name="thal__str_d2", ) - thal__str_fsi = Projection( + thal__str_fsi = ann.Projection( pre=thal, post=str_fsi, target="ampa", @@ -1125,56 +1133,58 @@ def BGM_v03(self): """ ####### POPULATIONS ###### ### cortex / input populations - cor_go = Population( + cor_go = 
ann.Population( self.params["cor_go.size"], poisson_neuron_up_down, name="cor_go" ) - cor_pause = Population( + cor_pause = ann.Population( self.params["cor_pause.size"], poisson_neuron_up_down, name="cor_pause" ) - cor_stop = Population( + cor_stop = ann.Population( self.params["cor_stop.size"], poisson_neuron_up_down, name="cor_stop" ) ### Str Populations - str_d1 = Population( + str_d1 = ann.Population( self.params["str_d1.size"], Izhikevich2007_noisy_AMPA_oscillating, name="str_d1", # NEW NEURON MODEL ) - str_d2 = Population( + str_d2 = ann.Population( self.params["str_d2.size"], Izhikevich2007_noisy_AMPA_oscillating, name="str_d2", # NEW NEURON MODEL ) - str_fsi = Population( + str_fsi = ann.Population( self.params["str_fsi.size"], Izhikevich2007_Corbit_FSI_noisy_AMPA, name="str_fsi", ) ### BG Populations - stn = Population(self.params["stn.size"], Izhikevich2003_noisy_AMPA, name="stn") - snr = Population(self.params["snr.size"], Izhikevich2003_noisy_AMPA, name="snr") - gpe_proto = Population( + stn = ann.Population(self.params["stn.size"], Izhikevich2003_noisy_AMPA, name="stn") + snr = ann.Population(self.params["snr.size"], Izhikevich2003_noisy_AMPA, name="snr") + gpe_proto = ann.Population( self.params["gpe_proto.size"], Izhikevich2003_flexible_noisy_AMPA, name="gpe_proto", ) - gpe_arky = Population( + gpe_arky = ann.Population( self.params["gpe_arky.size"], Izhikevich2003_flexible_noisy_AMPA, name="gpe_arky", ) - gpe_cp = Population( + gpe_cp = ann.Population( self.params["gpe_cp.size"], Izhikevich2003_flexible_noisy_AMPA, name="gpe_cp" ) - thal = Population(self.params["thal.size"], Izhikevich2003_noisy_AMPA, name="thal") + thal = ann.Population( + self.params["thal.size"], Izhikevich2003_noisy_AMPA, name="thal" + ) ### integrator Neurons - integrator_go = Population( + integrator_go = ann.Population( self.params["integrator_go.size"], integrator_neuron, stop_condition="decision>=0 : any", name="integrator_go", ) - integrator_stop = Population( + 
integrator_stop = ann.Population( self.params["integrator_stop.size"], integrator_neuron, stop_condition="decision>=0 : any", @@ -1183,28 +1193,28 @@ def BGM_v03(self): ###### PROJECTIONS ###### ### cortex go output - cor_go__str_d1 = Projection( + cor_go__str_d1 = ann.Projection( pre=cor_go, post=str_d1, target="ampa", synapse=factor_synapse, name="cor_go__str_d1", ) - cor_go__str_d2 = Projection( + cor_go__str_d2 = ann.Projection( pre=cor_go, post=str_d2, target="ampa", synapse=factor_synapse, name="cor_go__str_d2", ) - cor_go__str_fsi = Projection( + cor_go__str_fsi = ann.Projection( pre=cor_go, post=str_fsi, target="ampa", synapse=factor_synapse, name="cor_go__str_fsi", ) - cor_go__thal = Projection( + cor_go__thal = ann.Projection( pre=cor_go, post=thal, target="ampa", @@ -1212,14 +1222,14 @@ def BGM_v03(self): name="cor_go__thal", ) ### cortex stop output - cor_stop__gpe_arky = Projection( + cor_stop__gpe_arky = ann.Projection( pre=cor_stop, post=gpe_arky, target="ampa", synapse=factor_synapse, name="cor_stop__gpe_arky", ) - cor_stop__gpe_cp = Projection( + cor_stop__gpe_cp = ann.Projection( pre=cor_stop, post=gpe_cp, target="ampa", @@ -1227,7 +1237,7 @@ def BGM_v03(self): name="cor_stop__gpe_cp", ) ### cortex pause output - cor_pause__stn = Projection( + cor_pause__stn = ann.Projection( pre=cor_pause, post=stn, target="ampa", @@ -1235,24 +1245,24 @@ def BGM_v03(self): name="cor_pause__stn", ) ### str d1 output - str_d1__snr = Projection( + str_d1__snr = ann.Projection( pre=str_d1, post=snr, target="gaba", synapse=factor_synapse, name="str_d1__snr" ) - str_d1__gpe_cp = Projection( + str_d1__gpe_cp = ann.Projection( pre=str_d1, post=gpe_cp, target="gaba", synapse=factor_synapse, name="str_d1__gpe_cp", ) - str_d1__str_d1 = Projection( + str_d1__str_d1 = ann.Projection( pre=str_d1, post=str_d1, target="gaba", synapse=factor_synapse, name="str_d1__str_d1", ) - str_d1__str_d2 = Projection( + str_d1__str_d2 = ann.Projection( pre=str_d1, post=str_d2, target="gaba", 
@@ -1260,35 +1270,35 @@ def BGM_v03(self): name="str_d1__str_d2", ) ### str d2 output - str_d2__gpe_proto = Projection( + str_d2__gpe_proto = ann.Projection( pre=str_d2, post=gpe_proto, target="gaba", synapse=factor_synapse, name="str_d2__gpe_proto", ) - str_d2__gpe_arky = Projection( + str_d2__gpe_arky = ann.Projection( pre=str_d2, post=gpe_arky, target="gaba", synapse=factor_synapse, name="str_d2__gpe_arky", ) - str_d2__gpe_cp = Projection( + str_d2__gpe_cp = ann.Projection( pre=str_d2, post=gpe_cp, target="gaba", synapse=factor_synapse, name="str_d2__gpe_cp", ) - str_d2__str_d1 = Projection( + str_d2__str_d1 = ann.Projection( pre=str_d2, post=str_d1, target="gaba", synapse=factor_synapse, name="str_d2__str_d1", ) - str_d2__str_d2 = Projection( + str_d2__str_d2 = ann.Projection( pre=str_d2, post=str_d2, target="gaba", @@ -1296,21 +1306,21 @@ def BGM_v03(self): name="str_d2__str_d2", ) ### str fsi output - str_fsi__str_d1 = Projection( + str_fsi__str_d1 = ann.Projection( pre=str_fsi, post=str_d1, target="gaba", synapse=factor_synapse, name="str_fsi__str_d1", ) - str_fsi__str_d2 = Projection( + str_fsi__str_d2 = ann.Projection( pre=str_fsi, post=str_d2, target="gaba", synapse=factor_synapse, name="str_fsi__str_d2", ) - str_fsi__str_fsi = Projection( + str_fsi__str_fsi = ann.Projection( pre=str_fsi, post=str_fsi, target="gaba", @@ -1318,56 +1328,56 @@ def BGM_v03(self): name="str_fsi__str_fsi", ) ### stn output - stn__snr = Projection( + stn__snr = ann.Projection( pre=stn, post=snr, target="ampa", synapse=factor_synapse, name="stn__snr" ) - stn__gpe_proto = Projection( + stn__gpe_proto = ann.Projection( pre=stn, post=gpe_proto, target="ampa", synapse=factor_synapse, name="stn__gpe_proto", ) - stn__gpe_arky = Projection( + stn__gpe_arky = ann.Projection( pre=stn, post=gpe_arky, target="ampa", synapse=factor_synapse, name="stn__gpe_arky", ) - stn__gpe_cp = Projection( + stn__gpe_cp = ann.Projection( pre=stn, post=gpe_cp, target="ampa", synapse=factor_synapse, 
name="stn__gpe_cp" ) ### gpe proto output - gpe_proto__stn = Projection( + gpe_proto__stn = ann.Projection( pre=gpe_proto, post=stn, target="gaba", synapse=factor_synapse, name="gpe_proto__stn", ) - gpe_proto__snr = Projection( + gpe_proto__snr = ann.Projection( pre=gpe_proto, post=snr, target="gaba", synapse=factor_synapse, name="gpe_proto__snr", ) - gpe_proto__gpe_arky = Projection( + gpe_proto__gpe_arky = ann.Projection( pre=gpe_proto, post=gpe_arky, target="gaba", synapse=factor_synapse, name="gpe_proto__gpe_arky", ) - gpe_proto__gpe_cp = Projection( + gpe_proto__gpe_cp = ann.Projection( pre=gpe_proto, post=gpe_cp, target="gaba", synapse=factor_synapse, name="gpe_proto__gpe_cp", ) - gpe_proto__str_fsi = Projection( + gpe_proto__str_fsi = ann.Projection( pre=gpe_proto, post=str_fsi, target="gaba", @@ -1375,35 +1385,35 @@ def BGM_v03(self): name="gpe_proto__str_fsi", ) ### gpe arky output - gpe_arky__str_d1 = Projection( + gpe_arky__str_d1 = ann.Projection( pre=gpe_arky, post=str_d1, target="gaba", synapse=factor_synapse, name="gpe_arky__str_d1", ) - gpe_arky__str_d2 = Projection( + gpe_arky__str_d2 = ann.Projection( pre=gpe_arky, post=str_d2, target="gaba", synapse=factor_synapse, name="gpe_arky__str_d2", ) - gpe_arky__str_fsi = Projection( + gpe_arky__str_fsi = ann.Projection( pre=gpe_arky, post=str_fsi, target="gaba", synapse=factor_synapse, name="gpe_arky__str_fsi", ) - gpe_arky__gpe_proto = Projection( + gpe_arky__gpe_proto = ann.Projection( pre=gpe_arky, post=gpe_proto, target="gaba", synapse=factor_synapse, name="gpe_arky__gpe_proto", ) - gpe_arky__gpe_cp = Projection( + gpe_arky__gpe_cp = ann.Projection( pre=gpe_arky, post=gpe_cp, target="gaba", @@ -1411,42 +1421,42 @@ def BGM_v03(self): name="gpe_arky__gpe_cp", ) ### gpe cp output - gpe_cp__str_d1 = Projection( + gpe_cp__str_d1 = ann.Projection( pre=gpe_cp, post=str_d1, target="gaba", synapse=factor_synapse, name="gpe_cp__str_d1", ) - gpe_cp__str_d2 = Projection( + gpe_cp__str_d2 = ann.Projection( 
pre=gpe_cp, post=str_d2, target="gaba", synapse=factor_synapse, name="gpe_cp__str_d2", ) - gpe_cp__str_fsi = Projection( + gpe_cp__str_fsi = ann.Projection( pre=gpe_cp, post=str_fsi, target="gaba", synapse=factor_synapse, name="gpe_cp__str_fsi", ) - gpe_cp__gpe_proto = Projection( + gpe_cp__gpe_proto = ann.Projection( pre=gpe_cp, post=gpe_proto, target="gaba", synapse=factor_synapse, name="gpe_cp__gpe_proto", ) - gpe_cp__gpe_arky = Projection( + gpe_cp__gpe_arky = ann.Projection( pre=gpe_cp, post=gpe_arky, target="gaba", synapse=factor_synapse, name="gpe_cp__gpe_arky", ) - gpe_cp__integrator_stop = Projection( + gpe_cp__integrator_stop = ann.Projection( pre=gpe_cp, post=integrator_stop, target="ampa", @@ -1454,32 +1464,32 @@ def BGM_v03(self): name="gpe_cp__integrator_stop", ) ### snr output - snr__thal = Projection( + snr__thal = ann.Projection( pre=snr, post=thal, target="gaba", synapse=factor_synapse, name="snr__thal" ) ### thal output - thal__integrator_go = Projection( + thal__integrator_go = ann.Projection( pre=thal, post=integrator_go, target="ampa", synapse=factor_synapse, name="thal__integrator_go", ) - thal__str_d1 = Projection( + thal__str_d1 = ann.Projection( pre=thal, post=str_d1, target="ampa", synapse=factor_synapse, name="thal__str_d1", ) - thal__str_d2 = Projection( + thal__str_d2 = ann.Projection( pre=thal, post=str_d2, target="ampa", synapse=factor_synapse, name="thal__str_d2", ) - thal__str_fsi = Projection( + thal__str_fsi = ann.Projection( pre=thal, post=str_fsi, target="ampa", @@ -1494,52 +1504,54 @@ def BGM_v04(self): """ ####### POPULATIONS ###### ### cortex / input populations - cor_go = Population( + cor_go = ann.Population( self.params["cor_go.size"], poisson_neuron_up_down, name="cor_go" ) - cor_pause = Population( + cor_pause = ann.Population( self.params["cor_pause.size"], poisson_neuron_up_down, name="cor_pause" ) - cor_stop = Population( + cor_stop = ann.Population( self.params["cor_stop.size"], poisson_neuron_up_down, 
name="cor_stop" ) ### Str Populations - str_d1 = Population( + str_d1 = ann.Population( self.params["str_d1.size"], Izhikevich2007_noisy_AMPA, name="str_d1" ) - str_d2 = Population( + str_d2 = ann.Population( self.params["str_d2.size"], Izhikevich2007_noisy_AMPA, name="str_d2" ) - str_fsi = Population( + str_fsi = ann.Population( self.params["str_fsi.size"], Izhikevich2007_Corbit_FSI_noisy_AMPA, name="str_fsi", ) ### BG Populations - stn = Population(self.params["stn.size"], Izhikevich2003_noisy_AMPA, name="stn") - snr = Population(self.params["snr.size"], Izhikevich2003_noisy_AMPA, name="snr") - gpe_proto = Population( + stn = ann.Population(self.params["stn.size"], Izhikevich2003_noisy_AMPA, name="stn") + snr = ann.Population(self.params["snr.size"], Izhikevich2003_noisy_AMPA, name="snr") + gpe_proto = ann.Population( self.params["gpe_proto.size"], Izhikevich2003_flexible_noisy_AMPA, name="gpe_proto", ) - gpe_arky = Population( + gpe_arky = ann.Population( self.params["gpe_arky.size"], Izhikevich2003_flexible_noisy_AMPA, name="gpe_arky", ) - gpe_cp = Population( + gpe_cp = ann.Population( self.params["gpe_cp.size"], Izhikevich2003_flexible_noisy_AMPA, name="gpe_cp" ) - thal = Population(self.params["thal.size"], Izhikevich2003_noisy_AMPA, name="thal") + thal = ann.Population( + self.params["thal.size"], Izhikevich2003_noisy_AMPA, name="thal" + ) ### integrator Neurons - integrator_go = Population( + integrator_go = ann.Population( self.params["integrator_go.size"], integrator_neuron, stop_condition="decision>=0 : any", name="integrator_go", ) - integrator_stop = Population( + integrator_stop = ann.Population( self.params["integrator_stop.size"], integrator_neuron, stop_condition="decision>=0 : any", @@ -1548,28 +1560,28 @@ def BGM_v04(self): ###### PROJECTIONS ###### ### cortex go output - cor_go__str_d1 = Projection( + cor_go__str_d1 = ann.Projection( pre=cor_go, post=str_d1, target="ampa", synapse=factor_synapse, name="cor_go__str_d1", ) - cor_go__str_d2 = 
Projection( + cor_go__str_d2 = ann.Projection( pre=cor_go, post=str_d2, target="ampa", synapse=factor_synapse, name="cor_go__str_d2", ) - cor_go__str_fsi = Projection( + cor_go__str_fsi = ann.Projection( pre=cor_go, post=str_fsi, target="ampa", synapse=factor_synapse, name="cor_go__str_fsi", ) - cor_go__thal = Projection( + cor_go__thal = ann.Projection( pre=cor_go, post=thal, target="ampa", @@ -1577,14 +1589,14 @@ def BGM_v04(self): name="cor_go__thal", ) ### cortex stop output - cor_stop__gpe_arky = Projection( + cor_stop__gpe_arky = ann.Projection( pre=cor_stop, post=gpe_arky, target="ampa", synapse=factor_synapse, name="cor_stop__gpe_arky", ) - cor_stop__gpe_cp = Projection( + cor_stop__gpe_cp = ann.Projection( pre=cor_stop, post=gpe_cp, target="ampa", @@ -1592,7 +1604,7 @@ def BGM_v04(self): name="cor_stop__gpe_cp", ) - cor_stop__gpe_proto = Projection( # NEW ! + cor_stop__gpe_proto = ann.Projection( # NEW ! pre=cor_stop, post=gpe_proto, target="ampa", @@ -1600,7 +1612,7 @@ def BGM_v04(self): name="cor_stop__gpe_proto", ) ### cortex pause output - cor_pause__stn = Projection( + cor_pause__stn = ann.Projection( pre=cor_pause, post=stn, target="ampa", @@ -1608,24 +1620,24 @@ def BGM_v04(self): name="cor_pause__stn", ) ### str d1 output - str_d1__snr = Projection( + str_d1__snr = ann.Projection( pre=str_d1, post=snr, target="gaba", synapse=factor_synapse, name="str_d1__snr" ) - str_d1__gpe_cp = Projection( + str_d1__gpe_cp = ann.Projection( pre=str_d1, post=gpe_cp, target="gaba", synapse=factor_synapse, name="str_d1__gpe_cp", ) - str_d1__str_d1 = Projection( + str_d1__str_d1 = ann.Projection( pre=str_d1, post=str_d1, target="gaba", synapse=factor_synapse, name="str_d1__str_d1", ) - str_d1__str_d2 = Projection( + str_d1__str_d2 = ann.Projection( pre=str_d1, post=str_d2, target="gaba", @@ -1633,35 +1645,35 @@ def BGM_v04(self): name="str_d1__str_d2", ) ### str d2 output - str_d2__gpe_proto = Projection( + str_d2__gpe_proto = ann.Projection( pre=str_d2, 
post=gpe_proto, target="gaba", synapse=factor_synapse, name="str_d2__gpe_proto", ) - str_d2__gpe_arky = Projection( + str_d2__gpe_arky = ann.Projection( pre=str_d2, post=gpe_arky, target="gaba", synapse=factor_synapse, name="str_d2__gpe_arky", ) - str_d2__gpe_cp = Projection( + str_d2__gpe_cp = ann.Projection( pre=str_d2, post=gpe_cp, target="gaba", synapse=factor_synapse, name="str_d2__gpe_cp", ) - str_d2__str_d1 = Projection( + str_d2__str_d1 = ann.Projection( pre=str_d2, post=str_d1, target="gaba", synapse=factor_synapse, name="str_d2__str_d1", ) - str_d2__str_d2 = Projection( + str_d2__str_d2 = ann.Projection( pre=str_d2, post=str_d2, target="gaba", @@ -1669,21 +1681,21 @@ def BGM_v04(self): name="str_d2__str_d2", ) ### str fsi output - str_fsi__str_d1 = Projection( + str_fsi__str_d1 = ann.Projection( pre=str_fsi, post=str_d1, target="gaba", synapse=factor_synapse, name="str_fsi__str_d1", ) - str_fsi__str_d2 = Projection( + str_fsi__str_d2 = ann.Projection( pre=str_fsi, post=str_d2, target="gaba", synapse=factor_synapse, name="str_fsi__str_d2", ) - str_fsi__str_fsi = Projection( + str_fsi__str_fsi = ann.Projection( pre=str_fsi, post=str_fsi, target="gaba", @@ -1691,49 +1703,49 @@ def BGM_v04(self): name="str_fsi__str_fsi", ) ### stn output - stn__snr = Projection( + stn__snr = ann.Projection( pre=stn, post=snr, target="ampa", synapse=factor_synapse, name="stn__snr" ) - stn__gpe_proto = Projection( + stn__gpe_proto = ann.Projection( pre=stn, post=gpe_proto, target="ampa", synapse=factor_synapse, name="stn__gpe_proto", ) - stn__gpe_arky = Projection( + stn__gpe_arky = ann.Projection( pre=stn, post=gpe_arky, target="ampa", synapse=factor_synapse, name="stn__gpe_arky", ) - stn__gpe_cp = Projection( + stn__gpe_cp = ann.Projection( pre=stn, post=gpe_cp, target="ampa", synapse=factor_synapse, name="stn__gpe_cp" ) ### gpe proto output - gpe_proto__stn = Projection( + gpe_proto__stn = ann.Projection( pre=gpe_proto, post=stn, target="gaba", synapse=factor_synapse, 
name="gpe_proto__stn", ) - gpe_proto__snr = Projection( + gpe_proto__snr = ann.Projection( pre=gpe_proto, post=snr, target="gaba", synapse=factor_synapse, name="gpe_proto__snr", ) - gpe_proto__gpe_arky = Projection( + gpe_proto__gpe_arky = ann.Projection( pre=gpe_proto, post=gpe_arky, target="gaba", synapse=factor_synapse, name="gpe_proto__gpe_arky", ) - gpe_proto__gpe_cp = Projection( + gpe_proto__gpe_cp = ann.Projection( pre=gpe_proto, post=gpe_cp, target="gaba", @@ -1741,7 +1753,7 @@ def BGM_v04(self): name="gpe_proto__gpe_cp", ) - gpe_proto__gpe_proto = Projection( + gpe_proto__gpe_proto = ann.Projection( pre=gpe_proto, post=gpe_proto, target="gaba", @@ -1749,7 +1761,7 @@ def BGM_v04(self): name="gpe_proto__gpe_proto", ) - gpe_arky__gpe_arky = Projection( # NEW, not in original BGM + gpe_arky__gpe_arky = ann.Projection( # NEW, not in original BGM pre=gpe_arky, post=gpe_arky, target="gaba", @@ -1757,7 +1769,7 @@ def BGM_v04(self): name="gpe_arky__gpe_arky", ) - gpe_proto__str_fsi = Projection( + gpe_proto__str_fsi = ann.Projection( pre=gpe_proto, post=str_fsi, target="gaba", @@ -1765,35 +1777,35 @@ def BGM_v04(self): name="gpe_proto__str_fsi", ) ### gpe arky output - gpe_arky__str_d1 = Projection( + gpe_arky__str_d1 = ann.Projection( pre=gpe_arky, post=str_d1, target="gaba", synapse=factor_synapse, name="gpe_arky__str_d1", ) - gpe_arky__str_d2 = Projection( + gpe_arky__str_d2 = ann.Projection( pre=gpe_arky, post=str_d2, target="gaba", synapse=factor_synapse, name="gpe_arky__str_d2", ) - gpe_arky__str_fsi = Projection( + gpe_arky__str_fsi = ann.Projection( pre=gpe_arky, post=str_fsi, target="gaba", synapse=factor_synapse, name="gpe_arky__str_fsi", ) - gpe_arky__gpe_proto = Projection( + gpe_arky__gpe_proto = ann.Projection( pre=gpe_arky, post=gpe_proto, target="gaba", synapse=factor_synapse, name="gpe_arky__gpe_proto", ) - gpe_arky__gpe_cp = Projection( + gpe_arky__gpe_cp = ann.Projection( pre=gpe_arky, post=gpe_cp, target="gaba", @@ -1801,42 +1813,42 @@ def 
BGM_v04(self): name="gpe_arky__gpe_cp", ) ### gpe cp output - gpe_cp__str_d1 = Projection( + gpe_cp__str_d1 = ann.Projection( pre=gpe_cp, post=str_d1, target="gaba", synapse=factor_synapse, name="gpe_cp__str_d1", ) - gpe_cp__str_d2 = Projection( + gpe_cp__str_d2 = ann.Projection( pre=gpe_cp, post=str_d2, target="gaba", synapse=factor_synapse, name="gpe_cp__str_d2", ) - gpe_cp__str_fsi = Projection( + gpe_cp__str_fsi = ann.Projection( pre=gpe_cp, post=str_fsi, target="gaba", synapse=factor_synapse, name="gpe_cp__str_fsi", ) - gpe_cp__gpe_proto = Projection( + gpe_cp__gpe_proto = ann.Projection( pre=gpe_cp, post=gpe_proto, target="gaba", synapse=factor_synapse, name="gpe_cp__gpe_proto", ) - gpe_cp__gpe_arky = Projection( + gpe_cp__gpe_arky = ann.Projection( pre=gpe_cp, post=gpe_arky, target="gaba", synapse=factor_synapse, name="gpe_cp__gpe_arky", ) - gpe_cp__integrator_stop = Projection( + gpe_cp__integrator_stop = ann.Projection( pre=gpe_cp, post=integrator_stop, target="ampa", @@ -1844,32 +1856,32 @@ def BGM_v04(self): name="gpe_cp__integrator_stop", ) ### snr output - snr__thal = Projection( + snr__thal = ann.Projection( pre=snr, post=thal, target="gaba", synapse=factor_synapse, name="snr__thal" ) ### thal output - thal__integrator_go = Projection( + thal__integrator_go = ann.Projection( pre=thal, post=integrator_go, target="ampa", synapse=factor_synapse, name="thal__integrator_go", ) - thal__str_d1 = Projection( + thal__str_d1 = ann.Projection( pre=thal, post=str_d1, target="ampa", synapse=factor_synapse, name="thal__str_d1", ) - thal__str_d2 = Projection( + thal__str_d2 = ann.Projection( pre=thal, post=str_d2, target="ampa", synapse=factor_synapse, name="thal__str_d2", ) - thal__str_fsi = Projection( + thal__str_fsi = ann.Projection( pre=thal, post=str_fsi, target="ampa", @@ -1884,18 +1896,18 @@ def BGM_v04oliver(self): """ ####### POPULATIONS ###### ### Str Populations - str_d2 = Population( + str_d2 = ann.Population( self.params["str_d2.size"], 
Izhikevich2007_noisy_AMPA, name="str_d2", ) - str_fsi = Population( + str_fsi = ann.Population( self.params["str_fsi.size"], Izhikevich2007_Corbit_FSI_noisy_AMPA, name="str_fsi", ) ### BG Populations - gpe_arky = Population( + gpe_arky = ann.Population( self.params["gpe_arky.size"], Izhikevich2003_flexible_noisy_AMPA, name="gpe_arky", @@ -1903,14 +1915,14 @@ def BGM_v04oliver(self): ###### PROJECTIONS ###### ### str d2 output - str_d2__gpe_arky = Projection( + str_d2__gpe_arky = ann.Projection( pre=str_d2, post=gpe_arky, target="gaba", synapse=factor_synapse_without_max, name="str_d2__gpe_arky", ) - str_d2__str_d2 = Projection( + str_d2__str_d2 = ann.Projection( pre=str_d2, post=str_d2, target="gaba", @@ -1918,14 +1930,14 @@ def BGM_v04oliver(self): name="str_d2__str_d2", ) ### str fsi output - str_fsi__str_d2 = Projection( + str_fsi__str_d2 = ann.Projection( pre=str_fsi, post=str_d2, target="gaba", synapse=factor_synapse_without_max, name="str_fsi__str_d2", ) - str_fsi__str_fsi = Projection( + str_fsi__str_fsi = ann.Projection( pre=str_fsi, post=str_fsi, target="gaba", @@ -1933,14 +1945,14 @@ def BGM_v04oliver(self): name="str_fsi__str_fsi", ) ### gpe arky output - gpe_arky__str_fsi = Projection( + gpe_arky__str_fsi = ann.Projection( pre=gpe_arky, post=str_fsi, target="gaba", synapse=factor_synapse_without_max, name="gpe_arky__str_fsi", ) - gpe_arky__gpe_arky = Projection( # NEW, not in original BGM + gpe_arky__gpe_arky = ann.Projection( # NEW, not in original BGM pre=gpe_arky, post=gpe_arky, target="gaba", @@ -1958,18 +1970,18 @@ def BGM_v04newgpe(self): """ ####### POPULATIONS ###### ### Str Populations - str_d2 = Population( + str_d2 = ann.Population( self.params["str_d2.size"], Izhikevich2007_noisy_I, name="str_d2", ) - str_fsi = Population( + str_fsi = ann.Population( self.params["str_fsi.size"], Izhikevich2007_Corbit_FSI_noisy_I, name="str_fsi", ) ### BG Populations - gpe_proto = Population( + gpe_proto = ann.Population( self.params["gpe_proto.size"], 
Izhikevich2003_flexible_noisy_I_nonlin, name="gpe_proto", @@ -1977,14 +1989,14 @@ def BGM_v04newgpe(self): ###### PROJECTIONS ###### ### str d2 output - str_d2__gpe_proto = Projection( + str_d2__gpe_proto = ann.Projection( pre=str_d2, post=gpe_proto, target="gaba", synapse=factor_synapse_without_max, name="str_d2__gpe_proto", ) - str_d2__str_d2 = Projection( + str_d2__str_d2 = ann.Projection( pre=str_d2, post=str_d2, target="gaba", @@ -1992,14 +2004,14 @@ def BGM_v04newgpe(self): name="str_d2__str_d2", ) ### str fsi output - str_fsi__str_d2 = Projection( + str_fsi__str_d2 = ann.Projection( pre=str_fsi, post=str_d2, target="gaba", synapse=factor_synapse_without_max, name="str_fsi__str_d2", ) - str_fsi__str_fsi = Projection( + str_fsi__str_fsi = ann.Projection( pre=str_fsi, post=str_fsi, target="gaba", @@ -2007,14 +2019,14 @@ def BGM_v04newgpe(self): name="str_fsi__str_fsi", ) ### gpe proto output - gpe_proto__str_fsi = Projection( + gpe_proto__str_fsi = ann.Projection( pre=gpe_proto, post=str_fsi, target="gaba", synapse=factor_synapse_without_max, name="gpe_proto__str_fsi", ) - gpe_proto__gpe_proto = Projection( # NEW, not in original BGM + gpe_proto__gpe_proto = ann.Projection( # NEW, not in original BGM pre=gpe_proto, post=gpe_proto, target="gaba", @@ -2030,56 +2042,58 @@ def BGM_v05(self): """ ####### POPULATIONS ###### ### cortex / input populations - cor_go = Population( + cor_go = ann.Population( self.params["cor_go.size"], poisson_neuron_up_down, name="cor_go" ) - cor_pause = Population( + cor_pause = ann.Population( self.params["cor_pause.size"], poisson_neuron_up_down, name="cor_pause" ) - cor_stop = Population( + cor_stop = ann.Population( self.params["cor_stop.size"], poisson_neuron_up_down, name="cor_stop" ) ### Str Populations - str_d1 = Population( + str_d1 = ann.Population( self.params["str_d1.size"], Izhikevich2007_noisy_AMPA_oscillating, name="str_d1", # NEW NEURON MODEL ) - str_d2 = Population( + str_d2 = ann.Population( self.params["str_d2.size"], 
Izhikevich2007_noisy_AMPA_oscillating, name="str_d2", # NEW NEURON MODEL ) - str_fsi = Population( + str_fsi = ann.Population( self.params["str_fsi.size"], Izhikevich2007_Corbit_FSI_noisy_AMPA, name="str_fsi", ) ### BG Populations - stn = Population(self.params["stn.size"], Izhikevich2003_noisy_AMPA, name="stn") - snr = Population(self.params["snr.size"], Izhikevich2003_noisy_AMPA, name="snr") - gpe_proto = Population( + stn = ann.Population(self.params["stn.size"], Izhikevich2003_noisy_AMPA, name="stn") + snr = ann.Population(self.params["snr.size"], Izhikevich2003_noisy_AMPA, name="snr") + gpe_proto = ann.Population( self.params["gpe_proto.size"], Izhikevich2003_flexible_noisy_AMPA, # NEW NEURON MODEL name="gpe_proto", ) - gpe_arky = Population( + gpe_arky = ann.Population( self.params["gpe_arky.size"], Izhikevich2003_flexible_noisy_AMPA_oscillating, name="gpe_arky", ) - gpe_cp = Population( + gpe_cp = ann.Population( self.params["gpe_cp.size"], Izhikevich2003_flexible_noisy_AMPA, name="gpe_cp" ) - thal = Population(self.params["thal.size"], Izhikevich2003_noisy_AMPA, name="thal") + thal = ann.Population( + self.params["thal.size"], Izhikevich2003_noisy_AMPA, name="thal" + ) ### integrator Neurons - integrator_go = Population( + integrator_go = ann.Population( self.params["integrator_go.size"], integrator_neuron, stop_condition="decision>=0 : any", name="integrator_go", ) - integrator_stop = Population( + integrator_stop = ann.Population( self.params["integrator_stop.size"], integrator_neuron, stop_condition="decision>=0 : any", @@ -2088,28 +2102,28 @@ def BGM_v05(self): ###### PROJECTIONS ###### ### cortex go output - cor_go__str_d1 = Projection( + cor_go__str_d1 = ann.Projection( pre=cor_go, post=str_d1, target="ampa", synapse=factor_synapse, name="cor_go__str_d1", ) - cor_go__str_d2 = Projection( + cor_go__str_d2 = ann.Projection( pre=cor_go, post=str_d2, target="ampa", synapse=factor_synapse, name="cor_go__str_d2", ) - cor_go__str_fsi = Projection( + 
cor_go__str_fsi = ann.Projection( pre=cor_go, post=str_fsi, target="ampa", synapse=factor_synapse, name="cor_go__str_fsi", ) - cor_go__thal = Projection( + cor_go__thal = ann.Projection( pre=cor_go, post=thal, target="ampa", @@ -2117,14 +2131,14 @@ def BGM_v05(self): name="cor_go__thal", ) ### cortex stop output - cor_stop__gpe_arky = Projection( + cor_stop__gpe_arky = ann.Projection( pre=cor_stop, post=gpe_arky, target="ampa", synapse=factor_synapse, name="cor_stop__gpe_arky", ) - cor_stop__gpe_cp = Projection( + cor_stop__gpe_cp = ann.Projection( pre=cor_stop, post=gpe_cp, target="ampa", @@ -2132,7 +2146,7 @@ def BGM_v05(self): name="cor_stop__gpe_cp", ) - cor_stop__gpe_proto = Projection( # NEW ! + cor_stop__gpe_proto = ann.Projection( # NEW ! pre=cor_stop, post=gpe_proto, target="ampa", @@ -2140,7 +2154,7 @@ def BGM_v05(self): name="cor_stop__gpe_proto", ) ### cortex pause output - cor_pause__stn = Projection( + cor_pause__stn = ann.Projection( pre=cor_pause, post=stn, target="ampa", @@ -2148,24 +2162,24 @@ def BGM_v05(self): name="cor_pause__stn", ) ### str d1 output - str_d1__snr = Projection( + str_d1__snr = ann.Projection( pre=str_d1, post=snr, target="gaba", synapse=factor_synapse, name="str_d1__snr" ) - str_d1__gpe_cp = Projection( + str_d1__gpe_cp = ann.Projection( pre=str_d1, post=gpe_cp, target="gaba", synapse=factor_synapse, name="str_d1__gpe_cp", ) - str_d1__str_d1 = Projection( + str_d1__str_d1 = ann.Projection( pre=str_d1, post=str_d1, target="gaba", synapse=factor_synapse, name="str_d1__str_d1", ) - str_d1__str_d2 = Projection( + str_d1__str_d2 = ann.Projection( pre=str_d1, post=str_d2, target="gaba", @@ -2173,35 +2187,35 @@ def BGM_v05(self): name="str_d1__str_d2", ) ### str d2 output - str_d2__gpe_proto = Projection( + str_d2__gpe_proto = ann.Projection( pre=str_d2, post=gpe_proto, target="gaba", synapse=factor_synapse, name="str_d2__gpe_proto", ) - str_d2__gpe_arky = Projection( + str_d2__gpe_arky = ann.Projection( pre=str_d2, post=gpe_arky, 
target="gaba", synapse=factor_synapse, name="str_d2__gpe_arky", ) - str_d2__gpe_cp = Projection( + str_d2__gpe_cp = ann.Projection( pre=str_d2, post=gpe_cp, target="gaba", synapse=factor_synapse, name="str_d2__gpe_cp", ) - str_d2__str_d1 = Projection( + str_d2__str_d1 = ann.Projection( pre=str_d2, post=str_d1, target="gaba", synapse=factor_synapse, name="str_d2__str_d1", ) - str_d2__str_d2 = Projection( + str_d2__str_d2 = ann.Projection( pre=str_d2, post=str_d2, target="gaba", @@ -2209,21 +2223,21 @@ def BGM_v05(self): name="str_d2__str_d2", ) ### str fsi output - str_fsi__str_d1 = Projection( + str_fsi__str_d1 = ann.Projection( pre=str_fsi, post=str_d1, target="gaba", synapse=factor_synapse, name="str_fsi__str_d1", ) - str_fsi__str_d2 = Projection( + str_fsi__str_d2 = ann.Projection( pre=str_fsi, post=str_d2, target="gaba", synapse=factor_synapse, name="str_fsi__str_d2", ) - str_fsi__str_fsi = Projection( + str_fsi__str_fsi = ann.Projection( pre=str_fsi, post=str_fsi, target="gaba", @@ -2231,49 +2245,49 @@ def BGM_v05(self): name="str_fsi__str_fsi", ) ### stn output - stn__snr = Projection( + stn__snr = ann.Projection( pre=stn, post=snr, target="ampa", synapse=factor_synapse, name="stn__snr" ) - stn__gpe_proto = Projection( + stn__gpe_proto = ann.Projection( pre=stn, post=gpe_proto, target="ampa", synapse=factor_synapse, name="stn__gpe_proto", ) - stn__gpe_arky = Projection( + stn__gpe_arky = ann.Projection( pre=stn, post=gpe_arky, target="ampa", synapse=factor_synapse, name="stn__gpe_arky", ) - stn__gpe_cp = Projection( + stn__gpe_cp = ann.Projection( pre=stn, post=gpe_cp, target="ampa", synapse=factor_synapse, name="stn__gpe_cp" ) ### gpe proto output - gpe_proto__stn = Projection( + gpe_proto__stn = ann.Projection( pre=gpe_proto, post=stn, target="gaba", synapse=factor_synapse, name="gpe_proto__stn", ) - gpe_proto__snr = Projection( + gpe_proto__snr = ann.Projection( pre=gpe_proto, post=snr, target="gaba", synapse=factor_synapse, name="gpe_proto__snr", ) - 
gpe_proto__gpe_arky = Projection( + gpe_proto__gpe_arky = ann.Projection( pre=gpe_proto, post=gpe_arky, target="gaba", synapse=factor_synapse, name="gpe_proto__gpe_arky", ) - gpe_proto__gpe_cp = Projection( + gpe_proto__gpe_cp = ann.Projection( pre=gpe_proto, post=gpe_cp, target="gaba", @@ -2281,7 +2295,7 @@ def BGM_v05(self): name="gpe_proto__gpe_cp", ) - gpe_proto__gpe_proto = Projection( # NEW, not in original BGM + gpe_proto__gpe_proto = ann.Projection( # NEW, not in original BGM pre=gpe_proto, post=gpe_proto, target="gaba", @@ -2289,7 +2303,7 @@ def BGM_v05(self): name="gpe_proto__gpe_proto", ) - gpe_arky__gpe_arky = Projection( # NEW, not in original BGM + gpe_arky__gpe_arky = ann.Projection( # NEW, not in original BGM pre=gpe_arky, post=gpe_arky, target="gaba", @@ -2297,7 +2311,7 @@ def BGM_v05(self): name="gpe_arky__gpe_arky", ) - gpe_proto__str_fsi = Projection( + gpe_proto__str_fsi = ann.Projection( pre=gpe_proto, post=str_fsi, target="gaba", @@ -2305,35 +2319,35 @@ def BGM_v05(self): name="gpe_proto__str_fsi", ) ### gpe arky output - gpe_arky__str_d1 = Projection( + gpe_arky__str_d1 = ann.Projection( pre=gpe_arky, post=str_d1, target="gaba", synapse=factor_synapse, name="gpe_arky__str_d1", ) - gpe_arky__str_d2 = Projection( + gpe_arky__str_d2 = ann.Projection( pre=gpe_arky, post=str_d2, target="gaba", synapse=factor_synapse, name="gpe_arky__str_d2", ) - gpe_arky__str_fsi = Projection( + gpe_arky__str_fsi = ann.Projection( pre=gpe_arky, post=str_fsi, target="gaba", synapse=factor_synapse, name="gpe_arky__str_fsi", ) - gpe_arky__gpe_proto = Projection( + gpe_arky__gpe_proto = ann.Projection( pre=gpe_arky, post=gpe_proto, target="gaba", synapse=factor_synapse, name="gpe_arky__gpe_proto", ) - gpe_arky__gpe_cp = Projection( + gpe_arky__gpe_cp = ann.Projection( pre=gpe_arky, post=gpe_cp, target="gaba", @@ -2341,42 +2355,42 @@ def BGM_v05(self): name="gpe_arky__gpe_cp", ) ### gpe cp output - gpe_cp__str_d1 = Projection( + gpe_cp__str_d1 = ann.Projection( 
pre=gpe_cp, post=str_d1, target="gaba", synapse=factor_synapse, name="gpe_cp__str_d1", ) - gpe_cp__str_d2 = Projection( + gpe_cp__str_d2 = ann.Projection( pre=gpe_cp, post=str_d2, target="gaba", synapse=factor_synapse, name="gpe_cp__str_d2", ) - gpe_cp__str_fsi = Projection( + gpe_cp__str_fsi = ann.Projection( pre=gpe_cp, post=str_fsi, target="gaba", synapse=factor_synapse, name="gpe_cp__str_fsi", ) - gpe_cp__gpe_proto = Projection( + gpe_cp__gpe_proto = ann.Projection( pre=gpe_cp, post=gpe_proto, target="gaba", synapse=factor_synapse, name="gpe_cp__gpe_proto", ) - gpe_cp__gpe_arky = Projection( + gpe_cp__gpe_arky = ann.Projection( pre=gpe_cp, post=gpe_arky, target="gaba", synapse=factor_synapse, name="gpe_cp__gpe_arky", ) - gpe_cp__integrator_stop = Projection( + gpe_cp__integrator_stop = ann.Projection( pre=gpe_cp, post=integrator_stop, target="ampa", @@ -2384,32 +2398,32 @@ def BGM_v05(self): name="gpe_cp__integrator_stop", ) ### snr output - snr__thal = Projection( + snr__thal = ann.Projection( pre=snr, post=thal, target="gaba", synapse=factor_synapse, name="snr__thal" ) ### thal output - thal__integrator_go = Projection( + thal__integrator_go = ann.Projection( pre=thal, post=integrator_go, target="ampa", synapse=factor_synapse, name="thal__integrator_go", ) - thal__str_d1 = Projection( + thal__str_d1 = ann.Projection( pre=thal, post=str_d1, target="ampa", synapse=factor_synapse, name="thal__str_d1", ) - thal__str_d2 = Projection( + thal__str_d2 = ann.Projection( pre=thal, post=str_d2, target="ampa", synapse=factor_synapse, name="thal__str_d2", ) - thal__str_fsi = Projection( + thal__str_fsi = ann.Projection( pre=thal, post=str_fsi, target="ampa", @@ -2425,54 +2439,58 @@ def BGM_v06(self): """ ####### POPULATIONS ###### ### cortex / input populations - cor_go = Population(self.params["cor_go.size"], poisson_neuron_sin, name="cor_go") - cor_pause = Population( + cor_go = ann.Population( + self.params["cor_go.size"], poisson_neuron_sin, name="cor_go" + ) + 
cor_pause = ann.Population( self.params["cor_pause.size"], poisson_neuron_up_down, name="cor_pause" ) - cor_stop = Population( + cor_stop = ann.Population( self.params["cor_stop.size"], poisson_neuron_sin, name="cor_stop" ) ### Str Populations - str_d1 = Population( + str_d1 = ann.Population( self.params["str_d1.size"], Izhikevich2007_noisy_AMPA, name="str_d1", # NEW NEURON MODEL ) - str_d2 = Population( + str_d2 = ann.Population( self.params["str_d2.size"], Izhikevich2007_noisy_AMPA, name="str_d2", # NEW NEURON MODEL ) - str_fsi = Population( + str_fsi = ann.Population( self.params["str_fsi.size"], Izhikevich2007_Corbit_FSI_noisy_AMPA, name="str_fsi", ) ### BG Populations - stn = Population(self.params["stn.size"], Izhikevich2003_noisy_AMPA, name="stn") - snr = Population(self.params["snr.size"], Izhikevich2003_noisy_AMPA, name="snr") - gpe_proto = Population( + stn = ann.Population(self.params["stn.size"], Izhikevich2003_noisy_AMPA, name="stn") + snr = ann.Population(self.params["snr.size"], Izhikevich2003_noisy_AMPA, name="snr") + gpe_proto = ann.Population( self.params["gpe_proto.size"], Izhikevich2003_flexible_noisy_AMPA, # NEW NEURON MODEL name="gpe_proto", ) - gpe_arky = Population( + gpe_arky = ann.Population( self.params["gpe_arky.size"], Izhikevich2003_flexible_noisy_AMPA, name="gpe_arky", ) - gpe_cp = Population( + gpe_cp = ann.Population( self.params["gpe_cp.size"], Izhikevich2003_flexible_noisy_AMPA, name="gpe_cp" ) - thal = Population(self.params["thal.size"], Izhikevich2003_noisy_AMPA, name="thal") + thal = ann.Population( + self.params["thal.size"], Izhikevich2003_noisy_AMPA, name="thal" + ) ### integrator Neurons - integrator_go = Population( + integrator_go = ann.Population( self.params["integrator_go.size"], integrator_neuron, stop_condition="decision>=0 : any", name="integrator_go", ) - integrator_stop = Population( + integrator_stop = ann.Population( self.params["integrator_stop.size"], integrator_neuron, stop_condition="decision>=0 : any", @@ 
-2481,28 +2499,28 @@ def BGM_v06(self): ###### PROJECTIONS ###### ### cortex go output - cor_go__str_d1 = Projection( + cor_go__str_d1 = ann.Projection( pre=cor_go, post=str_d1, target="ampa", synapse=factor_synapse, name="cor_go__str_d1", ) - cor_go__str_d2 = Projection( + cor_go__str_d2 = ann.Projection( pre=cor_go, post=str_d2, target="ampa", synapse=factor_synapse, name="cor_go__str_d2", ) - cor_go__str_fsi = Projection( + cor_go__str_fsi = ann.Projection( pre=cor_go, post=str_fsi, target="ampa", synapse=factor_synapse, name="cor_go__str_fsi", ) - cor_go__thal = Projection( + cor_go__thal = ann.Projection( pre=cor_go, post=thal, target="ampa", @@ -2510,14 +2528,14 @@ def BGM_v06(self): name="cor_go__thal", ) ### cortex stop output - cor_stop__gpe_arky = Projection( + cor_stop__gpe_arky = ann.Projection( pre=cor_stop, post=gpe_arky, target="ampa", synapse=factor_synapse, name="cor_stop__gpe_arky", ) - cor_stop__gpe_cp = Projection( + cor_stop__gpe_cp = ann.Projection( pre=cor_stop, post=gpe_cp, target="ampa", @@ -2525,7 +2543,7 @@ def BGM_v06(self): name="cor_stop__gpe_cp", ) - cor_stop__gpe_proto = Projection( # NEW ! + cor_stop__gpe_proto = ann.Projection( # NEW ! 
pre=cor_stop, post=gpe_proto, target="ampa", @@ -2533,7 +2551,7 @@ def BGM_v06(self): name="cor_stop__gpe_proto", ) ### cortex pause output - cor_pause__stn = Projection( + cor_pause__stn = ann.Projection( pre=cor_pause, post=stn, target="ampa", @@ -2541,24 +2559,24 @@ def BGM_v06(self): name="cor_pause__stn", ) ### str d1 output - str_d1__snr = Projection( + str_d1__snr = ann.Projection( pre=str_d1, post=snr, target="gaba", synapse=factor_synapse, name="str_d1__snr" ) - str_d1__gpe_cp = Projection( + str_d1__gpe_cp = ann.Projection( pre=str_d1, post=gpe_cp, target="gaba", synapse=factor_synapse, name="str_d1__gpe_cp", ) - str_d1__str_d1 = Projection( + str_d1__str_d1 = ann.Projection( pre=str_d1, post=str_d1, target="gaba", synapse=factor_synapse, name="str_d1__str_d1", ) - str_d1__str_d2 = Projection( + str_d1__str_d2 = ann.Projection( pre=str_d1, post=str_d2, target="gaba", @@ -2566,35 +2584,35 @@ def BGM_v06(self): name="str_d1__str_d2", ) ### str d2 output - str_d2__gpe_proto = Projection( + str_d2__gpe_proto = ann.Projection( pre=str_d2, post=gpe_proto, target="gaba", synapse=factor_synapse, name="str_d2__gpe_proto", ) - str_d2__gpe_arky = Projection( + str_d2__gpe_arky = ann.Projection( pre=str_d2, post=gpe_arky, target="gaba", synapse=factor_synapse, name="str_d2__gpe_arky", ) - str_d2__gpe_cp = Projection( + str_d2__gpe_cp = ann.Projection( pre=str_d2, post=gpe_cp, target="gaba", synapse=factor_synapse, name="str_d2__gpe_cp", ) - str_d2__str_d1 = Projection( + str_d2__str_d1 = ann.Projection( pre=str_d2, post=str_d1, target="gaba", synapse=factor_synapse, name="str_d2__str_d1", ) - str_d2__str_d2 = Projection( + str_d2__str_d2 = ann.Projection( pre=str_d2, post=str_d2, target="gaba", @@ -2602,21 +2620,21 @@ def BGM_v06(self): name="str_d2__str_d2", ) ### str fsi output - str_fsi__str_d1 = Projection( + str_fsi__str_d1 = ann.Projection( pre=str_fsi, post=str_d1, target="gaba", synapse=factor_synapse, name="str_fsi__str_d1", ) - str_fsi__str_d2 = Projection( 
+ str_fsi__str_d2 = ann.Projection( pre=str_fsi, post=str_d2, target="gaba", synapse=factor_synapse, name="str_fsi__str_d2", ) - str_fsi__str_fsi = Projection( + str_fsi__str_fsi = ann.Projection( pre=str_fsi, post=str_fsi, target="gaba", @@ -2624,49 +2642,49 @@ def BGM_v06(self): name="str_fsi__str_fsi", ) ### stn output - stn__snr = Projection( + stn__snr = ann.Projection( pre=stn, post=snr, target="ampa", synapse=factor_synapse, name="stn__snr" ) - stn__gpe_proto = Projection( + stn__gpe_proto = ann.Projection( pre=stn, post=gpe_proto, target="ampa", synapse=factor_synapse, name="stn__gpe_proto", ) - stn__gpe_arky = Projection( + stn__gpe_arky = ann.Projection( pre=stn, post=gpe_arky, target="ampa", synapse=factor_synapse, name="stn__gpe_arky", ) - stn__gpe_cp = Projection( + stn__gpe_cp = ann.Projection( pre=stn, post=gpe_cp, target="ampa", synapse=factor_synapse, name="stn__gpe_cp" ) ### gpe proto output - gpe_proto__stn = Projection( + gpe_proto__stn = ann.Projection( pre=gpe_proto, post=stn, target="gaba", synapse=factor_synapse, name="gpe_proto__stn", ) - gpe_proto__snr = Projection( + gpe_proto__snr = ann.Projection( pre=gpe_proto, post=snr, target="gaba", synapse=factor_synapse, name="gpe_proto__snr", ) - gpe_proto__gpe_arky = Projection( + gpe_proto__gpe_arky = ann.Projection( pre=gpe_proto, post=gpe_arky, target="gaba", synapse=factor_synapse, name="gpe_proto__gpe_arky", ) - gpe_proto__gpe_cp = Projection( + gpe_proto__gpe_cp = ann.Projection( pre=gpe_proto, post=gpe_cp, target="gaba", @@ -2674,7 +2692,7 @@ def BGM_v06(self): name="gpe_proto__gpe_cp", ) - gpe_proto__gpe_proto = Projection( # NEW, not in original BGM + gpe_proto__gpe_proto = ann.Projection( # NEW, not in original BGM pre=gpe_proto, post=gpe_proto, target="gaba", @@ -2682,7 +2700,7 @@ def BGM_v06(self): name="gpe_proto__gpe_proto", ) - gpe_arky__gpe_arky = Projection( # NEW, not in original BGM + gpe_arky__gpe_arky = ann.Projection( # NEW, not in original BGM pre=gpe_arky, post=gpe_arky, 
target="gaba", @@ -2690,7 +2708,7 @@ def BGM_v06(self): name="gpe_arky__gpe_arky", ) - gpe_proto__str_fsi = Projection( + gpe_proto__str_fsi = ann.Projection( pre=gpe_proto, post=str_fsi, target="gaba", @@ -2698,35 +2716,35 @@ def BGM_v06(self): name="gpe_proto__str_fsi", ) ### gpe arky output - gpe_arky__str_d1 = Projection( + gpe_arky__str_d1 = ann.Projection( pre=gpe_arky, post=str_d1, target="gaba", synapse=factor_synapse, name="gpe_arky__str_d1", ) - gpe_arky__str_d2 = Projection( + gpe_arky__str_d2 = ann.Projection( pre=gpe_arky, post=str_d2, target="gaba", synapse=factor_synapse, name="gpe_arky__str_d2", ) - gpe_arky__str_fsi = Projection( + gpe_arky__str_fsi = ann.Projection( pre=gpe_arky, post=str_fsi, target="gaba", synapse=factor_synapse, name="gpe_arky__str_fsi", ) - gpe_arky__gpe_proto = Projection( + gpe_arky__gpe_proto = ann.Projection( pre=gpe_arky, post=gpe_proto, target="gaba", synapse=factor_synapse, name="gpe_arky__gpe_proto", ) - gpe_arky__gpe_cp = Projection( + gpe_arky__gpe_cp = ann.Projection( pre=gpe_arky, post=gpe_cp, target="gaba", @@ -2734,42 +2752,42 @@ def BGM_v06(self): name="gpe_arky__gpe_cp", ) ### gpe cp output - gpe_cp__str_d1 = Projection( + gpe_cp__str_d1 = ann.Projection( pre=gpe_cp, post=str_d1, target="gaba", synapse=factor_synapse, name="gpe_cp__str_d1", ) - gpe_cp__str_d2 = Projection( + gpe_cp__str_d2 = ann.Projection( pre=gpe_cp, post=str_d2, target="gaba", synapse=factor_synapse, name="gpe_cp__str_d2", ) - gpe_cp__str_fsi = Projection( + gpe_cp__str_fsi = ann.Projection( pre=gpe_cp, post=str_fsi, target="gaba", synapse=factor_synapse, name="gpe_cp__str_fsi", ) - gpe_cp__gpe_proto = Projection( + gpe_cp__gpe_proto = ann.Projection( pre=gpe_cp, post=gpe_proto, target="gaba", synapse=factor_synapse, name="gpe_cp__gpe_proto", ) - gpe_cp__gpe_arky = Projection( + gpe_cp__gpe_arky = ann.Projection( pre=gpe_cp, post=gpe_arky, target="gaba", synapse=factor_synapse, name="gpe_cp__gpe_arky", ) - gpe_cp__integrator_stop = 
Projection( + gpe_cp__integrator_stop = ann.Projection( pre=gpe_cp, post=integrator_stop, target="ampa", @@ -2777,32 +2795,32 @@ def BGM_v06(self): name="gpe_cp__integrator_stop", ) ### snr output - snr__thal = Projection( + snr__thal = ann.Projection( pre=snr, post=thal, target="gaba", synapse=factor_synapse, name="snr__thal" ) ### thal output - thal__integrator_go = Projection( + thal__integrator_go = ann.Projection( pre=thal, post=integrator_go, target="ampa", synapse=factor_synapse, name="thal__integrator_go", ) - thal__str_d1 = Projection( + thal__str_d1 = ann.Projection( pre=thal, post=str_d1, target="ampa", synapse=factor_synapse, name="thal__str_d1", ) - thal__str_d2 = Projection( + thal__str_d2 = ann.Projection( pre=thal, post=str_d2, target="ampa", synapse=factor_synapse, name="thal__str_d2", ) - thal__str_fsi = Projection( + thal__str_fsi = ann.Projection( pre=thal, post=str_fsi, target="ampa", diff --git a/src/CompNeuroPy/full_models/hodgkin_huxley_single_pop.py b/src/CompNeuroPy/full_models/hodgkin_huxley_single_pop.py index 222c0ac..ec32bd2 100644 --- a/src/CompNeuroPy/full_models/hodgkin_huxley_single_pop.py +++ b/src/CompNeuroPy/full_models/hodgkin_huxley_single_pop.py @@ -1,11 +1,11 @@ -from ..generate_model import CompNeuroModel +from CompNeuroPy.generate_model import CompNeuroModel from CompNeuroPy.neuron_models import ( HHneuronCorbit, HHneuronCorbitSyn, HHneuronBischop, HHneuronBischopSyn, ) -from ANNarchy import Population +from CompNeuroPy import ann class HHmodelBischop(CompNeuroModel): @@ -61,9 +61,11 @@ def __init__( def _bischop_2012_creation_function(self): if self.conductance_based_synapses: - Population(self.pop_size, neuron=HHneuronBischopSyn, name="HH_Bischop_syn") + ann.Population( + self.pop_size, neuron=HHneuronBischopSyn, name="HH_Bischop_syn" + ) else: - Population(self.pop_size, neuron=HHneuronBischop, name="HH_Bischop") + ann.Population(self.pop_size, neuron=HHneuronBischop, name="HH_Bischop") class 
HHmodelCorbit(CompNeuroModel): @@ -119,6 +121,8 @@ def __init__( def _model_creation_function(self): if self.conductance_based_synapses: - Population(self.pop_size, neuron=HHneuronCorbitSyn, name="HH_Corbit_syn") + ann.Population( + self.pop_size, neuron=HHneuronCorbitSyn, name="HH_Corbit_syn" + ) else: - Population(self.pop_size, neuron=HHneuronCorbit, name="HH_Corbit") + ann.Population(self.pop_size, neuron=HHneuronCorbit, name="HH_Corbit") diff --git a/src/CompNeuroPy/generate_model.py b/src/CompNeuroPy/generate_model.py index 3bf969e..3998684 100644 --- a/src/CompNeuroPy/generate_model.py +++ b/src/CompNeuroPy/generate_model.py @@ -1,7 +1,7 @@ from typing import Callable from CompNeuroPy import model_functions as mf from CompNeuroPy import analysis_functions as af -from ANNarchy import get_population, get_projection +from CompNeuroPy import ann import numpy as np import pandas as pd from typingchecker import check_types @@ -139,7 +139,7 @@ def _update_attribute_df_weights(self): Updates _attribute_df for the weights of all projections. 
""" for proj_name in self.projections: - values = get_projection(proj_name).w + values = ann.get_projection(proj_name).w self._update_attribute_df( compartment=proj_name, parameter_name="w", parameter_value=values ) @@ -283,9 +283,9 @@ def set_param(self, compartment, parameter_name, parameter_value): comp_in_proj = compartment in self.projections if comp_in_pop: - comp_obj = get_population(compartment) + comp_obj = ann.get_population(compartment) elif comp_in_proj: - comp_obj = get_projection(compartment) + comp_obj = ann.get_projection(compartment) else: assert ( comp_in_pop or comp_in_proj @@ -374,12 +374,12 @@ def _get_attribute_df(self): ### fill paramter dict with population attributes for pop in self.populations: - for attribute in vars(get_population(pop))["attributes"]: + for attribute in vars(ann.get_population(pop))["attributes"]: ### store min and max of attribute ### create numpy array with getattr to use numpy min max function values = np.array( - [getattr(get_population(pop), attribute)] - + [getattr(get_population(pop), attribute)] + [getattr(ann.get_population(pop), attribute)] + + [getattr(ann.get_population(pop), attribute)] ) attribute_dict["compartment_type"].append("population") attribute_dict["compartment_name"].append(pop) @@ -392,12 +392,12 @@ def _get_attribute_df(self): ### fill paramter dict with projection attributes for proj in self.projections: - for attribute in vars(get_projection(proj))["attributes"]: + for attribute in vars(ann.get_projection(proj))["attributes"]: ### store min and max of attribute ### create numpy array with getattr to use numpy min max function values = np.array( - [getattr(get_projection(proj), attribute)] - + [getattr(get_projection(proj), attribute)] + [getattr(ann.get_projection(proj), attribute)] + + [getattr(ann.get_projection(proj), attribute)] ) attribute_dict["compartment_type"].append("projection") attribute_dict["compartment_name"].append(proj) diff --git a/src/CompNeuroPy/generate_simulation.py 
b/src/CompNeuroPy/generate_simulation.py index a420312..e217711 100644 --- a/src/CompNeuroPy/generate_simulation.py +++ b/src/CompNeuroPy/generate_simulation.py @@ -1,4 +1,4 @@ -from ANNarchy import get_time +from CompNeuroPy import ann from CompNeuroPy import extra_functions as ef from CompNeuroPy.monitors import CompNeuroMonitors import numpy as np @@ -117,12 +117,12 @@ def run(self, simulation_kwargs: dict | None = None): self.monitor_chunk.append(self.monitor_object.current_chunk()) ### run the simulation, store start and end simulation time - self.start.append(get_time()) + self.start.append(ann.get_time()) if tmp_kwargs is not None: self.info.append(self.simulation_function(**tmp_kwargs)) else: self.info.append(self.simulation_function()) - self.end.append(get_time()) + self.end.append(ann.get_time()) def _nr_simulations(self): """ diff --git a/src/CompNeuroPy/model_functions.py b/src/CompNeuroPy/model_functions.py index df2f3c3..4cbfab9 100644 --- a/src/CompNeuroPy/model_functions.py +++ b/src/CompNeuroPy/model_functions.py @@ -1,18 +1,12 @@ -from ANNarchy import ( - compile, - populations, - projections, - clear, -) -from ANNarchy import __version__ as ANNarchy_version +from CompNeuroPy import ann import os from CompNeuroPy import system_functions as sf from CompNeuroPy.generate_model import CompNeuroModel -if ANNarchy_version >= "4.8": - from ANNarchy.intern.NetworkManager import NetworkManager +if ann.__version__ >= "4.8": + from CompNeuroPy import ann_NetworkManager as NetworkManager else: - from ANNarchy.core import Global + from CompNeuroPy import ann_Global as Global def compile_in_folder(folder_name, net=None, clean=False, silent=False): @@ -33,7 +27,7 @@ def compile_in_folder(folder_name, net=None, clean=False, silent=False): """ sf.create_dir("annarchy_folders/" + folder_name, print_info=False) if isinstance(net, type(None)): - compile("annarchy_folders/" + folder_name, clean=clean, silent=silent) + ann.compile("annarchy_folders/" + folder_name, 
clean=clean, silent=silent) else: net.compile("annarchy_folders/" + folder_name, clean=clean, silent=silent) if os.getcwd().split("/")[-1] == "annarchy_folders": @@ -48,7 +42,7 @@ def annarchy_compiled(net_id=0): net_id (int, optional): Network ID. Default: 0. """ - if ANNarchy_version >= "4.8": + if ann.__version__ >= "4.8": return NetworkManager().is_compiled(net_id) else: return Global._network[net_id]["compiled"] @@ -64,8 +58,8 @@ def get_full_model(): population and projection names, respectively. """ return { - "populations": [pop.name for pop in populations()], - "projections": [proj.name for proj in projections()], + "populations": [pop.name for pop in ann.populations()], + "projections": [proj.name for proj in ann.projections()], } @@ -83,7 +77,9 @@ def cnp_clear(functions=True, neurons=True, synapses=True, constants=True): constants (bool, optional): If True, all constants are cleared. Default: True. """ - clear(functions=functions, neurons=neurons, synapses=synapses, constants=constants) + ann.clear( + functions=functions, neurons=neurons, synapses=synapses, constants=constants + ) for model_name in CompNeuroModel._initialized_models.keys(): CompNeuroModel._initialized_models[model_name] = False for model_name in CompNeuroModel._compiled_models.keys(): @@ -115,14 +111,14 @@ def _get_all_attributes(compartment_list: list[str]): ): raise ValueError(f"Compartment {compartment} not found in model") ### get attributes of populations - for pop in populations(): + for pop in ann.populations(): if pop.name not in compartment_list: continue attributes["populations"][pop.name] = { param_name: getattr(pop, param_name) for param_name in pop.attributes } ### get attributes of projections - for proj in projections(): + for proj in ann.projections(): if proj.name not in compartment_list: continue attributes["projections"][proj.name] = { @@ -143,7 +139,7 @@ def _set_all_attributes(attributes: dict, parameters: bool): If True, set parameters and variables, else only set 
variables. """ ### set attributes of populations - for pop in populations(): + for pop in ann.populations(): ### skip populations which are not in attributes if pop.name not in attributes["populations"].keys(): continue @@ -153,7 +149,7 @@ def _set_all_attributes(attributes: dict, parameters: bool): continue setattr(pop, param_name, param_value) ### set attributes of projections - for proj in projections(): + for proj in ann.projections(): ### skip projections which are not in attributes if proj.name not in attributes["projections"].keys(): continue @@ -177,11 +173,11 @@ def _get_all_parameters(): "populations": {}, "projections": {}, } - for pop in populations(): + for pop in ann.populations(): parameters["populations"][pop.name] = { param_name: getattr(pop, param_name) for param_name in pop.parameters } - for proj in projections(): + for proj in ann.projections(): parameters["projections"][proj.name] = { param_name: getattr(proj, param_name) for param_name in proj.parameters } @@ -197,9 +193,9 @@ def _set_all_parameters(parameters): Dictionary with keys "populations" and "projections" and values dicts of parameters of populations and projections, respectively. 
""" - for pop in populations(): + for pop in ann.populations(): for param_name, param_value in parameters["populations"][pop.name].items(): setattr(pop, param_name, param_value) - for proj in projections(): + for proj in ann.projections(): for param_name, param_value in parameters["projections"][proj.name].items(): setattr(proj, param_name, param_value) diff --git a/src/CompNeuroPy/monitors.py b/src/CompNeuroPy/monitors.py index d28504c..729a937 100644 --- a/src/CompNeuroPy/monitors.py +++ b/src/CompNeuroPy/monitors.py @@ -1,15 +1,6 @@ from CompNeuroPy import analysis_functions as af from CompNeuroPy import model_functions as mf -from ANNarchy import ( - get_time, - reset, - dt, - Monitor, - get_population, - get_projection, - populations, - projections, -) +from CompNeuroPy import ann import numpy as np from typingchecker import check_types from scipy.interpolate import interp1d @@ -168,7 +159,7 @@ def reset( self.timings[key]["start"].append(0) else: self.timings[key]["start"].append( - np.round(get_time(), af.get_number_of_decimals(dt())) + np.round(ann.get_time(), af.get_number_of_decimals(ann.dt())) ) ### reset model @@ -177,7 +168,7 @@ def reset( ### if parameters=False, get parameters before reset and set them after ### reset parameters_dict = mf._get_all_parameters() - reset(populations, projections, synapses, monitors, net_id=net_id) + ann.reset(populations, projections, synapses, monitors, net_id=net_id) if parameters is False: ### if parameters=False, set parameters after reset mf._set_all_parameters(parameters_dict) @@ -342,7 +333,7 @@ def _get_temp_timings(self): self.timings[compartment]["stop"] ): ### was started/resumed but never stoped after --> use current time for stop time - self.timings[compartment]["stop"].append(get_time()) + self.timings[compartment]["stop"].append(ann.get_time()) ### calculate the idx of the recorded arrays which correspond to the timings and remove 'currently_paused' ### get for each start-stop pair the corrected start 
stop timings (when teh values were actually recorded, depends on period and timestep) ### and also get the number of recorded values for start-stop pair @@ -368,13 +359,13 @@ def _get_temp_timings(self): temp_timings[compartment] = { "start": { "ms": np.round( - start_time_arr, af.get_number_of_decimals(dt()) + start_time_arr, af.get_number_of_decimals(ann.dt()) ).tolist(), "idx": start_idx, }, "stop": { "ms": np.round( - stop_time_arr, af.get_number_of_decimals(dt()) + stop_time_arr, af.get_number_of_decimals(ann.dt()) ).tolist(), "idx": stop_idx, }, @@ -416,13 +407,13 @@ def _add_monitors(self, mon_dict: dict): ) ### check if compartment is pop if compartmentType == "pop": - mon[compartment] = Monitor( - get_population(compartment), val, start=False, period=period + mon[compartment] = ann.Monitor( + ann.get_population(compartment), val, start=False, period=period ) ### check if compartment is proj if compartmentType == "proj": - mon[compartment] = Monitor( - get_projection(compartment), val, start=False, period=period + mon[compartment] = ann.Monitor( + ann.get_projection(compartment), val, start=False, period=period ) return mon @@ -474,7 +465,7 @@ def _start_monitors(self, compartment_list, mon, timings=None): if len(timings[compartment_name]["start"]) <= len( timings[compartment_name]["stop"] ): - timings[compartment_name]["start"].append(get_time()) + timings[compartment_name]["start"].append(ann.get_time()) return timings def _pause_monitors(self, compartment_list, mon, timings=None): @@ -510,7 +501,7 @@ def _pause_monitors(self, compartment_list, mon, timings=None): timings[key]["currently_paused"] = True ### never make pause longer than start, this can be caused if pause is called multiple times without start in between if len(timings[key]["stop"]) < len(timings[key]["start"]): - timings[key]["stop"].append(get_time()) + timings[key]["stop"].append(ann.get_time()) ### if pause is directly called after start --> start == stop --> remove these entries, this 
is no actual period if ( len(timings[key]["stop"]) == len(timings[key]["start"]) @@ -544,14 +535,14 @@ def _get_monitors(self, mon_dict, mon): compartment_type, compartment, period = self._unpack_mon_dict_keys(key) recordings[f"{compartment};period"] = period if compartment_type == "pop": - pop = get_population(compartment) + pop = ann.get_population(compartment) parameter_dict = { param_name: getattr(pop, param_name) for param_name in pop.parameters } recordings[f"{compartment};parameter_dict"] = parameter_dict if compartment_type == "proj": - proj = get_projection(compartment) + proj = ann.get_projection(compartment) parameter_dict = { param_name: getattr(proj, param_name) for param_name in proj.parameters @@ -560,7 +551,7 @@ def _get_monitors(self, mon_dict, mon): for val_val in val: temp = mon[compartment].get(val_val) recordings[f"{compartment};{val_val}"] = temp - recordings["dt"] = dt() + recordings["dt"] = ann.dt() return recordings def _unpack_mon_dict_keys(self, s: str, warning: bool = False): @@ -590,8 +581,8 @@ def _unpack_mon_dict_keys(self, s: str, warning: bool = False): compartment_name = splitted_s[0] ### get type - pop_list = [pop.name for pop in populations()] - proj_list = [proj.name for proj in projections()] + pop_list = [pop.name for pop in ann.populations()] + proj_list = [proj.name for proj in ann.projections()] if compartment_name in pop_list and compartment_name in proj_list: ### raise error because name is in both lists print( @@ -607,13 +598,13 @@ def _unpack_mon_dict_keys(self, s: str, warning: bool = False): if len(splitted_s) == 2: period = float(splitted_s[1]) else: - period = {"pop": dt(), "proj": dt() * 1000}[compartment_type] + period = {"pop": ann.dt(), "proj": ann.dt() * 1000}[compartment_type] ### print warning for compartment_type proj if compartment_type == "proj" and warning: print( f"WARNING CompNeuroMonitors: no period provided for projection {compartment_name}, period set to {period} ms" ) - period = round(period / dt()) 
* dt() + period = round(period / ann.dt()) * ann.dt() return compartment_type, compartment_name, period diff --git a/src/CompNeuroPy/neuron_models/experimental_models/fit_Bogacz_nm.py b/src/CompNeuroPy/neuron_models/experimental_models/fit_Bogacz_nm.py index e56ea21..49575e8 100644 --- a/src/CompNeuroPy/neuron_models/experimental_models/fit_Bogacz_nm.py +++ b/src/CompNeuroPy/neuron_models/experimental_models/fit_Bogacz_nm.py @@ -1,6 +1,6 @@ -from ANNarchy import Neuron +from CompNeuroPy import ann -_fit_Bogacz = Neuron( +_fit_Bogacz = ann.Neuron( parameters=""" a = 0 : population b = 0 : population @@ -39,7 +39,7 @@ ) -_fit_Bogacz_2 = Neuron( +_fit_Bogacz_2 = ann.Neuron( parameters=""" C = 0 : population # pF k = 0 : population # pS * mV**-1 diff --git a/src/CompNeuroPy/neuron_models/experimental_models/fit_Corbit_nm.py b/src/CompNeuroPy/neuron_models/experimental_models/fit_Corbit_nm.py index e13303a..418e2ab 100644 --- a/src/CompNeuroPy/neuron_models/experimental_models/fit_Corbit_nm.py +++ b/src/CompNeuroPy/neuron_models/experimental_models/fit_Corbit_nm.py @@ -1,10 +1,10 @@ -from ANNarchy import Neuron +from CompNeuroPy import ann ### Fit neuron model for FSI neurons from Corbit et al. (2016) ### Corbit, V. L., Whalen, T. C., Zitelli, K. T., Crilly, S. Y., Rubin, J. E., & Gittis, A. H. (2016). Pallidostriatal Projections Promote β Oscillations in a Dopamine-Depleted Biophysical Network Model. Journal of Neuroscience, 36(20), 5556–5571. 
https://doi.org/10.1523/JNEUROSCI.0339-16.2016 -_Izhikevich2007_Corbit = Neuron( +_Izhikevich2007_Corbit = ann.Neuron( parameters=""" C = 0 : population # pF k = 0 : population # @@ -36,7 +36,7 @@ ) -_Izhikevich2007_Corbit2 = Neuron( +_Izhikevich2007_Corbit2 = ann.Neuron( parameters=""" C = 0 : population # pF k = 0 : population # @@ -66,7 +66,7 @@ ) -_Izhikevich2007_Corbit3 = Neuron( +_Izhikevich2007_Corbit3 = ann.Neuron( parameters=""" C = 0 : population # pF k = 0 : population # @@ -100,7 +100,7 @@ ) -_Izhikevich2007_Corbit4 = Neuron( +_Izhikevich2007_Corbit4 = ann.Neuron( parameters=""" C = 0 : population # pF k = 0 : population # @@ -129,7 +129,7 @@ ) -_Izhikevich2007_Corbit5 = Neuron( +_Izhikevich2007_Corbit5 = ann.Neuron( parameters=""" C = 0 : population # pF k = 0 : population # @@ -164,7 +164,7 @@ ) -_Izhikevich2007_Corbit6 = Neuron( +_Izhikevich2007_Corbit6 = ann.Neuron( parameters=""" C = 0 : population # ATTENTION! H&H model is myF/cm^2 --> here also myF/cm^2 and not pF --> current also myA/cm^2 and not pA k = 0 : population # @@ -194,7 +194,7 @@ ) -_Izhikevich2007_Corbit7 = Neuron( +_Izhikevich2007_Corbit7 = ann.Neuron( parameters=""" C = 0 : population # pF k = 0 : population # @@ -222,7 +222,7 @@ ) -_Izhikevich2007_Corbit8 = Neuron( +_Izhikevich2007_Corbit8 = ann.Neuron( parameters=""" C = 0 : population # ATTENTION! H&H model is myF/cm^2 --> here also myF/cm^2 and not pF --> current also myF/cm^2 and not pA k = 0 : population # @@ -256,7 +256,7 @@ ) -_Izhikevich2007_Corbit9 = Neuron( +_Izhikevich2007_Corbit9 = ann.Neuron( parameters=""" C = 0 : population # ATTENTION! H&H model is myF/cm^2 --> here also myF/cm^2 and not pF --> current also myF/cm^2 and not pA k = 0 : population # @@ -287,7 +287,7 @@ description="Based on Izhikevich2007 adjusted to fit Corbit FSI neuron model. Adjusted version should be able to produce late spiking and non-linear f-I curve. 
Combination of Corbit6 and Corbit8 without parameter d, but x ", ) -_Izhikevich2007_Corbit10 = Neuron( +_Izhikevich2007_Corbit10 = ann.Neuron( parameters=""" ### base parameters C = 1.083990339534819 : population # yF/cm^2 @@ -334,7 +334,7 @@ name="_Izhikevich2007_Corbit10", description="Simple neuron model equations from Izhikevich (2007) adjusted version to fit the striatal FSI neuron model from Corbit et al. (2016) should be able to produce late spiking.", ) -_Izhikevich2007_Corbit11 = Neuron( +_Izhikevich2007_Corbit11 = ann.Neuron( parameters=""" ### base parameters C = 0 @@ -384,7 +384,7 @@ description="Simple neuron model equations from Izhikevich (2007) adjusted version to fit the striatal FSI neuron model from Corbit et al. (2016) should be able to produce late spiking.", ) -_Izhikevich2007_Corbit12 = Neuron( +_Izhikevich2007_Corbit12 = ann.Neuron( parameters=""" ### base parameters C = 0 @@ -433,7 +433,7 @@ description="Simple neuron model equations from Izhikevich (2007) adjusted version to fit the striatal FSI neuron model from Corbit et al. (2016) should be able to produce late spiking.", ) -_Izhikevich2007_Corbit13 = Neuron( +_Izhikevich2007_Corbit13 = ann.Neuron( parameters=""" ### base parameters C = 0 diff --git a/src/CompNeuroPy/neuron_models/experimental_models/fit_Hjorth_nm.py b/src/CompNeuroPy/neuron_models/experimental_models/fit_Hjorth_nm.py index d49ecb6..d9dc5a7 100644 --- a/src/CompNeuroPy/neuron_models/experimental_models/fit_Hjorth_nm.py +++ b/src/CompNeuroPy/neuron_models/experimental_models/fit_Hjorth_nm.py @@ -1,10 +1,10 @@ -from ANNarchy import Neuron +from CompNeuroPy import ann ### Fit neuron model for ChIN based on electrophysiological recordings from Hjorth et al. (2020) ### Hjorth, J. J. J., Kozlov, A., Carannante, I., Frost Nylén, J., Lindroos, R., Johansson, Y., Tokarska, A., Dorst, M. C., Suryanarayana, S. M., Silberberg, G., Hellgren Kotaleski, J., & Grillner, S. (2020). The microcircuits of striatum in silico. 
Proceedings of the National Academy of Sciences, 117(17), 9554–9565. https://doi.org/10.1073/pnas.2000671117 -_Izhikevich2007_Hjorth_2020_ChIN1 = Neuron( +_Izhikevich2007_Hjorth_2020_ChIN1 = ann.Neuron( parameters=""" C = 0 : population # pF k = 0 : population # @@ -36,7 +36,7 @@ ) -_Izhikevich2007_Hjorth_2020_ChIN2 = Neuron( +_Izhikevich2007_Hjorth_2020_ChIN2 = ann.Neuron( parameters=""" C = 0 : population # pF k = 0 : population # @@ -70,7 +70,7 @@ ) -_Izhikevich2007_Hjorth_2020_ChIN3 = Neuron( +_Izhikevich2007_Hjorth_2020_ChIN3 = ann.Neuron( parameters=""" C = 0 : population # pF k = 0 : population # @@ -109,7 +109,7 @@ ) -_Izhikevich2007_Hjorth_2020_ChIN4 = Neuron( +_Izhikevich2007_Hjorth_2020_ChIN4 = ann.Neuron( parameters=""" C = 0 : population # pF k = 0 : population # @@ -144,7 +144,7 @@ ) -_Izhikevich2007_Hjorth_2020_ChIN5 = Neuron( +_Izhikevich2007_Hjorth_2020_ChIN5 = ann.Neuron( parameters=""" C = 0 : population # pF k = 0 : population # @@ -179,7 +179,7 @@ ) -_Izhikevich2007_Hjorth_2020_ChIN6 = Neuron( +_Izhikevich2007_Hjorth_2020_ChIN6 = ann.Neuron( parameters=""" C = 0 : population # pF k = 0 : population # @@ -219,7 +219,7 @@ ) -_Izhikevich2007_Hjorth_2020_ChIN7 = Neuron( +_Izhikevich2007_Hjorth_2020_ChIN7 = ann.Neuron( parameters=""" C = 0 : population # pF k = 0 : population # @@ -257,7 +257,7 @@ ) -_Izhikevich2007_Hjorth_2020_ChIN8 = Neuron( +_Izhikevich2007_Hjorth_2020_ChIN8 = ann.Neuron( parameters=""" C = 0 : population # pF k = 0 : population # @@ -299,7 +299,7 @@ ) -_Izhikevich2007_Hjorth_2020_ChIN9 = Neuron( +_Izhikevich2007_Hjorth_2020_ChIN9 = ann.Neuron( parameters=""" C = 0 : population # pF k = 0 : population # @@ -341,7 +341,7 @@ ) -_Izhikevich2007_Hjorth_2020_ChIN10 = Neuron( +_Izhikevich2007_Hjorth_2020_ChIN10 = ann.Neuron( parameters=""" C = 0 : population # pF k = 0 : population # diff --git a/src/CompNeuroPy/neuron_models/final_models/H_and_H_like_nm.py b/src/CompNeuroPy/neuron_models/final_models/H_and_H_like_nm.py index 
0df3ea9..580c280 100644 --- a/src/CompNeuroPy/neuron_models/final_models/H_and_H_like_nm.py +++ b/src/CompNeuroPy/neuron_models/final_models/H_and_H_like_nm.py @@ -1,4 +1,4 @@ -from ANNarchy import Neuron +from CompNeuroPy import ann ### Hodgkin Huxley like neuron models @@ -273,7 +273,7 @@ def __init__(self) -> None: """ -class _HHneuron(Neuron): +class _HHneuron(ann.Neuron): """ PREDEFINED diff --git a/src/CompNeuroPy/neuron_models/final_models/artificial_nm.py b/src/CompNeuroPy/neuron_models/final_models/artificial_nm.py index 9766a69..ea69875 100644 --- a/src/CompNeuroPy/neuron_models/final_models/artificial_nm.py +++ b/src/CompNeuroPy/neuron_models/final_models/artificial_nm.py @@ -1,10 +1,9 @@ -from ANNarchy import Neuron - +from CompNeuroPy import ann ### artificial neuron models -class IntegratorNeuron(Neuron): +class IntegratorNeuron(ann.Neuron): """ TEMPLATE @@ -96,7 +95,7 @@ def __init__(self, tau: float = 1, threshold: float = 1): self._instantiated.append(True) -class IntegratorNeuronSimple(Neuron): +class IntegratorNeuronSimple(ann.Neuron): """ TEMPLATE @@ -177,7 +176,7 @@ def __init__(self, tau: float = 1): self._instantiated.append(True) -class PoissonNeuron(Neuron): +class PoissonNeuron(ann.Neuron): """ TEMPLATE @@ -224,7 +223,7 @@ def __init__(self, rates: float = 0): self._instantiated.append(True) -class PoissonNeuronUpDown(Neuron): +class PoissonNeuronUpDown(ann.Neuron): """ TEMPLATE @@ -276,7 +275,7 @@ def __init__(self, rates: float = 0, tau_up: float = 1, tau_down: float = 1): self._instantiated.append(True) -class PoissonNeuronSin(Neuron): +class PoissonNeuronSin(ann.Neuron): """ TEMPLATE diff --git a/src/CompNeuroPy/neuron_models/final_models/izhikevich_2003_like_nm.py b/src/CompNeuroPy/neuron_models/final_models/izhikevich_2003_like_nm.py index 2647fa9..3e4e51a 100644 --- a/src/CompNeuroPy/neuron_models/final_models/izhikevich_2003_like_nm.py +++ b/src/CompNeuroPy/neuron_models/final_models/izhikevich_2003_like_nm.py @@ -1,10 +1,10 @@ 
-from ANNarchy import Neuron +from CompNeuroPy import ann ### Izhikevich (2003)-like neuron model templates ### based on: Izhikevich, E. M. (2003). Simple model of spiking neurons. IEEE Transactions on Neural Networks, 14(6), 1569–1572. https://doi.org/10.1109/TNN.2003.820440 -class Izhikevich2003FixedNoisyAmpa(Neuron): +class Izhikevich2003FixedNoisyAmpa(ann.Neuron): """ TEMPLATE @@ -104,7 +104,7 @@ def __init__( self._instantiated.append(True) -class Izhikevich2003NoisyAmpa(Neuron): +class Izhikevich2003NoisyAmpa(ann.Neuron): """ TEMPLATE @@ -215,7 +215,7 @@ def __init__( self._instantiated.append(True) -class Izhikevich2003NoisyAmpaNonlin(Neuron): +class Izhikevich2003NoisyAmpaNonlin(ann.Neuron): """ TEMPLATE @@ -336,7 +336,7 @@ def __init__( self._instantiated.append(True) -class Izhikevich2003NoisyAmpaOscillating(Neuron): +class Izhikevich2003NoisyAmpaOscillating(ann.Neuron): """ TEMPLATE @@ -458,7 +458,7 @@ def __init__( self._instantiated.append(True) -class Izhikevich2003NoisyBaseSNR(Neuron): +class Izhikevich2003NoisyBaseSNR(ann.Neuron): """ TEMPLATE @@ -603,7 +603,7 @@ def __init__( self._instantiated.append(True) -class Izhikevich2003NoisyBase(Neuron): +class Izhikevich2003NoisyBase(ann.Neuron): """ TEMPLATE @@ -724,7 +724,7 @@ def __init__( self._instantiated.append(True) -class Izhikevich2003NoisyBaseNonlin(Neuron): +class Izhikevich2003NoisyBaseNonlin(ann.Neuron): """ TEMPLATE diff --git a/src/CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py b/src/CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py index b495c00..6aae3e2 100644 --- a/src/CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py +++ b/src/CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py @@ -1,4 +1,4 @@ -from ANNarchy import Neuron +from CompNeuroPy import ann import re ### Izhikevich (2007)-like neuron model templates @@ -93,7 +93,7 @@ def _set_init(equations, init): 
############################################################################################ -class Izhikevich2007(Neuron): +class Izhikevich2007(ann.Neuron): """ TEMPLATE @@ -187,7 +187,7 @@ def __init__( self._instantiated.append(True) -class Izhikevich2007RecCur(Neuron): +class Izhikevich2007RecCur(ann.Neuron): """ TEMPLATE @@ -293,7 +293,7 @@ def __init__( self._instantiated.append(True) -class Izhikevich2007VoltageClamp(Neuron): +class Izhikevich2007VoltageClamp(ann.Neuron): """ TEMPLATE @@ -394,7 +394,7 @@ def __init__( self._instantiated.append(True) -class Izhikevich2007Syn(Neuron): +class Izhikevich2007Syn(ann.Neuron): """ TEMPLATE @@ -512,7 +512,7 @@ def __init__( self._instantiated.append(True) -class Izhikevich2007NoisyAmpa(Neuron): +class Izhikevich2007NoisyAmpa(ann.Neuron): """ TEMPLATE @@ -640,7 +640,7 @@ def __init__( self._instantiated.append(True) -class Izhikevich2007NoisyBase(Neuron): +class Izhikevich2007NoisyBase(ann.Neuron): """ TEMPLATE @@ -774,7 +774,7 @@ def __init__( self._instantiated.append(True) -class Izhikevich2007FsiNoisyAmpa(Neuron): +class Izhikevich2007FsiNoisyAmpa(ann.Neuron): """ TEMPLATE @@ -908,7 +908,7 @@ def __init__( self._instantiated.append(True) -class Izhikevich2007CorbitFsiNoisyAmpa(Neuron): +class Izhikevich2007CorbitFsiNoisyAmpa(ann.Neuron): """ TEMPLATE @@ -1066,7 +1066,7 @@ def __init__( self._instantiated.append(True) -class Izhikevich2007CorbitFsiNoisyBase(Neuron): +class Izhikevich2007CorbitFsiNoisyBase(ann.Neuron): """ TEMPLATE @@ -1232,7 +1232,7 @@ def __init__( self._instantiated.append(True) -class Izhikevich2007NoisyAmpaOscillating(Neuron): +class Izhikevich2007NoisyAmpaOscillating(ann.Neuron): """ TEMPLATE @@ -1372,7 +1372,7 @@ def __init__( self._instantiated.append(True) -class IzhikevichGolomb(Neuron): +class IzhikevichGolomb(ann.Neuron): """ PREDEFINED diff --git a/src/CompNeuroPy/opt_neuron.py b/src/CompNeuroPy/opt_neuron.py index 8100ded..b5e7a51 100644 --- a/src/CompNeuroPy/opt_neuron.py +++ 
b/src/CompNeuroPy/opt_neuron.py @@ -1,4 +1,4 @@ -from ANNarchy import setup, Population, get_population, Neuron +from CompNeuroPy import ann import numpy as np import traceback from CompNeuroPy import system_functions as sf @@ -54,9 +54,9 @@ def __init__( experiment: Type[CompNeuroExp], get_loss_function: Callable[[Any, Any], float | list[float]], variables_bounds: dict[str, float | str | list[float | str]], - neuron_model: Neuron, + neuron_model: ann.Neuron, results_soll: Any | None = None, - target_neuron_model: Neuron | None = None, + target_neuron_model: ann.Neuron | None = None, time_step: float = 1.0, recording_period: float | None = None, compile_folder_name: str = "annarchy_OptNeuron", @@ -202,7 +202,7 @@ def __init__( self._check_neuron_models() ### setup ANNarchy - setup(dt=time_step) + ann.setup(dt=time_step) ### create and compile model ### if neuron models and target neuron model --> create both models then @@ -468,9 +468,9 @@ def _check_neuron_models(self): """ Checks if the neuron models are ANNarchy neuron models. """ - if not (isinstance(self.neuron_model, type(Neuron()))) or ( + if not (isinstance(self.neuron_model, type(ann.Neuron()))) or ( self.target_neuron is not None - and not (isinstance(self.target_neuron, type(Neuron()))) + and not (isinstance(self.target_neuron, type(ann.Neuron()))) ): print( "OptNeuron: Error: neuron_model and/or target_neuron_model have to be ANNarchy neuron models" @@ -713,7 +713,7 @@ def _raw_neuron(self, neuron, name, size): size (int): The number of neurons in the population. 
""" - Population(size, neuron=neuron, name=name) + ann.Population(size, neuron=neuron, name=name) def _test_variables(self): """ @@ -728,7 +728,7 @@ def _test_variables(self): ] ).tolist() ### check if pop has these parameters - pop_parameter_names = get_population(self.pop).attributes.copy() + pop_parameter_names = ann.get_population(self.pop).attributes.copy() for name in pop_parameter_names.copy(): if name in all_vars_names: all_vars_names.remove(name) @@ -1135,13 +1135,13 @@ def _set_fitting_parameters( ### set parameters for param_name, param_val in all_variables_dict.items(): - pop_parameter_names = get_population(pop).attributes + pop_parameter_names = ann.get_population(pop).attributes ### only if param_name in parameter attributes if param_name in pop_parameter_names: if self.popsize == 1: - setattr(get_population(pop), param_name, param_val[0]) + setattr(ann.get_population(pop), param_name, param_val[0]) else: - setattr(get_population(pop), param_name, param_val) + setattr(ann.get_population(pop), param_name, param_val) def _test_fit(self, fitparams_dict): """ diff --git a/src/CompNeuroPy/simulation_functions.py b/src/CompNeuroPy/simulation_functions.py index 96c0546..0e7da4f 100644 --- a/src/CompNeuroPy/simulation_functions.py +++ b/src/CompNeuroPy/simulation_functions.py @@ -1,11 +1,4 @@ -from ANNarchy import ( - simulate, - get_population, - dt, - simulate_until, - get_current_step, - get_time, -) +from CompNeuroPy import ann from CompNeuroPy import analysis_functions as af import numpy as np from typing import Callable @@ -33,22 +26,24 @@ def attr_sim(pop: str, attr_dict, t=500): ### save prev attr v_prev_dict = { - attr: getattr(get_population(pop), attr) for attr in attr_dict.keys() + attr: getattr(ann.get_population(pop), attr) for attr in attr_dict.keys() } ### set attributes for attr, v in attr_dict.items(): - setattr(get_population(pop), attr, v) + setattr(ann.get_population(pop), attr, v) ### simulate - simulate(t) + ann.simulate(t) ### reset 
attributes to previous values for attr, v in v_prev_dict.items(): - setattr(get_population(pop), attr, v) + setattr(ann.get_population(pop), attr, v) ### return the values for the attribute for each time step - attr_list_dict = {attr: [v] * int(round(t / dt())) for attr, v in attr_dict.items()} + attr_list_dict = { + attr: [v] * int(round(t / ann.dt())) for attr, v in attr_dict.items() + } return attr_list_dict @@ -125,12 +120,12 @@ def attr_ramp(pop: str, attr, v0, v1, dur, n): simulation time step without remainder """ - if (dur / n) / dt() % 1 != 0: + if (dur / n) / ann.dt() % 1 != 0: raise ValueError( "ERROR current_ramp: dur/n should result in a duration (for a single stimulation) which is divisible by the simulation time step (without remainder)\ncurrent duration = " + str(dur / n) + ", timestep = " - + str(dt()) + + str(ann.dt()) + "!\n" ) @@ -478,7 +473,7 @@ def run(self): ### (otherwise run would need to be called at time 0) for event in self.event_list: if event.onset is not None: - event.onset = get_current_step() + event.onset + event.onset = ann.get_current_step() + event.onset ### check if there are events which have no onset and are not triggered by other ### events and have no model_trigger --> they would never start @@ -493,7 +488,7 @@ def run(self): and event.model_trigger is None and event.name not in triggered_events ): - event.onset = get_current_step() + event.onset = ann.get_current_step() if self.verbose: print(event.name, "set onset to start of run") @@ -519,22 +514,23 @@ def run(self): print("check_triggers:", self.model_trigger_list) if len(self.model_trigger_list) > 1: ### multiple model triggers - simulate_until( + ann.simulate_until( max_duration=next_events_time, population=[ - get_population(pop_name) for pop_name in self.model_trigger_list + ann.get_population(pop_name) + for pop_name in self.model_trigger_list ], operator="or", ) elif len(self.model_trigger_list) > 0: ### a single model trigger - simulate_until( + 
ann.simulate_until( max_duration=next_events_time, - population=get_population(self.model_trigger_list[0]), + population=ann.get_population(self.model_trigger_list[0]), ) else: ### no model_triggers - simulate(next_events_time) + ann.simulate(next_events_time) ### after run finishes initialize again self._initialize() @@ -554,7 +550,7 @@ def _run_current_events(self): event_run = False for event in self.event_list: if ( - event.onset == get_current_step() + event.onset == ann.get_current_step() and not (event.name in self.run_event_list) and event._check_requirements() ): @@ -571,7 +567,7 @@ def _run_model_trigger_events(self): ### loop to check if model trigger got active for model_trigger in self.model_trigger_list: ### TODO this is not generalized yet, only works if the model_trigger populations have the variable decision which is set to -1 if the model trigger is active - if int(get_population(model_trigger).decision[0]) == -1: + if int(ann.get_population(model_trigger).decision[0]) == -1: ### -1 means got active ### find the events triggerd by the model_trigger and run them for event in self.event_list: @@ -598,12 +594,12 @@ def _get_next_events_time(self): continue ### check if onset in the future and nearest if ( - event.onset > get_current_step() - and (event.onset - get_current_step()) < next_event_time + event.onset > ann.get_current_step() + and (event.onset - ann.get_current_step()) < next_event_time ): - next_event_time = event.onset - get_current_step() + next_event_time = event.onset - ann.get_current_step() ### return difference (simulation duration until nearest next event) in ms, round to full timesteps - return round(next_event_time * dt(), af.get_number_of_decimals(dt())) + return round(next_event_time * ann.dt(), af.get_number_of_decimals(ann.dt())) def _get_model_trigger_list(self): """ @@ -686,10 +682,10 @@ def run(self): if self._check_requirements(): ### run the event if self.trial_procedure.verbose: - print("run event:", self.name, 
get_time()) + print("run event:", self.name, ann.get_time()) ### for events which are triggered by model --> set onset if self.onset == None: - self.onset = get_current_step() + self.onset = ann.get_current_step() ### run the effect if self.effect is not None: self.effect() diff --git a/src/CompNeuroPy/simulation_requirements.py b/src/CompNeuroPy/simulation_requirements.py index cb52a9b..eb79ab2 100644 --- a/src/CompNeuroPy/simulation_requirements.py +++ b/src/CompNeuroPy/simulation_requirements.py @@ -1,4 +1,4 @@ -from ANNarchy import get_population, Population +from CompNeuroPy import ann class ReqPopHasAttr: @@ -31,7 +31,7 @@ def run(self): """ for attr_name in self.attr_name_list: for pop_name in self.pop_name_list: - pop: Population = get_population(pop_name) + pop: ann.Population = ann.get_population(pop_name) if not (attr_name in pop.attributes): raise ValueError( "Population " diff --git a/src/CompNeuroPy/synapse_models/synapse_models.py b/src/CompNeuroPy/synapse_models/synapse_models.py index 094aea0..334608d 100644 --- a/src/CompNeuroPy/synapse_models/synapse_models.py +++ b/src/CompNeuroPy/synapse_models/synapse_models.py @@ -1,7 +1,7 @@ -from ANNarchy import Synapse +from CompNeuroPy import ann -class FactorSynapse(Synapse): +class FactorSynapse(ann.Synapse): """ Synapse which scales the transmitted value by a specified factor. Factor is equivalent to the connection weight if weight==1. 
From 6f9486bc6c63ea8c8857ba233907276c14df55e6 Mon Sep 17 00:00:00 2001 From: olimaol Date: Tue, 13 Aug 2024 15:00:36 +0200 Subject: [PATCH 16/21] model_configurator: continued restructure mean_shift, std_scale optimization --- .../examples/model_configurator/test2.py | 510 +++++++++++++----- .../test2_deap_opt_regress.py | 148 ++--- .../test2_deap_opt_transform.py | 89 +-- 3 files changed, 506 insertions(+), 241 deletions(-) diff --git a/src/CompNeuroPy/examples/model_configurator/test2.py b/src/CompNeuroPy/examples/model_configurator/test2.py index 811c5fa..582b9c4 100644 --- a/src/CompNeuroPy/examples/model_configurator/test2.py +++ b/src/CompNeuroPy/examples/model_configurator/test2.py @@ -19,6 +19,7 @@ from sklearn.pipeline import make_pipeline from sklearn.metrics import mean_squared_error from tqdm import tqdm +import itertools def mean_shift_regression(n, p): @@ -144,83 +145,33 @@ def log_normal_1d(x, amp, mean, sig): deap_opt_regress_path = "test2_deap_opt_regress/" -def curve_fit_func( +def regression_func( X, - g0, - g1, - g2, - g3, - g4, - g5, - g6, - g7, - g8, - g9, - g10, - g11, - g12, - g13, - g14, - p0, - # p1, - # p2, - # p3, - # p4, - # p5, - # p6, - # p7, - # p8, - # p9, - # p10, - # p11, - # p12, - # p13, - # p14, - # p15, - # p16, - # p17, - # p18, - # p19, - # p20, + *args, ): - x0, x1 = X + """ + A 2D regression function. + + Args: + X (array): + The x (X[0]) and y (X[1]) coordinates. + *args: + The parameters of the regression function. + + Returns: + float: + The z(x,y) value(s) of the regression function. 
+ """ + x, y = X ### 2D polynomial with certain degree return np.clip( - p0 - + gauss_1d( - ( - x0 - # p0 - # + p1 * x0 - # + p2 * x1 - # + p3 * x0**2 - # + p4 * x0 * x1 - # + p5 * x1**2 - # + p6 * x0**3 - # + p7 * x0**2 * x1 - # + p8 * x0 * x1**2 - # + p9 * x1**3 - # + p10 * x0**4 - # + p11 * x0**3 * x1 - # + p12 * x0**2 * x1**2 - # + p13 * x0 * x1**3 - # + p14 * x1**4 - # + p15 * x0**5 - # + p16 * x0**4 * x1 - # + p17 * x0**3 * x1**2 - # + p18 * x0**2 * x1**3 - # + p19 * x0 * x1**4 - # + p20 * x1**5 - ), - amp=g0, - mean=g1, - sig=g2, - ) - + gauss_1d(x1, amp=g3, mean=g4, sig=g5) - + gauss_1d(x0 * x1, amp=g6, mean=g7, sig=g8) - + gauss_1d(x0**2 * x1, amp=g9, mean=g10, sig=g11) - + gauss_1d(x0 * x1**2, amp=g12, mean=g13, sig=g14), + args[0] + + gauss_1d(x, amp=args[1], mean=args[2], sig=args[3]) + + gauss_1d(y, amp=args[4], mean=args[5], sig=args[6]) + + gauss_1d(x * y, amp=args[7], mean=args[8], sig=args[9]) + + gauss_1d(x**2 * y, amp=args[10], mean=args[11], sig=args[12]) + + gauss_1d(x * y**2, amp=args[13], mean=args[14], sig=args[15]), -1e20, 1e20, ) @@ -503,7 +454,28 @@ def get_difference_of_samples(binomial_sample, normal_sample, n): def difference_binomial_normal_optimize(n, p): - print(f"Optimize for n={n}, p={p}") + """ + Calculate the difference between samples of a binomial and a normal distribution. + The binomial distribution is generated with parameters n and p. + The normal distribution is generated to best approximate the binomial distribution. + Further the normal distribution is shifted by mean_shift and scaled by std_scale. + Both are optimized to minimize the difference between the binomial and normal + distribution. + + Args: + n (int): + The number of trials of the binomial distribution. + p (float): + The probability of success of the binomial distribution. + + Returns: + mean_shift_opt (float): + The shift of the mean of the normal distribution. + std_scale_opt (float): + The scaling of the standard deviation of the normal distribution. 
+ error_opt (float): + The difference between the binomial and normal distribution. + """ ### save p and n to be availyble in optimization script save_variables( variable_list=[p, n], @@ -511,7 +483,7 @@ def difference_binomial_normal_optimize(n, p): path=OPTIMIZE_FOLDER, ) ### run optimization - args_list = [[f"{parallel_id}"] for parallel_id in range(N_RUNS)] + args_list = [[f"{parallel_id}"] for parallel_id in range(N_RUNS_OPT_PER_PAIR)] run_script_parallel( script_path="test2_deap_opt_transform.py", n_jobs=N_JOBS, @@ -521,7 +493,7 @@ def difference_binomial_normal_optimize(n, p): ### fitness best_fitness = 1e6 best_parallel_id = 0 - for parallel_id in range(N_RUNS): + for parallel_id in range(N_RUNS_OPT_PER_PAIR): loaded_variables = load_variables( name_list=[ f"error_opt_{parallel_id}", @@ -529,7 +501,6 @@ def difference_binomial_normal_optimize(n, p): path=OPTIMIZE_FOLDER, ) error_opt = loaded_variables[f"error_opt_{parallel_id}"] - print(f"n={n}, p={p}, error_opt_{parallel_id}: {error_opt}") if error_opt < best_fitness: best_fitness = error_opt best_parallel_id = parallel_id @@ -549,6 +520,46 @@ def difference_binomial_normal_optimize(n, p): return mean_shift_opt, std_scale_opt, error_opt +def difference_binomial_normal_regress(n, p): + """ + Calculate the difference between samples of a binomial and a normal distribution. + The binomial distribution is generated with parameters n and p. + The normal distribution is generated to best approximate the binomial distribution. + Further the normal distribution is shifted by mean_shift and scaled by std_scale. + Both are obtained from the regression of the optimized mean_shift and std_scale. + + Args: + n (int): + The number of trials of the binomial distribution. + p (float): + The probability of success of the binomial distribution. + + Returns: + mean_shift_regress (float): + The shift of the mean of the normal distribution. 
+ std_scale_regress (float): + The scaling of the standard deviation of the normal distribution. + error_regress (float): + The difference between the binomial and normal distribution. + """ + ### load parameters for regression + loaded_variables = load_variables( + name_list=[ + "popt_mean_shift", + "popt_std_scale", + ], + path=REGRESS_FOLDER, + ) + + mean_shift_regress = regression_func((n, p), *loaded_variables["popt_mean_shift"]) + std_scale_regress = regression_func((n, p), *loaded_variables["popt_std_scale"]) + error_regress = difference_binomial_normal( + mean_shift=mean_shift_regress, std_scale=std_scale_regress, n=n, p=p + ) + + return mean_shift_regress, std_scale_regress, error_regress + + def difference_binomial_normal(mean_shift, std_scale, n, p): """ Calculate the difference between samples of a binomial and a normal distribution. @@ -565,6 +576,10 @@ def difference_binomial_normal(mean_shift, std_scale, n, p): The number of trials of the binomial distribution. p (float): The probability of success of the binomial distribution. + + Returns: + diff (float): + The difference between the binomial and normal distribution. """ # Generate data samples binomial_sample, normal_sample = generate_samples( @@ -607,29 +622,48 @@ def logarithmic_arange(start, end, num_points): return points -def plot_optimize(): +def plot_with_transformation(mode: str): """ Plot the difference between the binomial and normal distribution for various n and p values. Further plots the optimized mean_shift and std_scale values. Load the data from the OPTIMIZE_FOLDER and save the plot to the PLOTS_FOLDER. 
+ + Args: + mode (str): + Either 'opt' or 'regress' """ + if mode not in ["opt", "regress"]: + raise ValueError("Mode must be either 'opt' or 'regress'.") + ### load the data loaded_variables = load_variables( name_list=[ "p_list", "n_list", - "mean_shift_opt_list", - "std_scale_opt_list", - "diff_opt_list", + f"mean_shift_{mode}_list", + f"std_scale_{mode}_list", + f"diff_{mode}_list", ], - path=OPTIMIZE_FOLDER, + path=OPTIMIZE_FOLDER if mode == "opt" else REGRESS_FOLDER, ) ### plot the data plt.figure(figsize=(6.4 * 2, 4.8 * 2 * 3)) for idx, title, key in [ - (1, "Mean Shift optimized", "mean_shift_opt_list"), - (2, "Std Scale optimized", "std_scale_opt_list"), - (3, "Difference optimized", "diff_opt_list"), + ( + 1, + "Mean Shift optimized" if mode == "opt" else "Mean Shift regressed", + f"mean_shift_{mode}_list", + ), + ( + 2, + "Std Scale optimized" if mode == "opt" else "Std Scale regressed", + f"std_scale_{mode}_list", + ), + ( + 3, + "Difference optimized" if mode == "opt" else "Difference regressed", + f"diff_{mode}_list", + ), ]: plt.subplot(3, 1, idx) plot_2d_interpolated_image( @@ -645,7 +679,14 @@ def plot_optimize(): plt.title(f"{title}\n(max: {np.max(loaded_variables[key])})") plt.tight_layout() create_dir(PLOTS_FOLDER) - plt.savefig(f"{PLOTS_FOLDER}/difference_optimized.png", dpi=300) + plt.savefig( + ( + f"{PLOTS_FOLDER}/difference_optimized.png" + if mode == "opt" + else f"{PLOTS_FOLDER}/difference_regressed.png" + ), + dpi=300, + ) def plot_compare_original(): @@ -682,25 +723,17 @@ def plot_compare_original(): plt.savefig(f"{PLOTS_FOLDER}/difference_original.png", dpi=300) -def compare_with_or_without_optimization(): +def compare_normal_binomial(compare_original, optimize, regress): """ Compare the difference between the binomial and normal distribution for various n and - p values with and without optimization. Save the data to the COMPARE_ORIGINAL_FOLDER - and OPTIMIZE_FOLDER. + p values with and without optimization or regression. 
""" - ### create the save folder(s) - if COMPARE_ORIGINAL: - create_data_raw_folder( - COMPARE_ORIGINAL_FOLDER, - ) - if OPTIMIZE: - create_data_raw_folder( - OPTIMIZE_FOLDER, - ) ### create the n/p pairs n_arr = logarithmic_arange(*N_VALUES).astype(int) p_arr = logarithmic_arange(*P_VALUES) + ### get all possible combinations of n and p + np_pair_arr = list(itertools.product(n_arr, p_arr)) ### get difference between binomial and normal distribution for each n/p pair p_list = [] @@ -709,25 +742,39 @@ def compare_with_or_without_optimization(): mean_shift_opt_list = [] std_scale_opt_list = [] diff_opt_list = [] - for p in p_arr: - for n in n_arr: - p_list.append(p) - n_list.append(n) - if COMPARE_ORIGINAL: - ### get the error without optimization - error = difference_binomial_normal(mean_shift=0, std_scale=1, n=n, p=p) - diff_original_list.append(error) - if OPTIMIZE: - ### get the error with optimization - mean_shift_opt, std_scale_opt, error_opt = ( - difference_binomial_normal_optimize(n=n, p=p) - ) - mean_shift_opt_list.append(mean_shift_opt) - std_scale_opt_list.append(std_scale_opt) - diff_opt_list.append(error_opt) + mean_shift_regress_list = [] + std_scale_regress_list = [] + diff_regress_list = [] + progress_bar = tqdm( + np_pair_arr, + desc=f"Compare {['','original'][int(compare_original)]} {['','optimized'][int(optimize)]} {['','regression'][int(regress)]}", + ) + for n, p in progress_bar: + p_list.append(p) + n_list.append(n) + if compare_original: + ### get the error without optimization + error = difference_binomial_normal(mean_shift=0, std_scale=1, n=n, p=p) + diff_original_list.append(error) + if optimize: + ### get the error with optimization + mean_shift_opt, std_scale_opt, error_opt = ( + difference_binomial_normal_optimize(n=n, p=p) + ) + mean_shift_opt_list.append(mean_shift_opt) + std_scale_opt_list.append(std_scale_opt) + diff_opt_list.append(error_opt) + if regress: + ### get the error with the regression + mean_shift_regress, 
std_scale_regress, error_regress = ( + difference_binomial_normal_regress(n=n, p=p) + ) + mean_shift_regress_list.append(mean_shift_regress) + std_scale_regress_list.append(std_scale_regress) + diff_regress_list.append(error_regress) ### save variables - if COMPARE_ORIGINAL: + if compare_original: save_variables( variable_list=[ p_list, @@ -741,7 +788,7 @@ def compare_with_or_without_optimization(): ], path=COMPARE_ORIGINAL_FOLDER, ) - if OPTIMIZE: + if optimize: save_variables( variable_list=[ p_list, @@ -759,6 +806,89 @@ def compare_with_or_without_optimization(): ], path=OPTIMIZE_FOLDER, ) + if regress: + save_variables( + variable_list=[ + p_list, + n_list, + mean_shift_regress_list, + std_scale_regress_list, + diff_regress_list, + ], + name_list=[ + "p_list", + "n_list", + "mean_shift_regress_list", + "std_scale_regress_list", + "diff_regress_list", + ], + path=REGRESS_FOLDER, + ) + + +def get_regression_parameters(): + """ + Get the regression parameters for the mean shift and std scale. Save the parameters + to the REGRESS_FOLDER. + + Returns: + popt_mean_shift (array): + The optimized parameters for the mean shift regression. + popt_std_scale (array): + The optimized parameters for the std scale regression. 
+ """ + args_list = [[f"{parallel_id}"] for parallel_id in range(N_RUNS_REGRESS)] + run_script_parallel( + script_path="test2_deap_opt_regress.py", + n_jobs=N_JOBS, + args_list=args_list, + ) + ### get best parameters for regression of mean_shift and std_scale + best_fitness_mean_shift = 1e6 + best_fitness_std_scale = 1e6 + best_parallel_id_mean_shift = 0 + best_parallel_id_std_scale = 0 + for parallel_id in range(N_RUNS_REGRESS): + loaded_variables = load_variables( + name_list=[ + f"best_fitness_mean_shift_{parallel_id}", + f"best_fitness_std_scale_{parallel_id}", + ], + path=REGRESS_FOLDER, + ) + if ( + loaded_variables[f"best_fitness_mean_shift_{parallel_id}"] + < best_fitness_mean_shift + ): + best_fitness_mean_shift = loaded_variables[ + f"best_fitness_mean_shift_{parallel_id}" + ] + best_parallel_id_mean_shift = parallel_id + if ( + loaded_variables[f"best_fitness_std_scale_{parallel_id}"] + < best_fitness_std_scale + ): + best_fitness_std_scale = loaded_variables[ + f"best_fitness_std_scale_{parallel_id}" + ] + best_parallel_id_std_scale = parallel_id + # load best of mean_shift + loaded_variables = load_variables( + name_list=[f"popt_mean_shift_{best_parallel_id_mean_shift}"], + path=REGRESS_FOLDER, + ) + popt_mean_shift = loaded_variables[f"popt_mean_shift_{best_parallel_id_mean_shift}"] + # load best of std_scale + loaded_variables = load_variables( + name_list=[f"popt_std_scale_{best_parallel_id_std_scale}"], path=REGRESS_FOLDER + ) + popt_std_scale = loaded_variables[f"popt_std_scale_{best_parallel_id_std_scale}"] + + save_variables( + variable_list=[popt_mean_shift, popt_std_scale], + name_list=["popt_mean_shift", "popt_std_scale"], + path=REGRESS_FOLDER, + ) ### TODO I have the problem that for very small p the normal distribution is not a good @@ -769,19 +899,64 @@ def compare_with_or_without_optimization(): ### global paramters COMPARE_ORIGINAL = True OPTIMIZE = True -REGRESS = True +REGRESS = False PLOT_COMPARE_ORIGINAL = True PLOT_OPTIMIZE = True 
-PLOT_REGRESS = True +PLOT_REGRESS = False COMPARE_ORIGINAL_FOLDER = "test2_data_compare_original" OPTIMIZE_FOLDER = "test2_data_optimize" +REGRESS_FOLDER = "test2_data_regress" PLOTS_FOLDER = "test2_plots" S = 10000 SEED = 1234 -N_VALUES = [10, 1000, 2] # 20] -P_VALUES = [0.001, 0.1, 2] # 10] +N_VALUES = [10, 1000, 3] # 20] +P_VALUES = [0.001, 0.1, 3] # 10] N_JOBS = 2 -N_RUNS = 100 * N_JOBS +N_RUNS_OPT_PER_PAIR = 1 # * N_JOBS +N_RUNS_REGRESS = 1 +N_PARAMS_REGRESS = 16 + + +def preprocess_p_n_for_regress(n, p): + """ + Normalize n and p values before regression. + + Args: + n (int or array): + The number of trials of the binomial distribution. + p (float or array): + The probability of success of the binomial distribution. + + Returns: + n_pre_processed (float or array): + The normalized n value(s) which can be used for regression. + p_pre_processed (float or array): + The normalized p value(s) which can be used for regression. + """ + n_pre_processed = (n - N_MIN) / (N_MAX - N_MIN) + p_pre_processed = (p - P_MIN) / (P_MAX - P_MIN) + return n_pre_processed, p_pre_processed + + +def post_process_p_n_for_regression(n_pre_processed, p_pre_processed): + """ + Post-process the n and p values after regression. + + Args: + n_pre_processed (float or array): + The normalized n value(s) which were used for regression. + p_pre_processed (float or array): + The normalized p value(s) which were used for regression. + + Returns: + n (int or array): + The original n value(s). + p (float or array): + The original p value(s). 
+ """ + n = n_pre_processed * (N_MAX - N_MIN) + N_MIN + p = p_pre_processed * (P_MAX - P_MIN) + P_MIN + return n, p if __name__ == "__main__": @@ -793,14 +968,103 @@ def compare_with_or_without_optimization(): # 3rd make a 2D regression for the optimized mean shift and std scale, get mean_shift_regress(n, p) and std_scale_regress(n, p), save: the optimized parameters of the regression equations # 4th plot: (1) error depending on n and p, (2) optimized mean shift and std scale depending on n and p and corresponding error improvement, (3) regressed mean shift and std scale depending on n and p and corresponding error improvement - if COMPARE_ORIGINAL or OPTIMIZE: - compare_with_or_without_optimization() + ### create the save folder(s) + if COMPARE_ORIGINAL: + create_data_raw_folder( + COMPARE_ORIGINAL_FOLDER, + ) + if OPTIMIZE: + create_data_raw_folder( + OPTIMIZE_FOLDER, + ) + if REGRESS: + create_data_raw_folder( + REGRESS_FOLDER, + ) + + ### compare with and without optimization + compare_normal_binomial( + compare_original=COMPARE_ORIGINAL, optimize=OPTIMIZE, regress=False + ) + + ### compare with regression (must compare original and with optimization first) + if REGRESS: + # prepare the pre-processing and post-processing of the data for the regression + loaded_variables_opt = load_variables( + name_list=[ + "p_list", + "n_list", + "mean_shift_opt_list", + "std_scale_opt_list", + "diff_opt_list", + ], + path=OPTIMIZE_FOLDER, + ) + loaded_variables = load_variables( + name_list=[ + "diff_list", + ], + path=COMPARE_ORIGINAL_FOLDER, + ) + p_arr = np.array(loaded_variables_opt["p_list"]) + n_arr = np.array(loaded_variables_opt["n_list"]) + mean_shift_opt_arr = np.array(loaded_variables_opt["mean_shift_opt_list"]) + std_scale_opt_arr = np.array(loaded_variables_opt["std_scale_opt_list"]) + diff_opt_arr = np.array(loaded_variables_opt["diff_opt_list"]) + diff_arr = np.array(loaded_variables["diff_list"]) + + ### TODO: create global variables which can be used for 
pre-processing and post-processing for the regression + + ### get how much the difference improved (decreased) by the optimized mean shift + ### and std scale values + diff_improvement_arr = -np.clip( + np.array(diff_opt_list) - np.array(diff_list), None, 0 + ) + diff_improvement_arr = diff_improvement_arr / np.max(diff_improvement_arr) + + ### scale the mean shift and std scale by the difference improvement + ### --> only keep the transformations which improve the difference + ### if there is no improvement mean shift and std scale are closer to 0 and 1 + mean_shift_opt_arr = ( + diff_improvement_arr * np.array(mean_shift_opt_list) + + (1 - diff_improvement_arr) * 0 + ) + std_scale_opt_arr = ( + diff_improvement_arr * np.array(std_scale_opt_list) + + (1 - diff_improvement_arr) * 1 + ) + ### the mean shift is mostly 0 and at some positions negative, multiply it by -1 + mean_shift_opt_arr = -mean_shift_opt_arr + + # Normalize the data used for regression + mean_shift_opt_max = np.max(mean_shift_opt_arr) + mean_shift_opt_min = np.min(mean_shift_opt_arr) + std_scale_opt_max = np.max(std_scale_opt_arr) + std_scale_opt_min = np.min(std_scale_opt_arr) + p_max = np.max(p_arr) + p_min = np.min(p_arr) + n_max = np.max(n_arr) + n_min = np.min(n_arr) + + mean_shift_opt_arr_for_regress = ( + mean_shift_opt_arr - np.min(mean_shift_opt_arr) + ) / (np.max(mean_shift_opt_arr) - np.min(mean_shift_opt_arr)) + y = (y - y_min) / (y_max - y_min) + z = (z - z_min) / (z_max - z_min) + + get_regression_parameters() + compare_normal_binomial(compare_original=False, optimize=False, regress=True) + + ### plot the results if PLOT_COMPARE_ORIGINAL: plot_compare_original() if PLOT_OPTIMIZE: - plot_optimize() + plot_with_transformation(mode="opt") + + if PLOT_REGRESS: + plot_with_transformation(mode="regress") quit() diff --git a/src/CompNeuroPy/examples/model_configurator/test2_deap_opt_regress.py b/src/CompNeuroPy/examples/model_configurator/test2_deap_opt_regress.py index ac589ce..67f95f3 
100644 --- a/src/CompNeuroPy/examples/model_configurator/test2_deap_opt_regress.py +++ b/src/CompNeuroPy/examples/model_configurator/test2_deap_opt_regress.py @@ -1,91 +1,91 @@ from CompNeuroPy import DeapCma, load_variables, save_variables import numpy as np -from test2 import deap_opt_regress_path, curve_fit_func -import sys - -# Load the variables -variables = load_variables( - name_list=[ - "x", - "y", - "z", - ], - path=deap_opt_regress_path, +from test2 import ( + regression_func, + OPTIMIZE_FOLDER, + N_PARAMS_REGRESS, + REGRESS_FOLDER, ) -x = variables["x"] -y = variables["y"] -z = variables["z"] - - -param_names = [ - "g0", - "g1", - "g2", - "g3", - "g4", - "g5", - "g6", - "g7", - "g8", - "g9", - "g10", - "g11", - "g12", - "g13", - "g14", - "p0", - # "p1", - # "p2", - # "p3", - # "p4", - # "p5", - # "p6", - # "p7", - # "p8", - # "p9", - # "p10", - # "p11", - # "p12", - # "p13", - # "p14", - # "p15", - # "p16", - # "p17", - # "p18", - # "p19", - # "p20", -] +import sys -def curve_fit_evaluate_function(population): +def regression_evaluate_function(population, X, z): loss_list = [] ### the population is a list of individuals which are lists of parameters for individual in population: - loss_of_individual = curve_fit_objective_function(individual) + loss_of_individual = regression_objective_function( + individual=individual, X=X, z=z + ) loss_list.append((loss_of_individual,)) return loss_list -def curve_fit_objective_function(individual): - is_data = curve_fit_func((x, y), *individual) +def regression_objective_function(individual, X, z): + is_data = regression_func(X, *individual) target_data = z return np.sum((is_data - target_data) ** 2) -deap_cma = DeapCma( - lower=np.array([-1] * len(param_names)), - upper=np.array([1] * len(param_names)), - evaluate_function=curve_fit_evaluate_function, - param_names=param_names, - hard_bounds=False, - display_progress_bar=False, -) -deap_cma_result = deap_cma.run(max_evals=2000) -popt = [deap_cma_result[param_name] for 
param_name in param_names] +if __name__ == "__main__": + # Load the variables for regression from the previous optimization + loaded_variables = load_variables( + name_list=[ + "p_list", + "n_list", + "mean_shift_opt_list", + "std_scale_opt_list", + ], + path=OPTIMIZE_FOLDER, + ) + p_list = loaded_variables["p_list"] + n_list = loaded_variables["n_list"] + mean_shift_opt_list = loaded_variables["mean_shift_opt_list"] + std_scale_opt_list = loaded_variables["std_scale_opt_list"] -# Save the variables -save_variables( - name_list=[f"popt_{sys.argv[1]}", f"best_fitness_{sys.argv[1]}"], - variable_list=[popt, deap_cma_result["best_fitness"]], - path=deap_opt_regress_path, -) + ### optimize regression of mean_shift + # TODO do some transofrmations + deap_cma = DeapCma( + lower=np.array([-1] * len(N_PARAMS_REGRESS)), + upper=np.array([1] * len(N_PARAMS_REGRESS)), + evaluate_function=lambda population: regression_evaluate_function( + population=population, X=(n_list, p_list), z=mean_shift_opt_list + ), + hard_bounds=False, + display_progress_bar=False, + ) + deap_cma_result = deap_cma.run(max_evals=2000) + popt = [deap_cma_result[param_name] for param_name in deap_cma.param_names] + + # Save the variables + save_variables( + name_list=[ + f"popt_mean_shift_{sys.argv[1]}", + f"best_fitness_mean_shift_{sys.argv[1]}", + ], + variable_list=[popt, deap_cma_result["best_fitness"]], + path=REGRESS_FOLDER, + ) + + ### optimize regression of std_scale + # TODO do some transofrmations + deap_cma = DeapCma( + lower=np.array([-1] * len(N_PARAMS_REGRESS)), + upper=np.array([1] * len(N_PARAMS_REGRESS)), + evaluate_function=lambda population: regression_evaluate_function( + population=population, X=(n_list, p_list), z=std_scale_opt_list + ), + hard_bounds=False, + display_progress_bar=False, + ) + deap_cma_result = deap_cma.run(max_evals=2000) + popt = [deap_cma_result[param_name] for param_name in deap_cma.param_names] + + # Save the variables + save_variables( + name_list=[ + 
f"popt_std_scale_{sys.argv[1]}", + f"best_fitness_std_scale_{sys.argv[1]}", + ], + variable_list=[popt, deap_cma_result["best_fitness"]], + path=REGRESS_FOLDER, + ) diff --git a/src/CompNeuroPy/examples/model_configurator/test2_deap_opt_transform.py b/src/CompNeuroPy/examples/model_configurator/test2_deap_opt_transform.py index 9ae7104..ebdf1fe 100644 --- a/src/CompNeuroPy/examples/model_configurator/test2_deap_opt_transform.py +++ b/src/CompNeuroPy/examples/model_configurator/test2_deap_opt_transform.py @@ -3,17 +3,6 @@ import numpy as np import sys -# Load the variables -variables = load_variables( - name_list=[ - "n", - "p", - ], - path=OPTIMIZE_FOLDER, -) -n = variables["n"] -p = variables["p"] - def evaluate_function(population): loss_list = [] @@ -26,40 +15,52 @@ def evaluate_function(population): return loss_list -### bounds for optimized parameters -shift_mean_bounds = [-1, 1] -scale_std_bounds = [0.5, 2] -lb = np.array([shift_mean_bounds[0], scale_std_bounds[0]]) -ub = np.array([shift_mean_bounds[1], scale_std_bounds[1]]) -p0 = np.random.default_rng().uniform( - low=lb + 0.25 * (ub - lb), high=ub - 0.25 * (ub - lb), size=2 -) +if __name__ == "__main__": + # Load the variables + variables = load_variables( + name_list=[ + "n", + "p", + ], + path=OPTIMIZE_FOLDER, + ) + n = variables["n"] + p = variables["p"] + + ### bounds for optimized parameters + shift_mean_bounds = [-1, 1] + scale_std_bounds = [0.5, 2] + lb = np.array([shift_mean_bounds[0], scale_std_bounds[0]]) + ub = np.array([shift_mean_bounds[1], scale_std_bounds[1]]) + p0 = np.random.default_rng().uniform( + low=lb + 0.25 * (ub - lb), high=ub - 0.25 * (ub - lb), size=2 + ) -### create an instance of the DeapCma class -deap_cma = DeapCma( - lower=lb, - upper=ub, - evaluate_function=evaluate_function, - param_names=["mean_shift", "std_scale"], - hard_bounds=True, - display_progress_bar=False, -) + ### create an instance of the DeapCma class + deap_cma = DeapCma( + lower=lb, + upper=ub, + 
evaluate_function=evaluate_function, + param_names=["mean_shift", "std_scale"], + hard_bounds=True, + display_progress_bar=False, + ) -### run the optimization -deap_cma_result = deap_cma.run(max_evals=1000) + ### run the optimization + deap_cma_result = deap_cma.run(max_evals=1000) -### get the optimized parameters and best error -mean_shift_opt = deap_cma_result["mean_shift"] -std_scale_opt = deap_cma_result["std_scale"] -error_opt = deap_cma_result["best_fitness"] + ### get the optimized parameters and best error + mean_shift_opt = deap_cma_result["mean_shift"] + std_scale_opt = deap_cma_result["std_scale"] + error_opt = deap_cma_result["best_fitness"] -# Save the variables -save_variables( - name_list=[ - f"mean_shift_opt_{sys.argv[1]}", - f"std_scale_opt_{sys.argv[1]}", - f"error_opt_{sys.argv[1]}", - ], - variable_list=[mean_shift_opt, std_scale_opt, error_opt], - path=OPTIMIZE_FOLDER, -) + # Save the variables + save_variables( + name_list=[ + f"mean_shift_opt_{sys.argv[1]}", + f"std_scale_opt_{sys.argv[1]}", + f"error_opt_{sys.argv[1]}", + ], + variable_list=[mean_shift_opt, std_scale_opt, error_opt], + path=OPTIMIZE_FOLDER, + ) From b3fc2171effa80b75403f31c9069eafd1fffeed6 Mon Sep 17 00:00:00 2001 From: olimaol Date: Wed, 14 Aug 2024 16:26:28 +0200 Subject: [PATCH 17/21] model_configurator: implemented regression --- .../examples/model_configurator/test2.py | 206 ++++++++++++------ .../test2_deap_opt_regress.py | 58 +++-- 2 files changed, 178 insertions(+), 86 deletions(-) diff --git a/src/CompNeuroPy/examples/model_configurator/test2.py b/src/CompNeuroPy/examples/model_configurator/test2.py index 582b9c4..c7d793d 100644 --- a/src/CompNeuroPy/examples/model_configurator/test2.py +++ b/src/CompNeuroPy/examples/model_configurator/test2.py @@ -147,15 +147,18 @@ def log_normal_1d(x, amp, mean, sig): def regression_func( X, - *args, + denormalize: None | str, + args: list, ): """ A 2D regression function. 
Args: X (array): - The x (X[0]) and y (X[1]) coordinates. - *args: + The x (X[0]) and y (X[1]) coordinates. Needs to be normalized. + denormalize (None | str): + The variable name to denormalize the calculated values. + args (list): The parameters of the regression function. Returns: @@ -165,7 +168,7 @@ def regression_func( x, y = X ### 2D polynomial with certain degree - return np.clip( + ret = np.clip( args[0] + gauss_1d(x, amp=args[1], mean=args[2], sig=args[3]) + gauss_1d(y, amp=args[4], mean=args[5], sig=args[6]) @@ -176,6 +179,13 @@ def regression_func( 1e20, ) + ### denormalize the calculated values, during regression optimization the target + ### values are normalized + if denormalize is not None: + ret = post_process_for_regression(var_value=ret, var_name=denormalize) + + return ret + def plot_2d_curve_fit_regression( x, y, z, sample_weight=None, vmin=None, vmax=None, grid_size=100 @@ -550,9 +560,18 @@ def difference_binomial_normal_regress(n, p): ], path=REGRESS_FOLDER, ) - - mean_shift_regress = regression_func((n, p), *loaded_variables["popt_mean_shift"]) - std_scale_regress = regression_func((n, p), *loaded_variables["popt_std_scale"]) + ### regression was optimized with normalized data thus need to normalize the data + ### here too and after regression denormalize the results + n = preprocess_for_regress(var_value=n, var_name="n") + p = preprocess_for_regress(var_value=p, var_name="p") + mean_shift_regress = regression_func( + X=(n, p), denormalize="mean_shift", args=loaded_variables["popt_mean_shift"] + ) + std_scale_regress = regression_func( + X=(n, p), denormalize="std_scale", args=loaded_variables["popt_std_scale"] + ) + n = post_process_for_regression(var_value=n, var_name="n") + p = post_process_for_regression(var_value=p, var_name="p") error_regress = difference_binomial_normal( mean_shift=mean_shift_regress, std_scale=std_scale_regress, n=n, p=p ) @@ -728,6 +747,8 @@ def compare_normal_binomial(compare_original, optimize, regress): Compare 
the difference between the binomial and normal distribution for various n and p values with and without optimization or regression. """ + if not compare_original and not optimize and not regress: + return ### create the n/p pairs n_arr = logarithmic_arange(*N_VALUES).astype(int) @@ -773,6 +794,10 @@ def compare_normal_binomial(compare_original, optimize, regress): std_scale_regress_list.append(std_scale_regress) diff_regress_list.append(error_regress) + print(mean_shift_regress_list) + print(std_scale_regress_list) + print(diff_regress_list) + ### save variables if compare_original: save_variables( @@ -899,64 +924,94 @@ def get_regression_parameters(): ### global paramters COMPARE_ORIGINAL = True OPTIMIZE = True -REGRESS = False +REGRESS = True PLOT_COMPARE_ORIGINAL = True PLOT_OPTIMIZE = True -PLOT_REGRESS = False +PLOT_REGRESS = True COMPARE_ORIGINAL_FOLDER = "test2_data_compare_original" OPTIMIZE_FOLDER = "test2_data_optimize" REGRESS_FOLDER = "test2_data_regress" PLOTS_FOLDER = "test2_plots" S = 10000 SEED = 1234 -N_VALUES = [10, 1000, 3] # 20] -P_VALUES = [0.001, 0.1, 3] # 10] -N_JOBS = 2 -N_RUNS_OPT_PER_PAIR = 1 # * N_JOBS -N_RUNS_REGRESS = 1 +N_VALUES = [10, 1000, 20] +P_VALUES = [0.001, 0.1, 10] +N_JOBS = 15 +N_RUNS_OPT_PER_PAIR = 100 * N_JOBS +N_RUNS_REGRESS = 15 N_PARAMS_REGRESS = 16 -def preprocess_p_n_for_regress(n, p): +def preprocess_for_regress(var_value, var_name): """ - Normalize n and p values before regression. + Normalize variable values before regression. Args: - n (int or array): - The number of trials of the binomial distribution. - p (float or array): - The probability of success of the binomial distribution. + var_value (float or array): + The original value(s) of the variable. + var_name (str): + The name of the variable. Returns: - n_pre_processed (float or array): - The normalized n value(s) which can be used for regression. - p_pre_processed (float or array): - The normalized p value(s) which can be used for regression. 
+ var_value_processed (float or array): + The normalized value(s) of the variable ready for regression. """ - n_pre_processed = (n - N_MIN) / (N_MAX - N_MIN) - p_pre_processed = (p - P_MIN) / (P_MAX - P_MIN) - return n_pre_processed, p_pre_processed + ### load the dicts for normalization + loaded_variables = load_variables( + name_list=[ + "min_dict", + "max_dict", + ], + path=REGRESS_FOLDER, + ) + min_dict = loaded_variables["min_dict"] + max_dict = loaded_variables["max_dict"] + ### do calculations + var_value_processed = (var_value - min_dict[var_name]) / ( + max_dict[var_name] - min_dict[var_name] + ) + if var_name == "mean_shift": + var_value_processed = -var_value_processed + return var_value_processed -def post_process_p_n_for_regression(n_pre_processed, p_pre_processed): + +def post_process_for_regression(var_value, var_name): """ - Post-process the n and p values after regression. + Denormalize variable values after regression. Args: - n_pre_processed (float or array): - The normalized n value(s) which were used for regression. - p_pre_processed (float or array): - The normalized p value(s) which were used for regression. + var_value (float or array): + The normalized value(s) of the variable. + var_name (str): + The name of the variable. Returns: - n (int or array): - The original n value(s). - p (float or array): - The original p value(s). + var_value_processed (float or array): + The original value(s) of the variable after denormalization. 
""" - n = n_pre_processed * (N_MAX - N_MIN) + N_MIN - p = p_pre_processed * (P_MAX - P_MIN) + P_MIN - return n, p + ### load the dicts for normalization + loaded_variables = load_variables( + name_list=[ + "min_dict", + "max_dict", + ], + path=REGRESS_FOLDER, + ) + min_dict = loaded_variables["min_dict"] + max_dict = loaded_variables["max_dict"] + + ### do calculations + if var_name == "mean_shift": + var_value = -var_value + + var_value_processed = ( + var_value * (max_dict[var_name] - min_dict[var_name]) + min_dict[var_name] + ) + + if var_name == "n": + var_value_processed = int(np.round(var_value_processed)) + return var_value_processed if __name__ == "__main__": @@ -989,7 +1044,7 @@ def post_process_p_n_for_regression(n_pre_processed, p_pre_processed): ### compare with regression (must compare original and with optimization first) if REGRESS: - # prepare the pre-processing and post-processing of the data for the regression + ### prepare the pre-processing and post-processing of the data for the regression loaded_variables_opt = load_variables( name_list=[ "p_list", @@ -1013,46 +1068,61 @@ def post_process_p_n_for_regression(n_pre_processed, p_pre_processed): diff_opt_arr = np.array(loaded_variables_opt["diff_opt_list"]) diff_arr = np.array(loaded_variables["diff_list"]) - ### TODO: create global variables which can be used for pre-processing and post-processing for the regression - + ### Transform mean_shift_opt and std_scale_opt for regression ### get how much the difference improved (decreased) by the optimized mean shift ### and std scale values - diff_improvement_arr = -np.clip( - np.array(diff_opt_list) - np.array(diff_list), None, 0 - ) + diff_improvement_arr = -np.clip(diff_opt_arr - diff_arr, None, 0) diff_improvement_arr = diff_improvement_arr / np.max(diff_improvement_arr) ### scale the mean shift and std scale by the difference improvement ### --> only keep the transformations which improve the difference ### if there is no improvement mean shift 
and std scale are closer to 0 and 1 mean_shift_opt_arr = ( - diff_improvement_arr * np.array(mean_shift_opt_list) - + (1 - diff_improvement_arr) * 0 + diff_improvement_arr * mean_shift_opt_arr + (1 - diff_improvement_arr) * 0 ) std_scale_opt_arr = ( - diff_improvement_arr * np.array(std_scale_opt_list) - + (1 - diff_improvement_arr) * 1 + diff_improvement_arr * std_scale_opt_arr + (1 - diff_improvement_arr) * 1 ) - ### the mean shift is mostly 0 and at some positions negative, multiply it by -1 - mean_shift_opt_arr = -mean_shift_opt_arr - - # Normalize the data used for regression - mean_shift_opt_max = np.max(mean_shift_opt_arr) - mean_shift_opt_min = np.min(mean_shift_opt_arr) - std_scale_opt_max = np.max(std_scale_opt_arr) - std_scale_opt_min = np.min(std_scale_opt_arr) - p_max = np.max(p_arr) - p_min = np.min(p_arr) - n_max = np.max(n_arr) - n_min = np.min(n_arr) - - mean_shift_opt_arr_for_regress = ( - mean_shift_opt_arr - np.min(mean_shift_opt_arr) - ) / (np.max(mean_shift_opt_arr) - np.min(mean_shift_opt_arr)) - y = (y - y_min) / (y_max - y_min) - z = (z - z_min) / (z_max - z_min) + ### save the trasnformed mean shift and std scale values for regression + save_variables( + variable_list=[ + mean_shift_opt_arr, + std_scale_opt_arr, + ], + name_list=[ + "mean_shift_opt_for_regress_arr", + "std_scale_opt_for_regress_arr", + ], + path=REGRESS_FOLDER, + ) + + ### create global variables which can be used for pre-processing and + ### post-processing for the regression + min_dict = {} + max_dict = {} + min_dict["n"] = np.min(n_arr) + min_dict["p"] = np.min(p_arr) + max_dict["n"] = np.max(n_arr) + max_dict["p"] = np.max(p_arr) + min_dict["mean_shift"] = np.min(mean_shift_opt_arr) + min_dict["std_scale"] = np.min(std_scale_opt_arr) + max_dict["mean_shift"] = np.max(mean_shift_opt_arr) + max_dict["std_scale"] = np.max(std_scale_opt_arr) + ### save the dicts + save_variables( + variable_list=[ + min_dict, + max_dict, + ], + name_list=[ + "min_dict", + "max_dict", + ], 
+ path=REGRESS_FOLDER, + ) + ### do the regression get_regression_parameters() compare_normal_binomial(compare_original=False, optimize=False, regress=True) diff --git a/src/CompNeuroPy/examples/model_configurator/test2_deap_opt_regress.py b/src/CompNeuroPy/examples/model_configurator/test2_deap_opt_regress.py index 67f95f3..d8091ef 100644 --- a/src/CompNeuroPy/examples/model_configurator/test2_deap_opt_regress.py +++ b/src/CompNeuroPy/examples/model_configurator/test2_deap_opt_regress.py @@ -5,6 +5,7 @@ OPTIMIZE_FOLDER, N_PARAMS_REGRESS, REGRESS_FOLDER, + preprocess_for_regress, ) import sys @@ -21,40 +22,59 @@ def regression_evaluate_function(population, X, z): def regression_objective_function(individual, X, z): - is_data = regression_func(X, *individual) + is_data = regression_func(X=X, denormalize=None, args=individual) target_data = z return np.sum((is_data - target_data) ** 2) if __name__ == "__main__": - # Load the variables for regression from the previous optimization + ### Load the p and n variables for regression from the previous optimization loaded_variables = load_variables( name_list=[ "p_list", "n_list", - "mean_shift_opt_list", - "std_scale_opt_list", ], path=OPTIMIZE_FOLDER, ) - p_list = loaded_variables["p_list"] - n_list = loaded_variables["n_list"] - mean_shift_opt_list = loaded_variables["mean_shift_opt_list"] - std_scale_opt_list = loaded_variables["std_scale_opt_list"] + p_arr = np.array(loaded_variables["p_list"]) + n_arr = np.array(loaded_variables["n_list"]) + ### Load the mean_shift and std_scale variables for regression prepared before + ### regression + loaded_variables = load_variables( + name_list=[ + "mean_shift_opt_for_regress_arr", + "std_scale_opt_for_regress_arr", + ], + path=REGRESS_FOLDER, + ) + mean_shift_opt_arr = loaded_variables["mean_shift_opt_for_regress_arr"] + std_scale_opt_arr = loaded_variables["std_scale_opt_for_regress_arr"] + + ### normalize the data before regression + n_arr = 
preprocess_for_regress(var_value=n_arr, var_name="n") + p_arr = preprocess_for_regress(var_value=p_arr, var_name="p") + mean_shift_opt_arr = preprocess_for_regress( + var_value=mean_shift_opt_arr, var_name="mean_shift" + ) + std_scale_opt_arr = preprocess_for_regress( + var_value=std_scale_opt_arr, var_name="std_scale" + ) ### optimize regression of mean_shift - # TODO do some transofrmations deap_cma = DeapCma( - lower=np.array([-1] * len(N_PARAMS_REGRESS)), - upper=np.array([1] * len(N_PARAMS_REGRESS)), + lower=np.array([-1] * N_PARAMS_REGRESS), + upper=np.array([1] * N_PARAMS_REGRESS), evaluate_function=lambda population: regression_evaluate_function( - population=population, X=(n_list, p_list), z=mean_shift_opt_list + population=population, X=(n_arr, p_arr), z=mean_shift_opt_arr ), hard_bounds=False, display_progress_bar=False, ) deap_cma_result = deap_cma.run(max_evals=2000) - popt = [deap_cma_result[param_name] for param_name in deap_cma.param_names] + popt = [deap_cma_result[f"param{param_id}"] for param_id in range(N_PARAMS_REGRESS)] + print( + f"finished regression of mean_shift, best fitness: {deap_cma_result['best_fitness']}" + ) # Save the variables save_variables( @@ -67,18 +87,20 @@ def regression_objective_function(individual, X, z): ) ### optimize regression of std_scale - # TODO do some transofrmations deap_cma = DeapCma( - lower=np.array([-1] * len(N_PARAMS_REGRESS)), - upper=np.array([1] * len(N_PARAMS_REGRESS)), + lower=np.array([-1] * N_PARAMS_REGRESS), + upper=np.array([1] * N_PARAMS_REGRESS), evaluate_function=lambda population: regression_evaluate_function( - population=population, X=(n_list, p_list), z=std_scale_opt_list + population=population, X=(n_arr, p_arr), z=std_scale_opt_arr ), hard_bounds=False, display_progress_bar=False, ) deap_cma_result = deap_cma.run(max_evals=2000) - popt = [deap_cma_result[param_name] for param_name in deap_cma.param_names] + popt = [deap_cma_result[f"param{param_id}"] for param_id in 
range(N_PARAMS_REGRESS)] + print( + f"finished regression of std_scale, best fitness: {deap_cma_result['best_fitness']}" + ) # Save the variables save_variables( From 25102feb4423f07d492022af7dbdf5b6155dad85 Mon Sep 17 00:00:00 2001 From: olimaol Date: Thu, 15 Aug 2024 15:34:00 +0200 Subject: [PATCH 18/21] model_configurator: adjusted regression of mean_shift and std_scale --- .../examples/model_configurator/test2.py | 1061 ++++------------- 1 file changed, 214 insertions(+), 847 deletions(-) diff --git a/src/CompNeuroPy/examples/model_configurator/test2.py b/src/CompNeuroPy/examples/model_configurator/test2.py index c7d793d..526399c 100644 --- a/src/CompNeuroPy/examples/model_configurator/test2.py +++ b/src/CompNeuroPy/examples/model_configurator/test2.py @@ -22,118 +22,6 @@ import itertools -def mean_shift_regression(n, p): - x0 = n - x1 = p - - p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13, p14 = ( - 1.176321040012159, - -6.429595249324671, - -6.804798904871692, - 21.915210556787986, - 61.64443550309026, - 27.70993009301549, - -27.95654152965883, - 44.87058003864243, - -63.817886336670654, - -40.65337691430986, - 12.246014608429185, - -101.39842049962134, - 55.18658444426345, - 3.6975636800782237, - 19.531732368721627, - ) - - x0_min = 10 - x0_max = 1000 - x0_norm = (x0 - x0_min) / (x0_max - x0_min) - x1_min = 0.001 - x1_max = 0.1 - x1_norm = (x1 - x1_min) / (x1_max - x1_min) - z_min = -0.34886991656269 - z_max = 0.03020699313695153 - - z_norm = np.clip( - ( - p0 - + p1 * x0_norm - + p2 * x1_norm - + p3 * x0_norm**2 - + p4 * x0_norm * x1_norm - + p5 * x1_norm**2 - + p6 * x0_norm**3 - + p7 * x0_norm**2 * x1_norm - + p8 * x0_norm * x1_norm**2 - + p9 * x1_norm**3 - + p10 * x0_norm**4 - + p11 * x0_norm**3 * x1_norm - + p12 * x0_norm**2 * x1_norm**2 - + p13 * x0_norm * x1_norm**3 - + p14 * x1_norm**4 - ) - ** 3, - 0, - 1, - ) - return z_norm * (z_max - z_min) + z_min - - -def std_scale_regression(n, p): - x0 = n - x1 = p - - p0, p1, p2, p3, p4, p5, p6, 
p7, p8, p9, p10, p11, p12, p13, p14 = ( - 0.320502224444166, - 6.699870528297452, - 6.935907486422781, - -22.52956657500412, - 108.421673456727, - -30.088834608939973, - 29.237548128991747, - -1172.785284855411, - -1027.3975831574996, - 58.67858836080429, - -14.043681478309374, - 1174.4693590015047, - -3413.5816408185233, - 1307.2105954815152, - -44.03158341787127, - ) - - x0_min = 10 - x0_max = 1000 - x0_norm = (x0 - x0_min) / (x0_max - x0_min) - x1_min = 0.001 - x1_max = 0.1 - x1_norm = (x1 - x1_min) / (x1_max - x1_min) - z_min = 0.9710804893692282 - z_max = 1.6265889931274558 - - z_norm = np.clip( - ( - p0 - + p1 * x0_norm - + p2 * x1_norm - + p3 * x0_norm**2 - + p4 * x0_norm * x1_norm - + p5 * x1_norm**2 - + p6 * x0_norm**3 - + p7 * x0_norm**2 * x1_norm - + p8 * x0_norm * x1_norm**2 - + p9 * x1_norm**3 - + p10 * x0_norm**4 - + p11 * x0_norm**3 * x1_norm - + p12 * x0_norm**2 * x1_norm**2 - + p13 * x0_norm * x1_norm**3 - + p14 * x1_norm**4 - ) - ** 3, - 0, - 1, - ) - return z_norm * (z_max - z_min) + z_min - - def gauss_1d(x, amp, mean, sig): return amp * np.exp(-((x - mean) ** 2) / (2 * sig**2)) @@ -142,9 +30,6 @@ def log_normal_1d(x, amp, mean, sig): return (amp / x) * np.exp(-((np.log(x) - mean) ** 2) / (2 * sig**2)) -deap_opt_regress_path = "test2_deap_opt_regress/" - - def regression_func( X, denormalize: None | str, @@ -187,188 +72,8 @@ def regression_func( return ret -def plot_2d_curve_fit_regression( - x, y, z, sample_weight=None, vmin=None, vmax=None, grid_size=100 -): - """ - Plots a 2D color-coded image of the data with curve_fit regression and prints the regression equation. 
- - Parameters: - - x: list or array of x coordinates - - y: list or array of y coordinates - - z: list or array of z values corresponding to the (x, y) coordinates - - grid_size: size of the grid for plotting (default: 100) - """ - # Check if sample_weight is provided and does not contain zeros - if sample_weight is not None and 0 in sample_weight: - raise ValueError("Sample weight cannot contain zeros.") - - # Normalize x, y, and z and keep the transformation for later - x_max = np.max(x) - x_min = np.min(x) - y_max = np.max(y) - y_min = np.min(y) - z_max = np.max(z) - z_min = np.min(z) - x = (x - x_min) / (x_max - x_min) - y = (y - y_min) / (y_max - y_min) - z = (z - z_min) / (z_max - z_min) - - # Fit the curve_fit regression model - ### do opt with deap cma in other script - save_variables( - name_list=["x", "y", "z"], variable_list=[x, y, z], path=deap_opt_regress_path - ) - n_jobs = 15 - n_runs = 100 * n_jobs - args_list = [[f"{parallel_id}"] for parallel_id in range(n_runs)] - run_script_parallel( - script_path="test2_deap_opt_regress.py", - n_jobs=n_jobs, - args_list=args_list, - ) - ### get best parameters - best_fitness = 1e6 - best_parallel_id = 0 - for parallel_id in range(n_runs): - loaded_variables = load_variables( - name_list=[f"best_fitness_{parallel_id}"], path=deap_opt_regress_path - ) - if loaded_variables[f"best_fitness_{parallel_id}"] < best_fitness: - best_fitness = loaded_variables[f"best_fitness_{parallel_id}"] - best_parallel_id = parallel_id - loaded_variables = load_variables( - name_list=[f"popt_{best_parallel_id}"], path=deap_opt_regress_path - ) - popt = loaded_variables[f"popt_{best_parallel_id}"] - - # Create grid for plotting - xi = np.linspace(min(x), max(x), grid_size) - yi = np.linspace(min(y), max(y), grid_size) - xi, yi = np.meshgrid(xi, yi) - zi = curve_fit_func((xi, yi), *popt) - - # Unnormalize the data - xi = xi * (x_max - x_min) + x_min - yi = yi * (y_max - y_min) + y_min - zi = zi * (z_max - z_min) + z_min - x = x * 
(x_max - x_min) + x_min - y = y * (y_max - y_min) + y_min - z = z * (z_max - z_min) + z_min - - # Plot the regression surface - if vmin is None: - vmin = np.min(z) - if vmax is None: - vmax = np.max(z) - plt.contourf(xi, yi, zi, levels=100, cmap="viridis", vmin=vmin, vmax=vmax) - plt.scatter( - x, - y, - c=z, - cmap="viridis", - vmin=vmin, - vmax=vmax, - edgecolor="k", - marker="o", - s=( - 40 * np.array(sample_weight) / np.max(sample_weight) - if sample_weight - else None - ), - ) - - ### print the regression equation and data normalization - print(f"best_fitness: {best_fitness}") - print(f"popt: {popt}") - print(f"x_max: {x_max}, x_min: {x_min}") - print(f"y_max: {y_max}, y_min: {y_min}") - print(f"z_max: {z_max}, z_min: {z_min}") - - -def plot_2d_regression_image( - x, y, z, sample_weight=None, vmin=None, vmax=None, degree=2, grid_size=100 -): - """ - Plots a 2D color-coded image of the data with polynomial regression and plots the regression equation. - - Parameters: - - x: list or array of x coordinates - - y: list or array of y coordinates - - z: list or array of z values corresponding to the (x, y) coordinates - - degree: degree of the polynomial regression (default: 2) - - grid_size: size of the grid for plotting (default: 100) - """ - # Normalize x and y and keep the transformation for later - x_mean = np.mean(x) - x_std = np.std(x) - x_max = np.max(x) - x_min = np.min(x) - y_mean = np.mean(y) - y_std = np.std(y) - y_max = np.max(y) - y_min = np.min(y) - x = (x - x_min) / (x_max - x_min) - y = (y - y_min) / (y_max - y_min) - - # Prepare the data for polynomial regression - X = np.column_stack((x, y)) - # Create a polynomial regression pipeline - polynomial_model = make_pipeline(PolynomialFeatures(degree), LinearRegression()) - - # Fit the model - polynomial_model.fit( - X, - z, - linearregression__sample_weight=( - sample_weight if sample_weight is not None else None - ), - ) - - # Predict new values for the surface plot - xi = np.linspace(min(x), max(x), 
grid_size) - yi = np.linspace(min(y), max(y), grid_size) - xi, yi = np.meshgrid(xi, yi) - Xi = np.column_stack((xi.ravel(), yi.ravel())) - zi = polynomial_model.predict(Xi) - zi = zi.reshape(xi.shape) - - # Unnormalize the x,y values - xi = xi * (x_max - x_min) + x_min - yi = yi * (y_max - y_min) + y_min - x = x * (x_max - x_min) + x_min - y = y * (y_max - y_min) + y_min - - # Plot the regression surface - if vmin is None: - vmin = np.min(z) - if vmax is None: - vmax = np.max(z) - plt.contourf(xi, yi, zi, levels=100, cmap="viridis", vmin=vmin, vmax=vmax) - - # Plot the original data points, scaled by the sample weight - plt.scatter( - x, - y, - c=z, - cmap="viridis", - vmin=vmin, - vmax=vmax, - edgecolor="k", - marker="o", - s=( - 40 * np.array(sample_weight) / np.max(sample_weight) - if sample_weight - else None - ), - ) - - # Print the regression equation - # TODO - - def plot_2d_interpolated_image( - x, y, z, vmin=None, vmax=None, grid_size=100, method="linear" + x, y, z, vmin=None, vmax=None, grid_size=100, method="linear", cmap="viridis" ): """ Plots a 2D color-coded image of "D grid data. 
@@ -399,19 +104,17 @@ def plot_2d_interpolated_image( # Perform the interpolation zi = griddata((x, y), z, (xi, yi), method=method) - print(f"max interpolated: {np.max(zi)}") + # print(f"max interpolated: {np.max(zi)}") # Plot the interpolated data if vmin is None: vmin = np.min(z) if vmax is None: vmax = np.max(z) - plt.contourf(xi, yi, zi, levels=100, cmap="viridis", vmin=vmin, vmax=vmax) + plt.contourf(xi, yi, zi, levels=100, cmap=cmap, vmin=vmin, vmax=vmax) # plot scatter plot of original data - plt.scatter( - x, y, c=z, cmap="viridis", vmin=vmin, vmax=vmax, edgecolor="k", marker="o" - ) + plt.scatter(x, y, c=z, cmap=cmap, vmin=vmin, vmax=vmax, edgecolor="k", marker="o") def generate_samples(n, p, mean_shift=0, std_scale=1): @@ -530,6 +233,56 @@ def difference_binomial_normal_optimize(n, p): return mean_shift_opt, std_scale_opt, error_opt +def difference_binomial_normal_mixed(n, p): + """ + Calculate the difference between samples of a binomial and a normal distribution. + The binomial distribution is generated with parameters n and p. + The normal distribution is generated to best approximate the binomial distribution. + Further the normal distribution is shifted by mean_shift and scaled by std_scale. + Both are obtained either from optimization, regression or self-defined values. + + Args: + n (int): + The number of trials of the binomial distribution. + p (float): + The probability of success of the binomial distribution. + + Returns: + mean_shift_mixed (float): + The shift of the mean of the normal distribution. + std_scale_mixed (float): + The scaling of the standard deviation of the normal distribution. + error_mixed (float): + The difference between the binomial and normal distribution. 
+ """ + ### load parameters for regression + loaded_variables = load_variables( + name_list=[ + "popt_mean_shift", + "popt_std_scale", + ], + path=REGRESS_FOLDER, + ) + ### regression was optimized with normalized data thus need to normalize the data + ### here too and after regression denormalize the results + n = preprocess_for_regress(var_value=n, var_name="n") + p = preprocess_for_regress(var_value=p, var_name="p") + std_scale_regress = regression_func( + X=(n, p), denormalize="std_scale", args=loaded_variables["popt_std_scale"] + ) + n = post_process_for_regression(var_value=n, var_name="n") + p = post_process_for_regression(var_value=p, var_name="p") + + ### for mixed version only use regressed std_scale + mean_shift_mixed = 0 + std_scale_mixed = std_scale_regress + error_mixed = difference_binomial_normal( + mean_shift=mean_shift_mixed, std_scale=std_scale_mixed, n=n, p=p + ) + + return mean_shift_mixed, std_scale_mixed, error_mixed + + def difference_binomial_normal_regress(n, p): """ Calculate the difference between samples of a binomial and a normal distribution. @@ -649,10 +402,11 @@ def plot_with_transformation(mode: str): Args: mode (str): - Either 'opt' or 'regress' + Either 'opt', 'regress', or 'mixed'. 
""" - if mode not in ["opt", "regress"]: - raise ValueError("Mode must be either 'opt' or 'regress'.") + possible_modes = ["opt", "regress", "mixed"] + if mode not in possible_modes: + raise ValueError("Mode must be either 'opt', 'regress', or 'mixed'.") ### load the data loaded_variables = load_variables( @@ -663,34 +417,64 @@ def plot_with_transformation(mode: str): f"std_scale_{mode}_list", f"diff_{mode}_list", ], - path=OPTIMIZE_FOLDER if mode == "opt" else REGRESS_FOLDER, + path=[OPTIMIZE_FOLDER, REGRESS_FOLDER, MIXED_FOLDER][ + possible_modes.index(mode) + ], ) ### plot the data plt.figure(figsize=(6.4 * 2, 4.8 * 2 * 3)) for idx, title, key in [ ( 1, - "Mean Shift optimized" if mode == "opt" else "Mean Shift regressed", + ["Mean Shift optimized", "Mean Shift regressed", "Mean Shift mixed"][ + possible_modes.index(mode) + ], f"mean_shift_{mode}_list", ), ( 2, - "Std Scale optimized" if mode == "opt" else "Std Scale regressed", + ["Std Scale optimized", "Std Scale regressed", "Std Scale mixed"][ + possible_modes.index(mode) + ], f"std_scale_{mode}_list", ), ( 3, - "Difference optimized" if mode == "opt" else "Difference regressed", + ["Difference optimized", "Difference regressed", "Difference mixed"][ + possible_modes.index(mode) + ], f"diff_{mode}_list", ), ]: plt.subplot(3, 1, idx) + vmin = { + f"mean_shift_{mode}_list": -np.max( + np.absolute(np.array(loaded_variables[key])) + ), + f"std_scale_{mode}_list": 1 + - np.max(np.absolute(np.array(loaded_variables[key]) - 1)), + f"diff_{mode}_list": 0, + }[key] + vmax = { + f"mean_shift_{mode}_list": np.max( + np.absolute(np.array(loaded_variables[key])) + ), + f"std_scale_{mode}_list": 1 + + np.max(np.absolute(np.array(loaded_variables[key]) - 1)), + f"diff_{mode}_list": np.max(loaded_variables[key]), + }[key] + cmap = { + f"mean_shift_{mode}_list": "coolwarm", + f"std_scale_{mode}_list": "coolwarm", + f"diff_{mode}_list": "viridis", + }[key] plot_2d_interpolated_image( x=loaded_variables["n_list"], 
y=loaded_variables["p_list"], z=loaded_variables[key], - vmin=0, - vmax=np.max(loaded_variables[key]), + vmin=vmin, + vmax=vmax, + cmap=cmap, ) plt.colorbar() plt.xlabel("n") @@ -700,9 +484,7 @@ def plot_with_transformation(mode: str): create_dir(PLOTS_FOLDER) plt.savefig( ( - f"{PLOTS_FOLDER}/difference_optimized.png" - if mode == "opt" - else f"{PLOTS_FOLDER}/difference_regressed.png" + f"{PLOTS_FOLDER}/difference_{['optimized', 'regressed', 'mixed'][possible_modes.index(mode)]}.png" ), dpi=300, ) @@ -732,6 +514,7 @@ def plot_compare_original(): z=loaded_variables["diff_list"], vmin=0, vmax=np.max(loaded_variables["diff_list"]), + cmap="viridis", ) plt.colorbar() plt.xlabel("n") @@ -742,12 +525,12 @@ def plot_compare_original(): plt.savefig(f"{PLOTS_FOLDER}/difference_original.png", dpi=300) -def compare_normal_binomial(compare_original, optimize, regress): +def compare_normal_binomial(compare_original, optimize, regress, mixed): """ Compare the difference between the binomial and normal distribution for various n and p values with and without optimization or regression. 
""" - if not compare_original and not optimize and not regress: + if not compare_original and not optimize and not regress and not mixed: return ### create the n/p pairs @@ -766,9 +549,12 @@ def compare_normal_binomial(compare_original, optimize, regress): mean_shift_regress_list = [] std_scale_regress_list = [] diff_regress_list = [] + mean_shift_mixed_list = [] + std_scale_mixed_list = [] + diff_mixed_list = [] progress_bar = tqdm( np_pair_arr, - desc=f"Compare {['','original'][int(compare_original)]} {['','optimized'][int(optimize)]} {['','regression'][int(regress)]}", + desc=f"Compare {['','original'][int(compare_original)]} {['','optimized'][int(optimize)]} {['','regression'][int(regress)]} {['','mixed'][int(mixed)]}", ) for n, p in progress_bar: p_list.append(p) @@ -793,10 +579,14 @@ def compare_normal_binomial(compare_original, optimize, regress): mean_shift_regress_list.append(mean_shift_regress) std_scale_regress_list.append(std_scale_regress) diff_regress_list.append(error_regress) - - print(mean_shift_regress_list) - print(std_scale_regress_list) - print(diff_regress_list) + if mixed: + ### get the error with the mixed method + mean_shift_mixed, std_scale_mixed, error_mixed = ( + difference_binomial_normal_mixed(n=n, p=p) + ) + mean_shift_mixed_list.append(mean_shift_mixed) + std_scale_mixed_list.append(std_scale_mixed) + diff_mixed_list.append(error_mixed) ### save variables if compare_original: @@ -849,6 +639,24 @@ def compare_normal_binomial(compare_original, optimize, regress): ], path=REGRESS_FOLDER, ) + if mixed: + save_variables( + variable_list=[ + p_list, + n_list, + mean_shift_mixed_list, + std_scale_mixed_list, + diff_mixed_list, + ], + name_list=[ + "p_list", + "n_list", + "mean_shift_mixed_list", + "std_scale_mixed_list", + "diff_mixed_list", + ], + path=MIXED_FOLDER, + ) def get_regression_parameters(): @@ -916,32 +724,6 @@ def get_regression_parameters(): ) -### TODO I have the problem that for very small p the normal distribution is not a 
good -### approximation of the binomial distribution. -### I think one can shift the mean and scale the standard deviation depending on the p -### and n values. I will try to optimize the shift and scale for each n and p value. - -### global paramters -COMPARE_ORIGINAL = True -OPTIMIZE = True -REGRESS = True -PLOT_COMPARE_ORIGINAL = True -PLOT_OPTIMIZE = True -PLOT_REGRESS = True -COMPARE_ORIGINAL_FOLDER = "test2_data_compare_original" -OPTIMIZE_FOLDER = "test2_data_optimize" -REGRESS_FOLDER = "test2_data_regress" -PLOTS_FOLDER = "test2_plots" -S = 10000 -SEED = 1234 -N_VALUES = [10, 1000, 20] -P_VALUES = [0.001, 0.1, 10] -N_JOBS = 15 -N_RUNS_OPT_PER_PAIR = 100 * N_JOBS -N_RUNS_REGRESS = 15 -N_PARAMS_REGRESS = 16 - - def preprocess_for_regress(var_value, var_name): """ Normalize variable values before regression. @@ -964,15 +746,22 @@ def preprocess_for_regress(var_value, var_name): ], path=REGRESS_FOLDER, ) - min_dict = loaded_variables["min_dict"] - max_dict = loaded_variables["max_dict"] + min_value = loaded_variables["min_dict"][var_name] + max_value = loaded_variables["max_dict"][var_name] + var_value_processed = var_value ### do calculations - var_value_processed = (var_value - min_dict[var_name]) / ( - max_dict[var_name] - min_dict[var_name] - ) if var_name == "mean_shift": + ### mean shift looks like std_scale but it is negative and has some positive + ### coordinates (I think they are not important) + ### make mean shift look like std_scale + ### skip mean shift and also vmin and vmax, then rescale to 0 and 1 var_value_processed = -var_value_processed + var_value_processed = np.clip(var_value_processed, 0, None) + max_value = np.max(-np.array([min_value, max_value])) + min_value = 0 + + var_value_processed = (var_value - min_value) / (max_value - min_value) return var_value_processed @@ -998,30 +787,57 @@ def post_process_for_regression(var_value, var_name): ], path=REGRESS_FOLDER, ) - min_dict = loaded_variables["min_dict"] - max_dict = 
loaded_variables["max_dict"] + min_value = loaded_variables["min_dict"][var_name] + max_value = loaded_variables["max_dict"][var_name] + + if var_name == "mean_shift": + ### skip vmin and vmax of mean shift, then rescale from 0 and 1 and then skip + ### mean shift + max_value = np.max(-np.array([min_value, max_value])) + min_value = 0 ### do calculations + var_value_processed = var_value * (max_value - min_value) + min_value + if var_name == "mean_shift": var_value = -var_value - var_value_processed = ( - var_value * (max_dict[var_name] - min_dict[var_name]) + min_dict[var_name] - ) - if var_name == "n": var_value_processed = int(np.round(var_value_processed)) return var_value_processed -if __name__ == "__main__": +### TODO I have the problem that for very small p the normal distribution is not a good +### approximation of the binomial distribution. +### I think one can shift the mean and scale the standard deviation depending on the p +### and n values. I will try to optimize the shift and scale for each n and p value. 
+ +### global paramters +COMPARE_ORIGINAL = False +OPTIMIZE = False +REGRESS = True +MIXED = False +PLOT_COMPARE_ORIGINAL = True +PLOT_OPTIMIZE = True +PLOT_REGRESS = True +PLOT_MIXED = True +COMPARE_ORIGINAL_FOLDER = "test2_data_compare_original" +OPTIMIZE_FOLDER = "test2_data_optimize" +REGRESS_FOLDER = "test2_data_regress" +MIXED_FOLDER = "test2_data_mixed" +PLOTS_FOLDER = "test2_plots" +S = 10000 +SEED = 1234 +N_VALUES = [10, 1000, 20] +P_VALUES = [0.001, 0.1, 10] +N_JOBS = 2 +N_RUNS_OPT_PER_PAIR = 100 * N_JOBS +N_RUNS_REGRESS = 15 +N_PARAMS_REGRESS = 16 +SCALE_FOR_REGRESSION = True - ### TODO: restructure this thing - # 1st compare binomial and normal samples for various n and p values, save: p_list, n_list and diff_list - # 2nd optimize mean shift and std scale for each n and p value and get improved error, save: mean_shift_list, std_scale_list and error_improved_list - # 3rd make a 2D regression for the optimized mean shift and std scale, get mean_shift_regress(n, p) and std_scale_regress(n, p), save: the optimized parameters of the regression equations - # 4th plot: (1) error depending on n and p, (2) optimized mean shift and std scale depending on n and p and corresponding error improvement, (3) regressed mean shift and std scale depending on n and p and corresponding error improvement +if __name__ == "__main__": ### create the save folder(s) if COMPARE_ORIGINAL: @@ -1036,10 +852,14 @@ def post_process_for_regression(var_value, var_name): create_data_raw_folder( REGRESS_FOLDER, ) + if MIXED: + create_data_raw_folder( + MIXED_FOLDER, + ) ### compare with and without optimization compare_normal_binomial( - compare_original=COMPARE_ORIGINAL, optimize=OPTIMIZE, regress=False + compare_original=COMPARE_ORIGINAL, optimize=OPTIMIZE, regress=False, mixed=False ) ### compare with regression (must compare original and with optimization first) @@ -1068,21 +888,26 @@ def post_process_for_regression(var_value, var_name): diff_opt_arr = 
np.array(loaded_variables_opt["diff_opt_list"]) diff_arr = np.array(loaded_variables["diff_list"]) - ### Transform mean_shift_opt and std_scale_opt for regression - ### get how much the difference improved (decreased) by the optimized mean shift - ### and std scale values - diff_improvement_arr = -np.clip(diff_opt_arr - diff_arr, None, 0) - diff_improvement_arr = diff_improvement_arr / np.max(diff_improvement_arr) - - ### scale the mean shift and std scale by the difference improvement - ### --> only keep the transformations which improve the difference - ### if there is no improvement mean shift and std scale are closer to 0 and 1 - mean_shift_opt_arr = ( - diff_improvement_arr * mean_shift_opt_arr + (1 - diff_improvement_arr) * 0 - ) - std_scale_opt_arr = ( - diff_improvement_arr * std_scale_opt_arr + (1 - diff_improvement_arr) * 1 - ) + if SCALE_FOR_REGRESSION: + ### Transform mean_shift_opt and std_scale_opt for regression + ### get how much the difference improved (decreased) by the optimized mean shift + ### and std scale values + diff_improvement_arr = -np.clip(diff_opt_arr - diff_arr, None, 0) + diff_improvement_arr = diff_improvement_arr / np.max(diff_improvement_arr) + # ### scale the mean shift and std scale by the original difference the larger the difference the mor important the transformation + # diff_improvement_arr = np.absolute(diff_arr) / np.max(np.absolute(diff_arr)) + + ### scale the mean shift and std scale by the difference improvement + ### --> only keep the transformations which improve the difference + ### if there is no improvement mean shift and std scale are closer to 0 and 1 + mean_shift_opt_arr = ( + diff_improvement_arr * mean_shift_opt_arr + + (1 - diff_improvement_arr) * 0 + ) + std_scale_opt_arr = ( + diff_improvement_arr * std_scale_opt_arr + + (1 - diff_improvement_arr) * 1 + ) ### save the trasnformed mean shift and std scale values for regression save_variables( @@ -1124,7 +949,14 @@ def post_process_for_regression(var_value, 
var_name): ### do the regression get_regression_parameters() - compare_normal_binomial(compare_original=False, optimize=False, regress=True) + compare_normal_binomial( + compare_original=False, optimize=False, regress=True, mixed=False + ) + + if MIXED: + compare_normal_binomial( + compare_original=False, optimize=False, regress=False, mixed=True + ) ### plot the results if PLOT_COMPARE_ORIGINAL: @@ -1136,470 +968,5 @@ def post_process_for_regression(var_value, var_name): if PLOT_REGRESS: plot_with_transformation(mode="regress") - quit() - - OPTIMIZE = False - PLOT_OPTIMIZED = True - USE_REGRESSION = False - PLOT_REGRESSION = False - - ### 1st optimize mean shift and std scale for each n and p value - n_arr = logarithmic_arange(10, 1000, 20).astype(int) - p_arr = logarithmic_arange(0.001, 0.1, 10) - - ### 1st get errors for all n and p values without optimization - p_list = [] - n_list = [] - error_list = [] - for p in p_arr: - for n in n_arr: - ### get the error without optimization - error = objective_function(mean_shift=0, std_scale=1, n=n, p=p, m=M) - error_list.append(error) - - ### store the results - p_list.append(p) - n_list.append(n) - - ### save variables - save_variables( - variable_list=[ - p_list, - n_list, - error_list, - ], - name_list=[ - "p_list", - "n_list", - "error_list", - ], - path="data_optimize_binomial_normal", - ) - - ### plot the original error - # original error -> interpolation plot - plt.figure(figsize=(6.4 * 2, 4.8 * 2)) - plt.subplot(1, 1, 1) - plot_2d_interpolated_image( - x=n_list, y=p_list, z=error_list, vmin=0, vmax=np.max(error_list) - ) - plt.colorbar() - plt.xlabel("n") - plt.ylabel("p") - plt.title(f"Error original\n(max: {np.max(error_list)})") - plt.tight_layout() - plt.savefig("test2_01_error_original.png", dpi=300) - - ### 2nd optimize mean shift and std scale for each n and p value and get improved error - if OPTIMIZE: - loaded_variables = load_variables( - name_list=[ - "p_list", - "n_list", - "error_list", - ], - 
path="data_optimize_binomial_normal", - ) - p_list = loaded_variables["p_list"] - n_list = loaded_variables["n_list"] - error_list = loaded_variables["error_list"] - mean_shift_list = [] - std_scale_list = [] - error_improved_list = [] - for p, n in zip(p_list, n_list): - ### save p and n to be availyble in optimization script - save_variables( - variable_list=[p, n], - name_list=["p", "n"], - path=OPTIMIZE_FOLDER, - ) - ### run optimization - n_jobs = 15 - n_runs = 100 * n_jobs - args_list = [[f"{parallel_id}"] for parallel_id in range(n_runs)] - run_script_parallel( - script_path="test2_deap_opt_transform.py", - n_jobs=n_jobs, - args_list=args_list, - ) - ### get best parameters - best_fitness = 1e6 - best_parallel_id = 0 - for parallel_id in range(n_runs): - loaded_variables = load_variables( - name_list=[ - f"error_improved_{parallel_id}", - ], - path=OPTIMIZE_FOLDER, - ) - error_improved = loaded_variables[f"error_improved_{parallel_id}"] - - if error_improved < best_fitness: - best_fitness = error_improved - best_parallel_id = parallel_id - loaded_variables = load_variables( - name_list=[ - f"mean_shift_{best_parallel_id}", - f"std_scale_{best_parallel_id}", - f"error_improved_{best_parallel_id}", - ], - path=OPTIMIZE_FOLDER, - ) - mean_shift = loaded_variables[f"mean_shift_{best_parallel_id}"] - std_scale = loaded_variables[f"std_scale_{best_parallel_id}"] - error_improved = loaded_variables[f"error_improved_{best_parallel_id}"] - - ### store the results - error_improved_list.append(error_improved) - mean_shift_list.append(mean_shift) - std_scale_list.append(std_scale) - - ### save variables - save_variables( - variable_list=[ - mean_shift_list, - std_scale_list, - error_improved_list, - ], - name_list=[ - "mean_shift_list", - "std_scale_list", - "error_improved_list", - ], - path="data_optimize_binomial_normal", - ) - - ### 4th plot the optimized error with optimized mean shift and std scale - ### also calculate the ŕegression for the optimized mean shift 
and std scale - if PLOT_OPTIMIZED: - # fitting improved error -> interpolation plot - # fitting improvement -> interpolation plot - # fitting mean shift -> regression plot - # fitting std scale -> regression plot - loaded_variables = load_variables( - name_list=[ - "error_improved_list", - "mean_shift_list", - "std_scale_list", - ], - path="data_optimize_binomial_normal", - ) - error_improved_list = loaded_variables["error_improved_list"] - mean_shift_list = loaded_variables["mean_shift_list"] - std_scale_list = loaded_variables["std_scale_list"] - # ### TODO tmp - # error_improved_list = np.random.rand(len(error_improved_list)) - # mean_shift_list = np.random.rand(len(mean_shift_list)) - # std_scale_list = np.random.rand(len(std_scale_list)) - # ### TODO tmp - error_change_arr = np.array(error_improved_list) - np.array(error_list) - improvement_arr = -np.clip(error_change_arr, None, 0) - improvement_arr_norm = improvement_arr / np.max(improvement_arr) - - ### scale the mean shift and std scale by the error improvement - ### --> only keep the transformations which improve the error - alpha = improvement_arr_norm - mean_shift_list = alpha * np.array(mean_shift_list) + (1 - alpha) * 0 - std_scale_list = alpha * np.array(std_scale_list) + (1 - alpha) * 1 - - ### the mean shift is mostly 0 and at some positions negative, multiply it by -1 - mean_shift_list = -mean_shift_list - - plt.figure(figsize=(6.4 * 2 * 2, 4.8 * 2 * 4)) - plt.subplot(4, 2, 1) - plot_2d_interpolated_image( - x=n_list, - y=p_list, - z=error_improved_list, - vmin=0, - vmax=np.max(error_improved_list), - ) - plt.colorbar() - plt.xlabel("n") - plt.ylabel("p") - plt.title(f"Error optimized\n(max: {np.max(error_improved_list)})") - plt.subplot(4, 2, 3) - plot_2d_interpolated_image( - x=n_list, - y=p_list, - z=error_change_arr, - vmin=-np.max(np.abs(error_change_arr)), - vmax=np.max(np.abs(error_change_arr)), - ) - plt.colorbar() - plt.xlabel("n") - plt.ylabel("p") - plt.title("Error improvement") - 
plt.subplot(4, 2, 5) - plot_2d_interpolated_image( - x=n_list, - y=p_list, - z=mean_shift_list, - vmin=-np.max(np.abs(mean_shift_list)), - vmax=np.max(np.abs(mean_shift_list)), - ) - plt.colorbar() - plt.xlabel("n") - plt.ylabel("p") - plt.title("Mean shift") - plt.subplot(4, 2, 7) - plot_2d_interpolated_image( - x=n_list, - y=p_list, - z=std_scale_list, - vmin=1 - np.max(1 - np.array(std_scale_list)), - vmax=1 + np.max(np.array(std_scale_list) - 1), - ) - plt.colorbar() - plt.xlabel("n") - plt.ylabel("p") - plt.title("Standard deviation scale") - plt.subplot(4, 2, 6) - plot_2d_curve_fit_regression( - x=n_list, - y=p_list, - z=mean_shift_list, - vmin=-np.max(np.abs(mean_shift_list)), - vmax=np.max(np.abs(mean_shift_list)), - # sample_weight=-np.clip(error_change_arr, None, 0) - # + 0.01 * np.max(improvement_arr), - # degree=3, - ) - plt.colorbar() - plt.xlabel("n") - plt.ylabel("p") - plt.title("Mean shift regression") - plt.subplot(4, 2, 8) - plot_2d_curve_fit_regression( - x=n_list, - y=p_list, - z=std_scale_list, - vmin=1 - np.max(1 - np.array(std_scale_list)), - vmax=1 + np.max(np.array(std_scale_list) - 1), - # sample_weight=-np.clip(error_change_arr, None, 0) - # + 0.01 * np.max(improvement_arr), - # degree=3, - ) - plt.colorbar() - plt.xlabel("n") - plt.ylabel("p") - plt.title("Standard deviation scale regression") - plt.tight_layout() - plt.savefig("test2_02_error_optimized.png", dpi=300) - - ### 3rd use regression for mean shift and std scale and recalculate the improved error - if USE_REGRESSION: - ### load the optimized parameters and corresponding original and optimized errors - loaded_variables = load_variables( - name_list=[ - "p_list", - "n_list", - ], - path="data_optimize_binomial_normal", - ) - p_list = loaded_variables["p_list"] - n_list = loaded_variables["n_list"] - - ### use regression equations to recalculate mean shift and std scale - mean_shift_reg_list = [] - std_scale_reg_list = [] - error_improved_reg_list = [] - for p, n in zip(p_list, 
n_list): - ### get the optimized parameters and best error - mean_shift = mean_shift_regression(n, p) - std_scale = std_scale_regression(n, p) - error_improved = objective_function( - mean_shift=mean_shift, std_scale=std_scale, n=n, p=p, m=M - ) - - ### store the results - error_improved_reg_list.append(error_improved) - mean_shift_reg_list.append(mean_shift) - std_scale_reg_list.append(std_scale) - - ### save variables - save_variables( - variable_list=[ - mean_shift_reg_list, - std_scale_reg_list, - error_improved_reg_list, - ], - name_list=[ - "mean_shift_reg_list", - "std_scale_reg_list", - "error_improved_reg_list", - ], - path="data_optimize_binomial_normal", - ) - - ### 5th plot the regression error with regressed mean shift and std scale and compare it - ### with the optimized error - if PLOT_REGRESSION: - # regression improved error -> interpolation plot - # regression improvement -> interpolation plot - # regression mean shift -> regression plot - # regression std scale -> regression plot - loaded_variables = load_variables( - name_list=[ - "error_improved_list", - "mean_shift_list", - "std_scale_list", - "error_improved_reg_list", - "mean_shift_reg_list", - "std_scale_reg_list", - ], - path="data_optimize_binomial_normal", - ) - error_improved_list = loaded_variables["error_improved_list"] - mean_shift_list = loaded_variables["mean_shift_list"] - std_scale_list = loaded_variables["std_scale_list"] - error_improved_reg_list = loaded_variables["error_improved_reg_list"] - mean_shift_reg_list = loaded_variables["mean_shift_reg_list"] - std_scale_reg_list = loaded_variables["std_scale_reg_list"] - - plt.figure(figsize=(6.4 * 2, 4.8 * 2 * 4)) - plt.subplot(4, 1, 1) - plot_2d_interpolated_image( - x=n_list, - y=p_list, - z=error_improved_reg_list, - vmin=0, - vmax=np.max(error_improved_reg_list), - ) - plt.colorbar() - plt.xlabel("n") - plt.ylabel("p") - plt.title(f"Error optimized\n(max: {np.max(error_improved_reg_list)})") - plt.subplot(4, 1, 2) - 
plot_2d_interpolated_image( - x=n_list, - y=p_list, - z=np.array(error_improved_reg_list) - np.array(error_list), - vmin=-np.max( - np.abs(np.array(error_improved_reg_list) - np.array(error_list)) - ), - vmax=np.max( - np.abs(np.array(error_improved_reg_list) - np.array(error_list)) - ), - ) - plt.colorbar() - plt.xlabel("n") - plt.ylabel("p") - plt.title("Error improvement") - plt.subplot(4, 1, 3) - plot_2d_interpolated_image( - x=n_list, - y=p_list, - z=mean_shift_reg_list, - vmin=-np.max(np.abs(mean_shift_reg_list)), - vmax=np.max(np.abs(mean_shift_reg_list)), - ) - plt.colorbar() - plt.xlabel("n") - plt.ylabel("p") - plt.title("Mean shift") - plt.subplot(4, 1, 4) - plot_2d_interpolated_image( - x=n_list, - y=p_list, - z=std_scale_reg_list, - vmin=1 - np.max(1 - np.array(std_scale_reg_list)), - vmax=1 + np.max(np.array(std_scale_reg_list) - 1), - ) - plt.colorbar() - plt.xlabel("n") - plt.ylabel("p") - plt.title("Standard deviation scale") - plt.tight_layout() - plt.savefig("test2_03_error_regression.png", dpi=300) - - # difference fitting/regression improved error -> interpolation plot - # difference fitting/regression improvement -> interpolation plot - # difference fitting/regression mean shift -> interpolation plot - # difference fitting/regression std scale -> interpolation plot - plt.figure(figsize=(6.4 * 2, 4.8 * 2 * 4)) - plt.subplot(4, 1, 1) - plot_2d_interpolated_image( - x=n_list, - y=p_list, - z=np.array(error_improved_list) - np.array(error_improved_reg_list), - vmin=-np.max( - np.abs( - np.array(error_improved_list) - np.array(error_improved_reg_list) - ) - ), - vmax=np.max( - np.abs( - np.array(error_improved_list) - np.array(error_improved_reg_list) - ) - ), - ) - plt.colorbar() - plt.xlabel("n") - plt.ylabel("p") - plt.title("Error difference between optimized and regression") - plt.subplot(4, 1, 2) - plot_2d_interpolated_image( - x=n_list, - y=p_list, - z=np.array(error_improved_list) - - np.array(error_list) - - 
np.array(error_improved_reg_list) - + np.array(error_list), - vmin=-np.max( - np.abs( - np.array(error_improved_list) - - np.array(error_list) - - np.array(error_improved_reg_list) - + np.array(error_list) - ) - ), - vmax=np.max( - np.abs( - np.array(error_improved_list) - - np.array(error_list) - - np.array(error_improved_reg_list) - + np.array(error_list) - ) - ), - ) - plt.colorbar() - plt.xlabel("n") - plt.ylabel("p") - plt.title("Error improvement difference between optimized and regression") - plt.subplot(4, 1, 3) - plot_2d_interpolated_image( - x=n_list, - y=p_list, - z=np.array(mean_shift_list) - np.array(mean_shift_reg_list), - vmin=-np.max( - np.abs(np.array(mean_shift_list) - np.array(mean_shift_reg_list)) - ), - vmax=np.max( - np.abs(np.array(mean_shift_list) - np.array(mean_shift_reg_list)) - ), - ) - plt.colorbar() - plt.xlabel("n") - plt.ylabel("p") - plt.title("Mean shift difference between optimized and regression") - plt.subplot(4, 1, 4) - plot_2d_interpolated_image( - x=n_list, - y=p_list, - z=np.array(std_scale_list) - np.array(std_scale_reg_list), - vmin=-np.max( - np.abs(np.array(std_scale_list) - np.array(std_scale_reg_list)) - ), - vmax=np.max( - np.abs(np.array(std_scale_list) - np.array(std_scale_reg_list)) - ), - ) - plt.colorbar() - plt.xlabel("n") - plt.ylabel("p") - plt.title( - "Standard deviation scale difference between optimized and regression" - ) - plt.tight_layout() - plt.savefig("test2_04_error_difference.png", dpi=300) + if PLOT_MIXED: + plot_with_transformation(mode="mixed") From 6a9fb69bdcfbfc4b54755d592331ecd89744f375 Mon Sep 17 00:00:00 2001 From: olimaol Date: Thu, 15 Aug 2024 16:21:59 +0200 Subject: [PATCH 19/21] . 
--- .../examples/model_configurator/test2.py | 80 ++++++++----------- .../test2_deap_opt_regress.py | 44 +++++----- 2 files changed, 57 insertions(+), 67 deletions(-) diff --git a/src/CompNeuroPy/examples/model_configurator/test2.py b/src/CompNeuroPy/examples/model_configurator/test2.py index 526399c..6c17953 100644 --- a/src/CompNeuroPy/examples/model_configurator/test2.py +++ b/src/CompNeuroPy/examples/model_configurator/test2.py @@ -479,7 +479,9 @@ def plot_with_transformation(mode: str): plt.colorbar() plt.xlabel("n") plt.ylabel("p") - plt.title(f"{title}\n(max: {np.max(loaded_variables[key])})") + plt.title( + f"{title}\n(min: {np.min(loaded_variables[key])}, max: {np.max(loaded_variables[key])})" + ) plt.tight_layout() create_dir(PLOTS_FOLDER) plt.savefig( @@ -519,7 +521,9 @@ def plot_compare_original(): plt.colorbar() plt.xlabel("n") plt.ylabel("p") - plt.title(f"Difference original\n(max: {np.max(loaded_variables['diff_list'])})") + plt.title( + f"Difference original\n(min: {np.min(loaded_variables['diff_list'])}, max: {np.max(loaded_variables['diff_list'])})" + ) plt.tight_layout() create_dir(PLOTS_FOLDER) plt.savefig(f"{PLOTS_FOLDER}/difference_original.png", dpi=300) @@ -717,6 +721,10 @@ def get_regression_parameters(): ) popt_std_scale = loaded_variables[f"popt_std_scale_{best_parallel_id_std_scale}"] + print("finished regressions") + print(f"best fitness for mean_shift: {best_fitness_mean_shift}") + print(f"best fitness for std_scale: {best_fitness_std_scale}") + save_variables( variable_list=[popt_mean_shift, popt_std_scale], name_list=["popt_mean_shift", "popt_std_scale"], @@ -735,7 +743,7 @@ def preprocess_for_regress(var_value, var_name): The name of the variable. Returns: - var_value_processed (float or array): + var_value (float or array): The normalized value(s) of the variable ready for regression. 
""" ### load the dicts for normalization @@ -748,7 +756,6 @@ def preprocess_for_regress(var_value, var_name): ) min_value = loaded_variables["min_dict"][var_name] max_value = loaded_variables["max_dict"][var_name] - var_value_processed = var_value ### do calculations if var_name == "mean_shift": @@ -756,13 +763,13 @@ def preprocess_for_regress(var_value, var_name): ### coordinates (I think they are not important) ### make mean shift look like std_scale ### skip mean shift and also vmin and vmax, then rescale to 0 and 1 - var_value_processed = -var_value_processed - var_value_processed = np.clip(var_value_processed, 0, None) + var_value = -var_value + var_value = np.clip(var_value, 0, None) max_value = np.max(-np.array([min_value, max_value])) min_value = 0 - var_value_processed = (var_value - min_value) / (max_value - min_value) - return var_value_processed + var_value = (var_value - min_value) / (max_value - min_value) + return var_value def post_process_for_regression(var_value, var_name): @@ -776,7 +783,7 @@ def post_process_for_regression(var_value, var_name): The name of the variable. Returns: - var_value_processed (float or array): + var_value (float or array): The original value(s) of the variable after denormalization. 
""" ### load the dicts for normalization @@ -797,14 +804,14 @@ def post_process_for_regression(var_value, var_name): min_value = 0 ### do calculations - var_value_processed = var_value * (max_value - min_value) + min_value + var_value = var_value * (max_value - min_value) + min_value if var_name == "mean_shift": var_value = -var_value if var_name == "n": - var_value_processed = int(np.round(var_value_processed)) - return var_value_processed + var_value = int(np.round(var_value)) + return var_value ### TODO I have the problem that for very small p the normal distribution is not a good @@ -834,7 +841,7 @@ def post_process_for_regression(var_value, var_name): N_RUNS_OPT_PER_PAIR = 100 * N_JOBS N_RUNS_REGRESS = 15 N_PARAMS_REGRESS = 16 -SCALE_FOR_REGRESSION = True +SCALE_ERROR_FOR_REGRESSION = True if __name__ == "__main__": @@ -888,41 +895,15 @@ def post_process_for_regression(var_value, var_name): diff_opt_arr = np.array(loaded_variables_opt["diff_opt_list"]) diff_arr = np.array(loaded_variables["diff_list"]) - if SCALE_FOR_REGRESSION: - ### Transform mean_shift_opt and std_scale_opt for regression - ### get how much the difference improved (decreased) by the optimized mean shift - ### and std scale values - diff_improvement_arr = -np.clip(diff_opt_arr - diff_arr, None, 0) - diff_improvement_arr = diff_improvement_arr / np.max(diff_improvement_arr) - # ### scale the mean shift and std scale by the original difference the larger the difference the mor important the transformation - # diff_improvement_arr = np.absolute(diff_arr) / np.max(np.absolute(diff_arr)) - - ### scale the mean shift and std scale by the difference improvement - ### --> only keep the transformations which improve the difference - ### if there is no improvement mean shift and std scale are closer to 0 and 1 - mean_shift_opt_arr = ( - diff_improvement_arr * mean_shift_opt_arr - + (1 - diff_improvement_arr) * 0 - ) - std_scale_opt_arr = ( - diff_improvement_arr * std_scale_opt_arr - + (1 - 
diff_improvement_arr) * 1 - ) + if SCALE_ERROR_FOR_REGRESSION: + ### get array to weight the error for the regression depending on the + ### improvement (reduction) of the difference by the optimization + weight_error_arr = -np.clip(diff_opt_arr - diff_arr, None, 0) + weight_error_arr = (weight_error_arr / np.max(weight_error_arr)) + 1.0 + else: + weight_error_arr = np.ones_like(diff_opt_arr) - ### save the trasnformed mean shift and std scale values for regression - save_variables( - variable_list=[ - mean_shift_opt_arr, - std_scale_opt_arr, - ], - name_list=[ - "mean_shift_opt_for_regress_arr", - "std_scale_opt_for_regress_arr", - ], - path=REGRESS_FOLDER, - ) - - ### create global variables which can be used for pre-processing and + ### create variables which can be used for pre-processing and ### post-processing for the regression min_dict = {} max_dict = {} @@ -934,15 +915,18 @@ def post_process_for_regression(var_value, var_name): min_dict["std_scale"] = np.min(std_scale_opt_arr) max_dict["mean_shift"] = np.max(mean_shift_opt_arr) max_dict["std_scale"] = np.max(std_scale_opt_arr) - ### save the dicts + + ### save the variables prepared for the regression save_variables( variable_list=[ min_dict, max_dict, + weight_error_arr, ], name_list=[ "min_dict", "max_dict", + "weight_error_arr", ], path=REGRESS_FOLDER, ) diff --git a/src/CompNeuroPy/examples/model_configurator/test2_deap_opt_regress.py b/src/CompNeuroPy/examples/model_configurator/test2_deap_opt_regress.py index d8091ef..05ade79 100644 --- a/src/CompNeuroPy/examples/model_configurator/test2_deap_opt_regress.py +++ b/src/CompNeuroPy/examples/model_configurator/test2_deap_opt_regress.py @@ -10,45 +10,51 @@ import sys -def regression_evaluate_function(population, X, z): +def regression_evaluate_function(population, X, z, weights): loss_list = [] ### the population is a list of individuals which are lists of parameters for individual in population: loss_of_individual = regression_objective_function( - 
individual=individual, X=X, z=z + individual=individual, X=X, z=z, weights=weights ) loss_list.append((loss_of_individual,)) return loss_list -def regression_objective_function(individual, X, z): +def regression_objective_function(individual, X, z, weights): is_data = regression_func(X=X, denormalize=None, args=individual) target_data = z - return np.sum((is_data - target_data) ** 2) + error_arr = (is_data - target_data) ** 2 + ### weight the error array + error_arr = error_arr * weights + return np.sum(error_arr) if __name__ == "__main__": - ### Load the p and n variables for regression from the previous optimization + ### Load the p, n, mean_shift and std_scale variables for regression from the + ### previous optimization loaded_variables = load_variables( name_list=[ "p_list", "n_list", + "mean_shift_opt_list", + "std_scale_opt_list", ], path=OPTIMIZE_FOLDER, ) p_arr = np.array(loaded_variables["p_list"]) n_arr = np.array(loaded_variables["n_list"]) - ### Load the mean_shift and std_scale variables for regression prepared before - ### regression + mean_shift_opt_arr = np.array(loaded_variables["mean_shift_opt_list"]) + std_scale_opt_arr = np.array(loaded_variables["std_scale_opt_list"]) + + ### Load the weight_error array to weight the regression errors loaded_variables = load_variables( name_list=[ - "mean_shift_opt_for_regress_arr", - "std_scale_opt_for_regress_arr", + "weight_error_arr", ], path=REGRESS_FOLDER, ) - mean_shift_opt_arr = loaded_variables["mean_shift_opt_for_regress_arr"] - std_scale_opt_arr = loaded_variables["std_scale_opt_for_regress_arr"] + weight_error_arr = np.array(loaded_variables["weight_error_arr"]) ### normalize the data before regression n_arr = preprocess_for_regress(var_value=n_arr, var_name="n") @@ -65,16 +71,16 @@ def regression_objective_function(individual, X, z): lower=np.array([-1] * N_PARAMS_REGRESS), upper=np.array([1] * N_PARAMS_REGRESS), evaluate_function=lambda population: regression_evaluate_function( - 
population=population, X=(n_arr, p_arr), z=mean_shift_opt_arr + population=population, + X=(n_arr, p_arr), + z=mean_shift_opt_arr, + weights=weight_error_arr, ), hard_bounds=False, display_progress_bar=False, ) deap_cma_result = deap_cma.run(max_evals=2000) popt = [deap_cma_result[f"param{param_id}"] for param_id in range(N_PARAMS_REGRESS)] - print( - f"finished regression of mean_shift, best fitness: {deap_cma_result['best_fitness']}" - ) # Save the variables save_variables( @@ -91,16 +97,16 @@ def regression_objective_function(individual, X, z): lower=np.array([-1] * N_PARAMS_REGRESS), upper=np.array([1] * N_PARAMS_REGRESS), evaluate_function=lambda population: regression_evaluate_function( - population=population, X=(n_arr, p_arr), z=std_scale_opt_arr + population=population, + X=(n_arr, p_arr), + z=std_scale_opt_arr, + weights=weight_error_arr, ), hard_bounds=False, display_progress_bar=False, ) deap_cma_result = deap_cma.run(max_evals=2000) popt = [deap_cma_result[f"param{param_id}"] for param_id in range(N_PARAMS_REGRESS)] - print( - f"finished regression of std_scale, best fitness: {deap_cma_result['best_fitness']}" - ) # Save the variables save_variables( From c05a139a7a450b66b90f2ca03f6d80c00dc2c1ed Mon Sep 17 00:00:00 2001 From: olimaol Date: Thu, 15 Aug 2024 16:44:18 +0200 Subject: [PATCH 20/21] . 
--- .../examples/model_configurator/test2.py | 12 +++++++++++- .../model_configurator/test2_deap_opt_regress.py | 13 +++++++------ 2 files changed, 18 insertions(+), 7 deletions(-) diff --git a/src/CompNeuroPy/examples/model_configurator/test2.py b/src/CompNeuroPy/examples/model_configurator/test2.py index 6c17953..aae745b 100644 --- a/src/CompNeuroPy/examples/model_configurator/test2.py +++ b/src/CompNeuroPy/examples/model_configurator/test2.py @@ -827,7 +827,7 @@ def post_process_for_regression(var_value, var_name): PLOT_COMPARE_ORIGINAL = True PLOT_OPTIMIZE = True PLOT_REGRESS = True -PLOT_MIXED = True +PLOT_MIXED = False COMPARE_ORIGINAL_FOLDER = "test2_data_compare_original" OPTIMIZE_FOLDER = "test2_data_optimize" REGRESS_FOLDER = "test2_data_regress" @@ -842,6 +842,7 @@ def post_process_for_regression(var_value, var_name): N_RUNS_REGRESS = 15 N_PARAMS_REGRESS = 16 SCALE_ERROR_FOR_REGRESSION = True +KEEP_ONLY_IMPROVEMENTS = True if __name__ == "__main__": @@ -903,6 +904,11 @@ def post_process_for_regression(var_value, var_name): else: weight_error_arr = np.ones_like(diff_opt_arr) + if KEEP_ONLY_IMPROVEMENTS: + ### keep only the improvements + mean_shift_opt_arr[diff_opt_arr >= diff_arr] = 0 + std_scale_opt_arr[diff_opt_arr >= diff_arr] = 1 + ### create variables which can be used for pre-processing and ### post-processing for the regression min_dict = {} @@ -922,11 +928,15 @@ def post_process_for_regression(var_value, var_name): min_dict, max_dict, weight_error_arr, + mean_shift_opt_arr, + std_scale_opt_arr, ], name_list=[ "min_dict", "max_dict", "weight_error_arr", + "mean_shift_opt_arr_for_regress", + "std_scale_opt_arr_for_regress", ], path=REGRESS_FOLDER, ) diff --git a/src/CompNeuroPy/examples/model_configurator/test2_deap_opt_regress.py b/src/CompNeuroPy/examples/model_configurator/test2_deap_opt_regress.py index 05ade79..fc791ba 100644 --- a/src/CompNeuroPy/examples/model_configurator/test2_deap_opt_regress.py +++ 
b/src/CompNeuroPy/examples/model_configurator/test2_deap_opt_regress.py @@ -31,30 +31,31 @@ def regression_objective_function(individual, X, z, weights): if __name__ == "__main__": - ### Load the p, n, mean_shift and std_scale variables for regression from the + ### Load the p, n variables for regression from the ### previous optimization loaded_variables = load_variables( name_list=[ "p_list", "n_list", - "mean_shift_opt_list", - "std_scale_opt_list", ], path=OPTIMIZE_FOLDER, ) p_arr = np.array(loaded_variables["p_list"]) n_arr = np.array(loaded_variables["n_list"]) - mean_shift_opt_arr = np.array(loaded_variables["mean_shift_opt_list"]) - std_scale_opt_arr = np.array(loaded_variables["std_scale_opt_list"]) - ### Load the weight_error array to weight the regression errors + ### Load mean_shift and std_scale values and the weight_error array to weight the + ### regression errors loaded_variables = load_variables( name_list=[ "weight_error_arr", + "mean_shift_opt_arr_for_regress", + "std_scale_opt_arr_for_regress", ], path=REGRESS_FOLDER, ) weight_error_arr = np.array(loaded_variables["weight_error_arr"]) + mean_shift_opt_arr = np.array(loaded_variables["mean_shift_opt_arr_for_regress"]) + std_scale_opt_arr = np.array(loaded_variables["std_scale_opt_arr_for_regress"]) ### normalize the data before regression n_arr = preprocess_for_regress(var_value=n_arr, var_name="n") From 1ce1b4e4d609d3998cd13cb1242b6f8789d05791 Mon Sep 17 00:00:00 2001 From: olmai Date: Wed, 4 Sep 2024 11:05:30 +0200 Subject: [PATCH 21/21] create_data_raw_folder now works with module and dictionary --- src/CompNeuroPy/system_functions.py | 49 ++++++++++++++++++++++------- 1 file changed, 38 insertions(+), 11 deletions(-) diff --git a/src/CompNeuroPy/system_functions.py b/src/CompNeuroPy/system_functions.py index 8384d6b..ebe0b75 100644 --- a/src/CompNeuroPy/system_functions.py +++ b/src/CompNeuroPy/system_functions.py @@ -10,6 +10,7 @@ import concurrent.futures import signal from typing import 
List +from types import ModuleType import threading import sys @@ -342,6 +343,8 @@ def _timeout_handler(signum, frame): def create_data_raw_folder( folder_name: str, + parameter_module: ModuleType | None = None, + parameter_dict: dict | None = None, **kwargs, ): """ @@ -356,7 +359,8 @@ def create_data_raw_folder( This function stores the following information in a file called "__data_raw_meta__" in the created folder: - the name of the python script which created the data raw - - the global variables of the python script given as kwargs + - the global variables of the python script given as parameter_module, + parameter_dict, and kwargs - the conda environment - the pip requirements - the git log of ANNarchy and CompNeuroPy if they are installed locally @@ -368,8 +372,12 @@ def create_data_raw_folder( folder_name (str): Name of the folder to create. - **kwargs (Any, optional): - Global variables of the caller script. + parameter_module (ModuleType, optional): + Module containing parameters as upper case constants. Default: None. + + parameter_dict (dict, optional): + Dictionary containing parameters to store as parameter name - value pairs. + Default: None. 
Returns: folder_name (str): @@ -378,6 +386,10 @@ def create_data_raw_folder( Example: ```python from CompNeuroPy import create_data_raw_folder + import parameter_module as params + + # this is a parameter + params.A = 10 ### define global variables var1 = 1 @@ -387,9 +399,8 @@ def create_data_raw_folder( ### call the function create_data_raw_folder( "my_data_raw_folder", - var1=var1, - var2=var2, - var3=var3, + parameter_module=params, + parameter_dict={"var1": var1, "var2": var2, "var3": var3}, ) ``` """ @@ -572,11 +583,27 @@ def create_data_raw_folder( f"{''.join(git_strings)}" f"# with the following global variables:\n" ) - for key, value in kwargs.items(): - if isinstance(value, str): - f.write(f"{key} = '{value}'\n") - else: - f.write(f"{key} = {value}\n") + # store parameters from parameter_module, parameter_dict, and kwargs + if parameter_module is not None: + for key, value in vars(parameter_module).items(): + if not (key.isupper()): + continue + if isinstance(value, str): + f.write(f"{key} = '{value}'\n") + else: + f.write(f"{key} = {value}\n") + if parameter_dict is not None: + for key, value in parameter_dict.items(): + if isinstance(value, str): + f.write(f"{key} = '{value}'\n") + else: + f.write(f"{key} = {value}\n") + if kwargs: + for key, value in kwargs.items(): + if isinstance(value, str): + f.write(f"{key} = '{value}'\n") + else: + f.write(f"{key} = {value}\n") f.write("\n") f.write( "# ##########################################################################\n"