diff --git a/.gitignore b/.gitignore index 36290a3..69bc5cd 100644 --- a/.gitignore +++ b/.gitignore @@ -16,3 +16,4 @@ dist/ !site/* *.pkl *json +*.log diff --git a/docs/built_in/neuron_models.md b/docs/built_in/neuron_models.md index e03825f..e1e1225 100644 --- a/docs/built_in/neuron_models.md +++ b/docs/built_in/neuron_models.md @@ -67,6 +67,10 @@ options: heading_level: 3 show_root_full_path: false +::: CompNeuroPy.neuron_models.final_models.izhikevich_2003_like_nm.Izhikevich2003NoisyBaseSNR + options: + heading_level: 3 + show_root_full_path: false ## Izhikevich (2007)-like Neurons ::: CompNeuroPy.neuron_models.final_models.izhikevich_2007_like_nm.Izhikevich2007 @@ -106,6 +110,10 @@ heading_level: 3 show_root_full_path: false ::: CompNeuroPy.neuron_models.final_models.izhikevich_2007_like_nm.Izhikevich2007NoisyAmpaOscillating + options: + heading_level: 3 + show_root_full_path: false +::: CompNeuroPy.neuron_models.final_models.izhikevich_2007_like_nm.IzhikevichGolomb options: heading_level: 3 show_root_full_path: false \ No newline at end of file diff --git a/docs/examples/deap_cma.md b/docs/examples/deap_cma.md new file mode 100644 index 0000000..7d4a1d1 --- /dev/null +++ b/docs/examples/deap_cma.md @@ -0,0 +1,165 @@ +## Introduction +This example demonstrates how to use the DeapCma class to optimize parameters. + +## Code +```python +from CompNeuroPy import DeapCma +import numpy as np + + +### for DeapCma we need to define the evaluate_function +def evaluate_function(population): + """ + Calculate the loss for a population of individuals. 
+
+    Args:
+        population (np.ndarray):
+            population of individuals (i.e., parameter sets) to evaluate
+
+    Returns:
+        loss_values (list[tuple]):
+            list of tuples, where each tuple contains the loss for an individual of the
+            population
+    """
+    loss_list = []
+    ### the population is a list of individuals
+    for individual in population:
+        ### the individual is a list of parameters
+        p0, p1, p2 = individual
+        ### calculate the loss of the individual
+        loss_of_individual = float((p0 - 3) ** 2 + (p1 - 7) ** 2 + (p2 - (-2)) ** 2)
+        ### insert the loss of the individual into the list of tuples
+        loss_list.append((loss_of_individual,))
+
+    return loss_list
+
+
+def get_source_solutions(lb, ub):
+    """
+    DeapCma can use source solutions to initialize the optimization process. This
+    function returns an example of source solutions.
+
+    Source solutions are a list of tuples, where each tuple contains the parameters of
+    an individual (np.ndarray) and its loss (float).
+
+    Returns:
+        source_solutions (list[tuple]):
+            list of tuples, where each tuple contains the parameters of an individual
+            and its loss
+    """
+    ### create random solutions
+    source_solutions_parameters = np.random.uniform(0, 1, (100, 3)) * (ub - lb) + lb
+    ### evaluate the random solutions
+    source_solutions_losses = evaluate_function(source_solutions_parameters)
+    ### create a list of tuples, where each tuple contains the parameters of an
+    ### individual and its loss
+    source_solutions = [
+        (source_solutions_parameters[idx], source_solutions_losses[idx][0])
+        for idx in range(len(source_solutions_parameters))
+    ]
+    ### only use the best 10 as source solutions
+    source_solutions = sorted(source_solutions, key=lambda x: x[1])[:10]
+
+    return source_solutions
+
+
+def main():
+    ### define lower bounds of parameters to optimize
+    lb = np.array([-10, -10, 0])
+
+    ### define upper bounds of parameters to optimize
+    ub = np.array([10, 15, 5])
+
+    ### create a "minimal" instance of the DeapCma class
+    deap_cma = 
DeapCma( + lower=lb, + upper=ub, + evaluate_function=evaluate_function, + ) + + ### create an instance of the DeapCma class using all optional attributes + ### to initialize one could give a p0 array (same shape as lower and upper) and a + ### sig0 value or use source solutions (as shown here) + deap_cma_optional = DeapCma( + lower=lb, + upper=ub, + evaluate_function=evaluate_function, + max_evals=1000, + p0=None, + sig0=None, + param_names=["a", "b", "c"], + learn_rate_factor=1, + damping_factor=1, + verbose=True, + plot_file="logbook_optional.png", + cma_params_dict={}, + source_solutions=get_source_solutions(lb=lb, ub=ub), + hard_bounds=True, + ) + + ### run the optimization, since max_evals was not defined during initialization of + ### the DeapCma instance, it has to be defined here + ### it automatically saves a plot file showing the loss over the generations + deap_cma_result = deap_cma.run(max_evals=1000) + + ### run the optimization with all optional attributes + deap_cma_optional_result = deap_cma_optional.run(verbose=False) + + ### print the best parameters and its loss, since we did not define the names of the + ### parameters during initialization of the DeapCma instance, the names are param0, + ### param1, param2, also print everything that is in the dict returned by the run + best_param_dict = { + param_name: deap_cma_result[param_name] + for param_name in ["param0", "param1", "param2"] + } + print("\nFirst (minimal) optimization:") + print(f"Dict from run function contains: {list(deap_cma_result.keys())}") + print(f"Best parameters: {best_param_dict}") + print(f"Loss of best parameters: {deap_cma_result['best_fitness']}\n") + + ### print the same for the second optimization + best_param_dict = { + param_name: deap_cma_optional_result[param_name] + for param_name in ["a", "b", "c"] + } + print("Second optimization (with all optional attributes):") + print(f"Dict from run function contains: {list(deap_cma_optional_result.keys())}") + print(f"Best 
parameters: {best_param_dict}")
+    print(f"Loss of best parameters: {deap_cma_optional_result['best_fitness']}")
+
+    return 1
+
+
+if __name__ == "__main__":
+    main()
+```
+
+## Console Output
+```console
+$ python deap_cma.py
+ANNarchy 4.7 (4.7.3b) on linux (posix).
+Starting optimization with:
+centroid: [4.57628308 7.39815401 1.30602549], (scaled: [0.72881415 0.69592616 0.2612051 ])
+sigma: [2.90435163 3.63043954 0.72608791], (scaled: 0.14521758155307307)
+lambda (The number of children to produce at each generation): 7
+mu (The number of parents to keep from the lambda children): 3
+weights: [0.63704257 0.28457026 0.07838717]
+mueff: 2.0286114646100617
+ccum (Cumulation constant for covariance matrix.): 0.5714285714285714
+cs (Cumulation constant for step-size): 0.5017818438926943
+ccov1 (Learning rate for rank-one update): 0.09747248265066792
+ccovmu (Learning rate for rank-mu update): 0.038593139193450914
+damps (Damping for step-size): 1.5017818438926942
+ 24%|██████████████████████████████▏                                                                                                | 238/1000 [00:00<00:00, 1265.35gen/s, best loss: 0.00000]
+ 17%|█████████████████████                                                                                                          | 166/1000 [00:00<00:00, 1369.98gen/s, best loss: 4.00000]
+
+First (minimal) optimization:
+Dict from run function contains: ['param0', 'param1', 'param2', 'logbook', 'deap_pop', 'best_fitness']
+Best parameters: {'param0': 3.0, 'param1': 7.0, 'param2': -2.0}
+Loss of best parameters: 0.0
+
+Second optimization (with all optional attributes):
+Dict from run function contains: ['a', 'b', 'c', 'logbook', 'deap_pop', 'best_fitness']
+Best parameters: {'a': 3.000000004587328, 'b': 6.999999980571925, 'c': 0.0}
+Loss of best parameters: 4.0
+```
\ No newline at end of file
diff --git a/docs/main/generate_models.md b/docs/main/generate_models.md
index b2f1956..5cfb3d4 100644
--- a/docs/main/generate_models.md
+++ b/docs/main/generate_models.md
@@ -5,26 +5,26 @@ One can create a CompNeuroPy-model using the `CompNeuroModel` class. The `CompNe
 2. 
**model creation**: create the ANNarchy objects (populations, projections), i.e., run the `model_creation function` 3. **model compilation**: compile all created models -## Example -
from CompNeuroPy import CompNeuroModel
-my_model = CompNeuroModel(model_creation_function=create_model,  ### the most important part, this function creates the model (populations, projections)
-                          model_kwargs={'a':1, 'b':2},           ### define the two arguments a and b of function create_model
-                          name='my_model',                       ### you can give the model a name
-                          description='my simple example model', ### you can give the model a description
-                          do_create=True,                        ### create the model directly
-                          do_compile=True,                       ### let the model (and all models created before) compile directly
-                          compile_folder_name='my_model')        ### name of the saved compilation folder
-
+!!! example +
from CompNeuroPy import CompNeuroModel
+    my_model = CompNeuroModel(model_creation_function=create_model,  ### the most important part, this function creates the model (populations, projections)
+                            model_kwargs={'a':1, 'b':2},           ### define the two arguments a and b of function create_model
+                            name='my_model',                       ### you can give the model a name
+                            description='my simple example model', ### you can give the model a description
+                            do_create=True,                        ### create the model directly
+                            do_compile=True,                       ### let the model (and all models created before) compile directly
+                            compile_folder_name='my_model')        ### name of the saved compilation folder
+    
-The following function could be the corresponding model_creation_function: -
from ANNarchy import Population, Izhikevich
-def create_model(a, b):
-    pop = Population(geometry=a, neuron=Izhikevich, name='Izh_pop_a') ### first population, size a
-    pop.b = 0                                                         ### some parameter adjustment
-    Population(geometry=b, neuron=Izhikevich, name='Izh_pop_b')       ### second population, size b
-
-Here, two populations are created (both use built-in Izhikevich neuron model of ANNarchy). The function does not require a return value. It is important that all populations and projections have unique names. + The following function could be the corresponding model_creation_function: +
from ANNarchy import Population, Izhikevich
+    def create_model(a, b):
+        pop = Population(geometry=a, neuron=Izhikevich, name='Izh_pop_a') ### first population, size a
+        pop.b = 0                                                         ### some parameter adjustment
+        Population(geometry=b, neuron=Izhikevich, name='Izh_pop_b')       ### second population, size b
+    
+ Here, two populations are created (both use built-in Izhikevich neuron model of ANNarchy). The function does not require a return value. It is important that all populations and projections have unique names. -A more detailed example is available in the [Examples](../examples/generate_models.md). + A more detailed example is available in the [Examples](../examples/generate_models.md). ::: CompNeuroPy.generate_model.CompNeuroModel \ No newline at end of file diff --git a/docs/main/generate_simulations.md b/docs/main/generate_simulations.md index 1fc6fa8..69348af 100644 --- a/docs/main/generate_simulations.md +++ b/docs/main/generate_simulations.md @@ -1,38 +1,38 @@ ## Introduction A CompNeuroPy-simulation can be created using the [`CompNeuroSim`](#CompNeuroPy.generate_simulation.CompNeuroSim) class. Similar to the [`CompNeuroModel`](generate_models.md#CompNeuroPy.generate_model.CompNeuroModel) class, a function must be defined that contains the actual simulation (the _simulation_function_) and the [`CompNeuroSim`](#CompNeuroPy.generate_simulation.CompNeuroSim) object adds a clear framework. A [`CompNeuroSim`](#CompNeuroPy.generate_simulation.CompNeuroSim) is first initialized and can then be run multiple times. 
-## Example: -```python -from CompNeuroPy import CompNeuroSim -my_simulation = CompNeuroSim(simulation_function=some_simulation, ### the most important part, this function defines the simulation - simulation_kwargs={'pop':pop1, 'duration':100}, ### define the two arguments pop and duration of simulation_function - name='my_simulation', ### you can give the simulation a name - description='my simple example simulation', ### you can give the simulation a description - requirements=[req], ### a list of requirements for the simulation (here only a single requirement) - kwargs_warning=True, ### should a warning be printed if simulation kwargs change in future runs - monitor_object = mon) ### the Monitors object which is used to record variables -``` - -A possible _simulation_function_ could be: -```python -def some_simulation(pop, duration=1): - get_population(pop).a = 5 ### adjust paramter a of pop - get_population(pop).b = 5 ### adjust paramter b of pop - simulate(duration) ### simulate the duration in ms - - ### return some info - ### will later be accessible for each run - return {'paramter a': a, 'paramter b': b, 'a_x_duration': a*duration} -``` - -And a corresponding requirement could be: -```python -from CompNeuroPy import ReqPopHasAttr -req = {'req':ReqPopHasAttr, 'pop':pop1, 'attr':['a', 'b']} -``` -Here, one checks if the population _pop1_ contains the attributes _a_ and _b_. The [`ReqPopHasAttr`](../additional/simulation_requirements.md#CompNeuroPy.simulation_requirements.ReqPopHasAttr) is a built-in requirements-class of CompNeuroPy (see below). - -A more detailed example is available in the [Examples](../examples/run_and_monitor_simulations.md). +!!! 
example + ```python + from CompNeuroPy import CompNeuroSim + my_simulation = CompNeuroSim(simulation_function=some_simulation, ### the most important part, this function defines the simulation + simulation_kwargs={'pop':pop1, 'duration':100}, ### define the two arguments pop and duration of simulation_function + name='my_simulation', ### you can give the simulation a name + description='my simple example simulation', ### you can give the simulation a description + requirements=[req], ### a list of requirements for the simulation (here only a single requirement) + kwargs_warning=True, ### should a warning be printed if simulation kwargs change in future runs + monitor_object = mon) ### the Monitors object which is used to record variables + ``` + + A possible _simulation_function_ could be: + ```python + def some_simulation(pop, duration=1): + get_population(pop).a = 5 ### adjust paramter a of pop + get_population(pop).b = 5 ### adjust paramter b of pop + simulate(duration) ### simulate the duration in ms + + ### return some info + ### will later be accessible for each run + return {'paramter a': a, 'paramter b': b, 'a_x_duration': a*duration} + ``` + + And a corresponding requirement could be: + ```python + from CompNeuroPy import ReqPopHasAttr + req = {'req':ReqPopHasAttr, 'pop':pop1, 'attr':['a', 'b']} + ``` + Here, one checks if the population _pop1_ contains the attributes _a_ and _b_. The [`ReqPopHasAttr`](../additional/simulation_requirements.md#CompNeuroPy.simulation_requirements.ReqPopHasAttr) is a built-in requirements-class of CompNeuroPy (see below). + + A more detailed example is available in the [Examples](../examples/run_and_monitor_simulations.md). ## Simulation information The function _simulation_info()_ returns a [`SimInfo`](#CompNeuroPy.generate_simulation.SimInfo) object which contains usefull information about the simulation runs (see below). 
The [`SimInfo`](#CompNeuroPy.generate_simulation.SimInfo) object also provides usefull analysis functions associated with specific simulation functions. Currently it provides the _get_current_arr()_ which returns arrays containing the input current for each time step of the built-in simulation functions _current_step()_, _current_stim()_, and _current_ramp()_. @@ -40,37 +40,37 @@ The function _simulation_info()_ returns a [`SimInfo`](#CompNeuroPy.generate_sim ## Simulation functions Just define a classic ANNarchy simulation in a function. Within the functions, the ANNarchy functions _get_population()_ and _get_projection()_ can be used to access the populations and projections using the population and projection names provided by a [`CompNeuroModel`](generate_models.md#CompNeuroPy.generate_model.CompNeuroModel). The return value of the simulation function can later be retrieved from the [`SimInfo`](#CompNeuroPy.generate_simulation.SimInfo) object (the _info_ attribute) in a list containing the return value for each run of the simulation. -### Example: -```python -from ANNarchy import simulate, get_population - -def current_step(pop, t1=500, t2=500, a1=0, a2=100): - """ - stimulates a given population in two periods with two input currents +!!! 
example + ```python + from ANNarchy import simulate, get_population + + def current_step(pop, t1=500, t2=500, a1=0, a2=100): + """ + stimulates a given population in two periods with two input currents + + pop: population name of population, which should be stimulated with input current + neuron model of population has to contain "I_app" as input current in pA + t1/t2: times in ms before/after current step + a1/a2: current amplitudes before/after current step in pA + """ - pop: population name of population, which should be stimulated with input current - neuron model of population has to contain "I_app" as input current in pA - t1/t2: times in ms before/after current step - a1/a2: current amplitudes before/after current step in pA - """ - - ### save prev input current - I_prev = get_population(pop).I_app - - ### first/pre current step simulation - get_population(pop).I_app = a1 - simulate(t1) - - ### second/post current step simulation - get_population(pop).I_app = a2 - simulate(t2) - - ### reset input current to previous value - get_population(pop).I_app = I_prev - - ### return some additional information which could be usefull - return {'duration':t1+t2} -``` + ### save prev input current + I_prev = get_population(pop).I_app + + ### first/pre current step simulation + get_population(pop).I_app = a1 + simulate(t1) + + ### second/post current step simulation + get_population(pop).I_app = a2 + simulate(t2) + + ### reset input current to previous value + get_population(pop).I_app = I_prev + + ### return some additional information which could be usefull + return {'duration':t1+t2} + ``` ## Requirements In order to perform simulations with models, the models must almost always fulfill certain requirements. For example, if the input current of a population is to be set, this population (or the neuron model) must of course have the corresponding variable. Such preconditions can be tested in advance with the `simulation_requirements` classes. 
They only need to contain a function _run()_ to test the requirements (if requirements are not met, cause an error). In CompNeuroPy predefined [`simulation_requirements`](../additional/simulation_requirements.md) classes are available (CompNeuroPy.simulation_requirements; currently only [`ReqPopHasAttr`](../additional/simulation_requirements.md#CompNeuroPy.simulation_requirements.ReqPopHasAttr)). In the [`CompNeuroSim`](#CompNeuroPy.generate_simulation.CompNeuroSim) class, the requirements are passed as arguments in a list (see above). Each requirement (list entry) must be defined as a dictionary with keys _req_ (the requirement class) and the arguments of the requirement class (e.g., _pop_ and _attr_ for the [`ReqPopHasAttr`](../additional/simulation_requirements.md#CompNeuroPy.simulation_requirements.ReqPopHasAttr)). diff --git a/docs/main/monitors_recordings.md b/docs/main/monitors_recordings.md index c5e1f66..98dd6fc 100644 --- a/docs/main/monitors_recordings.md +++ b/docs/main/monitors_recordings.md @@ -1,64 +1,64 @@ ## Create Monitors -CompNeuroPy provides a [`CompNeuroMonitors`](#CompNeuroPy.monitors.CompNeuroMonitors) class that can be used to easily create and control multiple ANNarchy monitors at once. To create a [`CompNeuroMonitors`](#CompNeuroPy.monitors.CompNeuroMonitors) object, all that is needed is a monitors_dictionary that defines which variables should be recorded for each model component. All populations and projections have to have unique names to work with [`CompNeuroMonitors`](#CompNeuroPy.monitors.CompNeuroMonitors). The keys of the monitor_dictionary are the names of the model components (in example below _"my_pop1"_ and _"my_pop2"_). The key can also include a recording period (the time between two recordings, given after a ";"), e.g. record the variables of _my_pop1_ only every 10 ms would look like this: _'pop;my_pop1;10':['v', 'spike']_. 
The default period is the time step of the simulation for populations and 1000 times the timestep for projections. The values of the monitor_dictionary are lists of all the variables that should be recorded from the corresponding components. The names of components (populations, projections) could be provided by a [`CompNeuroModel`](generate_models.md#CompNeuroPy.generate_model.CompNeuroModel). +CompNeuroPy provides a [`CompNeuroMonitors`](#CompNeuroPy.monitors.CompNeuroMonitors) class that can be used to easily create and control multiple ANNarchy monitors at once. To create a [`CompNeuroMonitors`](#CompNeuroPy.monitors.CompNeuroMonitors) object, all that is needed is a monitors_dictionary that defines which variables should be recorded for each model component. All populations and projections have to have unique names to work with [`CompNeuroMonitors`](#CompNeuroPy.monitors.CompNeuroMonitors). The keys of the monitor_dictionary are the names of the model components (in example below _"my_pop1"_ and _"my_pop2"_). The key can also include a recording period (the time between two recordings, given after a ";"), e.g. record the variables of _my_pop1_ only every 10 ms would look like this: _'my_pop1;10':['v', 'spike']_. The default period is the time step of the simulation for populations and 1000 times the timestep for projections. The values of the monitor_dictionary are lists of all the variables that should be recorded from the corresponding components. The names of components (populations, projections) could be provided by a [`CompNeuroModel`](generate_models.md#CompNeuroPy.generate_model.CompNeuroModel). -### Example: -Here the variables _v_ and _spike_ should be recorded of the population with the name _"my_pop1"_ and the variable _v_ should be recorded from the population with the name _"my_pop2"_: +!!! 
example + Here the variables _v_ and _spike_ should be recorded of the population with the name _"my_pop1"_ and the variable _v_ should be recorded from the population with the name _"my_pop2"_: -```python -from CompNeuroPy import CompNeuroMonitors -monitor_dictionary = {'my_pop1':['v', 'spike'], 'my_pop2':['v']} -mon = CompNeuroMonitors(monitor_dictionary) -``` + ```python + from CompNeuroPy import CompNeuroMonitors + monitor_dictionary = {'my_pop1':['v', 'spike'], 'my_pop2':['v']} + mon = CompNeuroMonitors(monitor_dictionary) + ``` -A full example is available in the [Examples](../examples/monitor_recordings.md). + A full example is available in the [Examples](../examples/monitor_recordings.md). ## Chunks and periods In CompNeuroPy, recordings are divided into so-called chunks and periods. Chunks are simulation sections that are separated by monitor resets (optionally also reset the model). A chunk can consist of several periods. A period represents the time span between the start and pause of a monitor recording. To divide a simulation into chunks and periods, a [`CompNeuroMonitors`](#CompNeuroPy.monitors.CompNeuroMonitors) object provides the three functions _start()_, _pause()_ and _reset()_. At the beginning of a simulation, the monitors do not start automatically which is why the _start()_ function must be called at least once. The _start()_ function can also be used to resume paused recordings. With the function _pause()_ recordings are paused. The function _reset()_ starts a new chunk for the recordings (the end of a chunk is also always the end of a period, i.e. the last period of the corresponding chunk). After calling _reset()_ the monitors remain in their current mode (active or paused). By default _reset()_ also resets the model to the compile status (time = 0) by calling the ANNarchy _reset()_ function and has the same arguments. If the argument _model_ is set to False, the ANNarchy _reset()_ function is not called and only a new chunk is created. 
-### Example: -```python -### first chunk, one period -simulate(100) # 100 ms not recorded -mon.start() # start all monitors -simulate(100) # 100 ms recorded +!!! example + ```python + ### first chunk, one period + simulate(100) # 100 ms not recorded + mon.start() # start all monitors + simulate(100) # 100 ms recorded -### second chunk, two periods -mon.reset() # model reset, beginning of new chunk -simulate(100) # 100 ms recorded (monitors were active before reset --> still active) -mon.pause() # pause all monitors -simulate(100) # 100 ms not recorded -mon.start() # start all monitors -simulate(100) # 100 ms recorded -``` + ### second chunk, two periods + mon.reset() # model reset, beginning of new chunk + simulate(100) # 100 ms recorded (monitors were active before reset --> still active) + mon.pause() # pause all monitors + simulate(100) # 100 ms not recorded + mon.start() # start all monitors + simulate(100) # 100 ms recorded + ``` ## Get recordings The recordings can be obtained from the [`CompNeuroMonitors`](#CompNeuroPy.monitors.CompNeuroMonitors) object using the _get_recordings()_ function. This returns a list of dictionaries (one for each chunk). The dictionaries contain the recorded data defined with the monitor_dictionary at the [`CompNeuroMonitors`](#CompNeuroPy.monitors.CompNeuroMonitors) initialization. In the recordings dictionaries the keys have the following structure: ";variable"; the corresponding dictionary values are the recordings of the respective variable. The dictionaries always contain the time step of the simulation (key = _"dt"_), the periods (time between recorded values) for each component (key = _";period"_) and the attributes of each component (key = _";parameter_dict"_). -### Example: -```python -recordings = mon.get_recordings() -y1 = recordings[0]['my_pop1;v'] ### variable v of my_pop1 from 1st chunk -y2 = recordings[1]['my_pop1;v'] ### variable v of my_pop1 from 2nd chunk -``` +!!! 
example + ```python + recordings = mon.get_recordings() + y1 = recordings[0]['my_pop1;v'] ### variable v of my_pop1 from 1st chunk + y2 = recordings[1]['my_pop1;v'] ### variable v of my_pop1 from 2nd chunk + ``` ## Get recording times In addition to the recordings themselves, recording times can also be obtained from the [`CompNeuroMonitors`](#CompNeuroPy.monitors.CompNeuroMonitors) object, which is very useful for later analyses. With the function _get_recording_times()_ of the [`CompNeuroMonitors`](#CompNeuroPy.monitors.CompNeuroMonitors) object a [`RecordingTimes`](#CompNeuroPy.monitors.RecordingTimes) object can be obtained. From the [`RecordingTimes`](#CompNeuroPy.monitors.RecordingTimes) object one can get time limits (in ms) and coresponding indizes for the recordings. -### Example: -```python -recording_times = mon.get_recording_times() -start_time = recording_times.time_lims(chunk=1, period=1)[0] ### 200 ms -start_idx = recording_times.idx_lims(chunk=1, period=1)[0] ### 1000, if dt == 0.1 -end_time = recording_times.time_lims(chunk=1, period=1)[1] ### 300 ms -end_idx = recording_times.idx_lims(chunk=1, period=1)[1] ### 2000 -``` +!!! 
example + ```python + recording_times = mon.get_recording_times() + start_time = recording_times.time_lims(chunk=1, period=1)[0] ### 200 ms + start_idx = recording_times.idx_lims(chunk=1, period=1)[0] ### 1000, if dt == 0.1 + end_time = recording_times.time_lims(chunk=1, period=1)[1] ### 300 ms + end_idx = recording_times.idx_lims(chunk=1, period=1)[1] ### 2000 + ``` -You can combine the recordings of both chunks of the example simulation shown above into a single time array and a single value array using the [`RecordingTimes`](#CompNeuroPy.monitors.RecordingTimes) object's combine_chunks function: -```python -time_arr, value_arr = recording_times.combine_chunks(recordings, 'my_pop1;v', 'consecutive') -``` + You can combine the recordings of both chunks of the example simulation shown above into a single time array and a single value array using the [`RecordingTimes`](#CompNeuroPy.monitors.RecordingTimes) object's combine_chunks function: + ```python + time_arr, value_arr = recording_times.combine_chunks(recordings, 'my_pop1;v', 'consecutive') + ``` ## Plot recordings To get a quick overview of the recordings, CompNeuroPy provides the [`PlotRecordings`](../additional/analysis_functions.md#CompNeuroPy.analysis_functions.PlotRecordings) class. diff --git a/docs/main/optimize_neuron.md b/docs/main/optimize_neuron.md index 6dca039..bbb5ffd 100644 --- a/docs/main/optimize_neuron.md +++ b/docs/main/optimize_neuron.md @@ -23,25 +23,26 @@ Used optimization methods: * Singh, G. S., & Acerbi, L. (2023). PyBADS: Fast and robust black-box optimization in Python. arXiv preprint [arXiv:2306.15576](https://arxiv.org/abs/2306.15576). * Acerbi, L., & Ma, W. J. (2017). Practical Bayesian optimization for model fitting with Bayesian adaptive direct search. Advances in neural information processing systems, 30. 
[pdf](https://proceedings.neurips.cc/paper_files/paper/2017/file/df0aab058ce179e4f7ab135ed4e641a9-Paper.pdf) -### Example: -```python -opt = OptNeuron( - experiment=my_exp, - get_loss_function=get_loss, - variables_bounds=variables_bounds, - results_soll=experimental_data["results_soll"], - time_step=experimental_data["time_step"], - compile_folder_name="annarchy_opt_neuron_example", - neuron_model=my_neuron, - method="hyperopt", - record=["r"], -) -``` + +!!! example + ```python + opt = OptNeuron( + experiment=my_exp, + get_loss_function=get_loss, + variables_bounds=variables_bounds, + results_soll=experimental_data["results_soll"], + time_step=experimental_data["time_step"], + compile_folder_name="annarchy_opt_neuron_example", + neuron_model=my_neuron, + method="hyperopt", + record=["r"], + ) + ``` A full example is available in the [Examples](../examples/opt_neuron.md). ## Run the optimization -To run the optimization simply call the _run()_ function of the [`OptNeuron`](#CompNeuroPy.opt_neuron.OptNeuron) object. +To run the optimization simply call the [_run()_](optimize_neuron.md#CompNeuroPy.opt_neuron.OptNeuron.run) function of the [`OptNeuron`](#CompNeuroPy.opt_neuron.OptNeuron) object. This returns the optimized parameters and more. ## Define the experiment You have to define a [`CompNeuroExp`](define_experiment.md#CompNeuroPy.experiment.CompNeuroExp) object containing a _run()_ function. In the _run()_ function simulations and recordings are performed. @@ -60,77 +61,77 @@ You have to define a [`CompNeuroExp`](define_experiment.md#CompNeuroPy.experimen - do not call the functions _store_model_state()_ and _reset_model_state()_ of the [`CompNeuroExp`](define_experiment.md#CompNeuroPy.experiment.CompNeuroExp) class within the _run()_ function! -### Example: -```python -class my_exp(CompNeuroExp): - """ - Define an experiment by inheriting from CompNeuroExp. 
- - CompNeuroExp provides the attributes: - - monitors (CompNeuroMonitors): - a CompNeuroMonitors object to do recordings, define during init otherwise - None - data (dict): - a dictionary for storing any optional data - - and the functions: - reset(): - resets the model and monitors - results(): - returns a results object - """ - - def run(self, population_name): +!!! example + ```python + class my_exp(CompNeuroExp): """ - Do the simulations and recordings. - - To use the CompNeuroExp class, you need to define a run function which - does the simulations and recordings. The run function should return the - results object which can be obtained by calling self.results(). + Define an experiment by inheriting from CompNeuroExp. - For using the CompNeuroExp for OptNeuron, the run function should have - one argument which is the name of the population which is automatically created - by OptNeuron, containing a single neuron of the model which should be optimized. + CompNeuroExp provides the attributes: - Args: - population_name (str): - name of the population which contains a single neuron, this will be - automatically provided by opt_neuron + monitors (CompNeuroMonitors): + a CompNeuroMonitors object to do recordings, define during init otherwise + None + data (dict): + a dictionary for storing any optional data - Returns: - results (CompNeuroExp._ResultsCl): - results object with attributes: - recordings (list): - list of recordings - recording_times (recording_times_cl): - recording times object - mon_dict (dict): - dict of recorded variables of the monitors - data (dict): - dict with optional data stored during the experiment + and the functions: + reset(): + resets the model and monitors + results(): + returns a results object """ - ### you have to start monitors within the run function, otherwise nothing will - ### be recorded - self.monitors.start() - - ### run the simulation, if you reset the monitors/model the model_state argument - ### has to be True (Default) 
- ... - simulate(100) - self.reset() - ... - - ### optional: store anything you want in the data dict. For example infomration - ### about the simulations. This is not used for the optimization but can be - ### retrieved after the optimization is finished - self.data["sim"] = sim_step.simulation_info() - self.data["population_name"] = population_name - self.data["time_step"] = dt() - - ### return results, use the object's self.results() - return self.results() -``` + + def run(self, population_name): + """ + Do the simulations and recordings. + + To use the CompNeuroExp class, you need to define a run function which + does the simulations and recordings. The run function should return the + results object which can be obtained by calling self.results(). + + For using the CompNeuroExp for OptNeuron, the run function should have + one argument which is the name of the population which is automatically created + by OptNeuron, containing a single neuron of the model which should be optimized. + + Args: + population_name (str): + name of the population which contains a single neuron, this will be + automatically provided by opt_neuron + + Returns: + results (CompNeuroExp._ResultsCl): + results object with attributes: + recordings (list): + list of recordings + recording_times (recording_times_cl): + recording times object + mon_dict (dict): + dict of recorded variables of the monitors + data (dict): + dict with optional data stored during the experiment + """ + ### you have to start monitors within the run function, otherwise nothing will + ### be recorded + self.monitors.start() + + ### run the simulation, if you reset the monitors/model the model_state argument + ### has to be True (Default) + ... + simulate(100) + self.reset() + ... + + ### optional: store anything you want in the data dict. For example infomration + ### about the simulations. 
This is not used for the optimization but can be + ### retrieved after the optimization is finished + self.data["sim"] = sim_step.simulation_info() + self.data["population_name"] = population_name + self.data["time_step"] = dt() + + ### return results, use the object's self.results() + return self.results() + ``` ## The get_loss_function The _get_loss_function_ must have two arguments. When this function is called during optimization, the first argument is always the _results_ object returned by the _experiment_, i.e. the results of the neuron you want to optimize. The second argument depends on whether you have specified _results_soll_, i.e. data to be reproduced by the _neuron_model_, or whether you have specified a _target_neuron_model_ whose results are to be reproduced by the _neuron_model_. Thus, the second argument is either _results_soll_ provided to the [`OptNeuron`](#CompNeuroPy.opt_neuron.OptNeuron) class during initialization or another _results_ object (returned by the [`CompNeuroExp`](define_experiment.md#CompNeuroPy.experiment.CompNeuroExp) _run_ function), generated with the _target_neuron_model_. @@ -138,45 +139,45 @@ The _get_loss_function_ must have two arguments. When this function is called du !!! warning You always have to work with the neuron rank 0 within the _get_loss_function_! -### Example: -In this example we assume, that _results_soll_ was provided during initialization of the [`OptNeuron`](#CompNeuroPy.opt_neuron.OptNeuron) class (no _target_neuron_model_ used). -```python -def get_loss(results_ist: CompNeuroExp._ResultsCl, results_soll): - """ - Function which has to have the arguments results_ist and results_soll and should - calculates and return the loss. This structure is needed for the OptNeuron class. 
- - Args: - results_ist (object): - the results object returned by the run function of experiment (see above) - results_soll (any): - the target data directly provided to OptNeuron during initialization - - Returns: - loss (float or list of floats): - the loss - """ - ### get the recordings and other important things for calculating the loss from - ### results_ist, we do not use all available information here, but you could - rec_ist = results_ist.recordings - pop_ist = results_ist.data["population_name"] - neuron = 0 - - ### get the data for calculating the loss from the results_soll - r_target_0 = results_soll[0] - r_target_1 = results_soll[1] - - ### get the data for calculating the loss from the recordings - r_ist_0 = rec_ist[0][f"{pop_ist};r"][:, neuron] - r_ist_1 = rec_ist[1][f"{pop_ist};r"][:, neuron] - - ### calculate the loss, e.g. the root mean squared error - rmse1 = rmse(r_target_0, r_ist_0) - rmse2 = rmse(r_target_1, r_ist_1) - - ### return the loss, one can return a singel value or a list of values which will - ### be summed during the optimization - return [rmse1, rmse2] -``` +!!! example + In this example we assume, that _results_soll_ was provided during initialization of the [`OptNeuron`](#CompNeuroPy.opt_neuron.OptNeuron) class (no _target_neuron_model_ used). + ```python + def get_loss(results_ist: CompNeuroExp._ResultsCl, results_soll): + """ + Function which has to have the arguments results_ist and results_soll and should + calculates and return the loss. This structure is needed for the OptNeuron class. 
+ + Args: + results_ist (object): + the results object returned by the run function of experiment (see above) + results_soll (any): + the target data directly provided to OptNeuron during initialization + + Returns: + loss (float or list of floats): + the loss + """ + ### get the recordings and other important things for calculating the loss from + ### results_ist, we do not use all available information here, but you could + rec_ist = results_ist.recordings + pop_ist = results_ist.data["population_name"] + neuron = 0 + + ### get the data for calculating the loss from the results_soll + r_target_0 = results_soll[0] + r_target_1 = results_soll[1] + + ### get the data for calculating the loss from the recordings + r_ist_0 = rec_ist[0][f"{pop_ist};r"][:, neuron] + r_ist_1 = rec_ist[1][f"{pop_ist};r"][:, neuron] + + ### calculate the loss, e.g. the root mean squared error + rmse1 = rmse(r_target_0, r_ist_0) + rmse2 = rmse(r_target_1, r_ist_1) + + ### return the loss, one can return a singel value or a list of values which will + ### be summed during the optimization + return [rmse1, rmse2] + ``` ::: CompNeuroPy.opt_neuron.OptNeuron \ No newline at end of file diff --git a/mkdocs.yml b/mkdocs.yml index 814b765..2d05224 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -74,4 +74,5 @@ nav: - Define Experiments: 'examples/experiment.md' - DBS Simulator: 'examples/dbs.md' - Optimize a neuron model: 'examples/opt_neuron.md' + - Cma Optimization: 'examples/deap_cma.md' - License: 'license.md' \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 4cbae30..9a389af 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ build-backend = "setuptools.build_meta" [project] name = "CompNeuroPy" -version = "1.0.1" +version = "1.0.2" description = 'General package for computational neuroscience with ANNarchy.' 
readme = "README.md" requires-python = ">=3.10" diff --git a/src/CompNeuroPy/__init__.py b/src/CompNeuroPy/__init__.py index 2ba694e..d704cac 100644 --- a/src/CompNeuroPy/__init__.py +++ b/src/CompNeuroPy/__init__.py @@ -6,11 +6,9 @@ get_population_power_spectrum, get_power_spektrum_from_time_array, get_pop_rate, - plot_recordings, get_number_of_zero_decimals, get_number_of_decimals, sample_data_with_timestep, - time_data_add_nan, rmse, rsse, get_minimum, @@ -30,15 +28,14 @@ evaluate_expression_with_dict, VClampParamSearch, DeapCma, - interactive_plot, - data_obj, # TODO remove - my_linear_cmap_obj, # TODO remove - decision_tree, # TODO remove - node_cl, # TODO remove + InteractivePlot, efel_loss, + RNG, + find_x_bound, ) from CompNeuroPy.model_functions import ( compile_in_folder, + annarchy_compiled, get_full_model, cnp_clear, ) @@ -51,6 +48,7 @@ attribute_step, attr_ramp, increasing_attr, + SimulationEvents, ) from CompNeuroPy.system_functions import ( clear_dir, @@ -60,15 +58,16 @@ timing_decorator, run_script_parallel, create_data_raw_folder, + Logger, ) from CompNeuroPy.simulation_requirements import req_pop_attr, ReqPopHasAttr from CompNeuroPy.statistic_functions import anova_between_groups ### classes -from CompNeuroPy.monitors import Monitors, CompNeuroMonitors -from CompNeuroPy.experiment import Experiment, CompNeuroExp -from CompNeuroPy.generate_model import generate_model, CompNeuroModel -from CompNeuroPy.generate_simulation import generate_simulation, CompNeuroSim +from CompNeuroPy.monitors import CompNeuroMonitors +from CompNeuroPy.experiment import CompNeuroExp +from CompNeuroPy.generate_model import CompNeuroModel +from CompNeuroPy.generate_simulation import CompNeuroSim from CompNeuroPy.dbs import DBSstimulator ### modules diff --git a/src/CompNeuroPy/analysis_functions.py b/src/CompNeuroPy/analysis_functions.py index eb96744..9741955 100644 --- a/src/CompNeuroPy/analysis_functions.py +++ b/src/CompNeuroPy/analysis_functions.py @@ -1,14 +1,13 @@ 
import numpy as np import matplotlib.pyplot as plt from matplotlib.ticker import MaxNLocator -import matplotlib from ANNarchy import raster_plot, dt, inter_spike_interval, coefficient_of_variation import warnings from CompNeuroPy import system_functions as sf from CompNeuroPy import extra_functions as ef from CompNeuroPy.monitors import RecordingTimes +from CompNeuroPy.experiment import CompNeuroExp from scipy.interpolate import interp1d -from multiprocessing import Process from typingchecker import check_types @@ -723,788 +722,6 @@ def get_pop_rate( return (np.arange(t_start, t_start + duration, dt), ret) -@check_types() -def plot_recordings( - figname: str, - recordings: list, - recording_times: RecordingTimes, - chunk: int, - shape: tuple, - plan: list[str], - time_lim: None | tuple = None, - dpi: int = 300, -): - """ - Plots the recordings of a single chunk from recordings. Plotted variables are - specified in plan. - - Args: - figname (str): - path + name of figure (e.g. "figures/my_figure.png") - recordings (list): - a recordings list from CompNeuroPy obtained with the function - get_recordings() from a CompNeuroMonitors object. - recording_times (object): - recording_times object from CompNeuroPy obtained with the - function get_recording_times() from a CompNeuroMonitors object. - chunk (int): - which chunk of recordings should be used (the index of chunk) - shape (tuple): - Defines the subplot arrangement e.g. (3,2) = 3 rows, 2 columns - plan (list of strings): - Defines which recordings are plotted in which subplot and how. - Entries of the list have the structure: - "subplot_nr;model_component_name;variable_to_plot;format", - e.g. "1,my_pop1;v;line". - mode: defines how the data is plotted, available modes: - - for spike data: raster, mean, hybrid - - for other data: line, mean, matrix - - only for projection data: matrix_mean - time_lim (tuple, optional): - Defines the x-axis for all subplots. The list contains two - numbers: start and end time in ms. 
The times have to be - within the chunk. Default: None, i.e., time lims of chunk - dpi (int, optional): - The dpi of the saved figure. Default: 300 - """ - proc = Process( - target=_plot_recordings, - args=(figname, recordings, recording_times, chunk, shape, plan, time_lim, dpi), - ) - proc.start() - proc.join() - if proc.exitcode != 0: - quit() - - -def _plot_recordings( - figname: str, - recordings: list, - recording_times: RecordingTimes, - chunk: int, - shape: tuple, - plan: list[str], - time_lim: None | tuple, - dpi: int, -): - """ - Plots the recordings for the given recording_times specified in plan. - - Args: - figname (str): - path + name of figure (e.g. "figures/my_figure.png") - recordings (list): - a recordings list from CompNeuroPy obtained with the function - get_recordings() from a CompNeuroMonitors object. - recording_times (object): - recording_times object from CompNeuroPy obtained with the - function get_recording_times() from a CompNeuroMonitors object. - chunk (int): - which chunk of recordings should be used (the index of chunk) - shape (tuple): - Defines the subplot arrangement e.g. (3,2) = 3 rows, 2 columns - plan (list of strings): - Defines which recordings are plotted in which subplot and how. - Entries of the list have the structure: - "subplot_nr;model_component_name;variable_to_plot;format", - e.g. "1,my_pop1;v;line". - mode: defines how the data is plotted, available modes: - - for spike data: raster, mean, hybrid - - for other data: line, mean, matrix - - only for projection data: matrix_mean - time_lim (tuple): - Defines the x-axis for all subplots. The list contains two - numbers: start and end time in ms. The times have to be - within the chunk. - dpi (int): - The dpi of the saved figure. - """ - print(f"generate fig {figname}", end="... 
", flush=True) - recordings = recordings[chunk] - if isinstance(time_lim, type(None)): - time_lim = recording_times.time_lims(chunk=chunk) - start_time = time_lim[0] - end_time = time_lim[1] - compartment_list = [] - for plan_str in plan: - compartment = plan_str.split(";")[1] - if not (compartment in compartment_list): - compartment_list.append(compartment) - - ### get idx_lim for all compartments, in parallel check that there are no pauses - time_arr_dict = {} - time_step = recordings["dt"] - for compartment in compartment_list: - actual_period = recordings[f"{compartment};period"] - - time_arr_part = [] - - ### loop over periods - nr_periods = recording_times._get_nr_periods( - chunk=chunk, compartment=compartment - ) - for period in range(nr_periods): - ### get the time_lim and idx_lim of the period - time_lims = recording_times.time_lims( - chunk=chunk, compartment=compartment, period=period - ) - time_arr_part.append( - np.arange(time_lims[0], time_lims[1] + actual_period, actual_period) - ) - - time_arr_dict[compartment] = np.concatenate(time_arr_part) - - plt.figure(figsize=([6.4 * shape[1], 4.8 * shape[0]])) - for subplot in plan: - try: - nr, part, variable, mode = subplot.split(";") - nr = int(nr) - style = "" - except: - try: - nr, part, variable, mode, style = subplot.split(";") - nr = int(nr) - except: - print( - '\nERROR plot_recordings: for each subplot give plan-string as: "nr;part;variable;mode" or "nr;part;variable;mode;style" if style is available!\nWrong string: ' - + subplot - + "\n" - ) - quit() - try: - ### check if variable is equation - variable_is_equation = ( - "+" in variable or "-" in variable or "*" in variable or "/" in variable - ) - if variable_is_equation: - ### evalueate the equation - value_dict = {} - for rec_key, rec_val in recordings.items(): - if rec_key is f"{part};parameter_dict": - continue - if ";" in rec_key: - rec_var_name = rec_key.split(";")[1] - else: - rec_var_name = rec_key - value_dict[rec_var_name] = rec_val - 
for param_key, param_val in recordings[ - f"{part};parameter_dict" - ].items(): - value_dict[param_key] = param_val - ### evaluate - evaluated_variable = ef.evaluate_expression_with_dict( - expression=variable, value_dict=value_dict - ) - ### add the evaluated variable to the recordings - recordings[f"{part};{variable}"] = evaluated_variable - - ### set data - data = recordings[f"{part};{variable}"] - except: - print( - "\nWARNING plot_recordings: data", - ";".join([part, variable]), - "not in recordings\n", - ) - plt.subplot(shape[0], shape[1], nr) - plt.text( - 0.5, - 0.5, - " ".join([part, variable]) + " not available", - va="center", - ha="center", - ) - continue - - plt.subplot(shape[0], shape[1], nr) - if (variable == "spike" or variable == "axon_spike") and ( - mode == "raster" or mode == "single" - ): # "single" only for down compatibility - nr_neurons = len(list(data.keys())) - t, n = my_raster_plot(data) - t = t * time_step # convert time steps into ms - mask = ((t >= start_time).astype(int) * (t <= end_time).astype(int)).astype( - bool - ) - if mask.size == 0: - plt.title("Spikes " + part) - print( - "\nWARNING plot_recordings: data", - ";".join([part, variable]), - "does not contain any spikes in the given time interval.\n", - ) - plt.text( - 0.5, - 0.5, - " ".join([part, variable]) + " does not contain any spikes.", - va="center", - ha="center", - ) - else: - if np.unique(n).size == 1: - marker, size = ["|", 3000] - else: - marker, size = [".", 3] - if style != "": - color = style - else: - color = "k" - - plt.scatter( - t[mask], n[mask], color=color, marker=marker, s=size, linewidth=0.1 - ) - plt.xlim(start_time, end_time) - plt.ylim(0 - 0.5, nr_neurons - 0.5) - plt.xlabel("time [ms]") - plt.ylabel("# neurons") - plt.title("Spikes " + part) - elif (variable == "spike" or variable == "axon_spike") and mode == "mean": - time_arr, firing_rate = get_pop_rate( - spikes=data, - t_start=start_time, - t_end=end_time, - time_step=time_step, - ) - 
plt.plot(time_arr, firing_rate, color="k") - plt.xlim(start_time, end_time) - plt.xlabel("time [ms]") - plt.ylabel("Mean firing rate [Hz]") - plt.title("Mean firing rate " + part) - elif (variable == "spike" or variable == "axon_spike") and mode == "hybrid": - nr_neurons = len(list(data.keys())) - t, n = my_raster_plot(data) - t = t * time_step # convert time steps into ms - mask = ((t >= start_time).astype(int) * (t <= end_time).astype(int)).astype( - bool - ) - if mask.size == 0: - plt.title("Spikes " + part) - print( - "\nWARNING plot_recordings: data", - ";".join([part, variable]), - "does not contain any spikes in the given time interval.\n", - ) - plt.text( - 0.5, - 0.5, - " ".join([part, variable]) + " does not contain any spikes.", - va="center", - ha="center", - ) - else: - if np.unique(n).size == 1: - marker, size = ["|", np.sqrt(3000)] - else: - marker, size = [".", np.sqrt(3)] - - plt.plot( - t[mask], n[mask], f"k{marker}", markersize=size, markeredgewidth=0.1 - ) - plt.ylabel("# neurons") - plt.ylim(0 - 0.5, nr_neurons - 0.5) - ax = plt.gca().twinx() - time_arr, firing_rate = get_pop_rate( - spikes=data, - t_start=start_time, - t_end=end_time, - time_step=time_step, - ) - ax.plot( - time_arr, - firing_rate, - color="r", - ) - plt.ylabel("Mean firing rate [Hz]", color="r") - ax.tick_params(axis="y", colors="r") - plt.xlim(start_time, end_time) - plt.xlabel("time [ms]") - plt.title("Activity " + part) - elif (variable != "spike" and variable != "axon_spike") and mode == "line": - if len(data.shape) == 1: - plt.plot(time_arr_dict[part], data, color="k") - plt.title(f"Variable {variable} of {part} (1)") - elif len(data.shape) == 2 and isinstance(data[0, 0], list) is not True: - ### population: data[time,neurons] - for neuron in range(data.shape[1]): - # in case of gaps file time gaps and add nan to data TODO also for other plots - plot_x, plot_y = time_data_add_nan( - time_arr_dict[part], data[:, neuron] - ) - - plt.plot( - plot_x, - plot_y, - color="k", - 
) - plt.title(f"Variable {variable} of {part} ({data.shape[1]})") - elif len(data.shape) == 3 or ( - len(data.shape) == 2 and isinstance(data[0, 0], list) is True - ): - if len(data.shape) == 3: - ### projection data: data[time, postneurons, preneurons] - for post_neuron in range(data.shape[1]): - for pre_neuron in range(data.shape[2]): - plt.plot( - time_arr_dict[part], - data[:, post_neuron, pre_neuron], - color="k", - ) - else: - ### data[time, postneurons][preneurons] (with different number of preneurons) - for post_neuron in range(data.shape[1]): - for pre_neuron in range(len(data[0, post_neuron])): - plt.plot( - time_arr_dict[part], - np.array( - [ - data[t, post_neuron][pre_neuron] - for t in range(data.shape[0]) - ] - ), - color="k", - ) - - plt.title(f"Variable {variable} of {part} ({data.shape[1]})") - else: - print( - "\nERROR plot_recordings: shape not accepted,", - ";".join([part, variable]), - "\n", - ) - plt.xlim(start_time, end_time) - plt.xlabel("time [ms]") - elif (variable != "spike" and variable != "axon_spike") and mode == "mean": - if len(data.shape) == 1: - plt.plot(time_arr_dict[part], data, color="k") - plt.title(f"Variable {variable} of {part} (1)") - elif len(data.shape) == 2 and isinstance(data[0, 0], list) is not True: - ### population: data[time,neurons] - nr_neurons = data.shape[1] - data = np.mean(data, 1) - plt.plot( - time_arr_dict[part], - data[:], - color="k", - ) - plt.title(f"Variable {variable} of {part} ({nr_neurons}, mean)") - elif len(data.shape) == 3 or ( - len(data.shape) == 2 and isinstance(data[0, 0], list) is True - ): - if len(data.shape) == 3: - ### projection data: data[time, postneurons, preneurons] - for post_neuron in range(data.shape[1]): - plt.plot( - time_arr_dict[part], - np.mean(data[:, post_neuron, :], 1), - color="k", - ) - else: - ### data[time, postneurons][preneurons] (with different number of preneurons) - for post_neuron in range(data.shape[1]): - avg_data = [] - for pre_neuron in range(len(data[0, 
post_neuron])): - avg_data.append( - np.array( - [ - data[t, post_neuron][pre_neuron] - for t in range(data.shape[0]) - ] - ) - ) - plt.plot( - time_arr_dict[part], - np.mean(avg_data, 0), - color="k", - ) - - plt.title( - f"Variable {variable} of {part}, mean for {data.shape[1]} post neurons" - ) - else: - print( - "\nERROR plot_recordings: shape not accepted,", - ";".join([part, variable]), - "\n", - ) - plt.xlim(start_time, end_time) - plt.xlabel("time [ms]") - - elif ( - variable != "spike" and variable != "axon_spike" - ) and mode == "matrix_mean": - if len(data.shape) == 3 or ( - len(data.shape) == 2 and isinstance(data[0, 0], list) is True - ): - if len(data.shape) == 3: - ### average over pre neurons --> get 2D array [time, postneuron] - data_avg = np.mean(data, 2) - - ### after cerating 2D array --> same procedure as for populations - ### get the times and data between time_lims - mask = ( - (time_arr_dict[part] >= start_time).astype(int) - * (time_arr_dict[part] <= end_time).astype(int) - ).astype(bool) - time_arr = time_arr_dict[part][mask] - data_arr = data_avg[mask, :] - - ### check with the actual_period and the times array if there is data missing - ### from time_lims and actual period opne should get all times at which data points should be - actual_period = recordings[f"{part};period"] - actual_start_time = ( - np.ceil(start_time / actual_period) * actual_period - ) - actual_end_time = ( - np.ceil(end_time / actual_period - 1) * actual_period - ) - soll_times = np.arange( - actual_start_time, - actual_end_time + actual_period, - actual_period, - ) - - ### check if there are time points, where data is missing - plot_data_arr = np.empty((soll_times.size, data_arr.shape[1])) - plot_data_arr[:] = None - for time_point_idx, time_point in enumerate(soll_times): - if time_point in time_arr: - ### data at time point is available --> use data - idx_available_data = time_arr == time_point - plot_data_arr[time_point_idx, :] = data_arr[ - idx_available_data, : 
- ] - ### if data is not available it stays none - - vmin = np.nanmin(plot_data_arr) - vmax = np.nanmax(plot_data_arr) - - masked_array = np.ma.array( - plot_data_arr.T, mask=np.isnan(plot_data_arr.T) - ) - cmap = matplotlib.cm.viridis - cmap.set_bad("red", 1.0) - - plt.title( - f"Variable {variable} of {part} ({data.shape[1]}) [{ef.sci(vmin)}, {ef.sci(vmax)}]" - ) - - plt.gca().imshow( - masked_array, - aspect="auto", - vmin=vmin, - vmax=vmax, - extent=[ - np.min(soll_times) - 0.5, - np.max(soll_times) - 0.5, - data.shape[1] - 0.5, - -0.5, - ], - cmap=cmap, - interpolation="none", - ) - if data.shape[1] == 1: - plt.yticks([0]) - else: - plt.gca().yaxis.set_major_locator(MaxNLocator(integer=True)) - plt.xlabel("time [ms]") - - else: - ### data[time, postneurons][preneurons] (with different number of preneurons) - ### average over pre neurons --> get 2D array [time, postneuron] - data_avg = np.empty((data.shape[0], data.shape[1])) - for post_neuron in range(data.shape[1]): - avg_post = [] - for pre_neuron in range(len(data[0, post_neuron])): - avg_post.append( - np.array( - [ - data[t, post_neuron][pre_neuron] - for t in range(data.shape[0]) - ] - ) - ) - data_avg[:, post_neuron] = np.mean(avg_post, 0) - - ### after cerating 2D array --> same procedure as for populations - ### get the times and data between time_lims - mask = ( - (time_arr_dict[part] >= start_time).astype(int) - * (time_arr_dict[part] <= end_time).astype(int) - ).astype(bool) - time_arr = time_arr_dict[part][mask] - data_arr = data_avg[mask, :] - - ### check with the actual_period and the times array if there is data missing - ### from time_lims and actual period opne should get all times at which data points should be - actual_period = recordings[f"{part};period"] - actual_start_time = ( - np.ceil(start_time / actual_period) * actual_period - ) - actual_end_time = ( - np.ceil(end_time / actual_period - 1) * actual_period - ) - soll_times = np.arange( - actual_start_time, - actual_end_time + 
actual_period, - actual_period, - ) - - ### check if there are time points, where data is missing - plot_data_arr = np.empty((soll_times.size, data_arr.shape[1])) - plot_data_arr[:] = None - for time_point_idx, time_point in enumerate(soll_times): - if time_point in time_arr: - ### data at time point is available --> use data - idx_available_data = time_arr == time_point - plot_data_arr[time_point_idx, :] = data_arr[ - idx_available_data, : - ] - ### if data is not available it stays none - - vmin = np.nanmin(plot_data_arr) - vmax = np.nanmax(plot_data_arr) - - masked_array = np.ma.array( - plot_data_arr.T, mask=np.isnan(plot_data_arr.T) - ) - cmap = matplotlib.cm.viridis - cmap.set_bad("red", 1.0) - - plt.title( - f"Variable {variable} of {part} ({data.shape[1]}) [{ef.sci(vmin)}, {ef.sci(vmax)}]" - ) - - plt.gca().imshow( - masked_array, - aspect="auto", - vmin=vmin, - vmax=vmax, - extent=[ - np.min(soll_times) - 0.5, - np.max(soll_times) - 0.5, - data.shape[1] - 0.5, - -0.5, - ], - cmap=cmap, - interpolation="none", - ) - if data.shape[1] == 1: - plt.yticks([0]) - else: - plt.gca().yaxis.set_major_locator(MaxNLocator(integer=True)) - plt.xlabel("time [ms]") - - plt.title( - f"Variable {variable} of {part}, mean for {data.shape[1]} post neurons [{ef.sci(vmin)}, {ef.sci(vmax)}]" - ) - else: - print( - "\nERROR plot_recordings: shape not accepted,", - ";".join([part, variable]), - "\n", - ) - plt.xlim(start_time, end_time) - plt.xlabel("time [ms]") - - elif (variable != "spike" and variable != "axon_spike") and mode == "matrix": - # data[start_step:end_step,neuron] - if len(data.shape) == 2 and isinstance(data[0, 0], list) is not True: - ### data from population [times,neurons] - ### get the times and data between time_lims - mask = ( - (time_arr_dict[part] >= start_time).astype(int) - * (time_arr_dict[part] <= end_time).astype(int) - ).astype(bool) - - time_decimals = get_number_of_decimals(time_step) - - time_arr = np.round(time_arr_dict[part][mask], 
time_decimals) - data_arr = data[mask, :] - - ### check with the actual_period and the times array if there is data missing - ### from time_lims and actual period opne should get all times at which data points should be - actual_period = recordings[f"{part};period"] - actual_start_time = np.ceil(start_time / actual_period) * actual_period - actual_end_time = np.ceil(end_time / actual_period - 1) * actual_period - soll_times = np.round( - np.arange( - actual_start_time, - actual_end_time + actual_period, - actual_period, - ), - time_decimals, - ) - - ### check if there are time points, where data is missing - plot_data_arr = np.empty((soll_times.size, data_arr.shape[1])) - plot_data_arr[:] = None - for time_point_idx, time_point in enumerate(soll_times): - if time_point in time_arr: - ### data at time point is available --> use data - idx_available_data = time_arr == time_point - plot_data_arr[time_point_idx, :] = data_arr[ - idx_available_data, : - ] - ### if data is not available it stays none - - vmin = np.nanmin(plot_data_arr) - vmax = np.nanmax(plot_data_arr) - - masked_array = np.ma.array( - plot_data_arr.T, mask=np.isnan(plot_data_arr.T) - ) - cmap = matplotlib.cm.viridis - cmap.set_bad("red", 1.0) - - plt.title( - f"Variable {part} {variable} ({data.shape[1]}) [{ef.sci(vmin)}, {ef.sci(vmax)}]" - ) - - plt.gca().imshow( - masked_array, - aspect="auto", - vmin=vmin, - vmax=vmax, - extent=[ - np.min(soll_times) - 0.5, - np.max(soll_times) - 0.5, - data.shape[1] - 0.5, - -0.5, - ], - cmap=cmap, - interpolation="none", - ) - if data.shape[1] == 1: - plt.yticks([0]) - else: - plt.gca().yaxis.set_major_locator(MaxNLocator(integer=True)) - plt.xlabel("time [ms]") - elif len(data.shape) == 3 or ( - len(data.shape) == 2 and isinstance(data[0, 0], list) is True - ): - ### data from projection - if len(data.shape) == 3: - ### projection data: data[time, postneurons, preneurons] - ### create a 2D array from the 3D array - data_resh = data.reshape( - (data.shape[0], 
int(data.shape[1] * data.shape[2])) - ) - data_split = np.split(data_resh, data.shape[1], axis=1) - ### separate the post_neurons by nan vectors - data_with_none = np.concatenate( - [ - np.concatenate( - [ - data_split[idx], - np.zeros((data.shape[0], 1)) * np.nan, - ], - axis=1, - ) - for idx in range(len(data_split)) - ], - axis=1, - )[:, :-1] - - ### after cerating 2D array --> same procedure as for populations - ### get the times and data between time_lims - mask = ( - (time_arr_dict[part] >= start_time).astype(int) - * (time_arr_dict[part] <= end_time).astype(int) - ).astype(bool) - time_arr = time_arr_dict[part][mask] - data_arr = data_with_none[mask, :] - - ### check with the actual_period and the times array if there is data missing - ### from time_lims and actual period opne should get all times at which data points should be - actual_period = recordings[f"{part};period"] - actual_start_time = ( - np.ceil(start_time / actual_period) * actual_period - ) - actual_end_time = ( - np.ceil(end_time / actual_period - 1) * actual_period - ) - soll_times = np.arange( - actual_start_time, - actual_end_time + actual_period, - actual_period, - ) - - ### check if there are time points, where data is missing - plot_data_arr = np.empty((soll_times.size, data_arr.shape[1])) - plot_data_arr[:] = None - for time_point_idx, time_point in enumerate(soll_times): - if time_point in time_arr: - ### data at time point is available --> use data - idx_available_data = time_arr == time_point - plot_data_arr[time_point_idx, :] = data_arr[ - idx_available_data, : - ] - ### if data is not available it stays none - - vmin = np.nanmin(plot_data_arr) - vmax = np.nanmax(plot_data_arr) - - masked_array = np.ma.array( - plot_data_arr.T, mask=np.isnan(plot_data_arr.T) - ) - cmap = matplotlib.cm.viridis - cmap.set_bad("red", 1.0) - - plt.title( - f"Variable {variable} of {part} ({data.shape[1]}) [{ef.sci(vmin)}, {ef.sci(vmax)}]" - ) - - plt.gca().imshow( - masked_array, - aspect="auto", - 
vmin=vmin, - vmax=vmax, - extent=[ - np.min(soll_times) - 0.5, - np.max(soll_times) - 0.5, - data.shape[1] - 0.5, - -0.5, - ], - cmap=cmap, - interpolation="none", - ) - if data.shape[1] == 1: - plt.yticks([0]) - else: - plt.gca().yaxis.set_major_locator(MaxNLocator(integer=True)) - plt.xlabel("time [ms]") - else: - ### data[time, postneurons][preneurons] (with different number of preneurons) - pass - - else: - print( - "\nERROR plot_recordings: shape not accepted,", - ";".join([part, variable]), - "\n", - ) - quit() - else: - print( - "\nERROR plot_recordings: mode", - mode, - "not available for variable", - variable, - "\n", - ) - quit() - plt.tight_layout() - - ### save plot - figname_parts = figname.split("/") - if len(figname_parts) > 1: - save_dir = "/".join(figname_parts[:-1]) - sf.create_dir(save_dir) - plt.savefig(figname, dpi=dpi) - plt.close() - print("Done") - - def get_number_of_zero_decimals(nr): """ For numbers which are smaller than zero get the number of digits after the decimal @@ -1599,43 +816,6 @@ def sample_data_with_timestep(time_arr, data_arr, timestep): return (new_time_arr, new_data_arr) -def time_data_add_nan(time_arr, data_arr, fill_time_step=None, axis=0): - """ - If there are gaps in time_arr --> fill them with respective time values. - Fill the corresponding data_arr values with nan. - - By default it is tried to fill the time array with continuously increasing times - based on the smallest time difference found there can still be discontinuities after - filling the arrays (because existing time values are not changed). - - But one can also give a fixed fill time step. 
- - Args: - time_arr (1D array): - times of data_arr in ms - data_arr (nD array): - the size of the specified dimension of data array must have the same length - as time_arr - fill_time_step (number, optional, default=None): - if there are gaps they are filled with this time step - axis (int): - which dimension of the data_arr belongs to the time_arr - - Returns: - time_arr (1D array): - time array with gaps filled - data_arr (nD array): - data array with gaps filled - """ - return time_data_fill_gaps( - time_arr, - data_arr, - fill_time_step=fill_time_step, - axis=axis, - fill="nan", - ) - - def time_data_fill_gaps( time_arr, data_arr, fill_time_step=None, axis=0, fill: str | float = "nan" ): @@ -1819,10 +999,9 @@ def get_maximum(input_data: list | np.ndarray | tuple | float): class PlotRecordings: """ Plot recordings from CompNeuroMonitors. - - TODO: CHeck if there are memory issues with large recordings or many subplots. """ + ### TODO: Check if there are memory issues with large recordings or many subplots. @check_types() def __init__( self, @@ -2452,7 +1631,7 @@ def _fill_subplot_other(self, plot_idx): ).astype(bool) ### fill gaps in time_arr and data_arr with nan - time_arr, data_arr = time_data_add_nan( + time_arr, data_arr = time_data_fill_gaps( time_arr=time_arr[mask], data_arr=data_arr[mask], axis=0 ) @@ -2674,3 +1853,180 @@ def _matrix_plot(self, compartment, variable, time_arr, data_arr, plot_idx, mean plt.title( f"Variable {variable} of {compartment} ({nr_neurons}) [{ef.sci(np.nanmin(data_arr))}, {ef.sci(np.nanmax(data_arr))}]" ) + + +def get_spike_features_of_chunk(chunk: int, results: CompNeuroExp._ResultsCl): + """ + Get the features of the spikes of a chunk of the results of a CompNeuroExp. + + !!! warning + The results data dict has to contain the population name as key "pop_name". + The spikes have to be recorded. 
+ + Args: + chunk (int): + index of the chunk + results (CompNeuroExp._ResultsCl): + results of the experiment + + Returns: + spike_features (dict): + dictionary with the features of the spikes + """ + ### get number of spikes + spike_dict = results.recordings[chunk][f"{results.data['pop_name']};spike"] + t, _ = my_raster_plot(spike_dict) + nbr_spikes = len(t) + ### get time of 1st, 2nd, 3rd spike + if nbr_spikes > 0: + time_1st_spike = t[0] * results.recordings[chunk]["dt"] + if nbr_spikes > 1: + time_2nd_spike = t[1] * results.recordings[chunk]["dt"] + if nbr_spikes > 2: + time_3rd_spike = t[2] * results.recordings[chunk]["dt"] + else: + time_3rd_spike = None + else: + time_2nd_spike = None + time_3rd_spike = None + else: + time_1st_spike = None + time_2nd_spike = None + time_3rd_spike = None + ### get time of last spike + if nbr_spikes > 0: + time_last_spike = t[-1] * results.recordings[chunk]["dt"] + else: + time_last_spike = None + ### get CV of ISI + if nbr_spikes > 1: + isi = np.diff(t * results.recordings[chunk]["dt"]) + cv_isi = np.std(isi) / np.mean(isi) + else: + cv_isi = None + + return { + "spike_count": nbr_spikes, + "time_to_first_spike": time_1st_spike, + "time_to_second_spike": time_2nd_spike, + "time_to_third_spike": time_3rd_spike, + "time_to_last_spike": time_last_spike, + "ISI_CV": cv_isi, + } + + +def get_spike_features_loss_of_chunk( + chunk: int, + results1: CompNeuroExp._ResultsCl, + results2: CompNeuroExp._ResultsCl, + chunk2: None | int = None, + feature_list: list[str] | None = None, +): + """ + Calculate the loss/difference between the spike features of two chunks of the + results of CompNeuroExp. + + !!! warning + The results data dict has to contain the population name as key "pop_name". + The spikes have to be recorded. 
+ + Args: + chunk (int): + index of the chunk + results1 (CompNeuroExp._ResultsCl): + results of the first experiment + results2 (CompNeuroExp._ResultsCl): + results of the second experiment + chunk2 (None|int): + index of the chunk of the second results, if None the same as chunk + feature_list (list[str]|None): + list of feature names which should be used to calculate the loss, if None + the default list is used + + Returns: + loss (float): + loss/difference between the spike features of the two chunks + """ + verbose = False + if chunk2 is None: + chunk2 = chunk + + ### get recording duration of chunk + nbr_periods = results1.recording_times.nbr_periods( + chunk=chunk, compartment=results1.data["pop_name"] + ) + chunk_duration_ms = 0 + chunk_duration_idx = 0 + for period in range(nbr_periods): + chunk_duration_ms += np.abs( + np.diff( + results1.recording_times.time_lims( + chunk=chunk, compartment=results1.data["pop_name"], period=period + ) + ) + ) + chunk_duration_idx += np.abs( + np.diff( + results1.recording_times.idx_lims( + chunk=chunk, compartment=results1.data["pop_name"], period=period + ) + ) + ) + + ### set a plausible "maximum" absolute difference for each feature + diff_max: dict[str, float] = { + "spike_count": chunk_duration_idx, + "time_to_first_spike": chunk_duration_ms, + "time_to_second_spike": chunk_duration_ms, + "time_to_third_spike": chunk_duration_ms, + "time_to_last_spike": chunk_duration_ms, + "ISI_CV": 1, + } + if verbose: + print(f"\ndiff_max: {diff_max}") + + ### set a plausible "close" absolute difference for each feature + diff_close: dict[str, float] = { + "spike_count": np.ceil(chunk_duration_ms / 200), + "time_to_first_spike": np.clip(chunk_duration_ms * 0.1, 5, 50), + "time_to_second_spike": np.clip(chunk_duration_ms * 0.1, 5, 50), + "time_to_third_spike": np.clip(chunk_duration_ms * 0.1, 5, 50), + "time_to_last_spike": np.clip(chunk_duration_ms * 0.1, 5, 50), + "ISI_CV": 0.1, + } + if verbose: + print(f"\ndiff_close: 
{diff_close}\n") + + ### catch if features from feature_list are not supported + if feature_list is None: + feature_list = list(diff_max.keys()) + features_not_supported = [ + feature for feature in feature_list if feature not in diff_max + ] + if features_not_supported: + raise ValueError(f"Features not supported: {features_not_supported}") + + ### calculate and return the mean of the differences of the features + features_1 = get_spike_features_of_chunk(chunk, results1) + features_2 = get_spike_features_of_chunk(chunk2, results2) + + if verbose: + print(f"\nfeatures_1: {features_1}\n") + print(f"features_2: {features_2}\n") + loss = 0.0 + for feature in feature_list: + ### if both features are None use 0 + if features_1[feature] is None and features_2[feature] is None: + diff = 0.0 + ### if single feature is None use diff_max + elif features_1[feature] is None or features_2[feature] is None: + diff = diff_max[feature] + else: + diff = float(np.absolute(features_1[feature] - features_2[feature])) + ### scale the difference by diff_close and add to loss + loss += diff / diff_close[feature] + loss /= len(feature_list) + + if verbose: + print(f"loss: {loss}") + return loss diff --git a/src/CompNeuroPy/dbs.py b/src/CompNeuroPy/dbs.py index e60d810..0aed84a 100644 --- a/src/CompNeuroPy/dbs.py +++ b/src/CompNeuroPy/dbs.py @@ -14,7 +14,7 @@ from ANNarchy.core import ConnectorMethods import numpy as np from CompNeuroPy import model_functions as mf -from CompNeuroPy.generate_model import generate_model +from CompNeuroPy.generate_model import CompNeuroModel from typingchecker import check_types import inspect @@ -102,9 +102,11 @@ def __init__( ### self.axon_rate_amp is None --> use the axon_rate_amp_pop_name_list and axon_rate_amp_value_list to create the dict self.axon_rate_amp: dict[Population | str, float] = { ### key is either a Populaiton or the string "default" - get_population(pop_name[4:]) - if pop_name.startswith("pop;") - else pop_name: axon_rate_amp_val + ( + 
get_population(pop_name[4:]) + if pop_name.startswith("pop;") + else pop_name + ): axon_rate_amp_val for pop_name, axon_rate_amp_val in zip( self.axon_rate_amp_pop_name_list, self.axon_rate_amp_value_list ) @@ -148,9 +150,11 @@ def analyze_model( ### if key is a Population, use the name of the Population and prepend pop; ### if key is the string "default", use the string self.axon_rate_amp_pop_name_list = [ - f"pop;{axon_rate_amp_key.name}" - if isinstance(axon_rate_amp_key, Population) - else axon_rate_amp_key + ( + f"pop;{axon_rate_amp_key.name}" + if isinstance(axon_rate_amp_key, Population) + else axon_rate_amp_key + ) for axon_rate_amp_key in axon_rate_amp.keys() ] self.axon_rate_amp_value_list = list(axon_rate_amp.values()) @@ -284,9 +288,9 @@ def analyze_projections(self): ] ### get the parameters of the connector function - connector_function_parameter_dict[ - proj.name - ] = self.get_connector_parameters(proj) + connector_function_parameter_dict[proj.name] = ( + self.get_connector_parameters(proj) + ) ### get the names of the pre- and post-synaptic populations pre_post_pop_name_dict[proj.name] = (proj.pre.name, proj.post.name) @@ -531,9 +535,9 @@ def add_DBS_to_spiking_neuron_model(self, neuron_model_init_parameter_dict): ) ### 3rd add axon spike term - neuron_model_init_parameter_dict[ - "axon_spike" - ] = "pulse(t)*dbs_on*unif_var_dbs1 > 1-prob_axon_spike" + neuron_model_init_parameter_dict["axon_spike"] = ( + "pulse(t)*dbs_on*unif_var_dbs1 > 1-prob_axon_spike" + ) ### 4th add axon reset term neuron_model_init_parameter_dict[ @@ -544,9 +548,9 @@ def add_DBS_to_spiking_neuron_model(self, neuron_model_init_parameter_dict): """ ### 5th extend description - neuron_model_init_parameter_dict[ - "description" - ] = f"{neuron_model_init_parameter_dict['description']}\nWith DBS mechanisms implemented." + neuron_model_init_parameter_dict["description"] = ( + f"{neuron_model_init_parameter_dict['description']}\nWith DBS mechanisms implemented." 
+ ) return neuron_model_init_parameter_dict @@ -677,9 +681,9 @@ def add_DBS_to_rate_coded_neuron_model(self, neuron_model_init_parameter_dict): ) ### 3rd extend description - neuron_model_init_parameter_dict[ - "description" - ] = f"{neuron_model_init_parameter_dict['description']}\nWith DBS mechanisms implemented." + neuron_model_init_parameter_dict["description"] = ( + f"{neuron_model_init_parameter_dict['description']}\nWith DBS mechanisms implemented." + ) return neuron_model_init_parameter_dict @@ -795,14 +799,14 @@ def add_DBS_to_spiking_synapse_model(self, synapse_init_parameter_dict): synapse_init_parameter_dict["equations"] = "\n".join(equations_line_split_list) ### 3rd add pre_axon_spike - synapse_init_parameter_dict[ - "pre_axon_spike" - ] = "g_target+=ite(unif_var_dbs>> + # create DBS stimulator dbs = DBSstimulator( stimulated_population=population1, @@ -1012,7 +1016,7 @@ def __init__( axon_rate_amp: float | dict[Population | str, float] = 1.0, seed: int | None = None, auto_implement: bool = False, - model: generate_model | None = None, + model: CompNeuroModel | None = None, ) -> None: """ Initialize DBS stimulator. diff --git a/src/CompNeuroPy/examples/deap_cma.py b/src/CompNeuroPy/examples/deap_cma.py new file mode 100644 index 0000000..14bb8e7 --- /dev/null +++ b/src/CompNeuroPy/examples/deap_cma.py @@ -0,0 +1,133 @@ +""" +This example demonstrates how to use the DeapCma class to optimize parameters. +""" + +from CompNeuroPy import DeapCma +import numpy as np + + +### for DeapCma we need to define the evaluate_function +def evaluate_function(population): + """ + Calculate the loss for a population of individuals. 
+ + Args: + population (np.ndarray): + population of individuals (i.e., parameter sets) to evaluate + + Returns: + loss_values (list[tuple]): + list of tuples, where each tuple contains the loss for an individual of the + population + """ + loss_list = [] + ### the population is a list of individuals + for individual in population: + ### the individual is a list of parameters + p0, p1, p2 = individual + ### calculate the loss of the individual + loss_of_individual = float((p0 - 3) ** 2 + (p1 - 7) ** 2 + (p2 - (-2)) ** 2) + ### insert the loss of the individual into the list of tuples + loss_list.append((loss_of_individual,)) + + return loss_list + + +def get_source_solutions(lb, ub): + """ + DeapCma can use source solutions to initialize the optimization process. This + function returns an example of source solutions. + + Source solutions are a list of tuples, where each tuple contains the parameters of + an individual (np.ndarray) and its loss (float). + + Returns: + source_solutions (list[tuple]): + list of tuples, where each tuple contains the parameters of an individual + and its loss + """ + ### create random solutions + source_solutions_parameters = np.random.uniform(0, 1, (100, 3)) * (ub - lb) + lb + ### evaluate the random solutions + source_solutions_losses = evaluate_function(source_solutions_parameters) + ### create a list of tuples, where each tuple contains the parameters of an + ### individual and its loss + source_solutions = [ + (source_solutions_parameters[idx], source_solutions_losses[idx][0]) + for idx in range(len(source_solutions_parameters)) + ] + ### only use the best 10 as source solutions + source_solutions = sorted(source_solutions, key=lambda x: x[1])[:10] + + return source_solutions + + +def main(): + ### define lower bounds of paramters to optimize + lb = np.array([-10, -10, 0]) + + ### define upper bounds of paramters to optimize + ub = np.array([10, 15, 5]) + + ### create an "minimal" instance of the DeapCma class + deap_cma = 
DeapCma( + lower=lb, + upper=ub, + evaluate_function=evaluate_function, + ) + + ### create an instance of the DeapCma class using all optional attributes + ### to initialize one could give a p0 array (same shape as lower and upper) and a + ### sig0 value or use source solutions (as shown here) + deap_cma_optional = DeapCma( + lower=lb, + upper=ub, + evaluate_function=evaluate_function, + max_evals=1000, + p0=None, + sig0=None, + param_names=["a", "b", "c"], + learn_rate_factor=1, + damping_factor=1, + verbose=True, + plot_file="logbook_optional.png", + cma_params_dict={}, + source_solutions=get_source_solutions(lb=lb, ub=ub), + hard_bounds=True, + ) + + ### run the optimization, since max_evals was not defined during initialization of + ### the DeapCma instance, it has to be defined here + ### it automatically saves a plot file showing the loss over the generations + deap_cma_result = deap_cma.run(max_evals=1000) + + ### run the optimization with all optional attributes + deap_cma_optional_result = deap_cma_optional.run(verbose=False) + + ### print the best parameters and its loss, since we did not define the names of the + ### parameters during initialization of the DeapCma instance, the names are param0, + ### param1, param2, also print everything that is in the dict returned by the run + best_param_dict = { + param_name: deap_cma_result[param_name] + for param_name in ["param0", "param1", "param2"] + } + print("\nFirst (minimal) optimization:") + print(f"Dict from run function contains: {list(deap_cma_result.keys())}") + print(f"Best parameters: {best_param_dict}") + print(f"Loss of best parameters: {deap_cma_result['best_fitness']}\n") + + ### print the same for the second optimization + best_param_dict = { + param_name: deap_cma_optional_result[param_name] + for param_name in ["a", "b", "c"] + } + print("Second optimization (with all optional attributes):") + print(f"Dict from run function contains: {list(deap_cma_optional_result.keys())}") + print(f"Best 
parameters: {best_param_dict}") + print(f"Loss of best parameters: {deap_cma_optional_result['best_fitness']}") + + return 1 + + +if __name__ == "__main__": + main() diff --git a/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py b/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py index 4ba8467..05f67fa 100644 --- a/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py +++ b/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp.py @@ -1,18 +1,15 @@ -from CompNeuroPy import ( - cnp_clear, - compile_in_folder, - find_folder_with_prefix, - data_obj, - replace_names_with_dict, - timing_decorator, - print_df, - save_variables, - load_variables, - clear_dir, -) -from CompNeuroPy.neuron_models import poisson_neuron +from CompNeuroPy.generate_model import CompNeuroModel +from CompNeuroPy.experiment import CompNeuroExp +from CompNeuroPy.monitors import CompNeuroMonitors +from CompNeuroPy import model_functions as mf +from CompNeuroPy import extra_functions as ef +from CompNeuroPy import system_functions as sf +from CompNeuroPy import analysis_functions as af + from ANNarchy import ( Population, + Projection, + Synapse, get_population, Monitor, Network, @@ -25,11 +22,20 @@ simulate_until, Uniform, get_current_step, + projections, + populations, + Binomial, + CurrentInjection, + raster_plot, + set_seed, ) -from ANNarchy.core.Global import _network + +from ANNarchy.core import ConnectorMethods + +# from ANNarchy.core.Global import _network import numpy as np from scipy.interpolate import interp1d, interpn -from scipy.signal import find_peaks, argrelmin +from scipy.signal import find_peaks, argrelmin, argrelextrema import matplotlib.pyplot as plt import inspect import textwrap @@ -46,416 +52,635 @@ from scipy.stats import poisson from ANNarchy.extensions.bold import BoldMonitor from sklearn.linear_model import LinearRegression +import sympy as sp +from scipy.optimize import minimize, Bounds -class 
model_configurator: - def __init__( - self, - model, - target_firing_rate_dict, - interpolation_grid_points=10, - max_psp=10, - do_not_config_list=[], - print_guide=False, - I_app_variable="I_app", - ) -> None: +class ArrSampler: + """ + Class to store an array and sample from it. + """ + + def __init__(self, arr: np.ndarray, var_name_list: list[str]) -> None: """ Args: - model: CompNeuroPy generate_model object - it's not important if the model is created or compiled but after running - the model_configurator only the given model will exist so do not create - something else in ANNarchy! - - target_firing_rate_dict: dict - keys = population names of model which should be configured, values = target firing rates in Hz - - interpolation_grid_points: int, optional, default=10 - how many points should be used for the interpolation of the f-I-g curve on a single axis - - max_psp: int, optional, default=10 - maximum post synaptic potential in mV - - do_not_config_list: list, optional, default=[] - list with strings containing population names of populations which should not be configured - - print_guide: bool, optional, default=False - if you want to get information about what you could do with model_configurator - - I_app_variable: str, optional, default="I_app" - the name of the varaible in the populations which represents the applied current - TODO: not implemented yet, default value is always used - - Functions: - get_max_syn: - returns a dictionary with weight ranges for all afferent projections of the configured populations - """ - self.model = model - self.target_firing_rate_dict = target_firing_rate_dict - self.pop_name_list = list(target_firing_rate_dict.keys()) - for do_not_pop_name in do_not_config_list: - self.pop_name_list.remove(do_not_pop_name) - self.I_app_max_dict = {pop_name: None for pop_name in self.pop_name_list} - self.g_max_dict = {pop_name: None for pop_name in self.pop_name_list} - self.tau_dict = {pop_name: None for pop_name in 
self.pop_name_list} - self.nr_afferent_proj_dict = {pop_name: None for pop_name in self.pop_name_list} - self.net_many_dict = {pop_name: None for pop_name in self.pop_name_list} - self.net_single_dict = {pop_name: None for pop_name in self.pop_name_list} - self.net_single_v_clamp_dict = { - pop_name: None for pop_name in self.pop_name_list - } - self.max_weight_dict = {pop_name: None for pop_name in self.pop_name_list} - self.variable_init_sampler_dict = { - pop_name: None for pop_name in self.pop_name_list - } - self.f_I_g_curve_dict = {pop_name: None for pop_name in self.pop_name_list} - self.I_f_g_curve_dict = {pop_name: None for pop_name in self.pop_name_list} - self.afferent_projection_dict = { - pop_name: None for pop_name in self.pop_name_list - } - self.neuron_model_dict = {pop_name: None for pop_name in self.pop_name_list} - self.neuron_model_parameters_dict = { - pop_name: None for pop_name in self.pop_name_list - } - self.neuron_model_attributes_dict = { - pop_name: None for pop_name in self.pop_name_list - } - self.max_psp_dict = {pop_name: None for pop_name in self.pop_name_list} - self.possible_rates_dict = {pop_name: None for pop_name in self.pop_name_list} - self.extreme_firing_rates_df_dict = { - pop_name: None for pop_name in self.pop_name_list - } - self.prepare_psp_dict = {pop_name: None for pop_name in self.pop_name_list} - ### set max psp for a single spike - self.max_psp_dict = {pop_name: max_psp for pop_name in self.pop_name_list} - ### print things - self.log_exist = False - self.caller_name = "" - self.log("model configurator log:") - self.print_guide = print_guide - ### simulation things - self.simulation_dur = 5000 - self.simulation_dur_estimate_time = 50 - self.nr_neurons_per_net = 100 - - ### do things for which the model needs to be created (it will not be available later) - self.analyze_model() - - ### print guide - self._p_g(_p_g_1) - - def get_max_syn(self, cache=True, clear=False): - """ - get the weight dictionary for all 
populations given in target_firing_rate_dict - keys = population names, values = dict which contain values = afferent projection names, values = lists with w_min and w_max - """ - ### clear cache to create new cache - if cache and clear: - self.log("clear cache of get_max_syn") - clear_dir("./.model_configurator_cache/get_max_syn") - - ### check cache for get_max_syn - cache_worked = False - if cache: - try: - loaded_variables_dict = load_variables( - name_list=[ - "net_single_dict", - "prepare_psp_dict", - "I_app_max_dict", - "g_max_dict", - "syn_contr_dict", - "syn_load_dict", - ], - path="./.model_configurator_cache/get_max_syn", - ) - ( - self.net_single_dict, - self.prepare_psp_dict, - self.I_app_max_dict, - self.g_max_dict, - self.syn_contr_dict, - self.syn_load_dict, - ) = loaded_variables_dict.values() - ### create dummy network for single network and actually create network for single_v_clamp (single_v_clamp needed in get_base) - self.create_single_neuron_networks( - single_net=False, single_net_v_clamp=True, prepare_psp=False - ) - cache_worked = True - except: - cache_worked = False + arr (np.ndarray) + array with shape (n_samples, n_variables) + var_name_list (list[str]) + list of variable names + """ + self.arr_shape = arr.shape + self.var_name_list = var_name_list + ### check values of any variable are constant + self.is_const = np.std(arr, axis=0) <= np.mean(np.absolute(arr), axis=0) / 1000 + ### for the constant variables only the first value is used + self.constant_arr = arr[0, self.is_const] + ### array without the constant variables + self.not_constant_val_arr = arr[:, np.logical_not(self.is_const)] - if not cache_worked: - ### create single neuron networks - self.create_single_neuron_networks() - - ### get max synaptic things with single neuron networks - for pop_name in self.pop_name_list: - self.log(pop_name) - ### get max I_app and max weights (i.e. 
g_ampa, g_gaba) - txt = f"get max I_app, g_ampa and g_gaba using network_single for {pop_name}" - print(txt) - self.log(txt) - I_app_max, g_ampa_max, g_gaba_max = self.get_max_syn_currents( - pop_name=pop_name, + def sample(self, n=1, seed=0): + """ + Sample n samples from the array. + + Args: + n (int) + number of samples to be drawn + seed (int) + seed for the random number generator + + Returns: + ret_arr (np.ndarray) + array with shape (n, n_variables) + """ + ### get n random indices along the n_samples axis + rng = np.random.default_rng(seed=seed) + random_idx_arr = rng.integers(low=0, high=self.arr_shape[0], size=n) + ### sample with random idx + sample_arr = self.not_constant_val_arr[random_idx_arr] + ### create return array + ret_arr = np.zeros((n,) + self.arr_shape[1:]) + ### add samples to return array + ret_arr[:, np.logical_not(self.is_const)] = sample_arr + ### add constant values to return array + ret_arr[:, self.is_const] = self.constant_arr + + return ret_arr + + def set_init_variables(self, population: Population): + """ + Set the initial variables of the given population to the given values. + """ + variable_init_arr = self.sample(len(population), seed=0) + var_name_list = self.var_name_list + for var_name in population.variables: + if var_name in var_name_list: + set_val = variable_init_arr[:, var_name_list.index(var_name)] + setattr(population, var_name, set_val) + + +class AnalyzeModel: + """ + Class to analyze the given model to be able to reproduce it. 
+ """ + + _connector_methods_dict = { + "One-to-One": ConnectorMethods.connect_one_to_one, + "All-to-All": ConnectorMethods.connect_all_to_all, + "Gaussian": ConnectorMethods.connect_gaussian, + "Difference-of-Gaussian": ConnectorMethods.connect_dog, + "Random": ConnectorMethods.connect_fixed_probability, + "Random Convergent": ConnectorMethods.connect_fixed_number_pre, + "Random Divergent": ConnectorMethods.connect_fixed_number_post, + "User-defined": ConnectorMethods.connect_with_func, + "MatrixMarket": ConnectorMethods.connect_from_matrix_market, + "Connectivity matrix": ConnectorMethods.connect_from_matrix, + "Sparse connectivity matrix": ConnectorMethods.connect_from_sparse, + "From File": ConnectorMethods.connect_from_file, + } + + def __init__(self, model: CompNeuroModel): + ### clear ANNarchy and create the model + self._clear_model(model=model, do_create=True) + + ### get population info (eq, params etc.) + self._analyze_populations(model=model) + + ### get projection info + self._analyze_projections(model=model) + + ### clear ANNarchy + self._clear_model(model=model, do_create=False) + + def _clear_model(self, model: CompNeuroModel, do_create: bool = True): + mf.cnp_clear(functions=False, constants=False) + if do_create: + model.create(do_compile=False) + + def _analyze_populations(self, model: CompNeuroModel): + """ + Get info of each population + + Args: + model (CompNeuroModel): + Model to be analyzed + """ + ### values of the paramters and variables of the population's neurons, keys are + ### the names of paramters and variables + self.neuron_model_attr_dict: dict[str, dict] = {} + ### arguments of the __init__ function of the Neuron class + self.neuron_model_init_parameter_dict: dict[str, dict] = {} + ### arguments of the __init__ function of the Population class + self.pop_init_parameter_dict: dict[str, dict] = {} + + ### for loop over all populations + for pop_name in model.populations: + pop: Population = get_population(pop_name) + ### get the 
neuron model attributes (parameters/variables) + ### old: self.neuron_model_parameters_dict + ### old: self.neuron_model_attributes_dict = keys() + self.neuron_model_attr_dict[pop.name] = pop.init + ### get a dict of all arguments of the __init__ function of the Neuron + ### ignore self + ### old: self.neuron_model_dict[pop_name] + init_params = inspect.signature(Neuron.__init__).parameters + self.neuron_model_init_parameter_dict[pop.name] = { + param: getattr(pop.neuron_type, param) + for param in init_params + if param != "self" + } + ### get a dict of all arguments of the __init__ function of the Population + ### ignore self, storage_order and copied + init_params = inspect.signature(Population.__init__).parameters + self.pop_init_parameter_dict[pop.name] = { + param: getattr(pop, param) + for param in init_params + if param != "self" and param != "storage_order" and param != "copied" + } + + def _analyze_projections(self, model: CompNeuroModel): + """ + Get info of each projection + + Args: + model (CompNeuroModel): + Model to be analyzed + """ + ### parameters of the __init__ function of the Projection class + self.proj_init_parameter_dict: dict[str, dict] = {} + ### parameters of the __init__ function of the Synapse class + self.synapse_init_parameter_dict: dict[str, dict] = {} + ### values of the paramters and variables of the synapse, keys are the names of + ### paramters and variables + self.synapse_model_attr_dict: dict[str, dict] = {} + ### connector functions of the projections + self.connector_function_dict: dict = {} + ### parameters of the connector functions of the projections + self.connector_function_parameter_dict: dict = {} + ### names of pre- and post-synaptic populations of the projections + ### old: self.post_pop_name_dict and self.pre_pop_name_dict + self.pre_post_pop_name_dict: dict[str, tuple] = {} + ### sizes of pre- and post-synaptic populations of the projections + ### old: self.pre_pop_size_dict + self.pre_post_pop_size_dict: dict[str, 
tuple] = {} + + ### loop over all projections + for proj_name in model.projections: + proj: Projection = get_projection(proj_name) + ### get the synapse model attributes (parameters/variables) + self.synapse_model_attr_dict[proj.name] = proj.init + ### get a dict of all paramters of the __init__ function of the Synapse + init_params = inspect.signature(Synapse.__init__).parameters + self.synapse_init_parameter_dict[proj.name] = { + param: getattr(proj.synapse_type, param) + for param in init_params + if param != "self" + } + ### get a dict of all paramters of the __init__ function of the Projection + init_params = inspect.signature(Projection.__init__).parameters + self.proj_init_parameter_dict[proj_name] = { + param: getattr(proj, param) + for param in init_params + if param != "self" and param != "synapse" and param != "copied" + } + + ### get the connector function of the projection and its parameters + ### raise errors for not supported connector functions + if ( + proj.connector_name == "User-defined" + or proj.connector_name == "MatrixMarket" + or proj.connector_name == "From File" + ): + raise ValueError( + f"Connector function '{self._connector_methods_dict[proj.connector_name].__name__}' not supported yet" ) - self.I_app_max_dict[pop_name] = I_app_max - self.g_max_dict[pop_name] = { - "ampa": g_ampa_max, - "gaba": g_gaba_max, - } - - ### obtain the synaptic contributions assuming max weights - self.syn_contr_dict = {} - for pop_name in self.pop_name_list: - self.syn_contr_dict[pop_name] = {} - for target_type in ["ampa", "gaba"]: - self.log(f"get synaptic contributions for {pop_name} {target_type}") - self.syn_contr_dict[pop_name][ - target_type - ] = self.get_syn_contr_dict( - pop_name=pop_name, - target_type=target_type, - use_max_weights=True, - normalize=True, - ) + ### get the connector function + self.connector_function_dict[proj.name] = self._connector_methods_dict[ + proj.connector_name + ] - ### create the synaptic load template dict - 
self.syn_load_dict = {} - for pop_name in self.pop_name_list: - self.syn_load_dict[pop_name] = [] - if "ampa" in self.afferent_projection_dict[pop_name]["target"]: - self.syn_load_dict[pop_name].append("ampa_load") - if "gaba" in self.afferent_projection_dict[pop_name]["target"]: - self.syn_load_dict[pop_name].append("gaba_load") - - ### save variables in cache - ### obtain variables which should be cached / are needed later - ### do not cache ANNarchy objects - net_single_dict_to_cache = {} - for key, val in self.net_single_dict.items(): - net_single_dict_to_cache[key] = { - "variable_init_sampler": val["variable_init_sampler"] - } - save_variables( - variable_list=[ - net_single_dict_to_cache, - self.prepare_psp_dict, - self.I_app_max_dict, - self.g_max_dict, - self.syn_contr_dict, - self.syn_load_dict, - ], - name_list=[ - "net_single_dict", - "prepare_psp_dict", - "I_app_max_dict", - "g_max_dict", - "syn_contr_dict", - "syn_load_dict", - ], - path="./.model_configurator_cache/get_max_syn", + ### get the parameters of the connector function + self.connector_function_parameter_dict[proj.name] = ( + self._get_connector_parameters(proj) ) - ### only return synaptic contributions smaller 1 - template_synaptic_contribution_dict = ( - self.get_template_synaptic_contribution_dict(given_dict=self.syn_contr_dict) - ) + ### get the names of the pre- and post-synaptic populations + self.pre_post_pop_name_dict[proj.name] = (proj.pre.name, proj.post.name) - self._p_g( - _p_g_after_get_weights( - template_weight_dict=self.g_max_dict, - template_synaptic_load_dict=self.syn_load_dict, - template_synaptic_contribution_dict=template_synaptic_contribution_dict, + ### get the sizes of the pre- and post-synaptic populations + self.pre_post_pop_size_dict[proj.name] = ( + proj.pre.size, + proj.post.size, + ) + + def _get_connector_parameters(self, proj: Projection): + """ + Get the parameters of the given connector function. 
+ + Args: + proj (Projection): + Projection for which the connector parameters are needed + + Returns: + connector_parameters_dict (dict): + Parameters of the given connector function + """ + + if proj.connector_name == "One-to-One": + return { + "weights": proj._connection_args[0], + "delays": proj._connection_args[1], + "force_multiple_weights": not (proj._single_constant_weight), + "storage_format": proj._storage_format, + "storage_order": proj._storage_order, + } + elif proj.connector_name == "All-to-All": + return { + "weights": proj._connection_args[0], + "delays": proj._connection_args[1], + "allow_self_connections": proj._connection_args[2], + "force_multiple_weights": not (proj._single_constant_weight), + "storage_format": proj._storage_format, + "storage_order": proj._storage_order, + } + elif proj.connector_name == "Gaussian": + return { + "amp": proj._connection_args[0], + "sigma": proj._connection_args[1], + "delays": proj._connection_args[2], + "limit": proj._connection_args[3], + "allow_self_connections": proj._connection_args[4], + "storage_format": proj._storage_format, + } + elif proj.connector_name == "Difference-of-Gaussian": + return { + "amp_pos": proj._connection_args[0], + "sigma_pos": proj._connection_args[1], + "amp_neg": proj._connection_args[2], + "sigma_neg": proj._connection_args[3], + "delays": proj._connection_args[4], + "limit": proj._connection_args[5], + "allow_self_connections": proj._connection_args[6], + "storage_format": proj._storage_format, + } + elif proj.connector_name == "Random": + return { + "probability": proj._connection_args[0], + "weights": proj._connection_args[1], + "delays": proj._connection_args[2], + "allow_self_connections": proj._connection_args[3], + "force_multiple_weights": not (proj._single_constant_weight), + "storage_format": proj._storage_format, + "storage_order": proj._storage_order, + } + elif proj.connector_name == "Random Convergent": + return { + "number": proj._connection_args[0], + "weights": 
proj._connection_args[1], + "delays": proj._connection_args[2], + "allow_self_connections": proj._connection_args[3], + "force_multiple_weights": not (proj._single_constant_weight), + "storage_format": proj._storage_format, + "storage_order": proj._storage_order, + } + elif proj.connector_name == "Random Divergent": + return { + "number": proj._connection_args[0], + "weights": proj._connection_args[1], + "delays": proj._connection_args[2], + "allow_self_connections": proj._connection_args[3], + "force_multiple_weights": not (proj._single_constant_weight), + "storage_format": proj._storage_format, + "storage_order": proj._storage_order, + } + elif proj.connector_name == "Connectivity matrix": + return { + "weights": proj._connection_args[0], + "delays": proj._connection_args[1], + "pre_post": proj._connection_args[2], + "storage_format": proj._storage_format, + "storage_order": proj._storage_order, + } + elif proj.connector_name == "Sparse connectivity matrix": + return { + "weights": proj._connection_args[0], + "delays": proj._connection_args[1], + "storage_format": proj._storage_format, + "storage_order": proj._storage_order, + } + + +class CreateSingleNeuronNetworks: + """ + Class to create single neuron networks for normal and voltage clamp mode. 
+ + Attributes: + single_net_dict (dict): + Nested dict containing the single neuron networks for normal and voltage + clamp mode + keys: mode (str) + normal or v_clamp + values: dict + keys: pop_name (str) + population name + values: dict + keys: net, population, monitor, init_sampler + values: Network, Population, Monitor, ArrSampler + """ + + def __init__( + self, + model: CompNeuroModel, + analyze_model: AnalyzeModel, + do_not_config_list: list[str], + ): + """ + Args: + model (CompNeuroModel): + Model to be analyzed + analyze_model (AnalyzeModel): + Analyzed model + do_not_config_list (list[str]): + List of population names which should not be configured + """ + self._single_net_dict = {} + ### create the single neuron networks for normal and voltage clamp mode + for mode in ["normal", "v_clamp"]: + self._single_net_dict[mode] = {} + self._create_single_neuron_networks( + model=model, + analyze_model=analyze_model, + do_not_config_list=do_not_config_list, + mode=mode, ) - ) - return self.max_weight_dict - def get_syn_contr_dict( - self, pop_name: str, target_type: str, use_max_weights=False, normalize=False - ) -> dict: + def single_net(self, pop_name: str, mode: str): """ - get the relative synaptic contribution list of a population for a given target type - weights are obtained from the afferent_projection_dict, if there are no weights --> use max weights + Return the information of the single neuron network for the given population and + mode. 
Args: - pop_name: str - population name + pop_name (str): + Name of the population + mode (str): + Mode for which the single neuron network should be returned (normal or + v_clamp) + + Returns: + ReturnSingleNeuronNetworks: + Information of the single neuron network with Attributes: net, + population, monitor, init_sampler + """ + return self.ReturnSingleNeuronNetworks(self._single_net_dict[mode][pop_name]) - target_type: str - target type of the afferent projections of the population + class ReturnSingleNeuronNetworks: + def __init__(self, single_net_dict): + self.net: Network = single_net_dict["net"] + self.population: Population = single_net_dict["population"] + self.monitor: Monitor = single_net_dict["monitor"] + self.init_sampler: ArrSampler = single_net_dict["init_sampler"] - use_max_weights: bool, optional, default=False - if True the max weights are used, if False the weights from the afferent_projection_dict are used + def init_sampler(self, model: CompNeuroModel, do_not_config_list: list[str]): + """ + Return the init samplers for all populations of the normal mode. All samplers + are returned in an object with a get method to get the sampler for a specific + population. 
+ + Args: + model (CompNeuroModel): + Model to be analyzed + do_not_config_list (list[str]): + List of population names which should not be configured Returns: - rel_syn_contr_dict: dict - keys = projection names, values = relative synaptic contributions - """ - ### g_max have to be obtained already - assert not ( - isinstance(self.g_max_dict[pop_name][target_type], type(None)) - ), "ERROR, get_rel_syn_contr_list: g_max have to be obtained already" - ### get list of relative synaptic contributions - proj_name_list = [] - rel_syn_contr_list = [] - for proj_name in self.afferent_projection_dict[pop_name]["projection_names"]: - proj_dict = self.get_proj_dict(proj_name) - proj_target_type = proj_dict["proj_target_type"] - weight = proj_dict["proj_weight"] - if isinstance(weight, type(None)) or use_max_weights: - weight = self.g_max_dict[pop_name][target_type] - if proj_target_type == target_type: - rel_syn_contr_list.append(proj_dict["spike_frequency"] * weight) - proj_name_list.append(proj_name) - ### normalize the list - if normalize: - rel_syn_contr_arr = np.array(rel_syn_contr_list) - rel_syn_contr_arr = rel_syn_contr_arr / np.sum(rel_syn_contr_arr) - rel_syn_contr_list = rel_syn_contr_arr.tolist() - ### combine proj_name_list and rel_syn_contr_list to an dict - rel_syn_contr_dict = { - proj_name: rel_syn_contr - for proj_name, rel_syn_contr in zip(proj_name_list, rel_syn_contr_list) - } + AllSampler: + Object with a get method to get the init sampler for a specific + population + """ + init_sampler_dict = {} + for pop_name in model.populations: + if pop_name in do_not_config_list: + continue + init_sampler_dict[pop_name] = self._single_net_dict["normal"][pop_name][ + "init_sampler" + ] + return self.AllSampler(init_sampler_dict) + + class AllSampler: + def __init__(self, init_sampler_dict: dict[str, ArrSampler]): + self.init_sampler_dict = init_sampler_dict - return rel_syn_contr_dict + def get(self, pop_name: str): + """ + Get the init sampler for the given 
population. + + Args: + pop_name (str): + Name of the population + + Returns: + sampler (ArrSampler): + Init sampler for the given population + """ + sampler: ArrSampler = self.init_sampler_dict[pop_name] + return sampler - def create_single_neuron_networks( - self, single_net=True, single_net_v_clamp=True, prepare_psp=True + def _create_single_neuron_networks( + self, + model: CompNeuroModel, + analyze_model: AnalyzeModel, + do_not_config_list: list[str], + mode: str, ): - ### clear ANNarchy - cnp_clear() - - ### create the single neuron networks - for pop_name in self.pop_name_list: - txt = f"create network_single for {pop_name}" - print(txt) - self.log(txt) - ### the network with the standard neuron - if single_net: - self.net_single_dict[pop_name] = self.create_net_single( - pop_name=pop_name - ) - else: - ### dummy network for the pop - net_single_dummy = Network() - pop_single_dummy = Population( - 1, - neuron=Neuron(equations="r=1"), - name=f"dummy_single_{pop_name}", - ) - mon_single_dummy = Monitor(pop_single_dummy, ["r"]) - net_single_dummy.add([pop_single_dummy, mon_single_dummy]) - - ### the network with the voltage clamp version neuron - if single_net_v_clamp: - self.net_single_v_clamp_dict[ - pop_name - ] = self.create_net_single_voltage_clamp(pop_name=pop_name) - else: - ### dummy network for the pop - net_single_v_clamp_dummy = Network() - pop_single_v_clamp_dummy = Population( - 1, - neuron=Neuron(equations="r=1"), - name=f"dummy_single_v_clamp_{pop_name}", - ) - mon_single_v_clamp_dummy = Monitor(pop_single_v_clamp_dummy, ["r"]) - net_single_v_clamp_dummy.add( - [pop_single_v_clamp_dummy, mon_single_v_clamp_dummy] - ) - ### get v_rest and correspodning I_app_hold - if prepare_psp: - self.prepare_psp_dict[pop_name] = self.find_v_rest_for_psp( - pop_name, do_plot=False - ) + """ + Create the single neuron networks for the given mode. Sets the single_net_dict. 
+ + Args: + model (CompNeuroModel): + Model to be analyzed + analyze_model (AnalyzeModel): + Analyzed model + do_not_config_list (list[str]): + List of population names which should not be configured + mode (str): + Mode for which the single neuron networks should be created + """ + + ### loop over populations which should be configured + for pop_name in model.populations: + ### skip populations which should not be configured + if pop_name in do_not_config_list: + continue + ### store the dict containing the network etc + self._single_net_dict[mode][pop_name] = self._create_net_single( + pop_name=pop_name, analyze_model=analyze_model, mode=mode + ) - def create_net_single(self, pop_name): + def _create_net_single(self, pop_name: str, analyze_model: AnalyzeModel, mode: str): """ - creates a network with the neuron type of the population given by pop_name - the number of neurons is 1 + Creates a network with the neuron type of the population given by pop_name for + the given mode. The population size is set to 1. 
Args: - pop_name: str - population name - """ - - ### for stop condition for recording psp --> add v_before_psp and v_psp_thresh to equations/parameters - - ### get the initial arguments of the neuron - neuron_model = self.neuron_model_dict[pop_name] - ### names of arguments - init_arguments_name_list = list(Neuron.__init__.__code__.co_varnames) - init_arguments_name_list.remove("self") - init_arguments_name_list.remove("name") - init_arguments_name_list.remove("description") - ### arguments dict - init_arguments_dict = { - init_arguments_name: getattr(neuron_model, init_arguments_name) - for init_arguments_name in init_arguments_name_list - } - ### add v_before_psp=v at the beginning of the equations - equations_line_split_list = str(init_arguments_dict["equations"]).splitlines() - equations_line_split_list.insert(0, "v_before_psp = v") - init_arguments_dict["equations"] = "\n".join(equations_line_split_list) - ### add v_psp_thresh to the parameters - parameters_line_split_list = str(init_arguments_dict["parameters"]).splitlines() - parameters_line_split_list.append("v_psp_thresh = 0 : population") - init_arguments_dict["parameters"] = "\n".join(parameters_line_split_list) - - ### create neuron model with new equations - neuron_model_new = Neuron(**init_arguments_dict) + pop_name (str): + Name of the population + analyze_model (AnalyzeModel): + Analyzed model + mode (str): + Mode for which the network should be created + + Returns: + net_single_dict (dict): + Dict containing the Network, Population, Monitor and ArrSampler objects + """ + ### create the adjusted neuron model for the stop condition + neuron_model_new = self._get_single_neuron_neuron_model( + pop_name=pop_name, analyze_model=analyze_model, mode=mode + ) ### create the single neuron population - single_neuron = Population( - 1, - neuron=neuron_model_new, - name=f"single_neuron_{pop_name}", - stop_condition=f"((abs(v-v_psp_thresh)<0.01) and (abs(v_before_psp-v_psp_thresh)>0.01)): any", + 
pop_single_neuron = self._get_single_neuron_population( + pop_name=pop_name, + neuron_model_new=neuron_model_new, + analyze_model=analyze_model, + mode=mode, ) - ### set the attributes of the neuron - for attr_name, attr_val in self.neuron_model_parameters_dict[pop_name]: - setattr(single_neuron, attr_name, attr_val) ### create Monitor for single neuron - mon_single = Monitor(single_neuron, ["spike", "v"]) + if mode == "normal": + mon_single = Monitor(pop_single_neuron, ["spike", "v"]) + elif mode == "v_clamp": + mon_single = Monitor(pop_single_neuron, ["v_clamp_rec_sign"]) - ### create network with single neuron + ### create network with single neuron and compile it net_single = Network() - net_single.add([single_neuron, mon_single]) - compile_in_folder( - folder_name=f"single_net_{pop_name}", silent=True, net=net_single - ) - - ### get the values of the variables after 2000 ms simulation - variable_init_sampler = self.get_init_neuron_variables( - net_single, net_single.get(single_neuron) + net_single.add([pop_single_neuron, mon_single]) + mf.compile_in_folder( + folder_name=f"single_net_{mode}_{pop_name}", silent=True, net=net_single ) ### network dict net_single_dict = { "net": net_single, - "population": net_single.get(single_neuron), + "population": net_single.get(pop_single_neuron), "monitor": net_single.get(mon_single), - "variable_init_sampler": variable_init_sampler, + "init_sampler": None, } + ### for v_clamp we are done here + if mode == "v_clamp": + return net_single_dict + + ### for normal neuron get the init sampler for the variables of the neuron model + ### (to initialize a population of the neuron model) + init_sampler = self._get_neuron_model_init_sampler( + net=net_single, pop=net_single.get(pop_single_neuron) + ) + net_single_dict["init_sampler"] = init_sampler + return net_single_dict - def get_init_neuron_variables(self, net, pop): + def _get_single_neuron_neuron_model( + self, pop_name: str, analyze_model: AnalyzeModel, mode=str + ): + """ + 
Create the adjusted neuron model for the given mode. + + Args: + pop_name (str): + Name of the population + analyze_model (AnalyzeModel): + Analyzed model + mode (str): + Mode for which the neuron model should be created + + Returns: + neuron_model_new (Neuron): + Adjusted neuron model + """ + ### get the stored parameters of the __init__ function of the Neuron + neuron_model_init_parameter_dict = ( + analyze_model.neuron_model_init_parameter_dict[pop_name].copy() + ) + ### Define the attributes of the neuron model as sympy symbols + neuron_model_attributes_name_list = list( + analyze_model.neuron_model_attr_dict[pop_name].keys() + ) + ### add v_before_psp and v_psp_thresh to equations/parameters, for the stop + ### condition below + self._adjust_neuron_model( + neuron_model_init_parameter_dict, + neuron_model_attributes_name_list, + mode=mode, + ) + ### create the adjusted neuron model + neuron_model_new = Neuron(**neuron_model_init_parameter_dict) + return neuron_model_new + + def _get_single_neuron_population( + self, + pop_name: str, + neuron_model_new: Neuron, + analyze_model: AnalyzeModel, + mode: str, + ): """ - get the variables of the given population after simulating 2000 ms + Create the single neuron population for the given mode. 
Args: - net: ANNarchy network - the network which contains the pop + pop_name (str): + Name of the population + neuron_model_new (Neuron): + Adjusted neuron model + analyze_model (AnalyzeModel): + Analyzed model + mode (str): + Mode for which the population should be created + + Returns: + pop_single_neuron (Population): + Single neuron population + """ + if mode == "normal": + pop_single_neuron = Population( + 1, + neuron=neuron_model_new, + name=f"single_neuron_{pop_name}", + stop_condition="((abs(v-v_psp_thresh)<0.01) and (abs(v_before_psp-v_psp_thresh)>0.01)): any", + ) + elif mode == "v_clamp": + ### create the single neuron population + pop_single_neuron = Population( + 1, + neuron=neuron_model_new, + name=f"single_neuron_v_clamp_{pop_name}", + ) + + ### get the stored parameters and variables + neuron_model_attr_dict = analyze_model.neuron_model_attr_dict[pop_name] + ### set the parameters and variables + for attr_name, attr_val in neuron_model_attr_dict.items(): + setattr(pop_single_neuron, attr_name, attr_val) + return pop_single_neuron + + def _get_neuron_model_init_sampler(self, net: Network, pop: Population): + """ + Create a sampler for the initial values of the variables of the neuron model by + simulating the neuron for 10000 ms and afterwards simulating 2000 ms and + sampling the variables every dt. 
- pop: ANNarchy population - the population whose variables are obtained + Args: + net (Network): + Network with the single neuron population + pop (Population): + Single neuron population + Returns: + sampler (ArrSampler): + Sampler for the initial values of the variables of the neuron model """ - ### reset neuron and deactivate input + + ### reset network and deactivate input net.reset() pop.I_app = 0 @@ -470,2889 +695,2356 @@ def get_init_neuron_variables(self, net, pop): net.simulate(dt()) get_arr = np.array([getattr(pop, var_name) for var_name in pop.variables]) var_arr[time_idx, :] = get_arr[:, 0] + + ### reset network after simulation net.reset() - ### create a sampler with the data samples of from the 1000 ms simulation - sampler = self.var_arr_sampler(var_arr, var_name_list) + ### create a sampler with the data samples from the21000 ms simulation + sampler = ArrSampler(arr=var_arr, var_name_list=var_name_list) return sampler - def create_net_single_voltage_clamp(self, pop_name): + def _adjust_neuron_model( + self, + neuron_model_init_parameter_dict: dict, + neuron_model_attributes_name_list: list[str], + mode: str, + ): """ - creates a network with the neuron type of the population given by pop_name - the number of neurons is 1 - - The equation wich defines the chagne of v is set to zero and teh change of v - is stored in the new variable v_clamp_rec + Adjust the parameters and equations of the neuron model for the given mode. 
Args: - pop_name: str - population name - """ - - ### get the initial arguments of the neuron - neuron_model = self.neuron_model_dict[pop_name] - ### names of arguments - init_arguments_name_list = list(Neuron.__init__.__code__.co_varnames) - init_arguments_name_list.remove("self") - init_arguments_name_list.remove("name") - init_arguments_name_list.remove("description") - ### arguments dict - init_arguments_dict = { - init_arguments_name: getattr(neuron_model, init_arguments_name) - for init_arguments_name in init_arguments_name_list - } - ### get new equations for voltage clamp - equations_new = self.get_voltage_clamp_equations(init_arguments_dict, pop_name) - init_arguments_dict["equations"] = equations_new - ### add v_clamp_rec_thresh to the parameters - parameters_line_split_list = str(init_arguments_dict["parameters"]).splitlines() - parameters_line_split_list.append("v_clamp_rec_thresh = 0 : population") - init_arguments_dict["parameters"] = "\n".join(parameters_line_split_list) - - ### for each afferent population create a binomial spike train equation string - ### add it to the equations - ### and add the related parameters to the parameters - - ### get the afferent populations - afferent_population_list = [] - proj_target_type_list = [] - for proj_name in self.afferent_projection_dict[pop_name]["projection_names"]: - proj_dict = self.get_proj_dict(proj_name) - pre_pop_name = proj_dict["pre_pop_name"] - afferent_population_list.append(pre_pop_name) - proj_target_type_list.append(proj_dict["proj_target_type"]) - - ### split the equations and parameters string - equations_line_split_list = str(init_arguments_dict["equations"]).splitlines() - - parameters_line_split_list = str(init_arguments_dict["parameters"]).splitlines() - - ### add the binomial spike train equations and parameters - ( - equations_line_split_list, - parameters_line_split_list, - ) = self.add_binomial_input( - equations_line_split_list, - parameters_line_split_list, - 
afferent_population_list, - proj_target_type_list, + neuron_model_init_parameter_dict (dict): + Dict with the parameters and equations of the neuron model + neuron_model_attributes_name_list (list[str]): + List of the names of the attributes of the neuron model + mode (str): + Mode for which the neuron model should be adjusted + """ + ### get the equations of the neuron model as a list of strings + equations_line_split_list = str( + neuron_model_init_parameter_dict["equations"] + ).splitlines() + ### get the parameters of the neuron model as a list of strings + parameters_line_split_list = str( + neuron_model_init_parameter_dict["parameters"] + ).splitlines() + + if mode == "normal": + ### add v_before_psp=v at the beginning of the equations + equations_line_split_list.insert(0, "v_before_psp = v") + ### add v_psp_thresh to the parameters + parameters_line_split_list.append("v_psp_thresh = 0 : population") + elif mode == "v_clamp": + ### get new equations for voltage clamp + equations_new_list = CreateVoltageClampEquations( + equations_line_split_list, neuron_model_attributes_name_list + ).eq_new + neuron_model_init_parameter_dict["equations"] = equations_new_list + ### add v_clamp_rec_thresh to the parameters + parameters_line_split_list.append("v_clamp_rec_thresh = 0 : population") + + ### join equations and parameters to a string and store them in the dict + neuron_model_init_parameter_dict["equations"] = "\n".join( + equations_line_split_list + ) + neuron_model_init_parameter_dict["parameters"] = "\n".join( + parameters_line_split_list ) - ### combine string lines to multiline strings again - init_arguments_dict["parameters"] = "\n".join(parameters_line_split_list) - init_arguments_dict["equations"] = "\n".join(equations_line_split_list) - ### create neuron model with new equations - neuron_model_new = Neuron(**init_arguments_dict) +class PreparePSP: + """ + Find v_rest, corresponding I_hold (in case of self-active neurons) and an + init_sampler to initialize 
the neuron model for the PSP calculation for each + population. + """ - ### create the single neuron population - single_neuron_v_clamp = Population( - 1, - neuron=neuron_model_new, - name=f"single_neuron_v_clamp_{pop_name}", - ) + def __init__( + self, + model: CompNeuroModel, + single_nets: CreateSingleNeuronNetworks, + do_not_config_list: list[str], + simulator: "Simulator", + do_plot: bool, + figure_folder: str, + ): + """ + Args: + model (CompNeuroModel): + Model to be prepared + do_not_config_list (list[str]): + List of populations which should not be configured + do_plot (bool): + If True, plot the membrane potential + """ + self._single_nets = single_nets + self._prepare_psp_dict = {} + self._simulator = simulator + self._figure_folder = figure_folder + ### loop over all populations + for pop_name in model.populations: + ### skip populations which should not be configured + if pop_name in do_not_config_list: + continue + ### find initial v_rest using the voltage clamp network + sf.Logger().log( + f"[{pop_name}]: search v_rest with y(X) = delta_v_2000(v=X) using grid search" + ) + v_rest, delta_v_v_rest, variables_v_rest = self._find_v_rest_initial( + pop_name=pop_name, + do_plot=do_plot, + ) + sf.Logger().log( + f"[{pop_name}]: found v_rest={v_rest} with delta_v_2000(v=v_rest)={delta_v_v_rest}" + ) + ### check if v is constant after setting v to v_rest by simulating the normal + ### single neuron network for 2000 ms + v_rest_is_constant, v_rest_arr = self._get_v_rest_is_const( + pop_name=pop_name, + variables_v_rest=variables_v_rest, + do_plot=do_plot, + ) + + if v_rest_is_constant: + ### v_rest found (last v value of the previous simulation), no + ### I_app_hold needed + v_rest = v_rest_arr[-1] + I_app_hold = 0 + else: + ### there is no resting_state i.e. 
neuron is self-active --> find + ### smallest negative I_app to silence neuron + sf.Logger().log( + f"[{pop_name}]: neuron seems to be self-active --> find smallest I_app to silence the neuron" + ) + v_rest, I_app_hold = self._find_I_app_hold( + pop_name=pop_name, + variables_v_rest=variables_v_rest, + ) + sf.Logger().log( + f"[{pop_name}]: final values: I_app_hold = {I_app_hold}, v_rest = {v_rest}" + ) - ### set the attributes of the neuron - for attr_name, attr_val in self.neuron_model_parameters_dict[pop_name]: - setattr(single_neuron_v_clamp, attr_name, attr_val) + ### get the sampler for the initial variables + psp_init_sampler = self._get_init_neuron_variables_for_psp( + pop_name=pop_name, + v_rest=v_rest, + I_app_hold=I_app_hold, + ) + ### store the prepare PSP information + self._prepare_psp_dict[pop_name] = {} + self._prepare_psp_dict[pop_name]["v_rest"] = v_rest + self._prepare_psp_dict[pop_name]["I_app_hold"] = I_app_hold + self._prepare_psp_dict[pop_name]["psp_init_sampler"] = psp_init_sampler - ### create Monitor for single neuron - mon_single = Monitor(single_neuron_v_clamp, ["v_clamp_rec_sign"]) + def get(self, pop_name: str): + """ + Return the prepare PSP information for the given population. 
- ### create network with single neuron - net_single = Network() - net_single.add([single_neuron_v_clamp, mon_single]) - compile_in_folder( - folder_name=f"single_v_clamp_net_{pop_name}", silent=True, net=net_single + Args: + pop_name (str): + Name of the population + + Returns: + ReturnPreparePSP: + Prepare PSP information for the given population with Attributes: v_rest, + I_app_hold, psp_init_sampler + """ + return self.ReturnPreparePSP( + v_rest=self._prepare_psp_dict[pop_name]["v_rest"], + I_app_hold=self._prepare_psp_dict[pop_name]["I_app_hold"], + psp_init_sampler=self._prepare_psp_dict[pop_name]["psp_init_sampler"], ) - ### network dict - net_single_dict = { - "net": net_single, - "population": net_single.get(single_neuron_v_clamp), - "monitor": net_single.get(mon_single), - } + class ReturnPreparePSP: + def __init__( + self, v_rest: float, I_app_hold: float, psp_init_sampler: ArrSampler + ): + self.v_rest = v_rest + self.I_app_hold = I_app_hold + self.psp_init_sampler = psp_init_sampler - return net_single_dict + def _get_init_neuron_variables_for_psp( + self, pop_name: str, v_rest: float, I_app_hold: float + ): + """ + Get the initial variables of the neuron model for the PSP calculation. 
+ + Args: + pop_name (str): + Name of the population + v_rest (float): + Resting membrane potential + I_app_hold (float): + Current which silences the neuron + + Returns: + sampler (ArrSampler): + Sampler with the initial variables of the neuron model + """ + ### get the names of the variables of the neuron model + var_name_list = self._single_nets.single_net( + pop_name=pop_name, mode="normal" + ).population.variables + ### get the variables of the neuron model after 5000 ms + var_arr = self._simulator.get_v_psp( + v_rest=v_rest, I_app_hold=I_app_hold, pop_name=pop_name + ) + ### create a sampler with this single data sample + sampler = ArrSampler(arr=var_arr, var_name_list=var_name_list) + return sampler - def find_v_rest_for_psp(self, pop_name, do_plot=False): + def _find_I_app_hold( + self, + pop_name: str, + variables_v_rest: dict, + ): """ - using both single networks to find v_rest and I_app_hold + Find the current which silences the neuron. + + Args: + pop_name (str): + Name of the population + variables_v_rest (dict): + Stady state variables of the neuron during setting v_rest as membrane + potential + + Returns: + v_rest (float): + Resting membrane potential + I_app_hold (float): + Current which silences the neuron + """ + ### find I_app_hold with find_x_bound + sf.Logger().log( + f"[{pop_name}]: search I_app_hold with y(X) = CHANGE_OF_V(I_app=X)" + ) + + I_app_hold = -ef.find_x_bound( + ### negative current initially reduces v then v climbs back up --> + ### get_v_change_after_v_rest checks how much v changes during second half of + ### 2000 ms simulation + y=lambda X_val: -self._get_v_change_after_v_rest( + pop_name=pop_name, + variables_v_rest=variables_v_rest, + ### find_x_bound only uses positive values for X and + ### increases them, expecting to increase y, therefore use -X for I_app + ### (increasing X will "increase" negative current) and negative sign for + ### the returned value (for no current input the change is positive, this + ### should 
decrease to zero, with negative sign: for no current input the + ### change is negative, this should increase above zero) + I_app=-X_val, + ), + ### y is initially negative and should increase above 0, therefore search for + ### y_bound=0 with bound_type="greater" + x0=0, + y_bound=0, + tolerance=0.01, + bound_type="greater", + ) + ### again simulate the neuron with the obtained I_app_hold to get the new v_rest + v_rest_arr = self._simulator.get_v_2000( + pop_name=pop_name, + initial_variables=variables_v_rest, + I_app=I_app_hold, + do_plot=False, + ) + v_rest = v_rest_arr[-1] + return v_rest, I_app_hold + + def _find_v_rest_initial( + self, + pop_name: str, + do_plot: bool, + ): """ + Find the initial v_rest with the voltage clamp single neuron network for the + given population. Furthermore, get the change of v durign setting v_rest and the + stady state variables of the neuron (at the end of the simulation). + + Args: + pop_name (str): + Name of the population + do_plot (bool): + True if plots should be created, False otherwise - ### find v where dv/dt is minimal with voltage clamp network (best = 0, it can only be >= 0) - self.log("search v_rest with y(X) = delta_v_2000(v=X) using grid search") + Returns: + v_rest (float): + Resting membrane potential + detla_v_v_rest (float): + Change of the membrane potential during setting v_rest as membrane + potential + variables_v_rest (dict): + Stady state variables of the neuron during setting v_rest as membrane + potential + """ + ### find v where dv/dt is minimal with voltage clamp network (best = 0, it can + ### only be >= 0) v_arr = np.linspace(-90, -20, 200) v_clamp_arr = np.array( [ - self.get_v_clamp_2000( - v=X_val, - net=self.net_single_v_clamp_dict[pop_name]["net"], - population=self.net_single_v_clamp_dict[pop_name]["population"], - ) - for X_val in v_arr + self._simulator.get_v_clamp_2000(pop_name=pop_name, v=v_val) + for v_val in v_arr ] ) - v_rest = np.min(v_arr[argrelmin(v_clamp_arr)[0]]) + 
v_clamp_min_idx = argrelmin(v_clamp_arr)[0] + v_rest = np.min(v_arr[v_clamp_min_idx]) if do_plot: plt.figure() plt.plot(v_arr, v_clamp_arr) plt.axvline(v_rest, color="k") plt.axhline(0, color="k", ls="dashed") - plt.savefig(f"v_clamp_{pop_name}.png") + plt.savefig(f"{self._figure_folder}/v_clamp_{pop_name}.png") plt.close("all") - ### do again the simulation with the obtained v_rest to get the stady state values - detla_v_rest = ( - self.get_v_clamp_2000( - v=v_rest, - net=self.net_single_v_clamp_dict[pop_name]["net"], - population=self.net_single_v_clamp_dict[pop_name]["population"], - ) - * dt() + ### do again the simulation only with the obtained v_rest to get the detla_v for + ### v_rest + detla_v_v_rest = ( + self._simulator.get_v_clamp_2000(pop_name=pop_name, v=v_rest) * dt() ) - obtained_variables = { - var_name: getattr( - self.net_single_v_clamp_dict[pop_name]["population"], var_name - ) - for var_name in self.net_single_v_clamp_dict[pop_name][ - "population" - ].variables + population = self._single_nets.single_net( + pop_name=pop_name, mode="v_clamp" + ).population + ### and the stady state variables of the neuron + variables_v_rest = { + var_name: getattr(population, var_name) for var_name in population.variables } - self.log( - f"for {pop_name} found v_rest={v_rest} with delta_v_2000(v=v_rest)={detla_v_rest}" - ) + return v_rest, detla_v_v_rest, variables_v_rest + + def _get_v_rest_is_const(self, pop_name: str, variables_v_rest: dict, do_plot=bool): + """ + Check if the membrane potential is constant after setting it to v_rest. 
+ Args: + pop_name (str): + Name of the population + variables_v_rest (dict): + Stady state variables of the neuron during setting v_rest as membrane + potential, used as initial variables for the simulation + do_plot (bool): + True if plots should be created, False otherwise + + Returns: + v_rest_is_constant (bool): + True if the membrane potential is constant, False otherwise + v_rest_arr (np.array): + Membrane potential for the 2000 ms simulation with shape: (time_steps,) + """ ### check if the neuron stays at v_rest with normal neuron - ### if it stays --> use new value as v_rest (its even a bit finer as before) - ### if it not stays --> find I_app which holds the membrane potential constant - v_rest_arr = self.get_new_v_rest_2000(pop_name, obtained_variables) - v_rest_arr_is_const = ( - np.std(v_rest_arr, axis=0) - <= np.mean(np.absolute(v_rest_arr), axis=0) / 1000 + v_rest_arr = self._simulator.get_v_2000( + pop_name=pop_name, + initial_variables=variables_v_rest, + I_app=0, + do_plot=do_plot, ) - if v_rest_arr_is_const: - ### v_rest found, no I_app_hold needed - v_rest = v_rest_arr[-1] - I_app_hold = 0 - self.log(f"final v_rest = {v_rest_arr[-1]}") - else: - ### there is no v_rest i.e. 
neuron is self-active --> find smallest negative I_app to silence neuron - self.log( - "neuron seems to be self-active --> find smallest I_app to silence the neuron" - ) - - ### negative current initially reduces v - ### then v climbs back up - ### check if the second half of v is constant if yes fine if not increase negative I_app - ### find I_app_hold with incremental_continuous_bound_search - self.log("search I_app_hold with y(X) = CHANGE_OF_V(I_app=X)") - I_app_hold = -self.incremental_continuous_bound_search( - y_X=lambda X_val: self.get_v_rest_arr_const( - pop_name=pop_name, - obtained_variables=obtained_variables, - I_app=-X_val, - ), - y_bound=0, - X_0=0, - y_0=self.get_v_rest_arr_const( - pop_name=pop_name, - obtained_variables=obtained_variables, - I_app=0, - ), - X_increase=detla_v_rest, - accept_non_dicontinuity=True, - bound_type="greater", - ) - ### again simulate the neuron with the obtained I_app_hold to get the new v_rest - v_rest_arr = self.get_new_v_rest_2000( - pop_name, obtained_variables, I_app=I_app_hold - ) - v_rest = v_rest_arr[-1] - self.log(f"I_app_hold = {I_app_hold}, resulting v_rest = {v_rest}") - - ### get the sampler for the initial variables - variable_init_sampler = self.get_init_neuron_variables_for_psp( - net=self.net_single_dict[pop_name]["net"], - pop=self.net_single_dict[pop_name]["population"], - v_rest=v_rest, - I_app_hold=I_app_hold, + v_rest_arr_is_const = ( + np.std(v_rest_arr) <= np.mean(np.absolute(v_rest_arr)) / 1000 ) + return v_rest_arr_is_const, v_rest_arr - return { - "v_rest": v_rest, - "I_app_hold": I_app_hold, - "variable_init_sampler": variable_init_sampler, - } - - def get_v_rest_arr_const( - self, pop_name, obtained_variables, I_app, return_bool=False + def _get_v_change_after_v_rest( + self, pop_name: str, variables_v_rest: dict, I_app: float ): """ - sets I_app and obtained varaibles in single neuron - simulates 2000 ms and returns how much the v changes - 0 = constant, negative = not constant - """ - 
v_rest_arr = self.get_new_v_rest_2000(pop_name, obtained_variables, I_app=I_app) - v_rest_arr = v_rest_arr[len(v_rest_arr) // 2 :] + Check how much the membrane potential changes after setting it to v_rest. - if return_bool: - return 0 <= np.mean(np.absolute(v_rest_arr), axis=0) / 1000 - np.std( - v_rest_arr, axis=0 - ) - else: - return np.mean(np.absolute(v_rest_arr), axis=0) / 1000 - np.std( - v_rest_arr, axis=0 - ) + Args: + pop_name (str): + Name of the population + variables_v_rest (dict): + Stady state variables of the neuron during setting v_rest as membrane + potential, used as initial variables for the simulation + do_plot (bool): + True if plots should be created, False otherwise + + Returns: + change_after_v_rest (np.array): + Change of the membrane potential after setting it to v_rest + """ + ### simulate 2000 ms after setting v_rest + v_rest_arr = self._simulator.get_v_2000( + pop_name=pop_name, + initial_variables=variables_v_rest, + I_app=I_app, + do_plot=False, + ) + ### check how much v changes during the second half + ### std(v) - mean(v)/1000 should be close to 0, the larger the value the more v + ### changes + change_after_v_rest = ( + np.std(v_rest_arr[len(v_rest_arr) // 2 :], axis=0) + - np.mean(np.absolute(v_rest_arr[len(v_rest_arr) // 2 :]), axis=0) / 1000 + ) + return change_after_v_rest - def get_new_v_rest_2000( - self, pop_name, obtained_variables, I_app=None, do_plot=True + +class Simulator: + """ + Class with simulations for the single neuron networks. 
+ """ + + def __init__( + self, + single_nets: CreateSingleNeuronNetworks, + figure_folder: str, + prepare_psp: PreparePSP | None = None, ): """ - use single_net to simulate 2000 ms and return v + Args: + single_nets (CreateSingleNeuronNetworks): + Single neuron networks for normal and voltage clamp mode + figure_folder (str): + Folder where the figures should be saved + prepare_psp (PreparePSP): + Prepare PSP information + """ + self._single_nets = single_nets + self._prepare_psp = prepare_psp + self._figure_folder = figure_folder + + def get_v_clamp_2000( + self, + pop_name: str, + v: float | None = None, + I_app: float | None = None, + ) -> float: + """ + Simulates the v_clamp single neuron network of the given pop_name for 2000 ms + and returns the v_clamp_rec value of the single neuron after 2000 ms. The + returned values is "dv/dt". Therefore, to get the hypothetical change of v for a + single time step multiply it with dt! + + Args: + pop_name (str): + Name of the population + v (float): + Membrane potential (does not change over time due to voltage clamp) + I_app (float): + Applied current + + Returns: + v_clamp_rec (float): + v_clamp_rec value of the single neuron after 2000 ms + """ + ### get the network, population, init_sampler + net = self._single_nets.single_net(pop_name=pop_name, mode="v_clamp").net + population = self._single_nets.single_net( + pop_name=pop_name, mode="v_clamp" + ).population + init_sampler = self._single_nets.single_net( + pop_name=pop_name, mode="v_clamp" + ).init_sampler + ### reset network + net.reset() + net.set_seed(0) + ### set the initial variables of the neuron model + if init_sampler is not None: + init_sampler.set_init_variables(population) + ### set v and I_app + if v is not None: + population.v = v + if I_app is not None: + population.I_app = I_app + ### simulate 2000 ms + net.simulate(2000) + ### return the v_clamp_rec value of the single neuron after 2000 ms + return population.v_clamp_rec[0] + + def get_v_2000( + 
self, pop_name, initial_variables, I_app=None, do_plot=False + ) -> np.ndarray: """ - net = self.net_single_dict[pop_name]["net"] - pop = self.net_single_dict[pop_name]["population"] - monitor = self.net_single_dict[pop_name]["monitor"] + Simulate normal single neuron 2000 ms and return v for this duration. + + Args: + pop_name (str): + Name of the population + initial_variables (dict): + Initial variables of the neuron model + I_app (float): + Applied current + do_plot (bool): + If True, plot the membrane potential + + Returns: + v_arr (np.array): + Membrane potential for the 2000 ms simulation with shape: (time_steps,) + """ + ### get the network, population, monitor + net = self._single_nets.single_net(pop_name=pop_name, mode="normal").net + population = self._single_nets.single_net( + pop_name=pop_name, mode="normal" + ).population + monitor = self._single_nets.single_net(pop_name=pop_name, mode="normal").monitor + ### reset network net.reset() - ### set variables - for var_name, var_val in obtained_variables.items(): - if var_name in pop.variables: - setattr(pop, var_name, var_val) - if not isinstance(I_app, type(None)): - pop.I_app = I_app + net.set_seed(0) + ### set the initial variables of the neuron model + for var_name, var_val in initial_variables.items(): + if var_name in population.variables: + setattr(population, var_name, var_val) + ### set I_app + if I_app is not None: + population.I_app = I_app ### simulate net.simulate(2000) v_arr = monitor.get("v")[:, 0] if do_plot: plt.figure() - plt.title(f"{pop.I_app}") + plt.title(f"{population.I_app}") plt.plot(v_arr) plt.savefig(f"tmp_v_rest_{pop_name}.png") plt.close("all") return v_arr - def get_nr_spikes_from_v_rest_2000( - self, pop_name, obtained_variables, I_app=None, do_plot=True - ): + def get_v_psp(self, v_rest: float, I_app_hold: float, pop_name: str) -> np.ndarray: """ - use single_net to simulate 2000 ms and return number spikes + Simulate the single neuron network of the given pop_name for 5000 
ms and return + the variables of the neuron model after 5000 ms. + + Args: + v_rest (float): + Resting potential + I_app_hold (float): + Applied current to hold the resting potential + pop_name (str): + Name of the population + + Returns: + var_arr (np.array): + Variables of the neuron model after 5000 ms with shape: (1, n_vars) """ - net = self.net_single_dict[pop_name]["net"] - pop = self.net_single_dict[pop_name]["population"] - mon = self.net_single_dict[pop_name]["monitor"] + + ### get the network, population, monitor + net = self._single_nets.single_net(pop_name=pop_name, mode="normal").net + population = self._single_nets.single_net( + pop_name=pop_name, mode="normal" + ).population + ### reset network net.reset() - ### set variables - for var_name, var_val in obtained_variables.items(): - if var_name in pop.variables: - setattr(pop, var_name, var_val) - if not isinstance(I_app, type(None)): - pop.I_app = I_app + net.set_seed(0) + ### set the initial variables of the neuron model + population.v = v_rest + population.I_app = I_app_hold ### simulate - simulate(2000) - ### get spikes - spike_dict = mon.get("spike") - nr_spikes = len(spike_dict[0]) - return nr_spikes - - def log(self, txt): - caller_frame = inspect.currentframe().f_back - caller_name = caller_frame.f_code.co_name - - if caller_name == self.caller_name: - txt = f"{textwrap.indent(str(txt), ' ')}" - else: - txt = f"[{caller_name}]:\n{textwrap.indent(str(txt), ' ')}" - - self.caller_name = caller_name - - if self.log_exist: - with open("model_conf_log", "a") as f: - print(txt, file=f) - else: - with open("model_conf_log", "w") as f: - print(txt, file=f) - self.log_exist = True + net.simulate(5000) + ### get the variables of the neuron after 5000 ms in the shape (1, n_vars) + var_name_list = list(population.variables) + var_arr = np.zeros((1, len(var_name_list))) + get_arr = np.array( + [getattr(population, var_name) for var_name in population.variables] + ) + var_arr[0, :] = get_arr[:, 0] + return 
var_arr - def _p_g(self, txt): - """ - prints guiding text + def get_ipsp( + self, + pop_name: str, + g_ampa: float = 0, + g_gaba: float = 0, + do_plot: bool = False, + ): """ - print_width = min([os.get_terminal_size().columns, 80]) + Simulate the single neuron network of the given pop_name for max 5000 ms. The + neuron is hold at the resting potential by setting the applied current to + I_app_hold. Then the conductances g_ampa and g_gaba are applied (simulating a + single incoming ampa/gaba spike). The maximum of the (negative) difference of + the membrane potential and the resting potential is returned as the IPSP. - if self.print_guide: - print("\n[model_configurator guide]:") - for line in txt.splitlines(): - wrapped_text = textwrap.fill( - line, width=print_width - 5, replace_whitespace=False - ) - wrapped_text = textwrap.indent(wrapped_text, " |") - print(wrapped_text) - print("") + Args: + pop_name (str): + Name of the population + g_ampa (float): + Conductance of the ampa synapse + g_gaba (float): + Conductance of the gaba synapse + do_plot (bool): + If True, plot the membrane potential - def _p_w(self, txt): - """ - prints warning - """ - print_width = min([os.get_terminal_size().columns, 80]) + Returns: + psp (float): + Maximum of the (negative) difference of the membrane potential and the + resting potential + """ + ### get the network, population, monitor from single nets + net = self._single_nets.single_net(pop_name=pop_name, mode="normal").net + population = self._single_nets.single_net( + pop_name=pop_name, mode="normal" + ).population + monitor = self._single_nets.single_net(pop_name=pop_name, mode="normal").monitor + ### get init_sampler, I_app_hold from prepare_psp + init_sampler = self._prepare_psp.get(pop_name=pop_name).psp_init_sampler + I_app_hold = self._prepare_psp.get(pop_name=pop_name).I_app_hold + ### reset network + net.reset() + net.set_seed(0) + ### set the initial variables of the neuron model + if init_sampler is not None: + 
init_sampler.set_init_variables(population) + ### set I_app (I_app_hold) to hold the resting potential + population.I_app = I_app_hold + ### simulate 50 ms initial duration + net.simulate(50) + ### get the current v and set it as v_psp_thresh for the population's stop + ### condition + v_rec_rest = population.v[0] + population.v_psp_thresh = v_rec_rest + ### apply given conductances --> changes v, causes psp + population.g_ampa = g_ampa + population.g_gaba = g_gaba + ### simulate until v is near v_rec_rest again or until 5000 ms + net.simulate_until(max_duration=5000, population=population) + ### get v and spike dict to calculate psp + v_rec = monitor.get("v")[:, 0] + spike_dict = monitor.get("spike") + ### if neuron spiked only check psps until spike time, otherwise until last + ### (current) time step + spike_timestep_list = spike_dict[0] + [net.get_current_step()] + end_timestep = int(round(min(spike_timestep_list), 0)) + ### find ipsp + ### 1st calculate difference of v and v_rest + v_diff = v_rec[:end_timestep] - v_rec_rest + ### clip diff between None and zero, only take negative values (ipsp) + v_diff = np.clip(v_diff, None, 0) + ### add a small value to the clipped values, thus only large enough negative + ### values considered as ipsp + v_diff = v_diff + 0.01 + ### get the minimum of the difference as ipsp + psp = np.min(v_diff) + ### multiply with -1 to get the positive value of the ipsp + psp = -1 * psp - print("\n[model_configurator WARNING]:") - for line in str(txt).splitlines(): - wrapped_text = textwrap.fill( - line, width=print_width - 5, replace_whitespace=False + if do_plot: + plt.figure() + plt.title( + f"g_ampa={g_ampa}\ng_gaba={g_gaba}\nv_rec_rest={v_rec_rest}\npsp={psp}" + ) + plt.plot(v_rec[:end_timestep]) + plt.plot([0, end_timestep], [v_rec_rest, v_rec_rest], "k--") + plt.xlim(0, end_timestep) + plt.tight_layout() + plt.savefig( + f"{self._figure_folder}/tmp_psp_{population.name}_{int(g_ampa*1000)}_{int(g_gaba*1000)}.png" ) - wrapped_text = 
textwrap.indent(wrapped_text, " |") - print(wrapped_text) - print("") + plt.close("all") - def get_base(self): + return psp + + def get_firing_rate( + self, pop_name: str, I_app: float = 0, g_ampa: float = 0, g_gaba: float = 0 + ): """ - Obtain the baseline currents for the configured populations to obtian the target firing rates - with the currently set weights, set by .set_weights or .set_syn_load - - return: - I_base_dict, dict - Dictionary with baseline curretns for all configured populations. - """ - - ### create many neuron network - net_many_dict = self.create_many_neuron_network() - - ### use voltage clamp networks and many neuron networks to get baseline currents - I_base_dict = {} - target_firing_rate_changed = True - nr_max_iter = 1 - nr_iter = 0 - while target_firing_rate_changed and nr_iter < nr_max_iter: - ### get baseline current values, if target firing rates could not - ### be reached, try again with new target firing rates - ( - target_firing_rate_changed, - I_base_dict, - ) = self.find_base_current(net_many_dict) - nr_iter += 1 - - return I_base_dict - - def find_base_current(self, net_many_dict): - """ - search through whole I_app space - for each population simulate a network with 10000 neurons, each neuron has a different I_app value - g_ampa and g_gaba values are internally created using - the weigths stored in the afferent_projection dict - and target firing rates stored in the target_firing_rate_dict - """ - - I_app_arr_list = [] - weight_list_list = [] - pre_pop_name_list_list = [] - rate_list_list = [] - eff_size_list_list = [] - ### get lists which define the current weights to the afferent populations - ### get lists which define the current rates of the afferent populations - ### get lists with the names of the afferent populations - ### the length of the lists has to be the number of networks i.e. 
the number of populations - for pop_name in self.pop_name_list: - ### get the weights, names, rates of the afferent populations - weight_list = self.afferent_projection_dict[pop_name]["weights"] - proj_name_list = self.afferent_projection_dict[pop_name]["projection_names"] - pre_pop_name_list = [ - self.get_proj_dict(proj_name)["pre_pop_name"] - for proj_name in proj_name_list - ] - rate_list = self.get_rate_list_for_pop(pop_name) - eff_size_list = self.get_eff_size_list_for_pop(pop_name) - ### get correct magnitude of I_app using the voltage clamp networks - I_app_magnitude = self.get_I_app_magnitude( - pop_name, - pre_pop_name_list=pre_pop_name_list, - eff_size_list=eff_size_list, - rate_list=rate_list, - weight_list=weight_list, - ) - ### get the I_app_arr - I_app_arr = np.linspace( - I_app_magnitude, - I_app_magnitude + self.I_app_max_dict[pop_name], - self.nr_neurons_per_net, - ) - ### append these lists to the list for all post populations i.e. networks - weight_list_list.append(weight_list) - pre_pop_name_list_list.append(pre_pop_name_list) - rate_list_list.append(rate_list) - eff_size_list_list.append(eff_size_list) - I_app_arr_list.append(I_app_arr) - - ### create list with variable_init_samplers of populations - variable_init_sampler_list = [ - self.net_single_dict[pop_name]["variable_init_sampler"] - for pop_name in self.pop_name_list - ] + Simulate the single neuron network of the given pop_name for 500 ms initial + duration and 5000 ms. An input current I_app and the conductances g_ampa and + g_gaba are applied. The firing rate is calculated from the spikes in the last + 5000 ms. 
- ### get firing rates obtained with all I_app values - ### rates depend on the current weights and the current target firing rates - nr_networks = len(self.pop_name_list) - possible_firing_rates_list_list = parallel_run( - method=get_rate_parallel, - networks=net_many_dict["network_list"], - **{ - "population": net_many_dict["population_list"], - "variable_init_sampler": variable_init_sampler_list, - "monitor": net_many_dict["monitor_list"], - "I_app_arr": I_app_arr_list, - "weight_list": weight_list_list, - "pre_pop_name_list": pre_pop_name_list_list, - "rate_list": rate_list_list, - "eff_size_list": eff_size_list_list, - "simulation_dur": [self.simulation_dur] * nr_networks, - }, - ) + Args: + pop_name (str): + Name of the population + I_app (float, optional): + Applied current + g_ampa (float, optional): + Conductance of the ampa synapse + g_gaba (float, optional): + Conductance of the gaba synapse - ### catch if target firing rate in any population cannot be reached - I_app_best_dict = {} - target_firing_rate_changed = False - for pop_idx, pop_name in enumerate(self.pop_name_list): - target_firing_rate = self.target_firing_rate_dict[pop_name] - possible_firing_rates_arr = np.array( - possible_firing_rates_list_list[pop_idx] - ) - I_app_arr = I_app_arr_list[pop_idx] - print(f"firing rates for pop {pop_name}") - print(f"{I_app_arr}") - print(f"{possible_firing_rates_arr}\n") - possible_f_min = possible_firing_rates_arr.min() - possible_f_max = possible_firing_rates_arr.max() - if not ( - target_firing_rate >= possible_f_min - and target_firing_rate <= possible_f_max - ): - new_target_firing_rate = np.array([possible_f_min, possible_f_max])[ - np.argmin( - np.absolute( - np.array([possible_f_min, possible_f_max]) - - target_firing_rate - ) - ) - ] - ### if the possible firing rates are too small --> what (high) firing rate could be maximally reached with a hypothetical g_ampa_max and I_app_max - ### if the possible firing rates are too large --> waht (low) firing 
rate could be reached with g_gaba_max and -I_app_max - warning_txt = f"WARNING get_possible_rates: target firing rate of population {pop_name}({target_firing_rate}) cannot be reached.\nPossible range with current synaptic load: [{round(possible_f_min,1)},{round(possible_f_max,1)}].\nSet firing rate to {round(new_target_firing_rate,1)}." - self._p_w(warning_txt) - self.log(warning_txt) - self.target_firing_rate_dict[pop_name] = new_target_firing_rate - target_firing_rate = self.target_firing_rate_dict[pop_name] - target_firing_rate_changed = True - ### find best I_app for reaching target firing rate - best_idx = np.argmin( - np.absolute(possible_firing_rates_arr - target_firing_rate) - ) - ### take all possible firing rates in range target firing rate +-10 - lower_rate = max([0, target_firing_rate - 10]) - higher_rate = target_firing_rate + 10 - rate_range_idx_arr = ( - (possible_firing_rates_arr >= lower_rate).astype(int) - * (possible_firing_rates_arr <= higher_rate).astype(int) - ).astype(bool) - possible_firing_rates_arr = possible_firing_rates_arr[rate_range_idx_arr] - I_app_arr = I_app_arr[rate_range_idx_arr] - ### now do linear fit to find I_app for target firing rate - if len(I_app_arr) > 10: - reg = LinearRegression().fit( - X=possible_firing_rates_arr.reshape(-1, 1), y=I_app_arr - ) - I_app_best_dict[pop_name] = reg.predict( - np.array([[target_firing_rate]]) - )[0] - else: - I_app_best_dict[pop_name] = 0 - plt.figure(figsize=(6.4, 4.8 * 2)) - plt.subplot(211) - plt.plot(I_app_arr, possible_firing_rates_arr) - plt.axhline(target_firing_rate, color="k") - plt.axvline(I_app_best_dict[pop_name], color="r") - plt.subplot(212) - plt.plot( - I_app_arr, np.absolute(possible_firing_rates_arr - target_firing_rate) - ) - plt.tight_layout() - plt.savefig(f"possible_firing_rate_{pop_name}.png", dpi=300) - plt.close("all") + Returns: + rate (float): + Firing rate in Hz + """ + + ### get the network, population, monitor, init_sampler from single nets + net = 
self._single_nets.single_net(pop_name=pop_name, mode="normal").net + population = self._single_nets.single_net( + pop_name=pop_name, mode="normal" + ).population + monitor = self._single_nets.single_net(pop_name=pop_name, mode="normal").monitor + init_sampler = self._single_nets.single_net( + pop_name=pop_name, mode="normal" + ).init_sampler + ### reset network + net.reset() + net.set_seed(0) + ### set the initial variables of the neuron model + if init_sampler is not None: + init_sampler.set_init_variables(population) + ### slow down conductances (i.e. make them constant) + population.tau_ampa = 1e20 + population.tau_gaba = 1e20 + ### apply given variables + population.I_app = I_app + population.g_ampa = g_ampa + population.g_gaba = g_gaba + ### simulate 500 ms initial duration + 5000 ms + net.simulate(500 + 5000) + ### get rate for the last 5000 ms + spike_dict = monitor.get("spike") + time_list = np.array(spike_dict[0]) + nbr_spks = np.sum((time_list > (500 / dt())).astype(int)) + rate = nbr_spks / (5000 / 1000) - if target_firing_rate_changed and False: - print_df(pd.DataFrame(self.afferent_projection_dict)) - print_df(pd.DataFrame(self.g_max_dict)) - ### TODO cannot reach firing rates for example for thal because I_app_max is too small, this +100Hz method seems not to work well - ### maybe use the weights and a voltage clamp neuron to find I_app - ### like with I_app_hold - ### weights i.e. spike trains cause dv/dt to be e.g. 
extremely negative --> then find I_app to make dv/dt zero - ### this I_app should then be "near" the I_app needed to reach the target firing rate - quit() + return rate - return [target_firing_rate_changed, I_app_best_dict] - def get_I_app_magnitude( +class ModelConfigurator: + def __init__( self, - pop_name, - pre_pop_name_list=[], - eff_size_list=[], - rate_list=[], - weight_list=[], + model: CompNeuroModel, + target_firing_rate_dict: dict, + max_psp: float = 10.0, + do_not_config_list: list[str] = [], + print_guide: bool = False, + I_app_variable: str = "I_app", + cache: bool = False, + clear_cache: bool = False, + log_file: str | None = None, ): - """ - Get the correct magnitude of I_app for the given population. - The correct magnitude is the magnitude which is to negate the synaptic currents caused by the afferent populations. - Use the curretn weights and rates from the afferent_projection_dict and target_firing_rate_dict. - """ - print(f"get v clamp of {pop_name}") - print(f"pre_pop_name_list: {pre_pop_name_list}") - print(f"eff_size_list: {eff_size_list}") - print(f"rate_list: {rate_list}") - print(f"weight_list: {weight_list}") - print(f"I_app_hold: {self.prepare_psp_dict[pop_name]['I_app_hold']}") - print(f"v_rest: {self.prepare_psp_dict[pop_name]['v_rest']}") - - detla_v_rest_0 = ( - self.get_v_clamp_2000( - net=self.net_single_v_clamp_dict[pop_name]["net"], - population=self.net_single_v_clamp_dict[pop_name]["population"], - monitor=self.net_single_v_clamp_dict[pop_name]["monitor"], - v=None, - I_app=0, - variable_init_sampler=self.prepare_psp_dict[pop_name][ - "variable_init_sampler" - ], - pre_pop_name_list=pre_pop_name_list, - eff_size_list=eff_size_list, - rate_list=rate_list, - weight_list=weight_list, - return_1000=True, - ) - * dt() + ### store the given variables + self._model = model + self._do_not_config_list = do_not_config_list + self._target_firing_rate_dict = target_firing_rate_dict + self._base_dict = None + self._figure_folder = 
"model_conf_figures" ### TODO add this to figures + ### create the figure folder + sf.create_dir(self._figure_folder) + ### initialize logger + sf.Logger(log_file=log_file) + ### analyze the given model, create model before analyzing, then clear ANNarchy + self._analyze_model = AnalyzeModel(model=self._model) + ### create the CompNeuroModel object for the reduced model (the model itself is + ### not created yet) + self._model_reduced = CreateReducedModel( + model=self._model, + analyze_model=self._analyze_model, + reduced_size=100, + do_create=False, + do_compile=False, + verbose=True, ) - - if detla_v_rest_0 > 0: - I_app_sign = -1 + ### try to load the cached variables + if clear_cache: + sf.clear_dir(".model_config_cache") + cache_worked = False + if cache: + try: + ### load the cached variables + cache_loaded = sf.load_variables( + name_list=["init_sampler", "max_syn"], + path=".model_config_cache", + ) + cache_worked = True + except FileNotFoundError: + pass + ### create the single neuron networks (networks are compiled and ready to be + ### simulated), normal model for searching for max conductances, max input + ### current, resting firing rate; voltage clamp model for preparing the PSP + ### simulationssearching, i.e., for resting potential and corresponding input + ### current I_hold (for self-active neurons) + if not cache_worked: + self._single_nets = CreateSingleNeuronNetworks( + model=self._model, + analyze_model=self._analyze_model, + do_not_config_list=do_not_config_list, + ) + ### get the init sampler for the populations + self._init_sampler = self._single_nets.init_sampler( + model=self._model, do_not_config_list=do_not_config_list + ) + ### create simulator with single_nets + self._simulator = Simulator( + single_nets=self._single_nets, + figure_folder=self._figure_folder, + prepare_psp=None, + ) + else: + self._init_sampler: CreateSingleNeuronNetworks.AllSampler = cache_loaded[ + "init_sampler" + ] + ### get the resting potential and corresponding 
I_hold for each population using + ### the voltage clamp networks + if not cache_worked: + self._prepare_psp = PreparePSP( + model=self._model, + single_nets=self._single_nets, + do_not_config_list=do_not_config_list, + simulator=self._simulator, + do_plot=False, + figure_folder=self._figure_folder, + ) + self._simulator = Simulator( + single_nets=self._single_nets, + figure_folder=self._figure_folder, + prepare_psp=self._prepare_psp, + ) + ### get the maximum synaptic conductances and input currents for each population + if not cache_worked: + self._max_syn = GetMaxSyn( + model=self._model, + simulator=self._simulator, + do_not_config_list=do_not_config_list, + max_psp=max_psp, + target_firing_rate_dict=target_firing_rate_dict, + ).max_syn_getter else: - I_app_sign = 1 - - self.log("search I_app_magnitude with y(X) = detla_v(I_app=X)") - I_app_magnitude = I_app_sign * self.incremental_continuous_bound_search( - y_X=lambda X_val: self.get_v_clamp_2000( - net=self.net_single_v_clamp_dict[pop_name]["net"], - population=self.net_single_v_clamp_dict[pop_name]["population"], - monitor=self.net_single_v_clamp_dict[pop_name]["monitor"], - v=None, - I_app=I_app_sign * X_val, - variable_init_sampler=self.prepare_psp_dict[pop_name][ - "variable_init_sampler" + self._max_syn = cache_loaded["max_syn"] + ### cache single_nets, prepare_psp, max_syn + if cache and not cache_worked: + sf.save_variables( + variable_list=[ + self._init_sampler, + self._max_syn, ], - pre_pop_name_list=pre_pop_name_list, - eff_size_list=eff_size_list, - rate_list=rate_list, - weight_list=weight_list, - return_1000=True, + name_list=["init_sampler", "max_syn"], + path=".model_config_cache", ) - * dt(), - y_bound=0, - X_0=0, - y_0=detla_v_rest_0, - alpha_abs=0.005, + ### get the weights dictionaries + self._weight_dicts = GetWeights( + model=self._model, + do_not_config_list=do_not_config_list, + analyze_model=self._analyze_model, + max_syn=self._max_syn, ) - print(f"I_app_magnitude: 
{I_app_magnitude}\n") - - return I_app_magnitude - - def get_rate_list_for_pop(self, pop_name): - """ - get the rate list for the afferent populations of the given population + def set_weights(self, weight_dict: dict[str, float]): """ - rate_list = [] - for proj_name in self.afferent_projection_dict[pop_name]["projection_names"]: - proj_dict = self.get_proj_dict(proj_name) - pre_pop_name = proj_dict["pre_pop_name"] - pre_rate = self.target_firing_rate_dict[pre_pop_name] - rate_list.append(pre_rate) - return rate_list + Set the weights of the model. - def get_eff_size_list_for_pop(self, pop_name): - """ - get the effective size list for the afferent populations of the given population + Args: + weight_dict (dict[str, float]): + Dict with the weights for each projection """ - eff_size_list = [] - for proj_name in self.afferent_projection_dict[pop_name]["projection_names"]: - proj_dict = self.get_proj_dict(proj_name) - pre_pop_size = proj_dict["pre_pop_size"] - proj_prob = proj_dict["proj_prob"] - eff_size = int(round(pre_pop_size * proj_prob, 0)) - eff_size_list.append(eff_size) - return eff_size_list + self._weight_dicts.weight_dict = weight_dict + self._check_if_not_config_pops_have_correct_rates() - def set_base(self, I_base_dict=None, I_base_variable="base_mean"): + def set_syn_load( + self, + syn_load_dict: dict[str, float], + syn_contribution_dict: dict[str, dict[str, float]], + ): """ - Set baseline currents in model, compile model and set weights in model. + Set the synaptic load of the model. Args: - I_base_dict: dict, optional, default=None - Dictionary with baseline currents for all populations, if None the baselines are obtained by .get_base - - I_base_variable: str, optional, default="mean_base" - Name of the variable which represents the baseline current in the configured populations. They all have to have the same variable. 
- """ - ### check I_base_dict - if isinstance(I_base_dict, type(None)): - I_base_dict = self.get_base() - - ### clear annarchy, create model and set baselines and weights - cnp_clear() - self.model.create(do_compile=False) - ### set initial variables of populations - for pop_name in self.pop_name_list: - population = get_population(pop_name) - variable_init_sampler = self.net_single_dict[pop_name][ - "variable_init_sampler" - ] - self.set_init_variables(population, variable_init_sampler) - ### set baselines - for pop_name in I_base_dict.keys(): - get_val = getattr(get_population(pop_name), I_base_variable) - try: - set_val = np.ones(len(get_val)) * I_base_dict[pop_name] - except: - set_val = I_base_dict[pop_name] - setattr(get_population(pop_name), I_base_variable, set_val) - ### compile - self.model.compile() - ### set weights - for pop_name in self.pop_name_list: - for proj_idx, proj_name in enumerate( - self.afferent_projection_dict[pop_name]["projection_names"] - ): - weight_val = self.afferent_projection_dict[pop_name]["weights"][ - proj_idx - ] - get_projection(proj_name).w = weight_val - - return I_base_dict + syn_load_dict (dict[str, float]): + Dict with ampa and gaba synaptic load for each population + syn_contribution_dict (dict[str, dict[str, float]]): + Dict with the contribution of the afferent projections to the ampa and + gaba synaptic load of each population + """ + self._weight_dicts.syn_load_dict = syn_load_dict + self._weight_dicts.syn_contribution_dict = syn_contribution_dict + self._check_if_not_config_pops_have_correct_rates() - def set_init_variables(self, population, variable_init_sampler): + def _check_if_not_config_pops_have_correct_rates(self): """ - Set the initial variables of the given population to the given values. + Check if the populations which should not be configured have the correct firing + rates. 
""" - variable_init_arr = variable_init_sampler.sample(len(population), seed=0) - var_name_list = variable_init_sampler.var_name_list - for var_name in population.variables: - if var_name in var_name_list: - set_val = variable_init_arr[:, var_name_list.index(var_name)] - setattr(population, var_name, set_val) + ### initialize the normal model + compile the model + self._init_model_with_fitted_base(base_dict=self._base_dict) - def get_time_in_x_sec(self, x): - """ - Args: - x: int - how many seconds add to the current time + ### record spikes of the do_not_config populations + mon = CompNeuroMonitors( + mon_dict={ + pop_name: ["spike"] for pop_name in self._do_not_config_list + } # _model.populations # tmp test + ) + mon.start() + ### simulate the model for 5000 ms + # get_population("stn").I_app = 8 # tmp test + simulate(5000) + + ### get the firing rates + recordings = mon.get_recordings() + for pop_name in self._do_not_config_list: + spike_dict = recordings[0][f"{pop_name};spike"] + t, _ = raster_plot(spike_dict) + spike_count = len(t) + pop_size = len(get_population(pop_name)) + firing_rate = spike_count / (5 * pop_size) + if np.abs(firing_rate - self._target_firing_rate_dict[pop_name]) > 1: + sf.Logger().log( + f"Warning: Population {pop_name} has a firing rate of {firing_rate} instead of {self._target_firing_rate_dict[pop_name]}" + ) + print( + f"Warning: Population {pop_name} has a firing rate of {firing_rate} instead of {self._target_firing_rate_dict[pop_name]}" + ) - return: - formatted_future_time: str - string of the future time in HH:MM:SS - """ - # Get the current time - current_time = datetime.datetime.now() + # ### tmp plot + # recording_times = mon.get_recording_times() - # Add 10 seconds to the current time - future_time = current_time + datetime.timedelta(seconds=x) + # af.PlotRecordings( + # figname="tmp.png", + # recordings=recordings, + # recording_times=recording_times, + # shape=(len(self._model.populations), 1), + # plan={ + # "position": 
list(range(1, len(self._model.populations) + 1)), + # "compartment": self._model.populations, + # "variable": ["spike"] * len(self._model.populations), + # "format": ["hybrid"] * len(self._model.populations), + # }, + # ) + # quit() - # Format future_time as HH:MM:SS - formatted_future_time = future_time.strftime("%H:%M:%S") + def set_base(self): + """ + Set the baseline currents of the model, found for the current weights to reach + the target firing rates. The model is compiled after setting the baselines. + """ + ### get the base dict + if self._base_dict is None: + self.get_base() - return formatted_future_time + ### initialize the normal model + set the baselines with the base dict + self._init_model_with_fitted_base(base_dict=self._base_dict) - def get_interpolation(self): + def get_base(self): """ - get the interpolations to - predict f with I_app, g_ampa and g_gaba + Get the baseline currents of the model. - sets the class variable self.f_I_g_curve_dict --> for each population a f_I_g_curve function + Returns: + base_dict (dict[str, float]): + Dict with the baseline currents for each population + """ + ### get the base dict + self._base_dict = GetBase( + model_normal=self._model, + model_reduced=self._model_reduced.model_reduced, + target_firing_rate_dict=self._target_firing_rate_dict, + weight_dicts=self._weight_dicts, + do_not_config_list=self._do_not_config_list, + init_sampler=self._init_sampler, + max_syn=self._max_syn, + ).base_dict + return self._base_dict + + def _init_model_with_fitted_base(self, base_dict: dict[str, float] | None = None): + """ + Initialize the neurons of the model using the init_sampler, set the baseline + currents of the model from the base dict (containing fitted baselines) and the + weights from the weight dicts and compile the model. 
+ """ + ### clear ANNarchy and create the normal model + mf.cnp_clear(functions=False, constants=False) + self._model.create(do_compile=False) + ### set the initial variables of the neurons + for pop_name, init_sampler in self._init_sampler.init_sampler_dict.items(): + init_sampler.set_init_variables(get_population(pop_name)) + ### set the baseline currents + if base_dict is not None: + for pop_name, I_app in base_dict.items(): + setattr(get_population(pop_name), "I_app", I_app) + ### compile the model + self._model.compile() + ### set the weights + for proj_name, weight in self._weight_dicts.weight_dict.items(): + setattr(get_projection(proj_name), "w", weight) + + +class Minimize: + def __init__( + self, func, yt, x0, lb, ub, tol_error, tol_convergence, max_it + ) -> None: """ + Args: + func (Callable): + Function which takes a vector as input and returns a vector as output + target_values (np.array): + Target output vector of the function + x0 (np.array): + Initial input vector + lb (np.array): + Lower bounds of the input vector + ub (np.array): + Upper bounds of the input vector + tol_error (float): + If the error is below this value the optimization stops + tol_convergence (float): + If the change of the error stays below this value the optimization stops + max_it (int): + Maximum number of iterations + """ + ### TODO continue here, I think it works but neuron models explode + x = x0 + x_old = x0 + y = yt + error = np.ones(x0.shape) * 20 + error_old = np.ones(x0.shape) * 20 + it = 0 + search_gradient_diff = np.ones(x0.shape) + alpha = np.ones(x0.shape) + error_list = [] + dx_list = [] + dy_list = [] + x_list = [] + y_list = [] + it_list = [] + + def error_changed(error_list, tol, n=3): + if len(error_list) < 2: + return True + return (np.max(error_list[-n:]) - np.min(error_list[-n:])) > tol + + ### TODO not check if error is small enough but if the change of the error + ### converges, for this, check the mean of the last 10 error changes + while it < max_it 
and error_changed(error_list, tol_convergence): + print("\n\nnext iteration") + y_old = y + y = func(x) + dx_list.append(x - x_old) + dy_list.append(y - y_old) + ### TODO if x did not change much, use the previous gradient again + print(f"x: {x}") + print(f"y: {y}") + x_list.append(x) + y_list.append(y) + it_list.append(it) + ### here we know the new y(x) + ### check if the error sign changed + error_old = error + error = yt - y + ### if error is small enough stop the optimization + if np.all(np.abs(error) < tol_error): + break + error_sign_changed = np.sign(error) != np.sign(error_old) + print(f"error_sign_changed: {error_sign_changed}") + ### get how much the error (in total, not for individual inputs) changed + error_list.append(np.mean(np.abs(error))) + print(f"error_list: {error_list}\n") + ### if the error sign changed: + ### - TODO check if error is larger as before, if yes -> use again the previous x, if use previous x also compute current y + ### - we calculate (as usual) a new gradient + ### - we reduce alpha, so this time the step is smaller + error_increased = np.abs(error) > np.abs(error_old) + x[error_sign_changed & error_increased] = x_old[ + error_sign_changed & error_increased + ] + if np.any(error_sign_changed & error_increased): + y = func(x) + + # TODO I do not understand this example, this message was printed but x did not change + # next iteration + # x: [12.56441496 40.92615539 18.96717589 90.30010779] + # y: [30.00888889 59.99777778 50.01333333 96.85333333] + # error_sign_changed: [False False False False] + # error_list: [23.759444444444448, 2.517777777777779, 78.90388888888889, 22.96944444444445] + + # x_plus: [13.56441496 40.92615539 18.96717589 90.30010779] + # y_plus: [32.22222222 60.10888889 50.08666667 97.41111111] + + # x_plus: [12.56441496 42.92615539 18.96717589 90.30010779] + # y_plus: [30.00888889 62.06666667 50.01333333 91.96666667] + + # x_plus: [12.56441496 40.92615539 19.96717589 90.30010779] + # y_plus: [30.00888889 
59.89333333 51.14666667 96.79333333] + + # x_plus: [ 12.56441496 40.92615539 18.96717589 132.96677446] + # y_plus: [ 30.00888889 59.99777778 50.01333333 214.46666667] + + # delta_y: [-8.88888889e-03 2.22222222e-03 -1.33333333e-02 -9.18533333e+01] + # grad: + # [[ 2.21333333e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00] + # [ 1.11111111e-01 2.06888889e+00 -1.04444444e-01 0.00000000e+00] + # [ 7.33333333e-02 0.00000000e+00 1.13333333e+00 0.00000000e+00] + # [ 5.57777778e-01 -4.88666667e+00 -6.00000000e-02 1.17613333e+02]] + # Solution vector x: [-4.01606426e-03 7.08996344e-04 -1.15048429e-02 -7.80934579e-01] + # delta_y from solution: [-8.88888889e-03 2.22222222e-03 -1.33333333e-02 -9.18533333e+01] + + # next iteration + # x: [12.56200532 40.92757338 18.96027299 76.97215765] + # y: [30.01333333 60.00444444 50.00444444 61.82444444] + # error_sign_changed: [False True False False] + # error_list: [23.759444444444448, 2.517777777777779, 78.90388888888889, 22.96944444444445, 14.211666666666666] + + # some errors changed sign and increased + # x: [12.56200532 40.92615539 18.96027299 76.97215765] + # y: [30.01333333 60.02 50.00444444 61.86 ] + + print("some errors changed sign and increased") + print(f"x: {x}") + print(f"y: {y}\n") + x_list.append(x) + y_list.append(y) + it_list.append(it) + alpha[error_sign_changed] /= 2 + alpha[~error_sign_changed] += (1 - alpha[~error_sign_changed]) / 5 + ### calculate the gradient i.e. 
change of the output values for each input + grad = np.zeros((yt.shape[0], x0.shape[0])) + for i in range(len(x0)): + ### search for the gradient of the i-th input, increase the stepwidth + ### which is used to calculate the gradient if the gradient for the + ### associated output value is below 1 + while grad[i, i] < 1: + x_plus = x.copy() + ### change only the currently selected input whose gradient should be + ### calculated + x_plus[i] += search_gradient_diff[i] + y_plus = func(x_plus) + print(f"x_plus: {x_plus}") + print(f"y_plus: {y_plus}\n") + grad[:, i] = y_plus - y + ### if gradient is above 10 reduce the search gradient diff + if grad[i, i] >= 10: + search_gradient_diff[i] /= 1.5 + ### if gradient is below 1 increase the search gradient diff + elif grad[i, i] < 1: + search_gradient_diff[i] *= 2 + ### calculate the wanted change of the output values + delta_y = yt - y + print(f"delta_y: {delta_y}") + print(f"grad:\n{grad}") + + # Example coefficient matrix A (m x n matrix) + A = grad + + # Right-hand side vector b (m-dimensional vector) + b = delta_y + + # Solve the system using least squares method + solution, residuals, rank, s = np.linalg.lstsq(A, b, rcond=None) + + # Output the results + print("Solution vector x:", solution) + + # Calculate the matrix-vector product Ax + Ax = np.dot(A, solution) + + # Output the matrix-vector product and compare with b + print("delta_y from solution:", Ax) + + ### solution contains the info how much each input should change (how many + ### times the change of gradient is needed to reach the target values) + x_old = x + x = x + solution * search_gradient_diff * alpha + it += 1 + + self.x = x + self.success = np.all(np.abs(error) < tol_error) + + x_arr = np.array(x_list) + y_arr = np.array(y_list) + it_arr = np.array(it_list) + + plt.close("all") + plt.figure() + for idx in range(4): + ax = plt.subplot(4, 1, idx + 1) + ### plot the x values + plt.plot(it_arr, x_arr[:, idx]) + plt.ylabel(f"x{idx}") + ### second y axis on 
the right for the y values + ax2 = ax.twinx() + ax2.plot(it_arr, y_arr[:, idx], color="red") + ax2.set_ylabel(f"y{idx}", color="red") + plt.xlabel("iteration") + plt.tight_layout() + plt.savefig("optimization.png") - ### create model - net_many_dict = self.create_many_neuron_network() + plt.close("all") + plt.figure() + dx_arr = x_arr[1:] - x_arr[:-1] + dx_ausgehend_von = x_arr[:-1] + dy_arr = y_arr[1:] - y_arr[:-1] + dy_ausgehend_von = y_arr[:-1] + for idx in range(4): + ax = plt.subplot(4, 1, idx + 1) + ### plot the x values + plt.plot(dx_ausgehend_von[:, idx], dy_arr[:, idx] / dx_arr[:, idx]) + plt.ylabel(f"dy{idx}/dx{idx}") + plt.xlabel("x") + plt.tight_layout() + plt.savefig("dy_dx_asugehend_x.png") - ### get interpolation data - txt = "get interpolation data..." - print(txt) - self.log(txt) - ### for each population get the input arrays for I_app, g_ampa and g_gaba - ### while getting inputs define which values should be used later - input_dict = self.get_input_for_many_neurons_net() - ### create list with variable_init_samplers of populations - variable_init_sampler_list = [ - self.net_single_dict[pop_name]["variable_init_sampler"] - for pop_name in self.pop_name_list +class GetBase: + def __init__( + self, + model_normal: CompNeuroModel, + model_reduced: CompNeuroModel, + target_firing_rate_dict: dict, + weight_dicts: "GetWeights", + do_not_config_list: list[str], + init_sampler: CreateSingleNeuronNetworks.AllSampler, + max_syn: "GetMaxSyn.MaxSynGetter", + ): + self._model_normal = model_normal + self._model_reduced = model_reduced + self._weight_dicts = weight_dicts + self._do_not_config_list = do_not_config_list + self._init_sampler = init_sampler + self._max_syn = max_syn + ### get the populations names of the configured populations + self._pop_names_config = [ + pop_name + for pop_name in model_normal.populations + if pop_name not in do_not_config_list ] + ### convert the target firing rate dict to an array + self._target_firing_rate_arr = [] + 
print(self._pop_names_config) + for pop_name in self._pop_names_config: + self._target_firing_rate_arr.append(target_firing_rate_dict[pop_name]) + self._target_firing_rate_arr = np.array(self._target_firing_rate_arr) + ### get the base currents + self._prepare_get_base() + self._base_dict = self._get_base() + + @property + def base_dict(self): + return self._base_dict + + def _set_model_weights(self): + ### loop over all populations which should be configured + for pop_name in self._pop_names_config: + ### loop over all target types + for target_type in ["ampa", "gaba"]: + ### get afferent projections of the corresponding target type + afferent_projection_list = self._weight_dicts._get_afferent_proj_names( + pop_name=pop_name, target=target_type + ) + ### loop over all afferent projections + for proj_name in afferent_projection_list: + ### set weight of the projection in the conductance-calculating + ### input current population + proj_weight = self._weight_dicts.weight_dict[proj_name] + setattr( + get_population(f"{pop_name}_{target_type}_aux"), + f"weights_{proj_name}", + proj_weight, + ) - ### run the run_parallel with a reduced simulation duration and obtain a time estimate for the full duration - ### TODO use directly measureing simulation time to get time estimate - start = time() - parallel_run( - method=get_rate_parallel, - number=self.nr_networks, - **{ - "pop_name_list": [self.pop_name_list] * self.nr_networks, - "population_list": [list(net_many_dict["population_dict"].values())] - * self.nr_networks, - "variable_init_sampler_list": [variable_init_sampler_list] - * self.nr_networks, - "monitor_list": [list(net_many_dict["monitor_dict"].values())] - * self.nr_networks, - "I_app_list": input_dict["I_app_list"], - "g_ampa_list": input_dict["g_ampa_list"], - "g_gaba_list": input_dict["g_gaba_list"], - "simulation_dur": [dt()] * self.nr_networks, - }, - ) - reset() - end = time() - offset_time = end - start - start = time() - parallel_run( - 
method=get_rate_parallel, - number=self.nr_networks, - **{ - "pop_name_list": [self.pop_name_list] * self.nr_networks, - "population_list": [list(net_many_dict["population_dict"].values())] - * self.nr_networks, - "variable_init_sampler_list": [variable_init_sampler_list] - * self.nr_networks, - "monitor_list": [list(net_many_dict["monitor_dict"].values())] - * self.nr_networks, - "I_app_list": input_dict["I_app_list"], - "g_ampa_list": input_dict["g_ampa_list"], - "g_gaba_list": input_dict["g_gaba_list"], - "simulation_dur": [self.simulation_dur_estimate_time] - * self.nr_networks, - }, - ) - reset() - end = time() - time_estimate = np.clip( - round( - (end - start - offset_time) - * (self.simulation_dur / self.simulation_dur_estimate_time), - 0, - ), - 0, - None, + def _prepare_get_base(self): + ### clear ANNarchy + mf.cnp_clear(functions=False, constants=False) + ### create and compile the model + self._model_reduced.create() + ### create monitors for recording the spikes of all populations + ### for CompNeuroMonitors we need the "_reduced" suffix + mon = CompNeuroMonitors( + mon_dict={ + f"{pop_name}_reduced": ["spike"] + for pop_name in self._model_normal.populations + } ) + ### create the experiment + self._exp = self.MyExperiment(monitors=mon) + ### initialize all populations with the init sampler + for pop_name in self._pop_names_config: + ### for get_population we need the "_reduced" suffix + self._init_sampler.get(pop_name=pop_name).set_init_variables( + get_population(f"{pop_name}_reduced") + ) + ### set the model weights + self._set_model_weights() + ### store the model state for all populations + self._exp.store_model_state(compartment_list=self._model_reduced.populations) + ### set lower and upper bounds and initial guess + self._lb = [] + self._ub = [] + self._x0 = [] + for pop_name in self._pop_names_config: + self._lb.append(-self._max_syn.get(pop_name=pop_name).I_app) + self._ub.append(self._max_syn.get(pop_name=pop_name).I_app) + 
self._x0.append(0.0) + + def _get_base(self): + """ + Perform the optimization to find the base currents for the target firing rates. - txt = f"start parallel_run of many neurons network on {self.nr_networks} threads, will take approx. {time_estimate} s (end: {self.get_time_in_x_sec(x=time_estimate)})..." - print(txt) - self.log(txt) - ### simulate the many neurons network with the input arrays splitted into the network populations sizes - ### and get the data of all populations - ### run_parallel - start = time() - f_rec_arr_list_list = parallel_run( - method=get_rate_parallel, - number=self.nr_networks, - **{ - "pop_name_list": [self.pop_name_list] * self.nr_networks, - "population_list": [list(net_many_dict["population_dict"].values())] - * self.nr_networks, - "variable_init_sampler_list": [variable_init_sampler_list] - * self.nr_networks, - "monitor_list": [list(net_many_dict["monitor_dict"].values())] - * self.nr_networks, - "I_app_list": input_dict["I_app_list"], - "g_ampa_list": input_dict["g_ampa_list"], - "g_gaba_list": input_dict["g_gaba_list"], - "simulation_dur": [self.simulation_dur] * self.nr_networks, - }, - ) - end = time() - txt = f"took {end-start} s" - print(txt) - self.log(txt) - - ### combine the list of outputs from parallel_run to one output per population - output_of_populations_dict = self.get_output_of_populations( - f_rec_arr_list_list, input_dict + Returns: + base_dict (dict): + Dict with the base currents for each population + """ + + ### Perform the optimization using Minimize class + result = Minimize( + func=self._get_firing_rate, + yt=self._target_firing_rate_arr, + x0=np.array(self._x0), + lb=np.array(self._lb), + ub=np.array(self._ub), + tol_error=1, + tol_convergence=0.1, + max_it=20, ) - ### create interpolation for each population - ### it can be a 1D to 3D interpolation, default (if everything works fine) is - ### 3D interpolation with "x": "I_app", "y": "g_ampa", "z": "g_gaba" - for pop_name in self.pop_name_list: - ### get 
whole input arrays - I_app_value_array = None - g_ampa_value_array = None - g_gaba_value_array = None - if self.I_app_max_dict[pop_name] > 0: - I_app_value_array = input_dict["I_app_arr_dict"][pop_name] - if self.g_max_dict[pop_name]["ampa"] > 0: - g_ampa_value_array = input_dict["g_ampa_arr_dict"][pop_name] - if self.g_max_dict[pop_name]["gaba"] > 0: - g_gaba_value_array = input_dict["g_gaba_arr_dict"][pop_name] - - ### get the interpolation - self.f_I_g_curve_dict[pop_name] = self.get_interp_3p( - values=output_of_populations_dict[pop_name], - model_conf_obj=self, - var_name_dict={"x": "I_app", "y": "g_ampa", "z": "g_gaba"}, - x=I_app_value_array, - y=g_ampa_value_array, - z=g_gaba_value_array, - ) + optimized_inputs = result.x + if not result.success: + sf.Logger().log("Optimization failed, target firing rates not reached!") + print("Optimization failed, target firing rates not reached!") + base_dict = { + pop_name: optimized_inputs[idx] + for idx, pop_name in enumerate(self._pop_names_config) + } + return base_dict + + def _objective_function_deap(self, population): + """ + Objective function wrapper for the DeapCma optimization. 
- self.did_get_interpolation = True + Args: + population (list): + List of individuals with input currents for each model population - ### with interpolation get the firing rates for all extreme values of I_app, g_ampa, g_gaba - for pop_name in self.pop_name_list: - self.extreme_firing_rates_df_dict[ - pop_name - ] = self.get_extreme_firing_rates_df(pop_name) + Returns: + loss_list (list): + List of losses for each individual of the population + """ + loss_list = [] + ### the population is a list of individuals which are lists of parameters + for individual in population: + loss_of_individual = self._objective_function(I_app_list=individual) + loss_list.append((loss_of_individual,)) + return loss_list - def get_extreme_firing_rates_df(self, pop_name): + def _objective_function(self, I_app_list: list[float]): """ - get the firing rates for all extreme values of I_app, g_ampa, g_gaba + Objective function to minimize the difference between the target firing rates and + the firing rates of the model with the given input currents. 
Args: - pop_name: str - popualtion name - - return: - table_df: pandas dataframe - containing the firing rates for all extreme values of I_app, g_ampa, g_gaba - """ - I_app_list = [-self.I_app_max_dict[pop_name], self.I_app_max_dict[pop_name]] - g_ampa_list = [0, self.g_max_dict[pop_name]["ampa"]] - g_gaba_list = [0, self.g_max_dict[pop_name]["gaba"]] - ### create all combiniations of I_app_list, g_ampa_list, g_gaba_list in a single list - comb_list = self.get_all_combinations_of_lists( - [I_app_list, g_ampa_list, g_gaba_list] - ) + I_app_list (list[float]): + List with the input currents for each population - ### get the firing rates for all combinations - f_list = [] - for I_app, g_ampa, g_gaba in comb_list: - f_list.append( - self.f_I_g_curve_dict[pop_name](x=I_app, y=g_ampa, z=g_gaba)[0] - ) + Returns: + diff (float): + Difference between the target firing rates and the firing rates of the + model with the given input currents + """ + ### get the firing rates of the model with the given input currents + rate_arr = self._get_firing_rate(I_app_list) + ### calculate the difference between the target firing rates and the firing rates + ### of the model with the given input currents + diff = self._target_firing_rate_arr - rate_arr + return np.sum(diff**2) + + def _get_firing_rate(self, I_app_list: list[float]): + ### convert the I_app_list to a dict + I_app_dict = {} + counter = 0 + for pop_name in self._pop_names_config: + ### for the I_app_dict we need the "_reduced" suffix + I_app_dict[f"{pop_name}_reduced"] = I_app_list[counter] + counter += 1 + ### run the experiment + results = self._exp.run(I_app_dict) + ### get the firing rates from the recorded spikes + rate_list = [] + rate_dict = {} + for pop_name in self._pop_names_config: + ### for the spike dict we need the "_reduced" suffix + spike_dict = results.recordings[0][f"{pop_name}_reduced;spike"] + t, _ = raster_plot(spike_dict) + ### only take spikes after the first 500 ms + t = t[t > 500] + nbr_spikes = 
len(t) + ### divide number of spikes by the number of neurons and the duration in s + rate = nbr_spikes / (4.5 * get_population(f"{pop_name}_reduced").size) + rate_list.append(rate) + rate_dict[pop_name] = rate + # sf.Logger().log(f"I_app_dict: {I_app_dict}") + # sf.Logger().log(f"Firing rates: {rate_dict}") + + # af.PlotRecordings( + # figname="firing_rates.png", + # recordings=results.recordings, + # recording_times=results.recording_times, + # shape=(len(self._model_normal.populations), 1), + # plan={ + # "position": list(range(1, len(self._model_normal.populations) + 1)), + # "compartment": [ + # f"{pop_name}_reduced" for pop_name in self._model_normal.populations + # ], + # "variable": ["spike"] * len(self._model_normal.populations), + # "format": ["hybrid"] * len(self._model_normal.populations), + # }, + # ) + return np.array(rate_list) + + class MyExperiment(CompNeuroExp): + def run(self, I_app_dict: dict[str, float]): + """ + Simulate the model for 5000 ms with the given input currents. 
- ### now get the same for names - I_app_name_list = ["min", "max"] - g_ampa_name_list = ["min", "max"] - g_gaba_name_list = ["min", "max"] - ### create all combiniations of I_app_name_list, g_ampa_name_list, g_gaba_name_list in a single list - comb_name_list = self.get_all_combinations_of_lists( - [I_app_name_list, g_ampa_name_list, g_gaba_name_list] - ) + Args: + I_app_dict (dict[str, float]): + Dict with the input currents for each population - ### create a dict as table with header I_app, g_ampa, g_gaba - table_dict = { - "I_app": np.array(comb_name_list)[:, 0].tolist(), - "g_ampa": np.array(comb_name_list)[:, 1].tolist(), - "g_gaba": np.array(comb_name_list)[:, 2].tolist(), - "f": f_list, - } + Returns: + results (CompNeuroResults): + Results of the simulation + """ + ### reset to initial state + self.reset() + set_seed(0) + ### activate monitor + self.monitors.start() + ### set the input currents + for pop_name, I_app in I_app_dict.items(): + get_population(pop_name).I_app = I_app + ### simulate 5000 ms + simulate(5000) + ### return results + return self.results() + + +class CreateReducedModel: + """ + Class to create a reduced model from the original model. It is accessable via the + attribute model_reduced. + + Attributes: + model_reduced (CompNeuroModel): + Reduced model, created but not compiled + """ - ### create a pandas dataframe from the table_dict - table_df = pd.DataFrame(table_dict) + def __init__( + self, + model: CompNeuroModel, + analyze_model: AnalyzeModel, + reduced_size: int, + do_create: bool = False, + do_compile: bool = False, + verbose: bool = False, + ) -> None: + """ + Prepare model for reduction. 
- return table_df + Args: + model (CompNeuroModel): + Model to be reduced + reduced_size (int): + Size of the reduced populations + """ + ### set the attributes + self._model = model + self._analyze_model = analyze_model + self._reduced_size = reduced_size + self._verbose = verbose + ### recreate model with reduced populations and projections + self.model_reduced = CompNeuroModel( + model_creation_function=self.recreate_model, + name=f"{model.name}_reduced", + description=f"{model.description}\nWith reduced populations and projections.", + do_create=do_create, + do_compile=do_compile, + compile_folder_name=f"{model.compile_folder_name}_reduced", + ) - def get_all_combinations_of_lists(self, list_of_lists): + def recreate_model(self): """ - get all combinations of lists in a single list - example: [[1,2],[3,4],[5,6]] --> [[1,3,5],[1,3,6],[1,4,5],[1,4,6],[2,3,5],[2,3,6],[2,4,5],[2,4,6]] + Recreates the model with reduced populations and projections. """ - return list(itertools.product(*list_of_lists)) + ### 1st for each population create a reduced population + for pop_name in self._model.populations: + self.create_reduced_pop(pop_name) + ### 2nd for each population which is a presynaptic population, create a spikes collecting aux population + for pop_name in self._model.populations: + self.create_spike_collecting_aux_pop(pop_name) + ## 3rd for each population which has afferents create a population for incoming spikes for each target type + for pop_name in self._model.populations: + self.create_conductance_aux_pop(pop_name, target="ampa") + self.create_conductance_aux_pop(pop_name, target="gaba") - def get_output_of_populations(self, f_rec_arr_list_list, input_dict): + def create_reduced_pop(self, pop_name: str): """ - restructure the output of run_parallel so that for each population a single array with firing rates is obtained + Create a reduced population from the given population. 
Args: - f_rec_arr_list_list: list of lists of arrays - first lists contain different network runs, second level lists contain arrays for the different populations - return: - output_pop_dict: dict of arrays - for each population a single array with firing rates - """ - output_pop_dict = {} - for pop_name in self.pop_name_list: - output_pop_dict[pop_name] = [] - ### first loop selecting the network - for f_rec_arr_list in f_rec_arr_list_list: - ### second loop selecting the population - for pop_idx, pop_name in enumerate(self.pop_name_list): - ### append the recorded values to the array of the corresponding population - output_pop_dict[pop_name].append(f_rec_arr_list[pop_idx]) - - ### concatenate the arrays of the individual populations - for pop_name in self.pop_name_list: - output_pop_dict[pop_name] = np.concatenate(output_pop_dict[pop_name]) - - ### use the input dict to only use values which should be used - ### lis of lists, first list level = networks, second list level = populations then you get array with input values - ### so same format as f_rec_arr_list_list - use_I_app_arr_list_list = input_dict["use_I_app_list"] - use_g_ampa_arr_list_list = input_dict["use_g_ampa_list"] - use_g_gaba_arr_list_list = input_dict["use_g_gaba_list"] - - ### now get for each population an array which contains the info if the values should be used - use_output_pop_dict = {} - for pop_name in self.pop_name_list: - use_output_pop_dict[pop_name] = [] - ### first loop selecting the network - for net_idx in range(len(use_I_app_arr_list_list)): - use_I_app_arr_list = use_I_app_arr_list_list[net_idx] - use_g_ampa_arr_list = use_g_ampa_arr_list_list[net_idx] - use_g_gaba_arr_list = use_g_gaba_arr_list_list[net_idx] - ### second loop selecting the population - for pop_idx, pop_name in enumerate(self.pop_name_list): - ### only use values if for all input values use is True - use_I_app_arr = use_I_app_arr_list[pop_idx] - use_g_ampa_arr = use_g_ampa_arr_list[pop_idx] - use_g_gaba_arr = 
use_g_gaba_arr_list[pop_idx] - use_value_arr = np.logical_and(use_I_app_arr, use_g_ampa_arr) - use_value_arr = np.logical_and(use_value_arr, use_g_gaba_arr) - ### append the recorded values to the array of the corresponding population - use_output_pop_dict[pop_name].append(use_value_arr) - - ### concatenate the arrays of the individual populations - for pop_name in self.pop_name_list: - use_output_pop_dict[pop_name] = np.concatenate( - use_output_pop_dict[pop_name] + pop_name (str): + Name of the population to be reduced + """ + if self._verbose: + print(f"create_reduced_pop for {pop_name}") + ### 1st check how the population is connected + _, is_postsynaptic, ampa, gaba = self.how_pop_is_connected(pop_name) + + ### 2nd recreate neuron model + ### get the stored parameters of the __init__ function of the Neuron + neuron_model_init_parameter_dict = ( + self._analyze_model.neuron_model_init_parameter_dict[pop_name].copy() + ) + ### if the population is a postsynaptic population adjust the synaptic + ### conductance equations + if is_postsynaptic: + neuron_model_init_parameter_dict = self.adjust_neuron_model( + neuron_model_init_parameter_dict, ampa=ampa, gaba=gaba ) + ### create the new neuron model + neuron_model_new = Neuron(**neuron_model_init_parameter_dict) + + ### 3rd recreate the population + ### get the stored parameters of the __init__ function of the Population + pop_init_parameter_dict = self._analyze_model.pop_init_parameter_dict[ + pop_name + ].copy() + ### replace the neuron model with the new neuron model + pop_init_parameter_dict["neuron"] = neuron_model_new + ### replace the size with the reduced size (if reduced size is smaller than the + ### original size) + ### TODO add model requirements somewhere, here requirements = geometry = int + pop_init_parameter_dict["geometry"] = min( + [pop_init_parameter_dict["geometry"][0], self._reduced_size] + ) + ### append _reduce to the name + pop_init_parameter_dict["name"] = f"{pop_name}_reduced" + ### create 
the new population + pop_new = Population(**pop_init_parameter_dict) - ### finaly only use values defined by ues_output... - for pop_name in self.pop_name_list: - output_pop_dict[pop_name] = output_pop_dict[pop_name][ - use_output_pop_dict[pop_name] - ] - - return output_pop_dict + ### 4th set the parameters and variables of the population's neurons + ### get the stored parameters and variables + neuron_model_attr_dict = self._analyze_model.neuron_model_attr_dict[pop_name] + ### set the parameters and variables + for attr_name, attr_val in neuron_model_attr_dict.items(): + setattr(pop_new, attr_name, attr_val) - def get_input_for_many_neurons_net(self): + def create_spike_collecting_aux_pop(self, pop_name: str): """ - get the inputs for the parallel many neurons network simulation + Create a spike collecting population for the given population. - need a list of dicts, keys=pop_name, lsit=number of networks - """ + Args: + pop_name (str): + Name of the population for which the spike collecting population should be created + """ + ### get all efferent projections + efferent_projection_list = [ + proj_name + for proj_name, pre_post_pop_name_dict in self._analyze_model.pre_post_pop_name_dict.items() + if pre_post_pop_name_dict[0] == pop_name + ] + ### check if pop has efferent projections + if len(efferent_projection_list) == 0: + return + if self._verbose: + print(f"create_spike_collecting_aux_pop for {pop_name}") + ### create the spike collecting population + pop_aux = Population( + 1, + neuron=self.SpikeProbCalcNeuron( + reduced_size=min( + [ + self._analyze_model.pop_init_parameter_dict[pop_name][ + "geometry" + ][0], + self._reduced_size, + ] + ) + ), + name=f"{pop_name}_spike_collecting_aux", + ) + ### create the projection from reduced pop to spike collecting aux pop + proj = Projection( + pre=get_population(pop_name + "_reduced"), + post=pop_aux, + target="ampa", + name=f"proj_{pop_name}_spike_collecting_aux", + ) + proj.connect_all_to_all(weights=1) - ### 
create dicts with lists for the populations - I_app_arr_list_dict = {} - g_ampa_arr_list_dict = {} - g_gaba_arr_list_dict = {} - use_I_app_arr_list_dict = {} - use_g_ampa_arr_list_dict = {} - use_g_gaba_arr_list_dict = {} - I_app_arr_dict = {} - g_ampa_arr_dict = {} - g_gaba_arr_dict = {} - for pop_name in self.pop_name_list: - ### prepare grid for I, g_ampa and g_gaba - ### bounds - g_ampa_max = self.g_max_dict[pop_name]["ampa"] - g_gaba_max = self.g_max_dict[pop_name]["gaba"] - I_max = self.I_app_max_dict[pop_name] + def create_conductance_aux_pop(self, pop_name: str, target: str): + """ + Create a conductance calculating population for the given population and target. - ### create value_arrays - I_app_value_array = np.linspace( - -I_max, I_max, self.nr_vals_interpolation_grid + Args: + pop_name (str): + Name of the population for which the conductance calculating population should be created + target (str): + Target type of the conductance calculating population + """ + ### get all afferent projections + afferent_projection_list = [ + proj_name + for proj_name, pre_post_pop_name_dict in self._analyze_model.pre_post_pop_name_dict.items() + if pre_post_pop_name_dict[1] == pop_name + ] + ### check if pop has afferent projections + if len(afferent_projection_list) == 0: + return + ### get all afferent projections with target type + afferent_target_projection_list = [ + proj_name + for proj_name in afferent_projection_list + if self._analyze_model.proj_init_parameter_dict[proj_name]["target"] + == target + ] + ### check if there are afferent projections with target type + if len(afferent_target_projection_list) == 0: + return + if self._verbose: + print(f"create_conductance_aux_pop for {pop_name} target {target}") + ### get projection informations + ### TODO somewhere add model requirements, here requirements = geometry = int and connection = fixed_probability i.e. 
random (with weights and probability) + projection_dict = { + proj_name: { + "pre_size": self._analyze_model.pop_init_parameter_dict[ + self._analyze_model.pre_post_pop_name_dict[proj_name][0] + ]["geometry"][0], + "connection_prob": self._analyze_model.connector_function_parameter_dict[ + proj_name + ][ + "probability" + ], + "weights": self._analyze_model.connector_function_parameter_dict[ + proj_name + ]["weights"], + "pre_name": self._analyze_model.pre_post_pop_name_dict[proj_name][0], + } + for proj_name in afferent_target_projection_list + } + ### create the conductance calculating population + pop_aux = Population( + self._analyze_model.pop_init_parameter_dict[pop_name]["geometry"][0], + neuron=self.InputCalcNeuron(projection_dict=projection_dict), + name=f"{pop_name}_{target}_aux", + ) + ### set number of synapses parameter for each projection + for proj_name, vals in projection_dict.items(): + number_synapses = Binomial( + n=vals["pre_size"], p=vals["connection_prob"] + ).get_values( + self._analyze_model.pop_init_parameter_dict[pop_name]["geometry"][0] ) - g_ampa_value_array = np.linspace( - 0, g_ampa_max, self.nr_vals_interpolation_grid + setattr(pop_aux, f"number_synapses_{proj_name}", number_synapses) + ### create the "current injection" projection from conductance calculating + ### population to the reduced post population + proj = CurrentInjection( + pre=pop_aux, + post=get_population(f"{pop_name}_reduced"), + target=f"incomingaux{target}", + name=f"proj_{pop_name}_{target}_aux", + ) + proj.connect_current() + ### create projection from spike_prob calculating aux neurons of presynaptic + ### populations to conductance calculating aux population + for proj_name, vals in projection_dict.items(): + pre_pop_name = vals["pre_name"] + pre_pop_spike_collecting_aux = get_population( + f"{pre_pop_name}_spike_collecting_aux" ) - g_gaba_value_array = np.linspace( - 0, g_gaba_max, self.nr_vals_interpolation_grid + proj = Projection( + 
pre=pre_pop_spike_collecting_aux, + post=pop_aux, + target=f"spikeprob_{pre_pop_name}", + name=f"{proj_name}_spike_collecting_to_conductance", ) + proj.connect_all_to_all(weights=1) - ### store these value arrays for each pop - I_app_arr_dict[pop_name] = I_app_value_array - g_ampa_arr_dict[pop_name] = g_ampa_value_array - g_gaba_arr_dict[pop_name] = g_gaba_value_array + def how_pop_is_connected(self, pop_name): + """ + Check how a population is connected. If the population is a postsynaptic and/or + presynaptic population, check if it gets ampa and/or gaba input. - ### create use values arrays - use_I_app_array = np.array([I_max > 0] * self.nr_vals_interpolation_grid) - use_g_ampa_array = np.array( - [g_ampa_max > 0] * self.nr_vals_interpolation_grid - ) - use_g_gaba_array = np.array( - [g_gaba_max > 0] * self.nr_vals_interpolation_grid - ) - ### use at least a single value - use_I_app_array[0] = True - use_g_ampa_array[0] = True - use_g_gaba_array[0] = True - - ### get all combinations (grid) of value_arrays - I_g_arr = np.array( - list( - itertools.product( - *[I_app_value_array, g_ampa_value_array, g_gaba_value_array] - ) - ) - ) + Args: + pop_name (str): + Name of the population to check - ### get all combinations (grid) of the use values arrays - use_I_g_arr = np.array( - list( - itertools.product( - *[use_I_app_array, use_g_ampa_array, use_g_gaba_array] - ) - ) - ) + Returns: + is_presynaptic (bool): + True if the population is a presynaptic population, False otherwise + is_postsynaptic (bool): + True if the population is a postsynaptic population, False otherwise + ampa (bool): + True if the population gets ampa input, False otherwise + gaba (bool): + True if the population gets gaba input, False otherwise + """ + is_presynaptic = False + is_postsynaptic = False + ampa = False + gaba = False + ### loop over all projections + for proj_name in self._model.projections: + ### check if the population is a presynaptic population in any projection + if 
self._analyze_model.pre_post_pop_name_dict[proj_name][0] == pop_name: + is_presynaptic = True + ### check if the population is a postsynaptic population in any projection + if self._analyze_model.pre_post_pop_name_dict[proj_name][1] == pop_name: + is_postsynaptic = True + ### check if the projection target is ampa or gaba + if ( + self._analyze_model.proj_init_parameter_dict[proj_name]["target"] + == "ampa" + ): + ampa = True + elif ( + self._analyze_model.proj_init_parameter_dict[proj_name]["target"] + == "gaba" + ): + gaba = True - ### individual value arrays from combinations - I_app_arr = I_g_arr[:, 0] - g_ampa_arr = I_g_arr[:, 1] - g_gaba_arr = I_g_arr[:, 2] + return is_presynaptic, is_postsynaptic, ampa, gaba - ### individual use values arrays from combinations - use_I_app_arr = use_I_g_arr[:, 0] - use_g_ampa_arr = use_I_g_arr[:, 1] - use_g_gaba_arr = use_I_g_arr[:, 2] + def adjust_neuron_model( + self, neuron_model_init_parameter_dict, ampa=True, gaba=True + ): + """ + Add the new synaptic input coming from the auxillary population to the neuron + model. 
- ### split the arrays for the networks - networks_size_list = np.array( - [self.nr_neurons_of_pop_per_net] * self.nr_networks - ) - split_idx_arr = np.cumsum(networks_size_list)[:-1] - ### after this split the last array may be smaller than the others --> append zeros - ### value arrays - I_app_arr_list = np.split(I_app_arr, split_idx_arr) - g_ampa_arr_list = np.split(g_ampa_arr, split_idx_arr) - g_gaba_arr_list = np.split(g_gaba_arr, split_idx_arr) - ### use value arrays - use_I_app_arr_list = np.split(use_I_app_arr, split_idx_arr) - use_g_ampa_arr_list = np.split(use_g_ampa_arr, split_idx_arr) - use_g_gaba_arr_list = np.split(use_g_gaba_arr, split_idx_arr) - - ### check if last network is smaler - if self.nr_last_network < self.nr_neurons_of_pop_per_net: - ### if yes --> append zeros to value arrays - ### and append False to use values arrays - nr_of_zeros_append = round( - self.nr_neurons_of_pop_per_net - self.nr_last_network, 0 - ) - ### value arrays - I_app_arr_list[-1] = np.concatenate( - [I_app_arr_list[-1], np.zeros(nr_of_zeros_append)] - ) - g_ampa_arr_list[-1] = np.concatenate( - [g_ampa_arr_list[-1], np.zeros(nr_of_zeros_append)] - ) - g_gaba_arr_list[-1] = np.concatenate( - [g_gaba_arr_list[-1], np.zeros(nr_of_zeros_append)] - ) - ### use values arrays - use_I_app_arr_list[-1] = np.concatenate( - [use_I_app_arr_list[-1], np.array([False] * nr_of_zeros_append)] - ) - use_g_ampa_arr_list[-1] = np.concatenate( - [use_g_ampa_arr_list[-1], np.array([False] * nr_of_zeros_append)] - ) - use_g_gaba_arr_list[-1] = np.concatenate( - [use_g_gaba_arr_list[-1], np.array([False] * nr_of_zeros_append)] - ) - - ### store the array lists into the population dicts - ### value arrays - I_app_arr_list_dict[pop_name] = I_app_arr_list - g_ampa_arr_list_dict[pop_name] = g_ampa_arr_list - g_gaba_arr_list_dict[pop_name] = g_gaba_arr_list - ### use value arrays - use_I_app_arr_list_dict[pop_name] = use_I_app_arr_list - use_g_ampa_arr_list_dict[pop_name] = use_g_ampa_arr_list - 
use_g_gaba_arr_list_dict[pop_name] = use_g_gaba_arr_list - - ### restructure the dict of lists into a list for networks of list for populations - I_app_list = [] - g_ampa_list = [] - g_gaba_list = [] - use_I_app_list = [] - use_g_ampa_list = [] - use_g_gaba_list = [] - for net_idx in range(self.nr_networks): - ### value arrays - I_app_list.append( - [ - I_app_arr_list_dict[pop_name][net_idx] - for pop_name in self.pop_name_list - ] - ) - g_ampa_list.append( - [ - g_ampa_arr_list_dict[pop_name][net_idx] - for pop_name in self.pop_name_list - ] - ) - g_gaba_list.append( - [ - g_gaba_arr_list_dict[pop_name][net_idx] - for pop_name in self.pop_name_list - ] - ) - ### use values arrays - use_I_app_list.append( - [ - use_I_app_arr_list_dict[pop_name][net_idx] - for pop_name in self.pop_name_list - ] - ) - use_g_ampa_list.append( - [ - use_g_ampa_arr_list_dict[pop_name][net_idx] - for pop_name in self.pop_name_list - ] - ) - use_g_gaba_list.append( - [ - use_g_gaba_arr_list_dict[pop_name][net_idx] - for pop_name in self.pop_name_list - ] - ) - - return { - "I_app_list": I_app_list, - "g_ampa_list": g_ampa_list, - "g_gaba_list": g_gaba_list, - "use_I_app_list": use_I_app_list, - "use_g_ampa_list": use_g_ampa_list, - "use_g_gaba_list": use_g_gaba_list, - "I_app_arr_dict": I_app_arr_dict, - "g_ampa_arr_dict": g_ampa_arr_dict, - "g_gaba_arr_dict": g_gaba_arr_dict, - } + Args: + neuron_model_init_parameter_dict (dict): + Dictionary with the parameters of the __init__ function of the Neuron + ampa (bool): + True if the population gets ampa input and therefore the ampa conductance + needs to be adjusted, False otherwise + gaba (bool): + True if the population gets gaba input and therefore the gaba conductance + needs to be adjusted, False otherwise - for pop_name in self.pop_name_list: - ### prepare grid for I, g_ampa and g_gaba - ### bounds - g_ampa_max = self.g_max_dict[pop_name]["ampa"] - g_gaba_max = self.g_max_dict[pop_name]["gaba"] - I_max = self.I_app_max_dict[pop_name] - 
### number of points for individual value arrays: I, g_ampa and g_gaba - number_of_points = np.round( - self.nr_neurons_net_many_total ** (1 / 3), 0 - ).astype(int) - ### create value_arrays - I_app_value_array = np.linspace(-I_max, I_max, number_of_points) - g_ampa_value_array = np.linspace(0, g_ampa_max, number_of_points) - g_gaba_value_array = np.linspace(0, g_gaba_max, number_of_points) - ### get all combinations (grid) of value_arrays - I_g_arr = np.array( - list( - itertools.product( - *[I_app_value_array, g_ampa_value_array, g_gaba_value_array] - ) + Returns: + neuron_model_init_parameter_dict (dict): + Dictionary with the parameters of the __init__ function of the Neuron + with DBS mechanisms added + """ + ### 1st adjust the conductance equations + ### get the equations of the neuron model as a list of strings + equations_line_split_list = str( + neuron_model_init_parameter_dict["equations"] + ).splitlines() + ### search for equation with dg_ampa/dt and dg_gaba/dt + for line_idx, line in enumerate(equations_line_split_list): + if ( + self.get_line_is_dvardt(line, var_name="g_ampa", tau_name="tau_ampa") + and ampa + ): + ### add " + tau_ampa*g_incomingauxampa/dt" + ### TODO add model requirements somewhere, here requirements = tau_ampa * dg_ampa/dt = -g_ampa + equations_line_split_list[line_idx] = ( + "tau_ampa*dg_ampa/dt = -g_ampa + tau_ampa*g_incomingauxampa/dt" ) - ) - ### individual value arrays from combinations - I_app_arr = I_g_arr[:, 0] - g_ampa_arr = I_g_arr[:, 1] - g_gaba_arr = I_g_arr[:, 2] - - ### split the arrays into the sizes of the many-neuron networks - split_idx_arr = np.cumsum(self.nr_many_neurons_list[pop_name])[:-1] - - I_app_arr_list = np.split(I_app_arr, split_idx_arr) - g_ampa_arr_list = np.split(g_ampa_arr, split_idx_arr) - g_gaba_arr_list = np.split(g_gaba_arr, split_idx_arr) - - class get_interp_3p: - def __init__( - self, values, model_conf_obj, var_name_dict, x=None, y=None, z=None - ) -> None: - """ - x, y, and z are the 
increasing gid steps on the interpolation grid - set z=None to get 2D interpiolation - set y and z = None to get 1D interpolation - """ - self.x = x - self.y = y - self.z = z - self.values = values - self.model_conf_obj = model_conf_obj - self.var_name_dict = var_name_dict - if ( - isinstance(self.x, type(None)) - and isinstance(self.y, type(None)) - and isinstance(self.z, type(None)) + self.get_line_is_dvardt(line, var_name="g_gaba", tau_name="tau_gaba") + and gaba ): - error_msg = ( - "ERROR get_interp_3p: at least one of x,y,z has to be an array" + ### add " + tau_gaba*g_incomingauxgaba/dt" + ### TODO add model requirements somewhere, here requirements = tau_gaba * dg_gaba/dt = -g_gaba + equations_line_split_list[line_idx] = ( + "tau_gaba*dg_gaba/dt = -g_gaba + tau_gaba*g_incomingauxgaba/dt" ) - model_conf_obj.log(error_msg) - raise AssertionError(error_msg) - - def __call__(self, x=None, y=None, z=None): - ### check x - if isinstance(x, type(None)): - if not isinstance(self.x, type(None)): - error_msg = f"ERROR get_interp_3p: interpolation values for {self.var_name_dict['x']} were given but sample points are missing!" - self.model_conf_obj.log(error_msg) - raise AssertionError(error_msg) - tmp_x = 0 - else: - if isinstance(self.x, type(None)): - warning_txt = f"WARNING get_interp_3p: sample points for {self.var_name_dict['x']} are given but no interpolation values for {self.var_name_dict['x']} were given!" - self.model_conf_obj.log(warning_txt) - x = None - tmp_x = 0 - else: - tmp_x = x - - ### check y - if isinstance(y, type(None)): - if not isinstance(self.y, type(None)): - error_msg = f"ERROR get_interp_3p: interpolation values for {self.var_name_dict['y']} were given but sample points are missing!" 
- self.model_conf_obj.log(error_msg) - raise AssertionError(error_msg) - tmp_y = 0 - else: - if isinstance(self.y, type(None)): - warning_txt = f"WARNING get_interp_3p: sample points for {self.var_name_dict['y']} are given but no interpolation values for {self.var_name_dict['y']} were given!" - self.model_conf_obj.log(warning_txt) - y = None - tmp_y = 0 - else: - tmp_y = y - - ### check z - if isinstance(z, type(None)): - if not isinstance(self.y, type(None)): - error_msg = f"ERROR get_interp_3p: interpolation values for {self.var_name_dict['z']} were given but sample points are missing!" - self.model_conf_obj.log(error_msg) - raise AssertionError(error_msg) - tmp_z = 0 - else: - if isinstance(self.z, type(None)): - warning_txt = f"WARNING get_interp_3p: sample points for {self.var_name_dict['z']} are given but no interpolation values for {self.var_name_dict['z']} were given!" - self.model_conf_obj._p_w(warning_txt) - self.model_conf_obj.log(warning_txt) - z = None - tmp_z = 0 - else: - tmp_z = z - - ### get input arrays - input_arr_dict = { - "x": np.array(tmp_x).reshape(-1), - "y": np.array(tmp_y).reshape(-1), - "z": np.array(tmp_z).reshape(-1), - } - - ### check if the arrays with size larger 1 have same size - size_arr = np.array([val.size for val in input_arr_dict.values()]) - mask = size_arr > 1 - if True in mask: - input_size = size_arr[mask][0] - if not (input_size == size_arr[mask]).all(): - raise ValueError( - "ERROR model_configurator get_interp_3p: x,y,z sample points have to be either single values or arrays. 
All arrays have to have same size" - ) - - ### if there are inputs only consisting of a single value --> duplicate to increase size if there are also array inputs - for idx, larger_1 in enumerate(mask): - if not larger_1 and True in mask: - val = input_arr_dict[list(input_arr_dict.keys())[idx]][0] - input_arr_dict[list(input_arr_dict.keys())[idx]] = ( - np.ones(input_size) * val - ) - - ### get the sample points - use_variable_names_list = ["x", "y", "z"] - if isinstance(x, type(None)): - use_variable_names_list.remove("x") - if isinstance(y, type(None)): - use_variable_names_list.remove("y") - if isinstance(z, type(None)): - use_variable_names_list.remove("z") - point_arr = np.array( - [input_arr_dict[var_name] for var_name in use_variable_names_list] - ).T - - ### get the grid points, only use these which are not None - use_variable_names_list = ["x", "y", "z"] - if isinstance(self.x, type(None)): - use_variable_names_list.remove("x") - if isinstance(self.y, type(None)): - use_variable_names_list.remove("y") - if isinstance(self.z, type(None)): - use_variable_names_list.remove("z") - - interpolation_grid_arr_dict = { - "x": self.x, - "y": self.y, - "z": self.z, - } - points = tuple( - [ - interpolation_grid_arr_dict[var_name] - for var_name in use_variable_names_list - ] - ) + ### join list to a string + neuron_model_init_parameter_dict["equations"] = "\n".join( + equations_line_split_list + ) - ### get shape of values - values_shape = tuple( - [ - interpolation_grid_arr_dict[var_name].size - for var_name in use_variable_names_list - ] - ) + ### 2nd extend description + neuron_model_init_parameter_dict["description"] = ( + f"{neuron_model_init_parameter_dict['description']}\nWith incoming auxillary population input implemented." 
+ ) - return interpn( - points=points, - values=self.values.reshape(values_shape), - xi=point_arr, - ) + return neuron_model_init_parameter_dict - def set_syn_load(self, synaptic_load_dict, synaptic_contribution_dict=None): + def get_line_is_dvardt(self, line: str, var_name: str, tau_name: str): """ - Args: - synaptic_load_dict: dict or number - either a dictionary with keys = all population names the model_configurator should configure - or a single number between 0 and 1 - The dictionary values should be lists which contain either 2 values for ampa and gaba load, - only 1 value if the population has only ampa or gaba input. - For the strucutre of the dictionary check the print_guide - - synaptic_contribution_dict: dict, optional, default=None - by default the synaptic contributions of all afferent projections is equal - one can define other contributions in this dict - give for each affernt projection the contribution to the synaptic load of the target population - For the strucutre of the dictionary check the print_guide - """ - - ### set synaptic load - ### is dict --> replace internal dict values - if isinstance(synaptic_load_dict, dict): - ### check if correct number of population - if len(list(synaptic_load_dict.keys())) != len( - list(self.syn_load_dict.keys()) - ): - error_msg = f"ERROR set_syn_load: wrong number of populations given with 'synaptic_load_dict' given={len(list(synaptic_load_dict.keys()))}, expected={len(list(self.syn_load_dict.keys()))}" - self.log(error_msg) - raise ValueError(error_msg) - ### loop over all populations - for pop_name in synaptic_load_dict.keys(): - ### cehck pop name - if pop_name not in list(self.syn_load_dict.keys()): - error_msg = f"ERROR set_syn_load: the given population {pop_name} is not within the list of populations which should be configured {self.pop_name_list}" - self.log(error_msg) - raise ValueError(error_msg) - value_list = synaptic_load_dict[pop_name] - ### check value list - if len(value_list) != 
len(self.syn_load_dict[pop_name]): - error_msg = f"ERROR set_syn_load: for population {pop_name}, {len(self.syn_load_dict[pop_name])} syn load values should be given but {len(value_list)} were given" - self.log(error_msg) - raise ValueError(error_msg) - if not ( - (np.array(value_list) <= 1).all() - and (np.array(value_list) >= 0).all() - ): - error_msg = f"ERROR set_syn_load: the values for synaptic loads should be equal or smaller than 1, given for population {pop_name}: {value_list}" - self.log(error_msg) - raise ValueError(error_msg) - ### replace internal values with given values - self.syn_load_dict[pop_name] = value_list - else: - ### is not a dict --> check number - try: - synaptic_load = float(synaptic_load_dict) - except: - error_msg = "ERROR set_syn_load: if synaptic_load_dict is not a dictionary it should be a single number!" - self.log(error_msg) - raise ValueError(error_msg) - if not (synaptic_load <= 1 and synaptic_load >= 0): - error_msg = "ERROR set_syn_load: value for synaptic_loadshould be equal or smaller than 1" - self.log(error_msg) - raise ValueError(error_msg) - ### replace internal values with given value - for pop_name in self.syn_load_dict.keys(): - for idx in range(len(self.syn_load_dict[pop_name])): - self.syn_load_dict[pop_name][idx] = synaptic_load - ### transform syn load dict in correct form with projection target type keys - syn_load_dict = {} - for pop_name in self.pop_name_list: - syn_load_dict[pop_name] = {} - if ( - "ampa" in self.afferent_projection_dict[pop_name]["target"] - and "gaba" in self.afferent_projection_dict[pop_name]["target"] - ): - syn_load_dict[pop_name]["ampa"] = self.syn_load_dict[pop_name][0] - syn_load_dict[pop_name]["gaba"] = self.syn_load_dict[pop_name][1] - elif "ampa" in self.afferent_projection_dict[pop_name]["target"]: - syn_load_dict[pop_name]["ampa"] = self.syn_load_dict[pop_name][0] - syn_load_dict[pop_name]["gaba"] = 0 - elif "gaba" in self.afferent_projection_dict[pop_name]["target"]: - 
syn_load_dict[pop_name]["ampa"] = 0 - syn_load_dict[pop_name]["gaba"] = self.syn_load_dict[pop_name][0] - self.syn_load_dict = syn_load_dict - - ### set synaptic contribution - if not isinstance(synaptic_contribution_dict, type(None)): - ### loop over all given populations - for pop_name in synaptic_contribution_dict.keys(): - ### check pop_name - if pop_name not in list(self.syn_contr_dict.keys()): - error_msg = f"ERROR set_syn_load: the given population {pop_name} is not within the list of populations which should be configured {self.pop_name_list}" - self.log(error_msg) - raise ValueError(error_msg) - ### loop over given projection target type (ampa,gaba) - for given_proj_target_type in synaptic_contribution_dict[ - pop_name - ].keys(): - ### check given target type - if not ( - given_proj_target_type == "ampa" - or given_proj_target_type == "gaba" - ): - error_msg = f"ERROR set_syn_load: with the synaptic_contribution_dict for each given population a 'ampa' and/or 'gaba' dictionary contianing the corresponding afferent projections should be given, given key={given_proj_target_type}" - self.log(error_msg) - raise ValueError(error_msg) - ### check if for the projection target type the correct number of projections is given - given_proj_name_list = list( - synaptic_contribution_dict[pop_name][ - given_proj_target_type - ].keys() - ) - internal_proj_name_list = list( - self.syn_contr_dict[pop_name][given_proj_target_type].keys() - ) - if len(given_proj_name_list) != len(internal_proj_name_list): - error_msg = f"ERROR set_syn_load: in synaptic_contribution_dict for population {pop_name} and target_type {given_proj_target_type} wrong number of projections is given\ngiven={given_proj_name_list}, expected={internal_proj_name_list}" - self.log(error_msg) - raise ValueError(error_msg) - ### check if given contributions for the target type sum up to 1 - given_contribution_arr = np.array( - list( - synaptic_contribution_dict[pop_name][ - given_proj_target_type - ].values() 
- ) - ) - if round(given_contribution_arr.sum(), 6) != 1: - error_msg = f"ERROR set_syn_load: given synaptic contributions for population {pop_name} and target_type {given_proj_target_type} do not sum up to 1: given={given_contribution_arr}-->{round(given_contribution_arr.sum(),6)}" - self.log(error_msg) - raise ValueError(error_msg) - ### loop over given afferent projections - for proj_name in given_proj_name_list: - ### check if projection name exists - if proj_name not in internal_proj_name_list: - error_msg = f"ERROR set_syn_load: given projection {proj_name} given with synaptic_contribution_dict no possible projection, possible={internal_proj_name_list}" - self.log(error_msg) - raise ValueError(error_msg) - ### replace internal value of the projection with given value - self.syn_contr_dict[pop_name][given_proj_target_type][ - proj_name - ] = synaptic_contribution_dict[pop_name][ - given_proj_target_type - ][ - proj_name - ] - - ### set the weights in the afferent_projection_dict based on the given synaptic contributions - for pop_name in self.pop_name_list: - weight_list = [] - for proj_name in self.afferent_projection_dict[pop_name][ - "projection_names" - ]: - ### get proj info - proj_dict = self.get_proj_dict(proj_name) - proj_target_type = proj_dict["proj_target_type"] - - ### obtain the weight using the given syn_contr_dict and the syn_contr_max_dict (assuming max weights) - target_type_contr_dict = self.syn_contr_dict[pop_name][proj_target_type] - target_type_contr_max_dict = self.get_syn_contr_dict( - pop_name=pop_name, - target_type=proj_target_type, - use_max_weights=True, - normalize=True, - ) - ### convert the synaptic contribution dicts to arrays - target_type_contr_arr = np.array(list(target_type_contr_dict.values())) - target_type_contr_max_arr = np.array( - list(target_type_contr_max_dict.values()) - ) - ### get the transformation from synaptic contributions assuming max weights to given synaptic contributions - contr_transform_arr = 
target_type_contr_max_arr / target_type_contr_arr - ### normalize the transform_arr by the largest scaling --> obtain the weight factors - contr_transform_arr /= contr_transform_arr.max() - ### get the weight of the current projection - weight = ( - self.g_max_dict[pop_name][proj_target_type] - * contr_transform_arr[ - list(target_type_contr_dict.keys()).index(proj_name) - ] - ) - ### append weight to weight list - weight_list.append(weight) - ### replace the weights in the afferent_projection_dict - self.afferent_projection_dict[pop_name]["weights"] = weight_list - - ### now scale the weights based on the synaptic load - for pop_name in self.pop_name_list: - for target_type in ["ampa", "gaba"]: - ### get the synaptic load based on the weights - syn_load = self.get_syn_load(pop_name=pop_name, target_type=target_type) - ### if the obtained syn load with the weights is smaller than the given target syn load - ### print warning because upscaling is not possible, syn load is smaller than the user wanted - print( - f"syn_load={syn_load}, target={self.syn_load_dict[pop_name][target_type]}" - ) - if syn_load < self.syn_load_dict[pop_name][target_type]: - ### the weights cannot be upscaled because syn_load was obtained with max weights - ### --> print a warning - warning_txt = f"WARNING set_syn_load: the synaptic load for population {pop_name} and target_type {target_type} cannot reach teh given synaptic load using the given synaptic contributions without scaling the weights over the maximum weights!\ngiven syn_load={self.syn_load_dict[pop_name][target_type]}, obtained syn_load={syn_load}" - self.log(warning_txt) - self._p_w(warning_txt) - ### update the syn_load_dict with the obtained syn_load - self.syn_load_dict[pop_name][target_type] = syn_load - elif syn_load > 0: - ### get the weights - weight_arr = np.array( - self.afferent_projection_dict[pop_name]["weights"] - ) - ### get the proj target type array - proj_target_type_arr = np.array( - 
self.afferent_projection_dict[pop_name]["target"] - ) - ### select the weights for the target type - weight_arr = weight_arr[proj_target_type_arr == target_type] - ### scale the weights - weight_arr *= self.syn_load_dict[pop_name][target_type] / syn_load - ### update the weights in the afferent_projection_dict - weight_idx_arr = np.where(proj_target_type_arr == target_type)[0] - for weight_idx_new, weight_idx_original in enumerate( - weight_idx_arr - ): - self.afferent_projection_dict[pop_name]["weights"][ - weight_idx_original - ] = weight_arr[weight_idx_new] - - ### print guide - self._p_g(_p_g_after_set_syn_load) - - def get_syn_load(self, pop_name: str, target_type: str) -> float: - """ - Calculates the synaptic load of a population for a given target type for the given weights of the afferent_projection_dict + Check if a equation string has the form "tau*dvar/dt = -var". Args: - pop_name: str - name of the population - - target_type: str - either 'ampa' or 'gaba' + line (str): + Equation string + var_name (str): + Name of the variable + tau_name (str): + Name of the time constant Returns: - syn_load: float - synaptic load of the population for the given target type + is_solution_correct (bool): + True if the equation is as expected, False otherwise """ - ### get the proj target type array - proj_target_type_arr = np.array( - self.afferent_projection_dict[pop_name]["target"] - ) - if target_type in proj_target_type_arr: - ### get the weights - weight_arr = np.array(self.afferent_projection_dict[pop_name]["weights"]) - ### select the weights for the target type - weight_arr = weight_arr[proj_target_type_arr == target_type] - ### get the pre size - size_arr = np.array(self.afferent_projection_dict[pop_name]["size"]) - ### select the pre size for the target type - size_arr = size_arr[proj_target_type_arr == target_type] - ### get the probaility - prob_arr = np.array(self.afferent_projection_dict[pop_name]["probability"]) - ### select the probability for the target 
type - prob_arr = prob_arr[proj_target_type_arr == target_type] - ### get the firing rate - firing_rate_arr = np.array( - self.afferent_projection_dict[pop_name]["target firing rate"] - ) - ### select the firing rate for the target type - firing_rate_arr = firing_rate_arr[proj_target_type_arr == target_type] + if var_name not in line: + return False - ### get the synaptic load based on weights, sizes, probabilities and max weights - syn_load = np.sum(weight_arr * size_arr * prob_arr * firing_rate_arr) / ( - self.g_max_dict[pop_name][target_type] - * np.sum(size_arr * prob_arr * firing_rate_arr) - ) - else: - syn_load = 0 + # Define the variables + var, _, _, _ = sp.symbols(f"{var_name} d{var_name} dt {tau_name}") - return syn_load + # Given equation as a string + equation_str = line - def get_template_synaptic_contribution_dict(self, given_dict): - """ - converts the full template dict with all keys for populations, target-types and projections into a reduced dict - which only contains the keys which lead to values smaller 1 - """ + # Parse the equation string + lhs, rhs = equation_str.split("=") + lhs = sp.sympify(lhs) + rhs = sp.sympify(rhs) - ret_dict = {} - for key in given_dict.keys(): - if isinstance(given_dict[key], dict): - rec_dict = self.get_template_synaptic_contribution_dict(given_dict[key]) - if len(rec_dict) > 0: - ret_dict[key] = self.get_template_synaptic_contribution_dict( - given_dict[key] - ) - else: - if given_dict[key] < 1: - ret_dict[key] = given_dict[key] + # Form the equation + equation = sp.Eq(lhs, rhs) - return ret_dict + # Solve the equation for var + try: + solution = sp.solve(equation, var) + except: + ### equation is not solvable with variables means it is not as expected + return False - def divide_almost_equal(self, number, num_parts): - # Calculate the quotient and remainder - quotient, remainder = divmod(number, num_parts) + # Given solution to compare + expected_solution_str = f"-{tau_name}*d{var_name}/dt" + expected_solution = 
sp.sympify(expected_solution_str) - # Initialize a list to store the almost equal integers - result = [quotient] * num_parts + # Check if the solution is as expected + is_solution_correct = solution[0] == expected_solution - # Distribute the remainder evenly among the integers - for i in range(remainder): - result[i] += 1 + return is_solution_correct - return result + class SpikeProbCalcNeuron(Neuron): + """ + Neuron model to calculate the spike probabilities of the presynaptic neurons. + """ - def analyze_model(self): + def __init__(self, reduced_size=1): + """ + Args: + reduced_size (int): + Reduced size of the associated presynaptic population + """ + parameters = f""" + reduced_size = {reduced_size} : population + tau= 1.0 : population + """ + equations = """ + tau*dr/dt = g_ampa/reduced_size - r + g_ampa = 0 + """ + super().__init__(parameters=parameters, equations=equations) + + class InputCalcNeuron(Neuron): """ - prepares the creation of the single neuron and many neuron networks + This neurons gets the spike probabilities of the pre neurons and calculates the + incoming spikes for each projection. It accumulates the incoming spikes of all + projections (of the same target type) and calculates the conductance increase + for the post neuron. """ - ### clear ANNarchy and create the model - cnp_clear() - self.model.create(do_compile=False) - - ### get the neuron models from the model - for pop_name in self.pop_name_list: - self.neuron_model_dict[pop_name] = get_population(pop_name).neuron_type - self.neuron_model_parameters_dict[pop_name] = get_population( - pop_name - ).init.items() - self.neuron_model_attributes_dict[pop_name] = get_population( - pop_name - ).attributes - - ### do further things for which the model needs to be created - ### get the afferent projection dict for the populations (model needed!) 
- for pop_name in self.pop_name_list: - ### get afferent projection dict - self.log(f"get the afferent_projection_dict for {pop_name}") - self.afferent_projection_dict[pop_name] = self.get_afferent_projection_dict( - pop_name=pop_name - ) + def __init__(self, projection_dict): + """ + Args: + projection_dict (dict): + keys: names of afferent projections (of the same target type) + values: dict with keys "weights", "pre_name" + """ - ### create dictionary with timeconstants of g_ampa and g_gaba of the populations - for pop_name in self.pop_name_list: - self.tau_dict[pop_name] = { - "ampa": get_population(pop_name).tau_ampa, - "gaba": get_population(pop_name).tau_gaba, - } + ### create parameters + parameters = [ + f""" + number_synapses_{proj_name} = 0 + weights_{proj_name} = {vals['weights']} + """ + for proj_name, vals in projection_dict.items() + ] + parameters = "\n".join(parameters) - ### get the post_pop_name_dict - self.post_pop_name_dict = {} - for proj_name in self.model.projections: - self.post_pop_name_dict[proj_name] = get_projection(proj_name).post.name - - ### get the pre_pop_name_dict - self.pre_pop_name_dict = {} - for proj_name in self.model.projections: - self.pre_pop_name_dict[proj_name] = get_projection(proj_name).pre.name - - ### get the pre_pop_size_dict - self.pre_pop_size_dict = {} - for proj_name in self.model.projections: - self.pre_pop_size_dict[proj_name] = get_projection(proj_name).pre.size - - ### clear ANNarchy --> the model is not available anymore - cnp_clear() - - def compile_net_many_sequential(self): - network_list = [ - net_many_dict["net"] - for net_many_dict_list in self.net_many_dict.values() - for net_many_dict in net_many_dict_list - ] - for net in network_list: - self.compile_net_many(net=net) - - def compile_net_many_parallel(self): - nr_available_workers = int(multiprocessing.cpu_count() / 2) - network_list = [ - net_many_dict["net"] - for net_many_dict_list in self.net_many_dict.values() - for net_many_dict in 
net_many_dict_list - ] - with multiprocessing.Pool(nr_available_workers) as p: - p.map(self.compile_net_many, network_list) - - ### for each network have network idx - ### network 0 is base network - ### netork 1,2,3...N are the single neuron networks for the N populations - ### start idx = N+1 (inclusive), end_idx = number many networks + N (inclusive) - for net_idx in range( - len(self.pop_name_list) + 1, len(network_list) + len(self.pop_name_list) + 1 - ): - ### get the name of the run folder of the network - ### search for a folder which starts with run_ - ### there should only be 1 --> get run_folder_name as str - run_folder_name = find_folder_with_prefix( - base_path=f"annarchy_folders/many_net_{net_idx}", prefix="run_" + ### create equations + equations = [ + f""" + incoming_spikes_{proj_name} = number_synapses_{proj_name} * sum(spikeprob_{vals['pre_name']}) + Normal(0, 1)*sqrt(number_synapses_{proj_name} * sum(spikeprob_{vals['pre_name']}) * (1 - sum(spikeprob_{vals['pre_name']}))) : min=0, max=number_synapses_{proj_name} + """ + for proj_name, vals in projection_dict.items() + ] + equations = "\n".join(equations) + sum_of_conductance_increase = ( + "r = " + + "".join( + [ + f"incoming_spikes_{proj_name} * weights_{proj_name} + " + for proj_name in projection_dict.keys() + ] + )[:-3] ) - run_folder_name = f"/scratch/olmai/Projects/PhD/CompNeuroPy/CompNeuroPy/examples/model_configurator/annarchy_folders/many_net_{net_idx}//{run_folder_name}" + equations = equations + "\n" + sum_of_conductance_increase - print(run_folder_name) - ### import the ANNarchyCore.so module from this folder - spec = importlib.util.spec_from_file_location( - f"ANNarchyCore{net_idx}", f"{run_folder_name}/ANNarchyCore{net_idx}.so" - ) - foo = importlib.util.module_from_spec(spec) - spec.loader.exec_module(foo) + super().__init__(parameters=parameters, equations=equations) - ### overwrite the entries in the network manager - _network[net_idx]["instance"] = foo - 
_network[net_idx]["compiled"] = True - _network[net_idx]["directory"] = run_folder_name - def get_afferent_projection_dict(self, pop_name): - """ - creates a dictionary containing - projection_names - target firing rate - probability - size - target - for each afferent projection (=first level of keys) of the specified population +class GetMaxSyn: + """ + Find the maximal synaptic input for each population. + """ + def __init__( + self, + model: CompNeuroModel, + simulator: Simulator, + do_not_config_list: list[str], + max_psp: float, + target_firing_rate_dict: dict[str, float], + ): + """ Args: - pop_name: str - populaiton name - - return: dict of dicts - """ - ### check if model is available - if not self.model.created: - error_msg = "ERROR model_configurator get_afferent_projection_dict: the model has to be created!" - self.log(error_msg) - raise AssertionError(error_msg) - ### get projection names - afferent_projection_dict = {} - afferent_projection_dict["projection_names"] = [] - for projection in self.model.projections: - if get_projection(projection).post.name == pop_name: - afferent_projection_dict["projection_names"].append(projection) - - self.nr_afferent_proj_dict[pop_name] = len( - afferent_projection_dict["projection_names"] - ) - - ### get target firing rates resting-state for afferent projections - afferent_projection_dict["target firing rate"] = [] - afferent_projection_dict["probability"] = [] - afferent_projection_dict["size"] = [] - afferent_projection_dict["target"] = [] - for projection in afferent_projection_dict["projection_names"]: - pre_pop_name = get_projection(projection).pre.name - ### target firing rate - afferent_projection_dict["target firing rate"].append( - self.target_firing_rate_dict[pre_pop_name] - ) - ### probability, _connection_args only if connect_fixed_prob (i.e. 
connector_name==Random) - afferent_projection_dict["probability"].append( - get_projection(projection)._connection_args[0] + model (CompNeuroModel): + Model to be analyzed + simulator (Simulator): + Simulator object for simulations with the single neuron networks + do_not_config_list (list[str]): + List of populations which should not be configured + max_psp (float): + Maximal postsynaptic potential in mV + target_firing_rate_dict (dict[str, float]): + Target firing rate for each population + """ + self._simulator = simulator + self._max_syn_dict = {} + ### loop over all populations + for pop_name in model.populations: + ### skip populations which should not be configured + if pop_name in do_not_config_list: + continue + + ### get max g_gabe + g_gaba_max = self._get_max_g_gaba(pop_name=pop_name, max_psp=max_psp) + + ### get max g_ampa + g_ampa_max = self._get_max_g_ampa(pop_name=pop_name, g_gaba_max=g_gaba_max) + + ### get max I_app + I_app_max = self._get_max_I_app( + pop_name=pop_name, target_firing_rate_dict=target_firing_rate_dict ) - ### size - afferent_projection_dict["size"].append(len(get_projection(projection).pre)) - ### target type - afferent_projection_dict["target"].append(get_projection(projection).target) - return afferent_projection_dict + ### store the maximal synaptic input in dict + self._max_syn_dict[pop_name] = {} + self._max_syn_dict[pop_name]["g_gaba"] = g_gaba_max + self._max_syn_dict[pop_name]["g_ampa"] = g_ampa_max + self._max_syn_dict[pop_name]["I_app"] = I_app_max - def get_max_syn_currents(self, pop_name: str) -> list: - """ - obtain I_app_max, g_ampa_max and g_gaba max. 
- f_max = f_0 + f_t + 100 - I_app_max causes f_max (increases f from f_0 to f_max) - g_gaba_max causes max IPSP - g_ampa_max cancels out g_gaba_max IPSP + @property + def max_syn_getter(self): + return self.MaxSynGetter(self._max_syn_dict) - Args: - pop_name: str - population name from original model + class MaxSynGetter: + def __init__(self, max_syn_dict: dict) -> None: + self._max_syn_dict = max_syn_dict - return: - list containing [I_max, g_ampa_max, g_gaba_max] + def get(self, pop_name: str): + """ + Return the maximal synaptic input for the given population. - Abbreviations: - f_max: max firing rate + Args: + pop_name (str): + Name of the population + + Returns: + ReturnMaxSyn: + Maximal synaptic input for the given population with Attributes: g_gaba, + g_ampa, I_app + """ + return self.ReturnMaxSyn( + g_gaba=self._max_syn_dict[pop_name]["g_gaba"], + g_ampa=self._max_syn_dict[pop_name]["g_ampa"], + I_app=self._max_syn_dict[pop_name]["I_app"], + ) - f_0: firing rate without syn currents + class ReturnMaxSyn: + def __init__(self, g_gaba: float, g_ampa: float, I_app: float): + self.g_gaba = g_gaba + self.g_ampa = g_ampa + self.I_app = I_app - f_t: target firing rate + def _get_max_g_gaba(self, pop_name: str, max_psp: float): """ + Find the maximal g_gaba for the given population. A single spike with maximal + g_gaba should result in a inhibitory postsynaptic potential of max_psp. - ### TODO: problem for g_gaba: what if resting potential is <=-90... 
- ### find g_gaba max using max IPSP - self.log("search g_gaba_max with y(X) = PSP(g_ampa=0, g_gaba=X)") - g_gaba_max = self.incremental_continuous_bound_search( - y_X=lambda X_val: self.get_ipsp( - net=self.net_single_dict[pop_name]["net"], - population=self.net_single_dict[pop_name]["population"], - variable_init_sampler=self.prepare_psp_dict[pop_name][ - "variable_init_sampler" - ], - monitor=self.net_single_dict[pop_name]["monitor"], - I_app_hold=self.prepare_psp_dict[pop_name]["I_app_hold"], - g_gaba=X_val, - ), - y_bound=self.max_psp_dict[pop_name], - X_0=0, - y_0=0, - alpha_abs=0.005, - X_increase=0.1, - ) + Args: + pop_name (str): + Name of the population + max_psp (float): + Maximal postsynaptic potential in mV - ### for g_ampa EPSPs can lead to spiking - ### --> find g_ampa max by "overriding" IPSP of g_gaba max - self.log( - f"search g_ampa_max with y(X) = PSP(g_ampa=X, g_gaba=g_gaba_max={g_gaba_max})" + Returns: + g_gaba_max (float): + Maximal g_gaba + """ + ### find g_gaba max using max IPSP + sf.Logger().log( + f"[{pop_name}]: search g_gaba_max with y(X) = PSP(g_ampa=0, g_gaba=X)" ) - g_ampa_max = self.incremental_continuous_bound_search( - y_X=lambda X_val: self.get_ipsp( - net=self.net_single_dict[pop_name]["net"], - population=self.net_single_dict[pop_name]["population"], - variable_init_sampler=self.prepare_psp_dict[pop_name][ - "variable_init_sampler" - ], - monitor=self.net_single_dict[pop_name]["monitor"], - I_app_hold=self.prepare_psp_dict[pop_name]["I_app_hold"], - g_ampa=X_val, - g_gaba=g_gaba_max, - ), - y_bound=0, - X_0=0, - y_0=self.get_ipsp( - net=self.net_single_dict[pop_name]["net"], - population=self.net_single_dict[pop_name]["population"], - variable_init_sampler=self.prepare_psp_dict[pop_name][ - "variable_init_sampler" - ], - monitor=self.net_single_dict[pop_name]["monitor"], - I_app_hold=self.prepare_psp_dict[pop_name]["I_app_hold"], - g_ampa=0, - g_gaba=g_gaba_max, + return ef.find_x_bound( + y=lambda X_val: 
self._simulator.get_ipsp( + pop_name=pop_name, + g_gaba=X_val, ), - alpha_abs=0.005, - X_increase=g_gaba_max / 10, - ) - - ### get f_0 and f_max - f_0 = self.get_rate( - net=self.net_single_dict[pop_name]["net"], - population=self.net_single_dict[pop_name]["population"], - variable_init_sampler=self.net_single_dict[pop_name][ - "variable_init_sampler" - ], - monitor=self.net_single_dict[pop_name]["monitor"], - )[0] - f_max = f_0 + self.target_firing_rate_dict[pop_name] + 100 - - ### find I_max with f_0, and f_max using incremental_continuous_bound_search - self.log("search I_app_max with y(X) = f(I_app=X, g_ampa=0, g_gaba=0)") - I_max = self.incremental_continuous_bound_search( - y_X=lambda X_val: self.get_rate( - net=self.net_single_dict[pop_name]["net"], - population=self.net_single_dict[pop_name]["population"], - variable_init_sampler=self.net_single_dict[pop_name][ - "variable_init_sampler" - ], - monitor=self.net_single_dict[pop_name]["monitor"], - I_app=X_val, - )[0], - y_bound=f_max, - X_0=0, - y_0=f_0, - alpha_abs=1, + x0=0, + y_bound=max_psp, + tolerance=0.005, ) - return [I_max, g_ampa_max, g_gaba_max] - - def incremental_continuous_bound_search( - self, - y_X, - y_bound, - X_0, - y_0, - alpha_rel=0.01, - alpha_abs=None, - n_it_max=100, - X_increase=1, - saturation_thresh=10, - saturation_warning=True, - accept_non_dicontinuity=False, - bound_type="equal", - ): + def _get_max_g_ampa(self, pop_name: str, g_gaba_max: float): """ - you have system X --> y - you want X for y=y_bound (either upper or lower bound) - if you increase X (from starting point) y gets closer to y_bound! - - expectes a continuous funciton without from P_0(X_0,y_0) to P_bound(X_bound, y_bound) - if it finds a saturation or non-continuous "step" on the way to P_bound it will return - the X_bound for the end of the continuous part from P_0 to P_bound --> y_bound will not - be reached + Find the maximal g_ampa for the given population. 
The maximal g_ampa should + override the maximal IPSP of g_gaba. Args: - y_X: function - returns a single number given a single number, call like y = y_X(X) - increasing X should bring y closer to y_bound - - y_bound: number - the bound for y for which an X_bound should be found - - X_0: number - start value of X, from where the search should start - - y_0: number - start value of y which results from X_0 - - alpha_rel: number, optional, default=0.001 - allowed relative tolerance for deviations of y from y_bound - if alpha_abs is given it overrides alpha_rel + pop_name (str): + Name of the population + g_gaba_max (float): + Maximal g_gaba - alpha_abs: number, optional, default=None - allowed absolute tolerance for deviations of y from y_bound - if alpha_abs is given it overrides alpha_rel - - n_it_max: number, optional, default=100 - maximum of iterations to find X_bound - - X_increase: number, optional, default=1 - the first increase of X (starting from X_0) to obtain the first new y_val - i.e. 
first calculation is: y_val = y_X(X_0+X_increase) - - saturation_thresh: number, optional, default=5 - if y does not change while increasing X by X_increase the search will stop - after this number of trials - - saturation_warning: bool, optional, default=True - if you want to get a warning when the saturation is reached during search - - accept_non_dicontinuity: bool, optional, default=False - if you do not want to search only in the first continuous search space - - bound_type: str, optional, default="equal" - equal, greater or less - equal: result should be near bound within tolerance - greater: result should be at least larger bound within tolerance - less: result should be smaller bound within tolerance - - return: - X_bound: - X value which causes y=y_bound + Returns: + g_ampa_max (float): + Maximal g_ampa """ - ### TODO catch difference to target goes up in both directions - ### then nothing new is predicted --> fails - - self.log( - f"find X_bound for: y_0(X_0={X_0})={y_0} --> y_bound(X_bound=??)={y_bound}" + ### find g_ampa max by "overriding" IPSP of g_gaba max + sf.Logger().log( + f"[{pop_name}]: search g_ampa_max with y(X) = PSP(g_ampa=X, g_gaba=g_gaba_max={g_gaba_max})" ) - ### get tolerance - tolerance = abs(y_bound - y_0) * alpha_rel - if not isinstance(alpha_abs, type(None)): - tolerance = alpha_abs - - ### define stop condition - if bound_type == "equal": - stop_condition = ( - lambda y_val, n_it: ( - ((y_bound - tolerance) <= y_val) - and (y_val <= (y_bound + tolerance)) - ) - or n_it >= n_it_max - ) - elif bound_type == "greater": - stop_condition = ( - lambda y_val, n_it: ( - ((y_bound - 0) <= y_val) and (y_val <= (y_bound + 2 * tolerance)) - ) - or n_it >= n_it_max - ) - elif bound_type == "less": - stop_condition = ( - lambda y_val, n_it: ( - ((y_bound - 2 * tolerance) <= y_val) and (y_val <= (y_bound + 0)) - ) - or n_it >= n_it_max + def func(x): + ipsp = self._simulator.get_ipsp( + pop_name=pop_name, + g_gaba=g_gaba_max, + g_ampa=x, ) - - 
### check if y(X) is increasing - is_increasing = y_bound > y_0 - - ### search for X_val - X_list_predict = [X_0] - y_list_predict = [y_0] - X_list_all = [X_0] - y_list_all = [y_0] - n_it_first_round = 0 - X_val = X_0 + X_increase - y_val = y_0 - y_not_changed_counter = 0 - X_change_predicted = X_increase - while not stop_condition(y_val, n_it_first_round): - ### get y_val for X - y_val_pre = y_val - y_val = y_X(X_val) - y_change = y_val_pre - y_val - - ### store search history - X_list_all.append(X_val) - y_list_all.append(y_val) - - ### get next X_val depending on if y_val changed or not - if abs(y_change) > 0: - ### append X_val and y_val to y_list/X_list - y_list_predict.append(y_val) - X_list_predict.append(X_val) - ### predict new X_val using y_bound as predictor - X_val_pre = X_val - X_val = self.predict_1d( - X=y_list_predict, y=X_list_predict, X_pred=y_bound - )[0] - X_change_predicted = X_val - X_val_pre - ### now actually update X_val - X_val = X_val_pre + X_change_predicted * (1 + y_not_changed_counter / 2) - else: - ### just increase X_val - X_val = X_val + X_change_predicted * (1 + y_not_changed_counter / 2) - - ### check saturation of y_val - if abs(y_change) < tolerance: - ### increase saturation counter - ### saturation counter also increases updates of X_val - y_not_changed_counter += 1 + ### find_x_bound tries to increase x to increase y, therefore, use + ### -ipsp, since initially the ipsp is maximal, thus, y is negative and by + ### increasing x it should increase to 0 + y = -ipsp + ### next problem: find_x_bound expects a function which increases beyond the + ### bound but -ipsp can maximum reach 0, thus, use -ipsp + x + if y >= 0: + return y + x else: - ### reset saturation counter - y_not_changed_counter = 0 - - ### break if y_val saturated - if y_not_changed_counter >= saturation_thresh: - break + return y - ### increase iterator - n_it_first_round += 1 - - ### catch the initial point already satisified stop condition - if len(X_list_all) 
== 1: - warning_txt = "WARNING incremental_continuous_bound_search: search did not start because initial point already satisfied stop condition!" - self._p_w(warning_txt) - self.log(warning_txt) - return X_0 - - ### warning if search saturated - if (y_not_changed_counter >= saturation_thresh) and saturation_warning: - warning_txt = f"WARNING incremental_continuous_bound_search: search saturated at y={y_list_predict[-1]} while searching for X_val for y_bound={y_bound}" - self._p_w(warning_txt) - self.log(warning_txt) - self.log("initial search lists:") - self.log("all:") - self.log(np.array([X_list_all, y_list_all]).T) - self.log("predict:") - self.log(np.array([X_list_predict, y_list_predict]).T) - - ### if search saturated right at the begining --> search failed (i.e. y did not change while increasing X) - if (y_not_changed_counter >= saturation_thresh) and len(X_list_predict) == 1: - error_msg = "ERROR incremental_continuous_bound_search: search failed because changing X_val did not change y_val" - self.log(error_msg) - raise AssertionError(error_msg) - - ### get best X value for which y is closest to y_bound - idx_best = np.argmin(np.absolute(np.array(y_list_predict) - y_bound)) - X_bound = X_list_predict[idx_best] - - ### sort y_list_predict and corresponding X_list_predict - ### get value pair which is before bound and value pair which is behind bound - ### if this does not work... 
use previous X_0 and X_bound - sort_idx_arr = np.argsort(y_list_predict) - X_arr_predict_sort = np.array(X_list_predict)[sort_idx_arr] - y_arr_predict_sort = np.array(y_list_predict)[sort_idx_arr] - over_y_bound_arr = y_arr_predict_sort > y_bound - over_y_bound_changed_idx = np.where(np.diff(over_y_bound_arr))[0] - if len(over_y_bound_changed_idx) == 1: - if over_y_bound_changed_idx[0] < len(y_arr_predict_sort): - X_aside_change_list = [ - X_arr_predict_sort[over_y_bound_changed_idx[0]], - X_arr_predict_sort[over_y_bound_changed_idx[0] + 1], - ] - y_aside_change_list = [ - y_arr_predict_sort[over_y_bound_changed_idx[0]], - y_arr_predict_sort[over_y_bound_changed_idx[0] + 1], - ] - X_0 = min(X_aside_change_list) - X_bound = max(X_aside_change_list) - y_0 = min(y_aside_change_list) - self.log("predict sorted:") - self.log(np.array([X_arr_predict_sort, y_arr_predict_sort, over_y_bound_arr]).T) - self.log(over_y_bound_changed_idx) - - ### if y cannot get larger or smaller than y_bound one has to check if you not "overshoot" with X_bound - ### --> fine tune result by investigating the space between X_0 and X_bound and predict a new X_bound - self.log(f"X_0: {X_0}, X_bound:{X_bound} for final predict list") - X_space_arr = np.linspace(X_0, X_bound, 100) - y_val = y_0 - [-1, 1][int(is_increasing)] - X_list_predict = [] - y_list_predict = [] - X_list_all = [] - y_list_all = [] - did_break = False - n_it_second_round = 0 - for X_val in X_space_arr: - y_val_pre = y_val - y_val = y_X(X_val) - X_list_all.append(X_val) - y_list_all.append(y_val) - if y_val != y_val_pre: - ### if y_val changed - ### append X_val and y_val to y_list/X_list - y_list_predict.append(y_val) - X_list_predict.append(X_val) - ### if already over y_bound -> stop - if y_val > y_bound and is_increasing: - did_break = True - break - if y_val < y_bound and not is_increasing: - did_break = True - break - n_it_second_round += 1 - ### if did break early --> use again finer bounds - if did_break and 
n_it_second_round < 90: - X_space_arr = np.linspace( - X_list_predict[-2], X_list_predict[-1], 100 - n_it_second_round - ) - y_val = y_list_predict[-2] - for X_val in X_space_arr: - y_val_pre = y_val - y_val = y_X(X_val) - X_list_all.append(X_val) - y_list_all.append(y_val) - if y_val != y_val_pre: - ### if y_val changed - ### append X_val and y_val to y_list/X_list - y_list_predict.append(y_val) - X_list_predict.append(X_val) - ### if already over y_bound -> stop - if y_val > y_bound and is_increasing: - break - if y_val < y_bound and not is_increasing: - break - ### sort value lists - sort_idx_all_arr = np.argsort(X_list_all) - X_list_all = (np.array(X_list_all)[sort_idx_all_arr]).tolist() - y_list_all = (np.array(y_list_all)[sort_idx_all_arr]).tolist() - sort_idx_predict_arr = np.argsort(X_list_predict) - X_list_predict = (np.array(X_list_predict)[sort_idx_predict_arr]).tolist() - y_list_predict = (np.array(y_list_predict)[sort_idx_predict_arr]).tolist() - - ### log - self.log("final predict lists:") - self.log("all:") - self.log(np.array([X_list_all, y_list_all]).T) - self.log("predict:") - self.log(np.array([X_list_predict, y_list_predict]).T) - - ### check if there is a discontinuity in y_all, starting with the first used value in y_predict - ### update all values with first predict value - first_y_used_in_predict = y_list_predict[0] - idx_first_y_in_all = y_list_all.index(first_y_used_in_predict) - y_list_all = y_list_all[idx_first_y_in_all:] - ### get discontinuity - discontinuity_idx_list = self.get_discontinuity_idx_list(y_list_all) - self.log("discontinuity_idx_list") - self.log(f"{discontinuity_idx_list}") - if len(discontinuity_idx_list) > 0 and not accept_non_dicontinuity: - ### there is a discontinuity - discontinuity_idx = discontinuity_idx_list[0] - ### only use values until discontinuity - y_bound_new = y_list_all[discontinuity_idx] - idx_best = y_list_predict.index(y_bound_new) - X_val_best = X_list_predict[idx_best] - y_val_best = 
y_list_predict[idx_best] - ### print warning - warning_txt = f"WARNING incremental_continuous_bound_search: found discontinuity, only reached y={y_bound_new} while searching for y_bound={y_bound}" - self._p_w(warning_txt) - ### log - self.log(warning_txt) - self.log( - f"discontinuities detected --> only use last values until first discontinuity: X={X_val_best}, y={y_val_best}" - ) - else: - ### there is no discontinuity - ### there can still be duplicates in the y_list --> remove them - ### get arrays - X_arr_predict = np.array(X_list_predict) - y_arr_predict = np.array(y_list_predict) - ### get unique indices - _, unique_indices = np.unique(y_arr_predict, return_index=True) - ### get arrays without duplicates in y_list - X_arr_predict = X_arr_predict[unique_indices] - y_arr_predict = y_arr_predict[unique_indices] - - ### now predict final X_val using y_arr - X_val = self.predict_1d( - X=y_arr_predict, y=X_arr_predict, X_pred=y_bound, linear=False - )[0] - y_val = y_X(X_val) - - ### append it to lists - X_list_predict.append(X_val) - y_list_predict.append(y_val) - - ### find best - idx_best = np.argmin(np.absolute(np.array(y_list_predict) - y_bound)) - X_val_best = X_list_predict[idx_best] - y_val_best = y_list_predict[idx_best] - - ### log - self.log(f"final values: X={X_val_best}, y={y_val_best}") - - ### warning for max iteration search - if not (n_it_first_round < n_it_max): - warning_txt = f"WARNING incremental_continuous_bound_search: reached max iterations to find X_bound to get y_bound={y_bound}, found X_bound causes y={y_val_best}" - self._p_w(warning_txt) - self.log(warning_txt) - - return X_val_best - - def get_discontinuity_idx_list(self, arr): - """ - Args: - arr: array-like - array for which its checked if there are discontinuities - """ - arr = np.array(arr) - range_data = arr.max() - arr.min() - diff_arr = np.diff(arr) - diff_rel_range_arr = diff_arr / range_data - diff_rel_range_abs_arr = np.absolute(diff_rel_range_arr) - peaks = find_peaks( - 
diff_rel_range_abs_arr, prominence=10 * np.mean(diff_rel_range_abs_arr) + return ef.find_x_bound( + y=func, + x0=0, + y_bound=0, + tolerance=0.005, ) - peaks_idx_list = peaks[0] - - return peaks_idx_list - - def predict_1d(self, X, y, X_pred, linear=True): - """ - Args: - X: array-like - X values - - y: array-like - y values, same size as X_values - - X_pred: array-like or number - X value(s) for which new y value(s) are predicted - - linear: bool, optional, default=True - if interpolation is linear - return: - Y_pred_arr: array - predicted y values for X_pred + def _get_max_I_app(self, pop_name: str, target_firing_rate_dict: dict[str, float]): """ - if not linear: - if len(X) >= 4: - y_X = interp1d(x=X, y=y, fill_value="extrapolate", kind="cubic") - elif len(X) >= 3: - y_X = interp1d(x=X, y=y, fill_value="extrapolate", kind="quadratic") - else: - y_X = interp1d(x=X, y=y, fill_value="extrapolate", kind="linear") - y_pred_arr = y_X(X_pred) - return y_pred_arr.reshape(1) - - def get_rate_dict( - self, - net, - population_dict, - variable_init_sampler_dict, - monitor_dict, - I_app_dict, - g_ampa_dict, - g_gaba_dict, - ): - """ - function to obtain the firing rates of the populations of - the network given with 'idx' for given I_app, g_ampa and g_gaba values + Find the maximal current input for the given population. The maximal current + input should result in "resting" firing rate + target firing rate + 100 Hz. 
Args: - idx: int - network index given by the parallel_run function - - net: object - network object given by the parallel_run function - - net_many_dict: dict - dictionary containing a population_dict and a monitor_dict - which contain for each population name the - - ANNarchy Population object of the magic network - - ANNarchy Monitor object of the magic network - - I_app_arr_dict: dict of arrays - dictionary containing for each population the array with input values for I_app - - g_ampa_arr_dict: dict of arrays - dictionary containing for each population the array with input values for g_ampa - - g_gaba_arr_dict: dict of arrays - dictionary containing for each population the array with input values for g_gaba - - variable_init_sampler_dict: dict - dictionary containing for each population the initial variables sampler object - with the function.sample() to get initial values of the neurons + pop_name (str): + Name of the population + target_firing_rate_dict (dict[str, float]): + Target firing rate for each population - self: object - the model_configurator object - - return: - f_rec_arr_dict: dict of arrays - dictionary containing for each population the array with the firing rates for the given inputs - """ - ### reset and set init values - net.reset() - for pop_name, varaible_init_sampler in variable_init_sampler_dict.items(): - population = net.get(population_dict[pop_name]) - variable_init_arr = varaible_init_sampler.sample(len(population), seed=0) - for var_idx, var_name in enumerate(population.variables): - set_val = variable_init_arr[:, var_idx] - setattr(population, var_name, set_val) - - ### slow down conductances (i.e. 
make them constant) - for pop_name in population_dict.keys(): - population = net.get(population_dict[pop_name]) - population.tau_ampa = 1e20 - population.tau_gaba = 1e20 - ### apply given variables - for pop_name in population_dict.keys(): - population = net.get(population_dict[pop_name]) - population.I_app = I_app_dict[pop_name] - population.g_ampa = g_ampa_dict[pop_name] - population.g_gaba = g_gaba_dict[pop_name] - ### simulate 500 ms initial duration + X ms - net.simulate(500 + self.simulation_dur) - ### get rate for the last X ms - f_arr_dict = {} - for pop_name in population_dict.keys(): - population = net.get(population_dict[pop_name]) - monitor = net.get(monitor_dict[pop_name]) - spike_dict = monitor.get("spike") - f_arr = np.zeros(len(population)) - for idx_n, n in enumerate(spike_dict.keys()): - time_list = np.array(spike_dict[n]) - nbr_spks = np.sum((time_list > (500 / dt())).astype(int)) - rate = nbr_spks / (self.simulation_dur / 1000) - f_arr[idx_n] = rate - f_arr_dict[pop_name] = f_arr - - return f_arr_dict - - def get_rate( - self, - net, - population, - variable_init_sampler, - monitor, - I_app=0, - g_ampa=0, - g_gaba=0, - ): + Returns: + I_app_max (float): + Maximal current input """ - simulates a population for X+500 ms and returns the firing rate of each neuron for the last X ms - X is defined with self.simulation_dur - - Args: - net: ANNarchy network - network which contains the population and monitor - - population: ANNarchy population - population which is recorded and stimulated - - variable_init_sampler: object - containing the initial values of the population neuron, use .sample() to get values - - monitor: ANNarchy monitor - to record spikes from population - - I_app: number or arr, optional, default = 0 - applied current to the population neurons, has to have the same size as the population + ### get f_0 and f_max + f_0 = self._simulator.get_firing_rate(pop_name=pop_name) + f_max = f_0 + target_firing_rate_dict[pop_name] + 100 - g_ampa: 
number or arr, optional, default = 0 - applied ampa conductance to the population neurons, has to have the same size as the population + ### find I_max with f_0, and f_max using find_x_bound + sf.Logger().log( + f"[{pop_name}]: search I_app_max with y(X) = f(I_app=X, g_ampa=0, g_gaba=0)" + ) + I_max = ef.find_x_bound( + y=lambda X_val: self._simulator.get_firing_rate( + pop_name=pop_name, + I_app=X_val, + ), + x0=0, + y_bound=f_max, + tolerance=1, + ) - g_gaba: number or arr, optional, default = 0 - applied gaba conductance to the population neurons, has to have the same size as the population - """ - ### reset and set init values - net.reset() - self.set_init_variables(population, variable_init_sampler) - ### slow down conductances (i.e. make them constant) - population.tau_ampa = 1e20 - population.tau_gaba = 1e20 - ### apply given variables - population.I_app = I_app - population.g_ampa = g_ampa - population.g_gaba = g_gaba - ### simulate 500 ms initial duration + X ms - net.simulate(500 + self.simulation_dur) - ### get rate for the last X ms - spike_dict = monitor.get("spike") - f_arr = np.zeros(len(population)) - for idx_n, n in enumerate(spike_dict.keys()): - time_list = np.array(spike_dict[n]) - nbr_spks = np.sum((time_list > (500 / dt())).astype(int)) - rate = nbr_spks / (self.simulation_dur / 1000) - f_arr[idx_n] = rate + return I_max - return f_arr - def get_ipsp( +class GetWeights: + def __init__( self, - net: Network, - population: Population, - variable_init_sampler, - monitor, - I_app_hold, - g_ampa=0, - g_gaba=0, - do_plot=False, + model: CompNeuroModel, + do_not_config_list: list[str], + analyze_model: AnalyzeModel, + max_syn: GetMaxSyn.MaxSynGetter, ): - """ - simulates a single spike at t=50 ms and records the change of v within a voltage_clamp neuron - - Args: - net: ANNarchy network - network which contains the population and monitor - - population: ANNarchy population - population which is recorded and stimulated - - variable_init_sampler: 
object - containing the initial values of the population neuron, use .sample() to get values - - monitor: ANNarchy monitor - to record v_clamp_rec from population - - g_ampa: number, optional, default = 0 - applied ampa conductance to the population neuron at t=50 ms - - g_gaba: number, optional, default = 0 - applied gaba conductance to the population neurons at t=50 ms - """ - ### reset network and set initial values - net.reset() - self.set_init_variables(population, variable_init_sampler) - ### apply input - population.I_app = I_app_hold - ### simulate 50 ms initial duration - net.simulate(50) - ### apply given conductances --> changes v - v_rec_rest = population.v[0] - population.v_psp_thresh = v_rec_rest - population.g_ampa = g_ampa - population.g_gaba = g_gaba - ### simulate until v is near v_rec_rest again - net.simulate_until(max_duration=self.simulation_dur, population=population) - ### get the psp = maximum of difference of v_rec and v_rec_rest - v_rec = monitor.get("v")[:, 0] - spike_dict = monitor.get("spike") - spike_timestep_list = spike_dict[0] + [net.get_current_step()] - end_timestep = int(round(min(spike_timestep_list), 0)) - psp = float( - np.absolute(np.clip(v_rec[:end_timestep] - v_rec_rest, None, 0)).max() - ) - - if do_plot: - plt.figure() - plt.title( - f"g_ampa={g_ampa}\ng_gaba={g_gaba}\nv_rec_rest={v_rec_rest}\npsp={psp}" + self._model = model + self._do_not_config_list = do_not_config_list + self._analyze_model = analyze_model + self._max_syn = max_syn + ### initialize the weight_dict with the maximal weights + weight_dict_init = {} + for proj_name in model.projections: + post_pop_name = analyze_model.pre_post_pop_name_dict[proj_name][1] + target_type = analyze_model.proj_init_parameter_dict[proj_name]["target"] + if target_type == "ampa": + weight = self._max_syn.get(pop_name=post_pop_name).g_ampa + elif target_type == "gaba": + weight = self._max_syn.get(pop_name=post_pop_name).g_gaba + weight_dict_init[proj_name] = weight + ### first 
set the interal weight dict variable, then the property to calculate + ### syn_load_dict and syn_contribution_dict + self._weight_dict = weight_dict_init + self.weight_dict = weight_dict_init + + @property + def weight_dict(self): + return self._weight_dict + + @weight_dict.setter + def weight_dict(self, value: dict[str, float]): + ### check if the dictionary "value" has the same keys as the internal weight_dict + if set(value.keys()) != set(self._weight_dict.keys()): + raise ValueError( + f"The keys of the weight_dict must be: {set(self._weight_dict.keys())}" ) - plt.plot(v_rec) - plt.savefig( - f"tmp_psp_{population.name}_{int(g_ampa*1000)}_{int(g_gaba*1000)}.png" + ### if weight_dict is set, recalculate syn_load_dict and syn_contribution_dict + self._weight_dict = value + self._syn_load_dict = self._get_syn_load_dict() + self._syn_contribution_dict = self._get_syn_contribution_dict() + + @property + def syn_load_dict(self): + return self._syn_load_dict + + @syn_load_dict.setter + def syn_load_dict(self, value: dict[str, dict[str, float]]): + ### check if the dictionary "value" has the same structure as the internal + ### nested dict syn_load_dict + if set(value.keys()) != set(self._syn_load_dict.keys()): + raise ValueError( + f"The syn_load_dict must have this structure: {self._syn_load_dict}" ) - plt.close("all") + for pop_name in value.keys(): + if set(value[pop_name].keys()) != set(self._syn_load_dict[pop_name].keys()): + raise ValueError( + f"The syn_load_dict must have this structure: {self._syn_load_dict}" + ) + ### check if values are between 0 and 1 + for pop_name in value.keys(): + for target in value[pop_name].keys(): + if not 0 <= value[pop_name][target] <= 1: + raise ValueError( + "The values of the syn_load_dict must be between 0 and 1" + ) - return psp + ### if syn_load_dict is set, recalculate weight_dict + self._syn_load_dict = value + self._weight_dict = self._get_weight_dict() - def compile_net_many(self, net): - compile_in_folder( - 
folder_name=f"many_net_{net.id}", net=net, clean=True, silent=True - ) + @property + def syn_contribution_dict(self): + return self._syn_contribution_dict - def create_many_neuron_network(self): - """ - creates a ANNarchy magic network with all popualtions which should be configured the size - of the populations is equal and is obtianed by dividing the number of the - interpolation values by the number of networks which will be used during run_parallel - - return: - net_many_dict: dict - contains - - population_dict: for all population names the created population in the magic network - - monitor_dict: for all population names the created monitors in the magic network - """ - self.log("create many neurons network") - - ### for each population of the given model which should be configured - ### create a population with a given size - ### create a monitor recording spikes - ### create a network containing the population and the monitor - many_neuron_population_list = [] - many_neuron_monitor_list = [] - many_neuron_network_list = [] - for pop_name in self.pop_name_list: - ### create the neuron model with poisson spike trains - ### get the initial arguments of the neuron - neuron_model = self.neuron_model_dict[pop_name] - ### names of arguments - init_arguments_name_list = list(Neuron.__init__.__code__.co_varnames) - init_arguments_name_list.remove("self") - init_arguments_name_list.remove("name") - init_arguments_name_list.remove("description") - ### arguments dict - init_arguments_dict = { - init_arguments_name: getattr(neuron_model, init_arguments_name) - for init_arguments_name in init_arguments_name_list - } - ### get the afferent populations - afferent_population_list = [] - proj_target_type_list = [] - for proj_name in self.afferent_projection_dict[pop_name][ - "projection_names" - ]: - proj_dict = self.get_proj_dict(proj_name) - pre_pop_name = proj_dict["pre_pop_name"] - afferent_population_list.append(pre_pop_name) - 
proj_target_type_list.append(proj_dict["proj_target_type"]) - - ### for each afferent population create a binomial spike train equation string - ### add it to the equations - ### and add the related parameters to the parameters - - ### split the equations and parameters string - equations_line_split_list = str( - init_arguments_dict["equations"] - ).splitlines() - - parameters_line_split_list = str( - init_arguments_dict["parameters"] - ).splitlines() - - ### add the binomial spike train equations and parameters - ( - equations_line_split_list, - parameters_line_split_list, - ) = self.add_binomial_input( - equations_line_split_list, - parameters_line_split_list, - afferent_population_list, - proj_target_type_list, + @syn_contribution_dict.setter + def syn_contribution_dict(self, value: dict[str, dict[str, dict[str, float]]]): + ### check if the dictionary "value" has the same structure as the internal + ### nested dict syn_contribution_dict + if set(value.keys()) != set(self._syn_contribution_dict.keys()): + raise ValueError( + f"The syn_contribution_dict must have this structure: {self._syn_contribution_dict}" ) + for pop_name in value.keys(): + if set(value[pop_name].keys()) != set( + self._syn_contribution_dict[pop_name].keys() + ): + raise ValueError( + f"The syn_contribution_dict must have this structure: {self._syn_contribution_dict}" + ) + for target in value[pop_name].keys(): + if set(value[pop_name][target].keys()) != set( + self._syn_contribution_dict[pop_name][target].keys() + ): + raise ValueError( + f"The syn_contribution_dict must have this structure: {self._syn_contribution_dict}" + ) + ### check if values are between 0 and 1 + for pop_name in value.keys(): + for target in value[pop_name].keys(): + for proj_name in value[pop_name][target].keys(): + if not 0 <= value[pop_name][target][proj_name] <= 1: + raise ValueError( + "The values of the syn_contribution_dict must be between 0 and 1" + ) - ### combine string lines to multiline strings again - 
init_arguments_dict["parameters"] = "\n".join(parameters_line_split_list) - init_arguments_dict["equations"] = "\n".join(equations_line_split_list) - - ### create neuron model with new equations - neuron_model_new = Neuron(**init_arguments_dict) - - # print("new neuron model:") - # print(neuron_model_new) + ### if syn_contribution_dict is set, recalculate weight_dict + self._syn_contribution_dict = value + self._weight_dict = self._get_weight_dict() + + def _get_weight_dict(self): + ### set the weights population wise for the afferent projections + weight_dict = {} + ### loop over all populations + for pop_name in self._model.populations: + ### skip populations which should not be configured + if pop_name in self._do_not_config_list: + continue + synaptic_load = self._syn_load_dict[pop_name] + ### loop over target types + for target, load in synaptic_load.items(): + synaptic_contribution = self._syn_contribution_dict[pop_name][target] + ### loop over afferebt projections with target type + for proj_name in synaptic_contribution.keys(): + max_conductance = ( + self._max_syn.get(pop_name=pop_name).g_ampa + if target == "ampa" + else self._max_syn.get(pop_name=pop_name).g_gaba + ) + weight_dict[proj_name] = ( + load * synaptic_contribution[proj_name] * max_conductance + ) - ### create the many neuron population - my_pop = Population( - geometry=self.nr_neurons_per_net, - neuron=neuron_model_new, - name=f"many_neuron_{pop_name}", - ) + return weight_dict - ### set the attributes of the neurons - for attr_name, attr_val in self.neuron_model_parameters_dict[pop_name]: - setattr(my_pop, attr_name, attr_val) + def _get_syn_load_dict(self): + syn_load_dict = {} + ### loop over populations + for pop_name in self._model.populations: + ### skip populations which should not be configured + if pop_name in self._do_not_config_list: + continue + syn_load_dict[pop_name] = {} + ### loop over target types + for target in ["ampa", "gaba"]: + ### get all afferent projections with 
target type + proj_name_list = self._get_afferent_proj_names( + pop_name=pop_name, target=target + ) + if len(proj_name_list) == 0: + continue + ### get the maximal weight of the afferent projections + max_weight = max( + [self._weight_dict[proj_name] for proj_name in proj_name_list] + ) + ### get the synaptic load + if target == "ampa": + syn_load_dict[pop_name][target] = ( + max_weight / self._max_syn.get(pop_name=pop_name).g_ampa + ) + elif target == "gaba": + syn_load_dict[pop_name][target] = ( + max_weight / self._max_syn.get(pop_name=pop_name).g_gaba + ) - ### create Monitor for many neuron - my_mon = Monitor(my_pop, ["spike"]) + return syn_load_dict + + def _get_syn_contribution_dict(self): + syn_contribution_dict = {} + ### loop over populations + for pop_name in self._model.populations: + ### skip populations which should not be configured + if pop_name in self._do_not_config_list: + continue + syn_contribution_dict[pop_name] = {} + ### loop over target types + for target in ["ampa", "gaba"]: + ### get all afferent projections with target type + proj_name_list = self._get_afferent_proj_names( + pop_name=pop_name, target=target + ) + if len(proj_name_list) == 0: + continue + ### get the synaptic contribution + syn_contribution_dict[pop_name][target] = {} + for proj_name in proj_name_list: + syn_contribution_dict[pop_name][target][ + proj_name + ] = self._weight_dict[proj_name] / max( + [self._weight_dict[proj_name] for proj_name in proj_name_list] + ) - ### create the network with population and monitor - my_net = Network() - my_net.add(my_pop) - my_net.add(my_mon) + return syn_contribution_dict - ### compile network - compile_in_folder(folder_name=f"many_neuron_{pop_name}", net=my_net) + ### synaptic load for each population between 0 and 1, determines the largest weight of incoming synapses, 1 means maximal conductance + ### to get synaptic load of a population/target, get all afferent projections of the population/target and take the maximal weight 
divided by the (global) maximal weight + ### synaptic contribution for each if a population has for a target type multiple afferent projections --> array with numbers for these projections + ### divide the array by tjhe max value --> e.g. result is [0.6,1.0] weights of the projections then are 0.6*max_weight and 1.0*max_weight, where max weight is determined by the synaptic load - ### append the lists - many_neuron_network_list.append(my_net) - many_neuron_population_list.append(my_net.get(my_pop)) - many_neuron_monitor_list.append(my_net.get(my_mon)) + def _get_afferent_proj_names(self, pop_name: str, target: str): + proj_name_list = [] + for proj_name in self._model.projections: + if ( + self._analyze_model.pre_post_pop_name_dict[proj_name][1] == pop_name + and self._analyze_model.proj_init_parameter_dict[proj_name]["target"] + == target + ): + proj_name_list.append(proj_name) - net_many_dict = { - "network_list": many_neuron_network_list, - "population_list": many_neuron_population_list, - "monitor_list": many_neuron_monitor_list, - } - return net_many_dict + return proj_name_list - def add_binomial_input( - self, - equations_line_split_list, - parameters_line_split_list, - afferent_population_list, - proj_target_type_list, - ): - ### loop over afferent populations to add the new equation lines and parameters - for pre_pop_name in afferent_population_list: - ### define the spike train of a pre population as a binomial process with number of trials = number of pre neurons and success probability = spike probability (taken from Poisson neurons) - ### the obtained value is the number of spikes at a time step times the weight - poisson_equation_str = f"{pre_pop_name}_spike_train = Binomial({pre_pop_name}_size, {pre_pop_name}_spike_prob)" - ### add the equation line - equations_line_split_list.insert(1, poisson_equation_str) - ### add the parameters - parameters_line_split_list.append(f"{pre_pop_name}_size = 0 : population") - parameters_line_split_list.append( - 
f"{pre_pop_name}_spike_prob = 0 : population" - ) - parameters_line_split_list.append(f"{pre_pop_name}_weight = 0 : population") - - ### change the g_ampa and g_gaba line, they additionally are the sum of the spike trains - for equation_line_idx, equation_line in enumerate(equations_line_split_list): - ### remove whitespaces - line = equation_line.replace(" ", "") - ### check if line contains g_ampa - if "dg_ampa/dt" in line: - ### get the right side of the equation - line_right = line.split("=")[1] - line_left = line.split("=")[0] - ### remove and store tags_str - tags_str = "" - if len(line_right.split(":")) > 1: - line_right, tags_str = line_right.split(":") - ### get the populations whose spike train should be appended in g_ampa - afferent_population_to_append_list = [] - for pre_pop_idx, pre_pop_name in enumerate(afferent_population_list): - if proj_target_type_list[pre_pop_idx] == "ampa": - afferent_population_to_append_list.append(pre_pop_name) - if len(afferent_population_to_append_list) > 0: - ### change right side, add the sum of the spike trains - line_right = f"{line_right} + {'+'.join([f'({pre_pop_name}_spike_train*{pre_pop_name}_weight)/dt' for pre_pop_name in afferent_population_to_append_list])}" - ### add tags_str again - if tags_str != "": - line_right = f"{line_right}:{tags_str}" - ### combine line again and replace the list entry in equations_line_split_list - line = f"{line_left}={line_right}" - equations_line_split_list[equation_line_idx] = line - - ### check if line contains g_gaba - if "dg_gaba/dt" in line: - ### get the right side of the equation - line_right = line.split("=")[1] - line_left = line.split("=")[0] - ### remove and store tags_str - tags_str = "" - if len(line_right.split(":")) > 1: - line_right, tags_str = line_right.split(":") - ### get the populations whose spike train should be appended in g_ampa - afferent_population_to_append_list = [] - for pre_pop_idx, pre_pop_name in enumerate(afferent_population_list): - if 
proj_target_type_list[pre_pop_idx] == "gaba": - afferent_population_to_append_list.append(pre_pop_name) - if len(afferent_population_to_append_list) > 0: - ### change right side, add the sum of the spike trains - line_right = f"{line_right} + {'+'.join([f'({pre_pop_name}_spike_train*{pre_pop_name}_weight)/dt' for pre_pop_name in afferent_population_to_append_list])}" - ### add tags_str again - if tags_str != "": - line_right = f"{line_right}:{tags_str}" - ### combine line again and replace the list entry in equations_line_split_list - line = f"{line_left}={line_right}" - equations_line_split_list[equation_line_idx] = line - - return (equations_line_split_list, parameters_line_split_list) - def get_v_clamp_2000( - self, - net: Network, - population, - monitor=None, - v=None, - I_app=None, - variable_init_sampler=None, - pre_pop_name_list=[], - eff_size_list=[], - rate_list=[], - weight_list=[], - return_1000=False, - ): - """ - the returned values is dv/dt - --> to get the hypothetical change of v for a single time step multiply with dt! - """ - ### reset network and set initial values - net.reset() - net.set_seed(0) - if not isinstance(variable_init_sampler, type(None)): - self.set_init_variables(population, variable_init_sampler) - ### set v and I_app - if not isinstance(v, type(None)): - population.v = v - if not isinstance(I_app, type(None)): - population.I_app = I_app - ### set the weights and rates of the binomial spike trains of the afferent populations - for pre_pop_idx, pre_pop_name in enumerate(pre_pop_name_list): - setattr(population, f"{pre_pop_name}_size", eff_size_list[pre_pop_idx]) - setattr( - population, - f"{pre_pop_name}_spike_prob", - (rate_list[pre_pop_idx] / 1000) * dt(), - ) - setattr(population, f"{pre_pop_name}_weight", weight_list[pre_pop_idx]) - ### simulate 2000 ms - net.simulate(2000) +class CreateVoltageClampEquations: + """ + Class to create voltage clamp equations from the given equations of a neuron model. 
+ The equations of the neuron model have to contain the voltage change equation in the + form of ... dv/dt ... = ... - if return_1000: - v_clamp_rec_arr = monitor.get("v_clamp_rec_sign")[:, 0] - return np.mean(v_clamp_rec_arr[-int(round(1000 / dt(), 0)) :]) - return population.v_clamp_rec[0] + Attributes: + eq_new (list[str]) + new equations of the neuron model with the voltage clamp + """ - def get_voltage_clamp_equations(self, init_arguments_dict, pop_name): + def __init__(self, eq: list[str], neuron_model_attributes_name_list: list[str]): """ - works with - dv/dt = ... - v += ... + Args: + eq (list[str]) + equations of the neuron model + neuron_model_attributes_name_list (list[str]) + list of the names of the attributes of the neuron model """ ### get the dv/dt equation from equations - ### find the line with dv/dt= or v+= or v= - eq = str(init_arguments_dict["equations"]) - eq = eq.splitlines() - line_is_v_list = [False] * len(eq) - ### check in which lines v is defined - for line_idx, line in enumerate(eq): - line_is_v_list[line_idx] = self.get_line_is_v(line) - ### raise error if no v or multiple times v - if True not in line_is_v_list or sum(line_is_v_list) > 1: - raise ValueError( - f"ERROR model_configurator create_net_single_voltage_clamp: In the equations of the neurons has to be exactly a single line which defines dv/dt or v, not given for population {pop_name}" - ) - ### set the v equation - eq_v = eq[line_is_v_list.index(True)] - - ### if equation type is v += ... --> just take right side - if "+=" in eq_v: - ### create the new equations for the ANNarchy neuron - ### create two lines, the voltage clamp line v+=0 and the - ### right sight of v+=... 
separately - eq_new_0 = f"v_clamp_rec_sign = {eq_v.split('+=')[1]}" - eq_new_1 = f"v_clamp_rec = fabs({eq_v.split('+=')[1]})" - eq_new_2 = "v_clamp_rec_pre = v_clamp_rec" - eq_new_3 = "v+=0" - ### remove old v line and insert new lines - del eq[line_is_v_list.index(True)] - eq.insert(line_is_v_list.index(True), eq_new_0) - eq.insert(line_is_v_list.index(True), eq_new_1) - eq.insert(line_is_v_list.index(True), eq_new_2) - eq.insert(line_is_v_list.index(True), eq_new_3) - eq = "\n".join(eq) - ### return new neuron equations - return eq - - ### if equation type is dv/dt = ... --> get the right side of dv/dt=... - ### transform eq_v - ### remove whitespaces - ### remove tags and store them for later - ### TODO replace random distributions and mathematical expressions which may be on the left side - eq_v = eq_v.replace(" ", "") - eq_v = eq_v.replace("dv/dt", "delta_v") - eq_tags_list = eq_v.split(":") - eq_v = eq_tags_list[0] - if len(eq_tags_list) > 1: - tags = eq_tags_list[1] - else: - tags = None + eq_v, eq_v_idx = self.get_eq_v(eq=eq) - ### split the equation at "=" and move everything on one side (other side = 0) - eq_v_splitted = eq_v.split("=") - left_side = eq_v_splitted[0] - right_side = "right_side" - eq_v_one_side = f"{right_side}-({left_side})" - - ### prepare the sympy equation generation - attributes_name_list = self.neuron_model_attributes_dict[pop_name] - attributes_tuple = symbols(",".join(attributes_name_list)) - ### for each attribute of the neuron a sympy symbol - attributes_sympy_dict = { - key: attributes_tuple[attributes_name_list.index(key)] - for key in attributes_name_list - } - ### furhter create symbols for delta_v and right_side - attributes_sympy_dict["delta_v"] = Symbol("delta_v") - attributes_sympy_dict["right_side"] = Symbol("right_side") - - ### now replace the symbolds in the eq_v string with the dictionary items - eq_v_replaced = replace_names_with_dict( - expression=eq_v_one_side, - name_of_dict="attributes_sympy_dict", - 
dictionary=attributes_sympy_dict, - ) + ### prepare the equation string for solving + ### TODO replace random distributions and mathematical expressions which may be on the left side + eq_v, tags = self.prepare_eq_v(eq_v=eq_v) - ### from this string get the sympy equation expression - eq_sympy = eval(eq_v_replaced) + ### solve equation to delta_v (which is dv/dt) + result = self.solve_delta_v(eq_v, neuron_model_attributes_name_list) - ### solve the equation to delta_v - result = solve(eq_sympy, attributes_sympy_dict["delta_v"], dict=True) - if len(result) != 1: - raise ValueError( - f"ERROR model_configurator create_net_single_voltage_clamp: Could not find solution for dv/dt for neuronmodel of population {pop_name}!" - ) - result = str(result[0][attributes_sympy_dict["delta_v"]]) + ### insert the new equation lines for v_clamp and remove the old dv/dt line + self.eq_new = self.replace_delta_v( + result=result, eq=eq, eq_v_idx=eq_v_idx, tags=tags + ) - ### replace right_side by the actual right side - result = result.replace("right_side", f"({eq_v_splitted[1]})") + def replace_delta_v( + self, result: str, eq: list[str], eq_v_idx: int, tags: str = None + ): + """ + Replace the dv/dt line with the voltage clamp lines. 
- ### TODO replace mathematical expressions and random distributions back to previous + Args: + result (str) + right side of the dv/dt equation + eq (list[str]) + equations of the neuron model + eq_v_idx (int) + index of the dv/dt line + tags (str) + tags of the dv/dt line - ### now create the new equations for the ANNarchy neuron - ### create three lines, the voltage clamp line "dv/dt=0", - ### the obtained line which would be the right side of dv/dt, - ### and this right side sotred from the previous time step - ### v_clamp_rec should be an absolute value + Returns: + eq (list[str]) + new equations of the neuron model with the voltage clamp + """ + ### create the line for recording voltage clamp (right side of dv/dt) eq_new_0 = f"v_clamp_rec_sign = {result}" + ### create the line for the absolute voltage clamp eq_new_1 = f"v_clamp_rec = fabs({result})" + ### create the line for the absolute voltage clamp from the previous time step eq_new_2 = "v_clamp_rec_pre = v_clamp_rec" - ### add stored tags to new dv/dt equation + ### create the voltage clamp line "dv/dt=0" with tags if they exist if not isinstance(tags, type(None)): eq_new_3 = f"dv/dt=0 : {tags}" else: eq_new_3 = "dv/dt=0" - ### remove old v line and insert new three lines - del eq[line_is_v_list.index(True)] - eq.insert(line_is_v_list.index(True), eq_new_0) - eq.insert(line_is_v_list.index(True), eq_new_1) - eq.insert(line_is_v_list.index(True), eq_new_2) - eq.insert(line_is_v_list.index(True), eq_new_3) - eq = "\n".join(eq) + ### remove old v line and insert new three lines, order is important + del eq[eq_v_idx] + eq.insert(eq_v_idx, eq_new_0) + eq.insert(eq_v_idx, eq_new_1) + eq.insert(eq_v_idx, eq_new_2) + eq.insert(eq_v_idx, eq_new_3) ### return new neuron equations return eq def get_line_is_v(self, line: str): """ - check if a equation string contains dv/dt or v= or v+= + Check if the line contains the definition of dv/dt. 
+ + Args: + line (str) + line of the equations of the neuron model + + Returns: + line_is_v (bool) + True if the line contains the definition of dv/dt, False otherwise """ if "v" not in line: return False @@ -3364,537 +3056,103 @@ def get_line_is_v(self, line: str): if "dv/dt" in line: return True - ### check for v update - if ("v=" in line or "v+=" in line) and line.startswith("v"): - return True - return False - def get_line_is_g_ampa(self, line: str): - """ - check if a equation string contains dg_ampa/dt + def get_eq_v(self, eq: list[str]): """ - - ### remove whitespaces - line = line.replace(" ", "") - - ### check for dv/dt - if "dv/dt" in line: - return True - - ### check for v update - if ("v=" in line or "v+=" in line) and line.startswith("v"): - return True - - return False - - def get_init_neuron_variables_for_psp(self, net, pop, v_rest, I_app_hold): - """ - get the variables of the given population after simulating 2000 ms + Get the dv/dt equation from the equations of the neuron model. 
Args: - net: ANNarchy network - the network which contains the pop - - pop: ANNarchy population - the population whose variables are obtained + eq (list[str]) + equations of the neuron model + Returns: + eq_v (str) + dv/dt equation + eq_v_idx (int) + index of the dv/dt line """ - ### reset neuron and deactivate input and set v_rest - net.reset() - pop.v = v_rest - pop.I_app = I_app_hold - - ### get the variables of the neuron after 5000 ms - net.simulate(5000) - var_name_list = list(pop.variables) - var_arr = np.zeros((1, len(var_name_list))) - get_arr = np.array([getattr(pop, var_name) for var_name in pop.variables]) - var_arr[0, :] = get_arr[:, 0] - - ### create a sampler with the one data sample - sampler = self.var_arr_sampler(var_arr, var_name_list) - return sampler - - class var_arr_sampler: - def __init__(self, var_arr, var_name_list) -> None: - self.var_arr_shape = var_arr.shape - self.is_const = ( - np.std(var_arr, axis=0) <= np.mean(np.absolute(var_arr), axis=0) / 1000 + ### get the dv/dt equation from equations + ### find the line with dv/dt= or v+= or v= + line_is_v_list = [False] * len(eq) + ### check in which lines v is defined + for line_idx, line in enumerate(eq): + line_is_v_list[line_idx] = self.get_line_is_v(line) + ### raise error if no v or multiple times v + if True not in line_is_v_list or sum(line_is_v_list) > 1: + raise ValueError( + "In the equations of the neurons has to be exactly a single line which defines dv/dt!" 
) - self.constant_arr = var_arr[0, self.is_const] - self.not_constant_val_arr = var_arr[:, np.logical_not(self.is_const)] - self.var_name_list = var_name_list - - def sample(self, n=1, seed=0): - """ - Args: - n: int, optional, default=1 - number of samples - - seed: int, optional, default=0 - seed for rng - """ - ### get random idx - rng = np.random.default_rng(seed=seed) - random_idx_arr = rng.integers(low=0, high=self.var_arr_shape[0], size=n) - ### sample with random idx - sample_arr = self.not_constant_val_arr[random_idx_arr] - ### create return array - ret_arr = np.zeros((n,) + self.var_arr_shape[1:]) - ### add samples to return array - ret_arr[:, np.logical_not(self.is_const)] = sample_arr - ### add constant values to return array - ret_arr[:, self.is_const] = self.constant_arr - - return ret_arr - - def get_nr_many_neurons(self, nr_neurons, nr_networks): - """ - Splits the number of neurons in almost equally sized parts. - - Args: - nr_neurons: int - number of neurons which should be splitted - - nr_networks: int - number of networks over which the neurons should be equally distributed - """ - return self.divide_almost_equal(number=nr_neurons, num_parts=nr_networks) - - def get_max_weight_dict_for_pop(self, pop_name): - """ - get the weight dict for a single population - - Args: - pop_name: str - population name - - return: dict - keys = afferent projection names, values = max weights - """ - - ### loop over afferent projections - max_w_list = [] - for proj_name in self.afferent_projection_dict[pop_name]["projection_names"]: - ### find max weight for projection - max_weight_of_proj = self.get_max_weight_of_proj(proj_name=proj_name) - max_w_list.append(max_weight_of_proj) - self.afferent_projection_dict[pop_name]["max_weight"] = max_w_list - - ### remove weight key from self.afferent_projection_dict[pop_name] which was added during the process - self.afferent_projection_dict[pop_name].pop("weights") - - ### now create the dictionary structure for return - # 
def get_proj_dict(self, proj_name):
    """
    Collect the available information about a single projection.

    Args:
        proj_name (str):
            projection name

    Returns:
        dict with keys:
            pre_pop_name, pre_pop_size, post_pop_name, proj_target_type,
            idx_proj, spike_frequency, proj_weight, g_max, proj_max_weight,
            proj_prob
        Entries which are not determined yet (weights, g_max, max_weight)
        are returned as None.
    """
    ### pre- and post-population infos
    pre_pop_name = self.pre_pop_name_dict[proj_name]
    pre_pop_size = self.pre_pop_size_dict[proj_name]
    post_pop_name = self.post_pop_name_dict[proj_name]
    ### index of the projection within the afferent projections of the post pop
    idx_proj = self.afferent_projection_dict[post_pop_name][
        "projection_names"
    ].index(proj_name)
    proj_target_type = self.afferent_projection_dict[post_pop_name]["target"][
        idx_proj
    ]
    ### expected spike frequency = target rate * connection prob * pre size
    f_t = self.afferent_projection_dict[post_pop_name]["target firing rate"][
        idx_proj
    ]
    p = self.afferent_projection_dict[post_pop_name]["probability"][idx_proj]
    s = self.afferent_projection_dict[post_pop_name]["size"][idx_proj]
    spike_frequency = f_t * p * s
    ### optional entries which may not be set yet --> fall back to None
    ### fixed: bare except replaced by the exceptions a missing/unset entry
    ### can actually raise (missing key, bad index, None container)
    try:
        proj_weight = self.afferent_projection_dict[post_pop_name]["weights"][
            idx_proj
        ]
    except (KeyError, IndexError, TypeError):
        proj_weight = None
    try:
        g_max = self.g_max_dict[post_pop_name][proj_target_type]
    except (KeyError, TypeError):
        g_max = None
    try:
        proj_max_weight = self.afferent_projection_dict[post_pop_name][
            "max_weight"
        ][idx_proj]
    except (KeyError, IndexError, TypeError):
        proj_max_weight = None

    return {
        "pre_pop_name": pre_pop_name,
        "pre_pop_size": pre_pop_size,
        "post_pop_name": post_pop_name,
        "proj_target_type": proj_target_type,
        "idx_proj": idx_proj,
        "spike_frequency": spike_frequency,
        "proj_weight": proj_weight,
        "g_max": g_max,
        "proj_max_weight": proj_max_weight,
        "proj_prob": p,
    }
def get_max_weight_of_proj(self, proj_name):
    """
    Find the max weight of a projection via incremental_continuous_bound_search.

    Increasing the weight of the projection increases the conductance g of the
    projection --> the weight is increased until g_max is reached.

    Args:
        proj_name (str):
            projection name

    Returns:
        w_max (number)
    """
    ### log task
    self.log(f"get w_max for {proj_name}")
    ### g_max for the projection
    g_max = self.get_proj_dict(proj_name)["g_max"]
    ### search the weight whose resulting conductance equals g_max
    self.log("search w_max with y(X) = g(w=X)")
    return self.incremental_continuous_bound_search(
        y_X=lambda X_val: self.get_g_of_single_proj(
            weight=X_val,
            proj_name=proj_name,
        ),
        y_bound=g_max,
        X_0=0,
        y_0=0,
    )


def get_g_of_single_proj(self, weight, proj_name):
    """
    Conductance g in the target population resulting from a given weight of a
    single projection (all other afferent weights set to zero).

    Args:
        weight (number):
            the weight of the projection
        proj_name (str):
            projection name

    Returns:
        g_val (number)
    """
    proj_dict = self.get_proj_dict(proj_name)
    post_pop_name = proj_dict["post_pop_name"]
    ### all afferent weights zero, except the investigated projection
    weight_list = [0] * self.nr_afferent_proj_dict[post_pop_name]
    weight_list[proj_dict["idx_proj"]] = weight
    self.afferent_projection_dict[post_pop_name]["weights"] = weight_list
    ### conductances resulting from the current weights
    mean_g = self.get_g_values_of_pop(post_pop_name)
    ### return the conductance of the target type of the projection
    return mean_g[proj_dict["proj_target_type"]]


def prepare_eq_v(self, eq_v: str):
    """
    Prepare the dv/dt equation string for solving with sympy.

    Args:
        eq_v (str):
            dv/dt equation

    Returns:
        eq_v (str):
            dv/dt equation (whitespace removed, dv/dt renamed to delta_v)
        tags (str | None):
            tags of the dv/dt equation (None if there are no tags)
    """
    ### strip whitespace and rename dv/dt to a sympy-friendly symbol
    compact = eq_v.replace(" ", "").replace("dv/dt", "delta_v")
    ### split off the tags (text after the first colon)
    parts = compact.split(":")
    equation = parts[0]
    tags = parts[1] if len(parts) > 1 else None
    return equation, tags
def get_g_values_of_pop(self, pop_name):
    """
    Calculate the average g_ampa and g_gaba of a population based on the
    weights currently stored in the afferent_projection_dict.

    Args:
        pop_name (str):
            population name

    Returns:
        dict {"ampa": mean_g_ampa, "gaba": mean_g_gaba}
    """
    ### collect spike times and the weight (g increase) of each spike,
    ### separately per target type; start with empty arrays so that
    ### np.concatenate below also works without any matching projection
    times_by_target = {"ampa": [np.array([])], "gaba": [np.array([])]}
    weights_by_target = {"ampa": [np.array([])], "gaba": [np.array([])]}
    for proj_name in self.afferent_projection_dict[pop_name]["projection_names"]:
        proj_dict = self.get_proj_dict(proj_name)
        target_type = proj_dict["proj_target_type"]
        spike_frequency = proj_dict["spike_frequency"]
        ### spike times over the simulation duration for this frequency
        if spike_frequency > 0:
            times_arr = self.get_spike_times_arr(spike_frequency=spike_frequency)
        else:
            times_arr = np.array([])
        times_by_target[target_type].append(times_arr)
        ### one weight entry per spike
        weights_by_target[target_type].append(
            np.ones(len(times_arr)) * proj_dict["proj_weight"]
        )

    ### merge the projections, sort by spike time and average per target type
    mean_g = {}
    for target_type in ["ampa", "gaba"]:
        all_times = np.concatenate(times_by_target[target_type])
        all_weights = np.concatenate(weights_by_target[target_type])
        order = np.argsort(all_times)
        mean_g[target_type] = self.get_mean_g(
            spike_times_arr=all_times[order],
            spike_weights_arr=all_weights[order],
            tau=self.tau_dict[pop_name][target_type],
        )
    return mean_g
def get_spike_times_arr(self, spike_frequency):
    """
    Generate Poisson spike times (ms) for a given spike frequency, covering
    the 500 ms init period plus the simulation duration.

    Args:
        spike_frequency (number):
            spike frequency in Hz

    Returns:
        np.ndarray with spike times in ms (< simulation_dur + 500)
    """
    ### expected number of spikes within 500 ms init + simulation_dur
    expected_nr_spikes = int(
        round((500 + self.simulation_dur) * (spike_frequency / 1000), 0)
    )
    ### inter-spike intervals in timesteps, then converted to ms
    isi_arr = poisson.rvs(
        (1 / (spike_frequency * (dt() / 1000))), size=expected_nr_spikes
    )
    isi_arr = isi_arr * dt()
    ### cumulative sum --> spike times; keep only spikes within the window
    spike_times_arr = np.cumsum(isi_arr)
    return spike_times_arr[spike_times_arr < (self.simulation_dur + 500)]


def solve_delta_v(self, eq_v: str, neuron_model_attributes_name_list: list[str]):
    """
    Solve the dv/dt equation for delta_v (which is dv/dt).

    Args:
        eq_v (str):
            dv/dt equation (dv/dt already replaced by delta_v)
        neuron_model_attributes_name_list (list[str]):
            names of the attributes of the neuron model

    Returns:
        solution_str (str):
            right side of the dv/dt equation

    Raises:
        ValueError: if sympy cannot solve the equation for delta_v
    """
    ### define the attributes of the neuron model as sympy symbols
    sp.symbols(",".join(neuron_model_attributes_name_list))
    ### define delta_v (and right_side placeholder) as sympy symbols
    delta_v, _ = sp.symbols("delta_v right_side")

    ### parse the equation string
    lhs, rhs_string = eq_v.split("=")
    lhs = sp.sympify(lhs)
    rhs = sp.sympify(rhs_string)

    ### form the equation and solve it for delta_v
    equation = sp.Eq(lhs, rhs)
    ### fixed: bare except --> except Exception with chained cause, so the
    ### sympy error is not silently discarded
    try:
        solution = sp.solve(equation, delta_v)[0]
    except Exception as err:
        raise ValueError("Could not find solution for dv/dt!") from err

    ### replace the right_side placeholder by the actual right side string
    solution_str = str(solution)
    return solution_str.replace("right_side", f"({rhs_string})")
def get_mean_g(self, spike_times_arr, spike_weights_arr, tau):
    """
    Mean conductance g for given spike times, corresponding weights
    (increases of g) and decay time constant.

    Args:
        spike_times_arr (np.ndarray):
            1d array containing spike times in ms
        spike_weights_arr (np.ndarray):
            1d array containing the weights corresponding to the spike times
        tau (number):
            time constant of the exponential decay of g in ms

    Returns:
        mean conductance (0 if the summed weights are not positive)
    """
    ### TODO instead of calculating the mean, create a conductance trace for
    ### the simulation time
    if not (np.sum(spike_weights_arr) > 0):
        return 0
    ### mean weight and mean inter-spike interval
    mean_weight = np.mean(spike_weights_arr)
    mean_isi = np.mean(np.diff(spike_times_arr))
    ### steady-state mean of an exponentially decaying conductance driven at
    ### the mean ISI: w / (exp(isi/tau) - 1)
    ### NOTE(review): with fewer than two spikes np.diff is empty and the
    ### result is nan (same as in the original implementation)
    return mean_weight / ((1 / np.exp(-mean_isi / tau)) - 1)
def get_rate_parallel(
    idx,
    net,
    population: Population,
    variable_init_sampler,
    monitor: Monitor,
    I_app_arr,
    weight_list: list,
    pre_pop_name_list: list,
    rate_list: list,
    eff_size_list: list,
    simulation_dur: int,
):
    """
    Obtain the firing rates of the neurons of `population` within the network
    given by `idx` for the given inputs.

    Args:
        idx (int):
            network index given by the parallel_run function
        net (object):
            network object given by the parallel_run function
        population (Population):
            population of the network whose rates are obtained
        variable_init_sampler (object):
            sampler with a .sample() function to get initial variable values
        monitor (Monitor):
            monitor of the network recording spikes of the population
        I_app_arr (array):
            input values for I_app (one per neuron)
        weight_list (list):
            weight of each afferent poisson spike trace
        pre_pop_name_list (list of str):
            names of the afferent populations
        rate_list (list):
            firing rate of each afferent poisson spike trace
        eff_size_list (list):
            effective size of each afferent population
        simulation_dur (int):
            simulation duration in ms (after the 500 ms init period)

    Returns:
        f_arr (np.ndarray):
            firing rate of each neuron of the population
    """
    ### reset and set init values
    net.reset()
    ### sample a single set of init values and use it for all neurons
    variable_init_arr = variable_init_sampler.sample(1, seed=0)
    var_name_list = variable_init_sampler.var_name_list
    variable_init_arr = np.array([variable_init_arr[0]] * len(population))
    ### fixed: iterate the variable names directly; the original iterated
    ### enumerate(population.variables), which yields (index, name) tuples,
    ### so the membership test below never matched and no init value was set
    for var_name in population.variables:
        if var_name in var_name_list:
            set_val = variable_init_arr[:, var_name_list.index(var_name)]
            setattr(population, var_name, set_val)

    ### set the weights and rates of the poisson spike traces of the
    ### afferent populations
    for pre_pop_idx, pre_pop_name in enumerate(pre_pop_name_list):
        setattr(population, f"{pre_pop_name}_size", eff_size_list[pre_pop_idx])
        setattr(
            population,
            f"{pre_pop_name}_spike_prob",
            (rate_list[pre_pop_idx] / 1000) * dt(),
        )
        setattr(population, f"{pre_pop_name}_weight", weight_list[pre_pop_idx])

    ### set the I_app
    population.I_app = I_app_arr

    ### simulate 500 ms initial duration + simulation_dur
    ### (removed: an unreachable debug-plotting branch that was guarded by
    ### `"stn" in population.name and False`, i.e. dead code)
    net.simulate(500 + simulation_dur)

    ### rate of each neuron within the last simulation_dur ms
    spike_dict = monitor.get("spike")
    f_arr = np.zeros(len(population))
    for idx_n, n in enumerate(spike_dict.keys()):
        time_list = np.array(spike_dict[n])
        nbr_spks = np.sum((time_list > (500 / dt())).astype(int))
        f_arr[idx_n] = nbr_spks / (simulation_dur / 1000)
    return f_arr
-This determines max synaptic conductances and weights of all afferent projections of the model populations and returns a dictionary with max weights.""" - -_p_g_after_get_weights = ( - lambda template_weight_dict, template_synaptic_load_dict, template_synaptic_contribution_dict: f"""Now either set the weights of all projections directly or first set the synaptic load of the populations and the synaptic contributions of the afferent projections. -You can set the weights using the function .set_weights() which requires a weight_dict as argument. -Use this template for the weight_dict: + ### Define the attributes of the neuron model as sympy symbols + sp.symbols(",".join(neuron_model_attributes_name_list)) + ### Define delta_v and right_side as sympy symbols + delta_v, _ = sp.symbols("delta_v right_side") -{template_weight_dict} + ### Parse the equation string + lhs, rhs_string = eq_v.split("=") + lhs = sp.sympify(lhs) + rhs = sp.sympify(rhs_string) -The values within the template are the maximum weight values. + ### Form the equation + equation = sp.Eq(lhs, rhs) + ### Solve the equation for delta_v + try: + solution = sp.solve(equation, delta_v)[0] + except: + raise ValueError("Could not find solution for dv/dt!") -You can set the synaptic load and contribution using the function .set_syn_load() which requires a synaptic_load_dict or a single number between 0 and 1 for the synaptic load of the populations and a synaptic_contribution_dict for the synaptic contributions to the synaptic load of the afferent projections. -Use this template for the synaptic_load_dict: - -{template_synaptic_load_dict} - -'ampa_load' and 'gaba_load' are placeholders, replace them with values between 0 and 1. - -Use this template for the synaptic_contribution_dict: - -{template_synaptic_contribution_dict} + ### Get the solution as a string + solution_str = str(solution) -The shown contributions of the afferent projections are based on the assumption that the maximum weights are used. 
The contributions of all afferent projections of a single population have to sum up to 1! -""" -) + ### replace right_side by the actual right side string + solution_str = solution_str.replace("right_side", f"({rhs_string})") -_p_g_after_set_syn_load = """Synaptic loads and contributions, i.e. weights set. Now call .get_base to obtain the baseline currents for the model populations. With .set_base you can directly set these baselines and the current weights in the model and compile the model. -""" + return solution_str diff --git a/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp_old.py b/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp_old.py new file mode 100644 index 0000000..4d472a1 --- /dev/null +++ b/src/CompNeuroPy/examples/model_configurator/model_configurator_cnp_old.py @@ -0,0 +1,4020 @@ +from CompNeuroPy import ( + cnp_clear, + compile_in_folder, + data_obj, + evaluate_expression_with_dict, + timing_decorator, + print_df, + save_variables, + load_variables, + clear_dir, + CompNeuroModel, + CompNeuroMonitors, + PlotRecordings, +) +from CompNeuroPy.system_functions import _find_folder_with_prefix +from CompNeuroPy.neuron_models import poisson_neuron +from ANNarchy import ( + Population, + get_population, + Monitor, + Network, + get_projection, + dt, + parallel_run, + simulate, + reset, + Neuron, + simulate_until, + Uniform, + get_current_step, + projections, + populations, +) + +# from ANNarchy.core.Global import _network +import numpy as np +from scipy.interpolate import interp1d, interpn +from scipy.signal import find_peaks, argrelmin +import matplotlib.pyplot as plt +import inspect +import textwrap +import os +import itertools +from tqdm import tqdm +import multiprocessing +import importlib.util +from time import time, strftime +import datetime +from sympy import symbols, Symbol, sympify, solve +from hyperopt import fmin, tpe, hp +import pandas as pd +from scipy.stats import poisson +from ANNarchy.extensions.bold 
def __init__(
    self,
    model: CompNeuroModel,
    target_firing_rate_dict,
    interpolation_grid_points=10,
    max_psp=10,
    do_not_config_list=None,
    print_guide=False,
    I_app_variable="I_app",
) -> None:
    """
    Args:
        model (CompNeuroModel):
            it's not important if the model is created or compiled, but after
            running the model_configurator only the given model will exist,
            so do not create anything else in ANNarchy!
        target_firing_rate_dict (dict):
            keys = population names of the model which should be configured,
            values = target firing rates in Hz
        interpolation_grid_points (int, optional, default=10):
            points used for the interpolation of the f-I-g curve per axis
        max_psp (int, optional, default=10):
            maximum post synaptic potential in mV
        do_not_config_list (list | None, optional, default=None):
            population names which should not be configured
            (fixed: was a mutable default argument `[]`, which is shared
            across calls; None now acts as the "empty list" sentinel)
        print_guide (bool, optional, default=False):
            print information about what you could do with model_configurator
        I_app_variable (str, optional, default="I_app"):
            name of the variable representing the applied current
            TODO: not implemented yet, default value is always used
    """
    if do_not_config_list is None:
        do_not_config_list = []
    self.model = model
    self.target_firing_rate_dict = target_firing_rate_dict
    ### populations to configure = target dict keys minus excluded ones
    self.pop_name_list = list(target_firing_rate_dict.keys())
    for do_not_pop_name in do_not_config_list:
        self.pop_name_list.remove(do_not_pop_name)
    ### initialize the per-population bookkeeping dicts with None
    for attr_name in (
        "I_app_max_dict",
        "g_max_dict",
        "tau_dict",
        "nr_afferent_proj_dict",
        "net_many_dict",
        "net_single_dict",
        "net_single_v_clamp_dict",
        "max_weight_dict",
        "variable_init_sampler_dict",
        "f_I_g_curve_dict",
        "I_f_g_curve_dict",
        "afferent_projection_dict",
        "neuron_model_dict",
        "neuron_model_parameters_dict",
        "neuron_model_attributes_dict",
        "possible_rates_dict",
        "extreme_firing_rates_df_dict",
        "prepare_psp_dict",
    ):
        setattr(
            self, attr_name, {pop_name: None for pop_name in self.pop_name_list}
        )
    ### set max psp for a single spike
    self.max_psp_dict = {pop_name: max_psp for pop_name in self.pop_name_list}
    ### print things
    self.log_exist = False
    self.caller_name = ""
    self.log("model configurator log:")
    self.print_guide = print_guide
    ### simulation things
    self.simulation_dur = 5000
    self.simulation_dur_estimate_time = 50
    self.nr_neurons_per_net = 100

    ### do things for which the model needs to be created (it will not be
    ### available later)
    self.analyze_model()

    ### get reduced model
    self.model_reduced = _CreateReducedModel(
        model=self.model,
        reduced_size=100,
        do_create=False,
        do_compile=False,
        verbose=True,
    ).model_reduced

    ### print guide
    self._p_g(_p_g_1)
def analyze_model(self):
    """
    Prepare the creation of the single-neuron and many-neuron networks.

    Temporarily creates the model to read out neuron models, afferent
    projections and conductance time constants, then clears ANNarchy again.
    """
    ### clear ANNarchy and create the model
    cnp_clear()
    self.model.create(do_compile=False)

    ### neuron model, its init parameters and attribute names per population
    for pop_name in self.pop_name_list:
        pop = get_population(pop_name)
        self.neuron_model_dict[pop_name] = pop.neuron_type
        self.neuron_model_parameters_dict[pop_name] = pop.init.items()
        self.neuron_model_attributes_dict[pop_name] = pop.attributes

    ### afferent projection dict per population (model needed!)
    for pop_name in self.pop_name_list:
        self.log(f"get the afferent_projection_dict for {pop_name}")
        self.afferent_projection_dict[pop_name] = self.get_afferent_projection_dict(
            pop_name=pop_name
        )

    ### time constants of g_ampa and g_gaba per population
    for pop_name in self.pop_name_list:
        pop = get_population(pop_name)
        self.tau_dict[pop_name] = {"ampa": pop.tau_ampa, "gaba": pop.tau_gaba}

    ### pre/post population names and pre sizes per projection
    self.post_pop_name_dict = {}
    self.pre_pop_name_dict = {}
    self.pre_pop_size_dict = {}
    for proj_name in self.model.projections:
        proj = get_projection(proj_name)
        self.post_pop_name_dict[proj_name] = proj.post.name
        self.pre_pop_name_dict[proj_name] = proj.pre.name
        self.pre_pop_size_dict[proj_name] = proj.pre.size

    ### clear ANNarchy --> the model is not available anymore
    cnp_clear()


def get_afferent_projection_dict(self, pop_name):
    """
    Build a dictionary containing projection_names, target firing rate,
    probability, size and target for each afferent projection of the
    specified population.

    Args:
        pop_name (str):
            population name

    Returns:
        dict of lists, aligned by projection index

    Raises:
        AssertionError: if the model is not created
    """
    ### the model has to exist to inspect its projections
    if not self.model.created:
        error_msg = "ERROR model_configurator get_afferent_projection_dict: the model has to be created!"
        self.log(error_msg)
        raise AssertionError(error_msg)

    ### afferent projections = projections whose post population is pop_name
    afferent_projection_dict = {
        "projection_names": [
            projection
            for projection in self.model.projections
            if get_projection(projection).post.name == pop_name
        ]
    }
    self.nr_afferent_proj_dict[pop_name] = len(
        afferent_projection_dict["projection_names"]
    )

    ### resting-state target rates and connectivity infos per projection
    afferent_projection_dict["target firing rate"] = []
    afferent_projection_dict["probability"] = []
    afferent_projection_dict["size"] = []
    afferent_projection_dict["target"] = []
    for projection in afferent_projection_dict["projection_names"]:
        proj = get_projection(projection)
        ### target firing rate of the presynaptic population
        afferent_projection_dict["target firing rate"].append(
            self.target_firing_rate_dict[proj.pre.name]
        )
        ### probability; _connection_args only if connect_fixed_prob
        ### (i.e. connector_name==Random)
        afferent_projection_dict["probability"].append(proj._connection_args[0])
        ### size of the presynaptic population
        afferent_projection_dict["size"].append(len(proj.pre))
        ### target type (ampa/gaba)
        afferent_projection_dict["target"].append(proj.target)

    return afferent_projection_dict
def get_max_syn(self, cache=True, clear=False):
    """
    Get the max-weight dictionary for all populations given in
    target_firing_rate_dict.

    Args:
        cache (bool, optional, default=True):
            load/save results from/to ./.model_configurator_cache
        clear (bool, optional, default=False):
            clear the cache first (forces recomputation)

    Returns:
        self.max_weight_dict:
            keys = population names, values = dicts with afferent projection
            names and their max weights
    """
    ### clear cache to create new cache
    if cache and clear:
        self.log("clear cache of get_max_syn")
        clear_dir("./.model_configurator_cache/get_max_syn")

    ### check cache for get_max_syn
    cache_worked = False
    if cache:
        ### fixed: bare except --> except Exception (cache load stays
        ### best-effort, but no longer swallows e.g. KeyboardInterrupt)
        try:
            loaded_variables_dict = load_variables(
                name_list=[
                    "net_single_dict",
                    "prepare_psp_dict",
                    "I_app_max_dict",
                    "g_max_dict",
                    "syn_contr_dict",
                    "syn_load_dict",
                ],
                path="./.model_configurator_cache/get_max_syn",
            )
            (
                self.net_single_dict,
                self.prepare_psp_dict,
                self.I_app_max_dict,
                self.g_max_dict,
                self.syn_contr_dict,
                self.syn_load_dict,
            ) = loaded_variables_dict.values()
            ### dummy network for single network, real network for
            ### single_v_clamp (single_v_clamp needed in get_base)
            self.create_single_neuron_networks(
                single_net=False, single_net_v_clamp=True, prepare_psp=False
            )
            cache_worked = True
        except Exception:
            cache_worked = False

    if not cache_worked:
        ### create single neuron networks
        self.create_single_neuron_networks()

        ### get max synaptic things with single neuron networks
        for pop_name in self.pop_name_list:
            self.log(pop_name)
            ### get max I_app and max weights (i.e. g_ampa, g_gaba)
            txt = f"get max I_app, g_ampa and g_gaba using network_single for {pop_name}"
            print(txt)
            self.log(txt)
            I_app_max, g_ampa_max, g_gaba_max = self.get_max_syn_currents(
                pop_name=pop_name,
            )
            self.I_app_max_dict[pop_name] = I_app_max
            self.g_max_dict[pop_name] = {
                "ampa": g_ampa_max,
                "gaba": g_gaba_max,
            }

        ### obtain the synaptic contributions assuming max weights
        self.syn_contr_dict = {}
        for pop_name in self.pop_name_list:
            self.syn_contr_dict[pop_name] = {}
            for target_type in ["ampa", "gaba"]:
                self.log(f"get synaptic contributions for {pop_name} {target_type}")
                self.syn_contr_dict[pop_name][target_type] = (
                    self.get_syn_contr_dict(
                        pop_name=pop_name,
                        target_type=target_type,
                        use_max_weights=True,
                        normalize=True,
                    )
                )

        ### create the synaptic load template dict
        self.syn_load_dict = {}
        for pop_name in self.pop_name_list:
            self.syn_load_dict[pop_name] = []
            if "ampa" in self.afferent_projection_dict[pop_name]["target"]:
                self.syn_load_dict[pop_name].append("ampa_load")
            if "gaba" in self.afferent_projection_dict[pop_name]["target"]:
                self.syn_load_dict[pop_name].append("gaba_load")

        ### save variables in cache; do not cache ANNarchy objects
        net_single_dict_to_cache = {
            key: {"variable_init_sampler": val["variable_init_sampler"]}
            for key, val in self.net_single_dict.items()
        }
        save_variables(
            variable_list=[
                net_single_dict_to_cache,
                self.prepare_psp_dict,
                self.I_app_max_dict,
                self.g_max_dict,
                self.syn_contr_dict,
                self.syn_load_dict,
            ],
            name_list=[
                "net_single_dict",
                "prepare_psp_dict",
                "I_app_max_dict",
                "g_max_dict",
                "syn_contr_dict",
                "syn_load_dict",
            ],
            path="./.model_configurator_cache/get_max_syn",
        )

    ### only return synaptic contributions smaller 1
    template_synaptic_contribution_dict = (
        self.get_template_synaptic_contribution_dict(given_dict=self.syn_contr_dict)
    )

    self._p_g(
        _p_g_after_get_weights(
            template_weight_dict=self.g_max_dict,
            template_synaptic_load_dict=self.syn_load_dict,
            template_synaptic_contribution_dict=template_synaptic_contribution_dict,
        )
    )
    return self.max_weight_dict


def get_syn_contr_dict(
    self, pop_name: str, target_type: str, use_max_weights=False, normalize=False
) -> dict:
    """
    Relative synaptic contribution of each afferent projection of a
    population for a given target type.

    Weights are taken from the afferent_projection_dict; if a projection has
    no weight yet (or use_max_weights is set) the max weight is used.

    Args:
        pop_name (str):
            population name
        target_type (str):
            target type of the afferent projections ("ampa" or "gaba")
        use_max_weights (bool, optional, default=False):
            if True the max weights are used instead of the stored weights
        normalize (bool, optional, default=False):
            if True the contributions are normalized to sum up to 1

    Returns:
        rel_syn_contr_dict (dict):
            keys = projection names, values = relative synaptic contributions
    """
    ### g_max has to be obtained already
    ### fixed: `assert not isinstance(x, type(None))` --> `x is not None`
    assert (
        self.g_max_dict[pop_name][target_type] is not None
    ), "ERROR, get_rel_syn_contr_list: g_max have to be obtained already"
    ### contribution = spike frequency * weight, per matching projection
    proj_name_list = []
    rel_syn_contr_list = []
    for proj_name in self.afferent_projection_dict[pop_name]["projection_names"]:
        proj_dict = self.get_proj_dict(proj_name)
        weight = proj_dict["proj_weight"]
        if weight is None or use_max_weights:
            weight = self.g_max_dict[pop_name][target_type]
        if proj_dict["proj_target_type"] == target_type:
            rel_syn_contr_list.append(proj_dict["spike_frequency"] * weight)
            proj_name_list.append(proj_name)
    ### optionally normalize the contributions to sum up to 1
    if normalize:
        rel_syn_contr_arr = np.array(rel_syn_contr_list)
        rel_syn_contr_list = (
            rel_syn_contr_arr / np.sum(rel_syn_contr_arr)
        ).tolist()
    ### combine projection names and contributions into a dict
    return dict(zip(proj_name_list, rel_syn_contr_list))
def create_single_neuron_networks(
    self, single_net=True, single_net_v_clamp=True, prepare_psp=True
):
    """
    Create the single-neuron networks for all configured populations.

    Args:
        single_net (bool, optional, default=True):
            create the standard single-neuron network
            (otherwise only a dummy network is created)
        single_net_v_clamp (bool, optional, default=True):
            create the voltage-clamp variant
            (otherwise only a dummy network is created)
        prepare_psp (bool, optional, default=True):
            determine v_rest and the corresponding I_app_hold
    """
    ### start from a clean ANNarchy state
    cnp_clear()

    for pop_name in self.pop_name_list:
        progress_txt = f"create network_single for {pop_name}"
        print(progress_txt)
        self.log(progress_txt)

        ### network with the standard neuron (or a dummy placeholder)
        if single_net:
            self.net_single_dict[pop_name] = self.create_net_single(
                pop_name=pop_name
            )
        else:
            dummy_net = Network()
            dummy_pop = Population(
                1,
                neuron=Neuron(equations="r=1"),
                name=f"dummy_single_{pop_name}",
            )
            dummy_mon = Monitor(dummy_pop, ["r"])
            dummy_net.add([dummy_pop, dummy_mon])

        ### network with the voltage-clamp neuron (or a dummy placeholder)
        if single_net_v_clamp:
            self.net_single_v_clamp_dict[pop_name] = (
                self.create_net_single_voltage_clamp(pop_name=pop_name)
            )
        else:
            dummy_net_v_clamp = Network()
            dummy_pop_v_clamp = Population(
                1,
                neuron=Neuron(equations="r=1"),
                name=f"dummy_single_v_clamp_{pop_name}",
            )
            dummy_mon_v_clamp = Monitor(dummy_pop_v_clamp, ["r"])
            dummy_net_v_clamp.add([dummy_pop_v_clamp, dummy_mon_v_clamp])

        ### get v_rest and corresponding I_app_hold
        if prepare_psp:
            self.prepare_psp_dict[pop_name] = self.find_v_rest_for_psp(
                pop_name, do_plot=False
            )
def create_net_single(self, pop_name):
    """
    Create a compiled single-neuron network for the neuron type of pop_name.

    A v_before_psp variable and a v_psp_thresh parameter are added so that a
    stop condition can detect when the membrane potential crosses the psp
    threshold (used for recording psps).

    Args:
        pop_name (str):
            population name

    Returns:
        dict with keys: net, population, monitor, variable_init_sampler
    """
    ### collect the constructor arguments of the original neuron model
    neuron_model = self.neuron_model_dict[pop_name]
    arg_name_list = list(Neuron.__init__.__code__.co_varnames)
    for skip_name in ("self", "name", "description"):
        arg_name_list.remove(skip_name)
    init_arguments_dict = {
        arg_name: getattr(neuron_model, arg_name) for arg_name in arg_name_list
    }

    ### prepend v_before_psp = v to the equations (for the stop condition)
    equation_lines = str(init_arguments_dict["equations"]).splitlines()
    equation_lines.insert(0, "v_before_psp = v")
    init_arguments_dict["equations"] = "\n".join(equation_lines)

    ### append the psp threshold parameter
    parameter_lines = str(init_arguments_dict["parameters"]).splitlines()
    parameter_lines.append("v_psp_thresh = 0 : population")
    init_arguments_dict["parameters"] = "\n".join(parameter_lines)

    ### neuron model with the extended equations/parameters
    extended_neuron_model = Neuron(**init_arguments_dict)

    ### single-neuron population; stops when v crosses v_psp_thresh
    single_neuron = Population(
        1,
        neuron=extended_neuron_model,
        name=f"single_neuron_{pop_name}",
        stop_condition="((abs(v-v_psp_thresh)<0.01) and (abs(v_before_psp-v_psp_thresh)>0.01)): any",
    )
    ### set the attributes of the neuron
    for attr_name, attr_val in self.neuron_model_parameters_dict[pop_name]:
        setattr(single_neuron, attr_name, attr_val)

    ### monitor, network, compile
    mon_single = Monitor(single_neuron, ["spike", "v"])
    net_single = Network()
    net_single.add([single_neuron, mon_single])
    compile_in_folder(
        folder_name=f"single_net_{pop_name}", silent=True, net=net_single
    )

    ### sampler for initial variable values (after a long init simulation)
    variable_init_sampler = self.get_init_neuron_variables(
        net_single, net_single.get(single_neuron)
    )

    return {
        "net": net_single,
        "population": net_single.get(single_neuron),
        "monitor": net_single.get(mon_single),
        "variable_init_sampler": variable_init_sampler,
    }


def get_init_neuron_variables(self, net, pop):
    """
    Record the population's variables over 2000 ms (after a 10000 ms init
    simulation without input) and return a sampler over the recorded values.

    Args:
        net:
            ANNarchy network which contains pop
        pop:
            ANNarchy population whose variables are recorded
    """
    ### reset neuron and deactivate input
    net.reset()
    pop.I_app = 0

    ### 10000 ms init duration
    net.simulate(10000)

    ### record every variable at every timestep for 2000 ms
    time_steps = int(2000 / dt())
    var_name_list = list(pop.variables)
    var_arr = np.zeros((time_steps, len(var_name_list)))
    for time_idx in range(time_steps):
        net.simulate(dt())
        step_values = np.array(
            [getattr(pop, var_name) for var_name in pop.variables]
        )
        var_arr[time_idx, :] = step_values[:, 0]
    net.reset()

    ### sampler over the recorded 2000 ms of variable values
    return self.var_arr_sampler(var_arr, var_name_list)
init_arguments_name_list + } + ### get new equations for voltage clamp + equations_new = self.get_voltage_clamp_equations(init_arguments_dict, pop_name) + init_arguments_dict["equations"] = equations_new + ### add v_clamp_rec_thresh to the parameters + parameters_line_split_list = str(init_arguments_dict["parameters"]).splitlines() + parameters_line_split_list.append("v_clamp_rec_thresh = 0 : population") + init_arguments_dict["parameters"] = "\n".join(parameters_line_split_list) + + ### for each afferent population create a binomial spike train equation string + ### add it to the equations + ### and add the related parameters to the parameters + + ### get the afferent populations + afferent_population_list = [] + proj_target_type_list = [] + for proj_name in self.afferent_projection_dict[pop_name]["projection_names"]: + proj_dict = self.get_proj_dict(proj_name) + pre_pop_name = proj_dict["pre_pop_name"] + afferent_population_list.append(pre_pop_name) + proj_target_type_list.append(proj_dict["proj_target_type"]) + + ### split the equations and parameters string + equations_line_split_list = str(init_arguments_dict["equations"]).splitlines() + + parameters_line_split_list = str(init_arguments_dict["parameters"]).splitlines() + + ### add the binomial spike train equations and parameters + ( + equations_line_split_list, + parameters_line_split_list, + ) = self.add_binomial_input( + equations_line_split_list, + parameters_line_split_list, + afferent_population_list, + proj_target_type_list, + ) + + ### combine string lines to multiline strings again + init_arguments_dict["parameters"] = "\n".join(parameters_line_split_list) + init_arguments_dict["equations"] = "\n".join(equations_line_split_list) + + ### create neuron model with new equations + neuron_model_new = Neuron(**init_arguments_dict) + + ### create the single neuron population + single_neuron_v_clamp = Population( + 1, + neuron=neuron_model_new, + name=f"single_neuron_v_clamp_{pop_name}", + ) + + ### set the 
attributes of the neuron + for attr_name, attr_val in self.neuron_model_parameters_dict[pop_name]: + setattr(single_neuron_v_clamp, attr_name, attr_val) + + ### create Monitor for single neuron + mon_single = Monitor(single_neuron_v_clamp, ["v_clamp_rec_sign"]) + + ### create network with single neuron + net_single = Network() + net_single.add([single_neuron_v_clamp, mon_single]) + compile_in_folder( + folder_name=f"single_v_clamp_net_{pop_name}", silent=True, net=net_single + ) + + ### network dict + net_single_dict = { + "net": net_single, + "population": net_single.get(single_neuron_v_clamp), + "monitor": net_single.get(mon_single), + } + + return net_single_dict + + def find_v_rest_for_psp(self, pop_name, do_plot=False): + """ + using both single networks to find v_rest and I_app_hold + """ + + ### find v where dv/dt is minimal with voltage clamp network (best = 0, it can only be >= 0) + self.log("search v_rest with y(X) = delta_v_2000(v=X) using grid search") + v_arr = np.linspace(-90, -20, 200) + v_clamp_arr = np.array( + [ + self.get_v_clamp_2000( + v=X_val, + net=self.net_single_v_clamp_dict[pop_name]["net"], + population=self.net_single_v_clamp_dict[pop_name]["population"], + ) + for X_val in v_arr + ] + ) + v_rest = np.min(v_arr[argrelmin(v_clamp_arr)[0]]) + if do_plot: + plt.figure() + plt.plot(v_arr, v_clamp_arr) + plt.axvline(v_rest, color="k") + plt.axhline(0, color="k", ls="dashed") + plt.savefig(f"v_clamp_{pop_name}.png") + plt.close("all") + + ### do again the simulation with the obtained v_rest to get the stady state values + detla_v_rest = ( + self.get_v_clamp_2000( + v=v_rest, + net=self.net_single_v_clamp_dict[pop_name]["net"], + population=self.net_single_v_clamp_dict[pop_name]["population"], + ) + * dt() + ) + obtained_variables = { + var_name: getattr( + self.net_single_v_clamp_dict[pop_name]["population"], var_name + ) + for var_name in self.net_single_v_clamp_dict[pop_name][ + "population" + ].variables + } + self.log( + f"for {pop_name} 
found v_rest={v_rest} with delta_v_2000(v=v_rest)={detla_v_rest}" + ) + + ### check if the neuron stays at v_rest with normal neuron + ### if it stays --> use new value as v_rest (its even a bit finer as before) + ### if it not stays --> find I_app which holds the membrane potential constant + v_rest_arr = self.get_new_v_rest_2000(pop_name, obtained_variables) + v_rest_arr_is_const = ( + np.std(v_rest_arr, axis=0) + <= np.mean(np.absolute(v_rest_arr), axis=0) / 1000 + ) + if v_rest_arr_is_const: + ### v_rest found, no I_app_hold needed + v_rest = v_rest_arr[-1] + I_app_hold = 0 + self.log(f"final v_rest = {v_rest_arr[-1]}") + else: + ### there is no v_rest i.e. neuron is self-active --> find smallest negative I_app to silence neuron + self.log( + "neuron seems to be self-active --> find smallest I_app to silence the neuron" + ) + + ### negative current initially reduces v + ### then v climbs back up + ### check if the second half of v is constant if yes fine if not increase negative I_app + ### find I_app_hold with incremental_continuous_bound_search + self.log("search I_app_hold with y(X) = CHANGE_OF_V(I_app=X)") + I_app_hold = -self.incremental_continuous_bound_search( + y_X=lambda X_val: self.get_v_rest_arr_const( + pop_name=pop_name, + obtained_variables=obtained_variables, + I_app=-X_val, + ), + y_bound=0, + X_0=0, + y_0=self.get_v_rest_arr_const( + pop_name=pop_name, + obtained_variables=obtained_variables, + I_app=0, + ), + X_increase=detla_v_rest, + accept_non_dicontinuity=True, + bound_type="greater", + ) + ### again simulate the neuron with the obtained I_app_hold to get the new v_rest + v_rest_arr = self.get_new_v_rest_2000( + pop_name, obtained_variables, I_app=I_app_hold + ) + v_rest = v_rest_arr[-1] + self.log(f"I_app_hold = {I_app_hold}, resulting v_rest = {v_rest}") + + ### get the sampler for the initial variables + variable_init_sampler = self.get_init_neuron_variables_for_psp( + net=self.net_single_dict[pop_name]["net"], + 
pop=self.net_single_dict[pop_name]["population"], + v_rest=v_rest, + I_app_hold=I_app_hold, + ) + + return { + "v_rest": v_rest, + "I_app_hold": I_app_hold, + "variable_init_sampler": variable_init_sampler, + } + + def get_v_rest_arr_const( + self, pop_name, obtained_variables, I_app, return_bool=False + ): + """ + sets I_app and obtained varaibles in single neuron + simulates 2000 ms and returns how much the v changes + 0 = constant, negative = not constant + """ + v_rest_arr = self.get_new_v_rest_2000(pop_name, obtained_variables, I_app=I_app) + v_rest_arr = v_rest_arr[len(v_rest_arr) // 2 :] + + if return_bool: + return 0 <= np.mean(np.absolute(v_rest_arr), axis=0) / 1000 - np.std( + v_rest_arr, axis=0 + ) + else: + return np.mean(np.absolute(v_rest_arr), axis=0) / 1000 - np.std( + v_rest_arr, axis=0 + ) + + def get_new_v_rest_2000( + self, pop_name, obtained_variables, I_app=None, do_plot=True + ): + """ + use single_net to simulate 2000 ms and return v + """ + net: Network = self.net_single_dict[pop_name]["net"] + pop = self.net_single_dict[pop_name]["population"] + monitor = self.net_single_dict[pop_name]["monitor"] + net.reset() + ### set variables + for var_name, var_val in obtained_variables.items(): + if var_name in pop.variables: + setattr(pop, var_name, var_val) + if not isinstance(I_app, type(None)): + pop.I_app = I_app + ### simulate + net.simulate(2000) + v_arr = monitor.get("v")[:, 0] + + if do_plot: + plt.figure() + plt.title(f"{pop.I_app}") + plt.plot(v_arr) + plt.savefig(f"tmp_v_rest_{pop_name}.png") + plt.close("all") + + return v_arr + + def get_nr_spikes_from_v_rest_2000( + self, pop_name, obtained_variables, I_app=None, do_plot=True + ): + """ + use single_net to simulate 2000 ms and return number spikes + """ + net = self.net_single_dict[pop_name]["net"] + pop = self.net_single_dict[pop_name]["population"] + mon = self.net_single_dict[pop_name]["monitor"] + net.reset() + ### set variables + for var_name, var_val in 
obtained_variables.items(): + if var_name in pop.variables: + setattr(pop, var_name, var_val) + if not isinstance(I_app, type(None)): + pop.I_app = I_app + ### simulate + simulate(2000) + ### get spikes + spike_dict = mon.get("spike") + nr_spikes = len(spike_dict[0]) + return nr_spikes + + def log(self, txt): + caller_frame = inspect.currentframe().f_back + caller_name = caller_frame.f_code.co_name + + if caller_name == self.caller_name: + txt = f"{textwrap.indent(str(txt), ' ')}" + else: + txt = f"[{caller_name}]:\n{textwrap.indent(str(txt), ' ')}" + + self.caller_name = caller_name + + if self.log_exist: + with open("model_conf_log", "a") as f: + print(txt, file=f) + else: + with open("model_conf_log", "w") as f: + print(txt, file=f) + self.log_exist = True + + def _p_g(self, txt): + """ + prints guiding text + """ + print_width = min([os.get_terminal_size().columns, 80]) + + if self.print_guide: + print("\n[model_configurator guide]:") + for line in txt.splitlines(): + wrapped_text = textwrap.fill( + line, width=print_width - 5, replace_whitespace=False + ) + wrapped_text = textwrap.indent(wrapped_text, " |") + print(wrapped_text) + print("") + + def _p_w(self, txt): + """ + prints warning + """ + print_width = min([os.get_terminal_size().columns, 80]) + + print("\n[model_configurator WARNING]:") + for line in str(txt).splitlines(): + wrapped_text = textwrap.fill( + line, width=print_width - 5, replace_whitespace=False + ) + wrapped_text = textwrap.indent(wrapped_text, " |") + print(wrapped_text) + print("") + + def get_base(self): + """ + Obtain the baseline currents for the configured populations to obtian the target firing rates + with the currently set weights, set by .set_weights or .set_syn_load + + return: + I_base_dict, dict + Dictionary with baseline curretns for all configured populations. + """ + ### TODO: current problem: model is without noise... but how large and for what is noise??? + ### neurons all behave equally (e.g. 
spike at same time), this changes due to different inputs ("noise" in input) + ### this could also be prevented by initializing all neurons differently (along there periodic u-v curve) + ### or by adding noise to conductances or baseline current + ### thenthe question is, how is the relation between added noise and the noise in the input + ### TODO: I've decided for noise depending on the input current (scaled by specified SNR) + ### without input there is no noise, decorrelate neurons by random initial values + ### TODO: current idea is: to find max syn things the noise has to be deactivated and to find baseline currents the noise has to be activated + ### so single neuron networks should be without noise, an then here noise should be activated, maybe requirement for model conf will be a variable called noise to turn on and off noise + for pop_name in self.pop_name_list: + for proj_name in self.afferent_projection_dict[pop_name][ + "projection_names" + ]: + proj_dict = self.get_proj_dict(proj_name) + print(f"set weight of {proj_name} to {proj_dict['proj_weight']}") + + ### set the weights of the normal model + model = self._set_weights_of_model(mode=0) + + ### set initial variables of populations (do not initialize all neurons the same) + for pop_name in self.pop_name_list: + population = get_population(pop_name) + variable_init_sampler = self.net_single_dict[pop_name][ + "variable_init_sampler" + ] + self.set_init_variables(population, variable_init_sampler) + + ### record and simulate + mon_dict = {pop_name: ["spike"] for pop_name in model.populations} + mon = CompNeuroMonitors(mon_dict=mon_dict) + mon.start() + simulate(1000) + recordings = mon.get_recordings() + recording_times = mon.get_recording_times() + plan = { + "position": list(range(1, len(model.populations) + 1)), + "compartment": model.populations, + "variable": ["spike"] * len(model.populations), + "format": ["hybrid"] * len(model.populations), + } + PlotRecordings( + 
figname="model_conf_normal_model.png", + recordings=recordings, + recording_times=recording_times, + shape=(len(plan["position"]), 1), + plan=plan, + ) + + ### set the weights of the reduced model + model = self._set_weights_of_model(mode=1) + + ### set initial variables of populations (do not initialize all neurons the same) + for pop_name in self.pop_name_list: + population = get_population(f"{pop_name}_reduced") + variable_init_sampler = self.net_single_dict[pop_name][ + "variable_init_sampler" + ] + self.set_init_variables(population, variable_init_sampler) + + ### record and simulate + mon_dict = {f"{pop_name}_reduced": ["spike"] for pop_name in mon_dict.keys()} + mon = CompNeuroMonitors(mon_dict=mon_dict) + mon.start() + simulate(1000) + recordings = mon.get_recordings() + recording_times = mon.get_recording_times() + plan["compartment"] = [ + f"{pop_name}_reduced" for pop_name in plan["compartment"] + ] + PlotRecordings( + figname="model_conf_reduced_model.png", + recordings=recordings, + recording_times=recording_times, + shape=(len(plan["position"]), 1), + plan=plan, + ) + ### next check if populations which should not be tuned have the correct firing rates, if not warning that the populations are tuned but if the rate of the not tuned populations changes this might also change the tuned populations' rates + ### next activate noise and then performe search algorithm ith reduced model with input varaibles = I_app of populations and output variables = firing rates of populations + ### TODO get base with reduced model + quit() + + def _set_weights_of_model(self, mode=0): + """ + Set the weights of the model to the current weights from the + afferent_projection_dict. 
+ """ + ### clear ANNarchy + cnp_clear() + + ### create the original model + if mode == 0: + model = self.model + elif mode == 1: + model = self.model_reduced + model.create() + + for pop_name in self.pop_name_list: + for proj_name in self.afferent_projection_dict[pop_name][ + "projection_names" + ]: + if mode == 0: + ### set weght of projection + proj_dict = self.get_proj_dict(proj_name) + get_projection(proj_name).w = proj_dict["proj_weight"] + elif mode == 1: + ### set weight of the projection in the conductance-calculating + ### input current population + proj_dict = self.get_proj_dict(proj_name) + proj_weight = proj_dict["proj_weight"] + proj_target_type = proj_dict["proj_target_type"] + setattr( + get_population(f"{pop_name}_{proj_target_type}_aux"), + f"weights_{proj_name}", + proj_weight, + ) + return model + + def find_base_current(self, net_many_dict): + """ + search through whole I_app space + for each population simulate a network with 10000 neurons, each neuron has a different I_app value + g_ampa and g_gaba values are internally created using + the weigths stored in the afferent_projection dict + and target firing rates stored in the target_firing_rate_dict + """ + + I_app_arr_list = [] + weight_list_list = [] + pre_pop_name_list_list = [] + rate_list_list = [] + eff_size_list_list = [] + ### get lists which define the current weights to the afferent populations + ### get lists which define the current rates of the afferent populations + ### get lists with the names of the afferent populations + ### the length of the lists has to be the number of networks i.e. 
the number of populations + for pop_name in self.pop_name_list: + ### get the weights, names, rates of the afferent populations + weight_list = self.afferent_projection_dict[pop_name]["weights"] + proj_name_list = self.afferent_projection_dict[pop_name]["projection_names"] + pre_pop_name_list = [ + self.get_proj_dict(proj_name)["pre_pop_name"] + for proj_name in proj_name_list + ] + rate_list = self.get_rate_list_for_pop(pop_name) + eff_size_list = self.get_eff_size_list_for_pop(pop_name) + ### get correct magnitude of I_app using the voltage clamp networks + I_app_magnitude = self.get_I_app_magnitude( + pop_name, + pre_pop_name_list=pre_pop_name_list, + eff_size_list=eff_size_list, + rate_list=rate_list, + weight_list=weight_list, + ) + ### get the I_app_arr + I_app_arr = np.linspace( + I_app_magnitude, + I_app_magnitude + self.I_app_max_dict[pop_name], + self.nr_neurons_per_net, + ) + ### append these lists to the list for all post populations i.e. networks + weight_list_list.append(weight_list) + pre_pop_name_list_list.append(pre_pop_name_list) + rate_list_list.append(rate_list) + eff_size_list_list.append(eff_size_list) + I_app_arr_list.append(I_app_arr) + + ### create list with variable_init_samplers of populations + variable_init_sampler_list = [ + self.net_single_dict[pop_name]["variable_init_sampler"] + for pop_name in self.pop_name_list + ] + + ### get firing rates obtained with all I_app values + ### rates depend on the current weights and the current target firing rates + nr_networks = len(self.pop_name_list) + possible_firing_rates_list_list = parallel_run( + method=get_rate_parallel, + networks=net_many_dict["network_list"], + **{ + "population": net_many_dict["population_list"], + "variable_init_sampler": variable_init_sampler_list, + "monitor": net_many_dict["monitor_list"], + "I_app_arr": I_app_arr_list, + "weight_list": weight_list_list, + "pre_pop_name_list": pre_pop_name_list_list, + "rate_list": rate_list_list, + "eff_size_list": 
eff_size_list_list, + "simulation_dur": [self.simulation_dur] * nr_networks, + }, + ) + + ### catch if target firing rate in any population cannot be reached + I_app_best_dict = {} + target_firing_rate_changed = False + for pop_idx, pop_name in enumerate(self.pop_name_list): + target_firing_rate = self.target_firing_rate_dict[pop_name] + possible_firing_rates_arr = np.array( + possible_firing_rates_list_list[pop_idx] + ) + I_app_arr = I_app_arr_list[pop_idx] + print(f"firing rates for pop {pop_name}") + print(f"{I_app_arr}") + print(f"{possible_firing_rates_arr}\n") + possible_f_min = possible_firing_rates_arr.min() + possible_f_max = possible_firing_rates_arr.max() + if not ( + target_firing_rate >= possible_f_min + and target_firing_rate <= possible_f_max + ): + new_target_firing_rate = np.array([possible_f_min, possible_f_max])[ + np.argmin( + np.absolute( + np.array([possible_f_min, possible_f_max]) + - target_firing_rate + ) + ) + ] + ### if the possible firing rates are too small --> what (high) firing rate could be maximally reached with a hypothetical g_ampa_max and I_app_max + ### if the possible firing rates are too large --> waht (low) firing rate could be reached with g_gaba_max and -I_app_max + warning_txt = f"WARNING get_possible_rates: target firing rate of population {pop_name}({target_firing_rate}) cannot be reached.\nPossible range with current synaptic load: [{round(possible_f_min,1)},{round(possible_f_max,1)}].\nSet firing rate to {round(new_target_firing_rate,1)}." 
+ self._p_w(warning_txt) + self.log(warning_txt) + self.target_firing_rate_dict[pop_name] = new_target_firing_rate + target_firing_rate = self.target_firing_rate_dict[pop_name] + target_firing_rate_changed = True + ### find best I_app for reaching target firing rate + best_idx = np.argmin( + np.absolute(possible_firing_rates_arr - target_firing_rate) + ) + ### take all possible firing rates in range target firing rate +-10 + lower_rate = max([0, target_firing_rate - 10]) + higher_rate = target_firing_rate + 10 + rate_range_idx_arr = ( + (possible_firing_rates_arr >= lower_rate).astype(int) + * (possible_firing_rates_arr <= higher_rate).astype(int) + ).astype(bool) + possible_firing_rates_arr = possible_firing_rates_arr[rate_range_idx_arr] + I_app_arr = I_app_arr[rate_range_idx_arr] + ### now do linear fit to find I_app for target firing rate + if len(I_app_arr) > 10: + reg = LinearRegression().fit( + X=possible_firing_rates_arr.reshape(-1, 1), y=I_app_arr + ) + I_app_best_dict[pop_name] = reg.predict( + np.array([[target_firing_rate]]) + )[0] + else: + I_app_best_dict[pop_name] = 0 + plt.figure(figsize=(6.4, 4.8 * 2)) + plt.subplot(211) + plt.plot(I_app_arr, possible_firing_rates_arr) + plt.axhline(target_firing_rate, color="k") + plt.axvline(I_app_best_dict[pop_name], color="r") + plt.subplot(212) + plt.plot( + I_app_arr, np.absolute(possible_firing_rates_arr - target_firing_rate) + ) + plt.tight_layout() + plt.savefig(f"possible_firing_rate_{pop_name}.png", dpi=300) + plt.close("all") + + if target_firing_rate_changed and False: + print_df(pd.DataFrame(self.afferent_projection_dict)) + print_df(pd.DataFrame(self.g_max_dict)) + ### TODO cannot reach firing rates for example for thal because I_app_max is too small, this +100Hz method seems not to work well + ### maybe use the weights and a voltage clamp neuron to find I_app + ### like with I_app_hold + ### weights i.e. spike trains cause dv/dt to be e.g. 
extremely negative --> then find I_app to make dv/dt zero + ### this I_app should then be "near" the I_app needed to reach the target firing rate + quit() + + return [target_firing_rate_changed, I_app_best_dict] + + def get_I_app_magnitude( + self, + pop_name, + pre_pop_name_list=[], + eff_size_list=[], + rate_list=[], + weight_list=[], + ): + """ + Get the correct magnitude of I_app for the given population. + The correct magnitude is the magnitude which is to negate the synaptic currents caused by the afferent populations. + Use the curretn weights and rates from the afferent_projection_dict and target_firing_rate_dict. + """ + print(f"get v clamp of {pop_name}") + print(f"pre_pop_name_list: {pre_pop_name_list}") + print(f"eff_size_list: {eff_size_list}") + print(f"rate_list: {rate_list}") + print(f"weight_list: {weight_list}") + print(f"I_app_hold: {self.prepare_psp_dict[pop_name]['I_app_hold']}") + print(f"v_rest: {self.prepare_psp_dict[pop_name]['v_rest']}") + + detla_v_rest_0 = ( + self.get_v_clamp_2000( + net=self.net_single_v_clamp_dict[pop_name]["net"], + population=self.net_single_v_clamp_dict[pop_name]["population"], + monitor=self.net_single_v_clamp_dict[pop_name]["monitor"], + v=None, + I_app=0, + variable_init_sampler=self.prepare_psp_dict[pop_name][ + "variable_init_sampler" + ], + pre_pop_name_list=pre_pop_name_list, + eff_size_list=eff_size_list, + rate_list=rate_list, + weight_list=weight_list, + return_1000=True, + ) + * dt() + ) + + if detla_v_rest_0 > 0: + I_app_sign = -1 + else: + I_app_sign = 1 + + self.log("search I_app_magnitude with y(X) = detla_v(I_app=X)") + I_app_magnitude = I_app_sign * self.incremental_continuous_bound_search( + y_X=lambda X_val: self.get_v_clamp_2000( + net=self.net_single_v_clamp_dict[pop_name]["net"], + population=self.net_single_v_clamp_dict[pop_name]["population"], + monitor=self.net_single_v_clamp_dict[pop_name]["monitor"], + v=None, + I_app=I_app_sign * X_val, + 
variable_init_sampler=self.prepare_psp_dict[pop_name][ + "variable_init_sampler" + ], + pre_pop_name_list=pre_pop_name_list, + eff_size_list=eff_size_list, + rate_list=rate_list, + weight_list=weight_list, + return_1000=True, + ) + * dt(), + y_bound=0, + X_0=0, + y_0=detla_v_rest_0, + alpha_abs=0.005, + ) + + print(f"I_app_magnitude: {I_app_magnitude}\n") + + return I_app_magnitude + + def get_rate_list_for_pop(self, pop_name): + """ + get the rate list for the afferent populations of the given population + """ + rate_list = [] + for proj_name in self.afferent_projection_dict[pop_name]["projection_names"]: + proj_dict = self.get_proj_dict(proj_name) + pre_pop_name = proj_dict["pre_pop_name"] + pre_rate = self.target_firing_rate_dict[pre_pop_name] + rate_list.append(pre_rate) + return rate_list + + def get_eff_size_list_for_pop(self, pop_name): + """ + get the effective size list for the afferent populations of the given population + """ + eff_size_list = [] + for proj_name in self.afferent_projection_dict[pop_name]["projection_names"]: + proj_dict = self.get_proj_dict(proj_name) + pre_pop_size = proj_dict["pre_pop_size"] + proj_prob = proj_dict["proj_prob"] + eff_size = int(round(pre_pop_size * proj_prob, 0)) + eff_size_list.append(eff_size) + return eff_size_list + + def set_base(self, I_base_dict=None, I_base_variable="base_mean"): + """ + Set baseline currents in model, compile model and set weights in model. + + Args: + I_base_dict: dict, optional, default=None + Dictionary with baseline currents for all populations, if None the baselines are obtained by .get_base + + I_base_variable: str, optional, default="mean_base" + Name of the variable which represents the baseline current in the configured populations. They all have to have the same variable. 
+ """ + ### check I_base_dict + if isinstance(I_base_dict, type(None)): + I_base_dict = self.get_base() + + ### clear annarchy, create model and set baselines and weights + cnp_clear() + self.model.create(do_compile=False) + ### set initial variables of populations + for pop_name in self.pop_name_list: + population = get_population(pop_name) + variable_init_sampler = self.net_single_dict[pop_name][ + "variable_init_sampler" + ] + self.set_init_variables(population, variable_init_sampler) + ### set baselines + for pop_name in I_base_dict.keys(): + get_val = getattr(get_population(pop_name), I_base_variable) + try: + set_val = np.ones(len(get_val)) * I_base_dict[pop_name] + except: + set_val = I_base_dict[pop_name] + setattr(get_population(pop_name), I_base_variable, set_val) + ### compile + self.model.compile() + ### set weights + for pop_name in self.pop_name_list: + for proj_idx, proj_name in enumerate( + self.afferent_projection_dict[pop_name]["projection_names"] + ): + weight_val = self.afferent_projection_dict[pop_name]["weights"][ + proj_idx + ] + get_projection(proj_name).w = weight_val + + return I_base_dict + + def set_init_variables(self, population, variable_init_sampler): + """ + Set the initial variables of the given population to the given values. 
+ """ + variable_init_arr = variable_init_sampler.sample(len(population), seed=0) + var_name_list = variable_init_sampler.var_name_list + for var_name in population.variables: + if var_name in var_name_list: + set_val = variable_init_arr[:, var_name_list.index(var_name)] + setattr(population, var_name, set_val) + + def get_time_in_x_sec(self, x): + """ + Args: + x: int + how many seconds add to the current time + + return: + formatted_future_time: str + string of the future time in HH:MM:SS + """ + # Get the current time + current_time = datetime.datetime.now() + + # Add 10 seconds to the current time + future_time = current_time + datetime.timedelta(seconds=x) + + # Format future_time as HH:MM:SS + formatted_future_time = future_time.strftime("%H:%M:%S") + + return formatted_future_time + + def get_interpolation(self): + """ + get the interpolations to + predict f with I_app, g_ampa and g_gaba + + sets the class variable self.f_I_g_curve_dict --> for each population a f_I_g_curve function + """ + + ### create model + net_many_dict = self.create_many_neuron_network() + + ### get interpolation data + txt = "get interpolation data..." 
+ print(txt) + self.log(txt) + ### for each population get the input arrays for I_app, g_ampa and g_gaba + ### while getting inputs define which values should be used later + input_dict = self.get_input_for_many_neurons_net() + + ### create list with variable_init_samplers of populations + variable_init_sampler_list = [ + self.net_single_dict[pop_name]["variable_init_sampler"] + for pop_name in self.pop_name_list + ] + + ### run the run_parallel with a reduced simulation duration and obtain a time estimate for the full duration + ### TODO use directly measureing simulation time to get time estimate + start = time() + parallel_run( + method=get_rate_parallel, + number=self.nr_networks, + **{ + "pop_name_list": [self.pop_name_list] * self.nr_networks, + "population_list": [list(net_many_dict["population_dict"].values())] + * self.nr_networks, + "variable_init_sampler_list": [variable_init_sampler_list] + * self.nr_networks, + "monitor_list": [list(net_many_dict["monitor_dict"].values())] + * self.nr_networks, + "I_app_list": input_dict["I_app_list"], + "g_ampa_list": input_dict["g_ampa_list"], + "g_gaba_list": input_dict["g_gaba_list"], + "simulation_dur": [dt()] * self.nr_networks, + }, + ) + reset() + end = time() + offset_time = end - start + start = time() + parallel_run( + method=get_rate_parallel, + number=self.nr_networks, + **{ + "pop_name_list": [self.pop_name_list] * self.nr_networks, + "population_list": [list(net_many_dict["population_dict"].values())] + * self.nr_networks, + "variable_init_sampler_list": [variable_init_sampler_list] + * self.nr_networks, + "monitor_list": [list(net_many_dict["monitor_dict"].values())] + * self.nr_networks, + "I_app_list": input_dict["I_app_list"], + "g_ampa_list": input_dict["g_ampa_list"], + "g_gaba_list": input_dict["g_gaba_list"], + "simulation_dur": [self.simulation_dur_estimate_time] + * self.nr_networks, + }, + ) + reset() + end = time() + time_estimate = np.clip( + round( + (end - start - offset_time) + * 
(self.simulation_dur / self.simulation_dur_estimate_time), + 0, + ), + 0, + None, + ) + + txt = f"start parallel_run of many neurons network on {self.nr_networks} threads, will take approx. {time_estimate} s (end: {self.get_time_in_x_sec(x=time_estimate)})..." + print(txt) + self.log(txt) + ### simulate the many neurons network with the input arrays splitted into the network populations sizes + ### and get the data of all populations + ### run_parallel + start = time() + f_rec_arr_list_list = parallel_run( + method=get_rate_parallel, + number=self.nr_networks, + **{ + "pop_name_list": [self.pop_name_list] * self.nr_networks, + "population_list": [list(net_many_dict["population_dict"].values())] + * self.nr_networks, + "variable_init_sampler_list": [variable_init_sampler_list] + * self.nr_networks, + "monitor_list": [list(net_many_dict["monitor_dict"].values())] + * self.nr_networks, + "I_app_list": input_dict["I_app_list"], + "g_ampa_list": input_dict["g_ampa_list"], + "g_gaba_list": input_dict["g_gaba_list"], + "simulation_dur": [self.simulation_dur] * self.nr_networks, + }, + ) + end = time() + txt = f"took {end-start} s" + print(txt) + self.log(txt) + + ### combine the list of outputs from parallel_run to one output per population + output_of_populations_dict = self.get_output_of_populations( + f_rec_arr_list_list, input_dict + ) + + ### create interpolation for each population + ### it can be a 1D to 3D interpolation, default (if everything works fine) is + ### 3D interpolation with "x": "I_app", "y": "g_ampa", "z": "g_gaba" + for pop_name in self.pop_name_list: + ### get whole input arrays + I_app_value_array = None + g_ampa_value_array = None + g_gaba_value_array = None + if self.I_app_max_dict[pop_name] > 0: + I_app_value_array = input_dict["I_app_arr_dict"][pop_name] + if self.g_max_dict[pop_name]["ampa"] > 0: + g_ampa_value_array = input_dict["g_ampa_arr_dict"][pop_name] + if self.g_max_dict[pop_name]["gaba"] > 0: + g_gaba_value_array = 
input_dict["g_gaba_arr_dict"][pop_name] + + ### get the interpolation + self.f_I_g_curve_dict[pop_name] = self.get_interp_3p( + values=output_of_populations_dict[pop_name], + model_conf_obj=self, + var_name_dict={"x": "I_app", "y": "g_ampa", "z": "g_gaba"}, + x=I_app_value_array, + y=g_ampa_value_array, + z=g_gaba_value_array, + ) + + self.did_get_interpolation = True + + ### with interpolation get the firing rates for all extreme values of I_app, g_ampa, g_gaba + for pop_name in self.pop_name_list: + self.extreme_firing_rates_df_dict[pop_name] = ( + self.get_extreme_firing_rates_df(pop_name) + ) + + def get_extreme_firing_rates_df(self, pop_name): + """ + get the firing rates for all extreme values of I_app, g_ampa, g_gaba + + Args: + pop_name: str + popualtion name + + return: + table_df: pandas dataframe + containing the firing rates for all extreme values of I_app, g_ampa, g_gaba + """ + I_app_list = [-self.I_app_max_dict[pop_name], self.I_app_max_dict[pop_name]] + g_ampa_list = [0, self.g_max_dict[pop_name]["ampa"]] + g_gaba_list = [0, self.g_max_dict[pop_name]["gaba"]] + ### create all combiniations of I_app_list, g_ampa_list, g_gaba_list in a single list + comb_list = self.get_all_combinations_of_lists( + [I_app_list, g_ampa_list, g_gaba_list] + ) + + ### get the firing rates for all combinations + f_list = [] + for I_app, g_ampa, g_gaba in comb_list: + f_list.append( + self.f_I_g_curve_dict[pop_name](x=I_app, y=g_ampa, z=g_gaba)[0] + ) + + ### now get the same for names + I_app_name_list = ["min", "max"] + g_ampa_name_list = ["min", "max"] + g_gaba_name_list = ["min", "max"] + ### create all combiniations of I_app_name_list, g_ampa_name_list, g_gaba_name_list in a single list + comb_name_list = self.get_all_combinations_of_lists( + [I_app_name_list, g_ampa_name_list, g_gaba_name_list] + ) + + ### create a dict as table with header I_app, g_ampa, g_gaba + table_dict = { + "I_app": np.array(comb_name_list)[:, 0].tolist(), + "g_ampa": 
np.array(comb_name_list)[:, 1].tolist(), + "g_gaba": np.array(comb_name_list)[:, 2].tolist(), + "f": f_list, + } + + ### create a pandas dataframe from the table_dict + table_df = pd.DataFrame(table_dict) + + return table_df + + def get_all_combinations_of_lists(self, list_of_lists): + """ + get all combinations of lists in a single list + example: [[1,2],[3,4],[5,6]] --> [[1,3,5],[1,3,6],[1,4,5],[1,4,6],[2,3,5],[2,3,6],[2,4,5],[2,4,6]] + """ + return list(itertools.product(*list_of_lists)) + + def get_output_of_populations(self, f_rec_arr_list_list, input_dict): + """ + restructure the output of run_parallel so that for each population a single array with firing rates is obtained + + Args: + f_rec_arr_list_list: list of lists of arrays + first lists contain different network runs, second level lists contain arrays for the different populations + return: + output_pop_dict: dict of arrays + for each population a single array with firing rates + """ + output_pop_dict = {} + for pop_name in self.pop_name_list: + output_pop_dict[pop_name] = [] + ### first loop selecting the network + for f_rec_arr_list in f_rec_arr_list_list: + ### second loop selecting the population + for pop_idx, pop_name in enumerate(self.pop_name_list): + ### append the recorded values to the array of the corresponding population + output_pop_dict[pop_name].append(f_rec_arr_list[pop_idx]) + + ### concatenate the arrays of the individual populations + for pop_name in self.pop_name_list: + output_pop_dict[pop_name] = np.concatenate(output_pop_dict[pop_name]) + + ### use the input dict to only use values which should be used + ### lis of lists, first list level = networks, second list level = populations then you get array with input values + ### so same format as f_rec_arr_list_list + use_I_app_arr_list_list = input_dict["use_I_app_list"] + use_g_ampa_arr_list_list = input_dict["use_g_ampa_list"] + use_g_gaba_arr_list_list = input_dict["use_g_gaba_list"] + + ### now get for each population an array 
which contains the info if the values should be used + use_output_pop_dict = {} + for pop_name in self.pop_name_list: + use_output_pop_dict[pop_name] = [] + ### first loop selecting the network + for net_idx in range(len(use_I_app_arr_list_list)): + use_I_app_arr_list = use_I_app_arr_list_list[net_idx] + use_g_ampa_arr_list = use_g_ampa_arr_list_list[net_idx] + use_g_gaba_arr_list = use_g_gaba_arr_list_list[net_idx] + ### second loop selecting the population + for pop_idx, pop_name in enumerate(self.pop_name_list): + ### only use values if for all input values use is True + use_I_app_arr = use_I_app_arr_list[pop_idx] + use_g_ampa_arr = use_g_ampa_arr_list[pop_idx] + use_g_gaba_arr = use_g_gaba_arr_list[pop_idx] + use_value_arr = np.logical_and(use_I_app_arr, use_g_ampa_arr) + use_value_arr = np.logical_and(use_value_arr, use_g_gaba_arr) + ### append the recorded values to the array of the corresponding population + use_output_pop_dict[pop_name].append(use_value_arr) + + ### concatenate the arrays of the individual populations + for pop_name in self.pop_name_list: + use_output_pop_dict[pop_name] = np.concatenate( + use_output_pop_dict[pop_name] + ) + + ### finaly only use values defined by ues_output... 
+ for pop_name in self.pop_name_list: + output_pop_dict[pop_name] = output_pop_dict[pop_name][ + use_output_pop_dict[pop_name] + ] + + return output_pop_dict + + def get_input_for_many_neurons_net(self): + """ + get the inputs for the parallel many neurons network simulation + + need a list of dicts, keys=pop_name, lsit=number of networks + """ + + ### create dicts with lists for the populations + I_app_arr_list_dict = {} + g_ampa_arr_list_dict = {} + g_gaba_arr_list_dict = {} + use_I_app_arr_list_dict = {} + use_g_ampa_arr_list_dict = {} + use_g_gaba_arr_list_dict = {} + I_app_arr_dict = {} + g_ampa_arr_dict = {} + g_gaba_arr_dict = {} + for pop_name in self.pop_name_list: + ### prepare grid for I, g_ampa and g_gaba + ### bounds + g_ampa_max = self.g_max_dict[pop_name]["ampa"] + g_gaba_max = self.g_max_dict[pop_name]["gaba"] + I_max = self.I_app_max_dict[pop_name] + + ### create value_arrays + I_app_value_array = np.linspace( + -I_max, I_max, self.nr_vals_interpolation_grid + ) + g_ampa_value_array = np.linspace( + 0, g_ampa_max, self.nr_vals_interpolation_grid + ) + g_gaba_value_array = np.linspace( + 0, g_gaba_max, self.nr_vals_interpolation_grid + ) + + ### store these value arrays for each pop + I_app_arr_dict[pop_name] = I_app_value_array + g_ampa_arr_dict[pop_name] = g_ampa_value_array + g_gaba_arr_dict[pop_name] = g_gaba_value_array + + ### create use values arrays + use_I_app_array = np.array([I_max > 0] * self.nr_vals_interpolation_grid) + use_g_ampa_array = np.array( + [g_ampa_max > 0] * self.nr_vals_interpolation_grid + ) + use_g_gaba_array = np.array( + [g_gaba_max > 0] * self.nr_vals_interpolation_grid + ) + ### use at least a single value + use_I_app_array[0] = True + use_g_ampa_array[0] = True + use_g_gaba_array[0] = True + + ### get all combinations (grid) of value_arrays + I_g_arr = np.array( + list( + itertools.product( + *[I_app_value_array, g_ampa_value_array, g_gaba_value_array] + ) + ) + ) + + ### get all combinations (grid) of the use values 
arrays + use_I_g_arr = np.array( + list( + itertools.product( + *[use_I_app_array, use_g_ampa_array, use_g_gaba_array] + ) + ) + ) + + ### individual value arrays from combinations + I_app_arr = I_g_arr[:, 0] + g_ampa_arr = I_g_arr[:, 1] + g_gaba_arr = I_g_arr[:, 2] + + ### individual use values arrays from combinations + use_I_app_arr = use_I_g_arr[:, 0] + use_g_ampa_arr = use_I_g_arr[:, 1] + use_g_gaba_arr = use_I_g_arr[:, 2] + + ### split the arrays for the networks + networks_size_list = np.array( + [self.nr_neurons_of_pop_per_net] * self.nr_networks + ) + split_idx_arr = np.cumsum(networks_size_list)[:-1] + ### after this split the last array may be smaller than the others --> append zeros + ### value arrays + I_app_arr_list = np.split(I_app_arr, split_idx_arr) + g_ampa_arr_list = np.split(g_ampa_arr, split_idx_arr) + g_gaba_arr_list = np.split(g_gaba_arr, split_idx_arr) + ### use value arrays + use_I_app_arr_list = np.split(use_I_app_arr, split_idx_arr) + use_g_ampa_arr_list = np.split(use_g_ampa_arr, split_idx_arr) + use_g_gaba_arr_list = np.split(use_g_gaba_arr, split_idx_arr) + + ### check if last network is smaler + if self.nr_last_network < self.nr_neurons_of_pop_per_net: + ### if yes --> append zeros to value arrays + ### and append False to use values arrays + nr_of_zeros_append = round( + self.nr_neurons_of_pop_per_net - self.nr_last_network, 0 + ) + ### value arrays + I_app_arr_list[-1] = np.concatenate( + [I_app_arr_list[-1], np.zeros(nr_of_zeros_append)] + ) + g_ampa_arr_list[-1] = np.concatenate( + [g_ampa_arr_list[-1], np.zeros(nr_of_zeros_append)] + ) + g_gaba_arr_list[-1] = np.concatenate( + [g_gaba_arr_list[-1], np.zeros(nr_of_zeros_append)] + ) + ### use values arrays + use_I_app_arr_list[-1] = np.concatenate( + [use_I_app_arr_list[-1], np.array([False] * nr_of_zeros_append)] + ) + use_g_ampa_arr_list[-1] = np.concatenate( + [use_g_ampa_arr_list[-1], np.array([False] * nr_of_zeros_append)] + ) + use_g_gaba_arr_list[-1] = np.concatenate( + 
[use_g_gaba_arr_list[-1], np.array([False] * nr_of_zeros_append)] + ) + + ### store the array lists into the population dicts + ### value arrays + I_app_arr_list_dict[pop_name] = I_app_arr_list + g_ampa_arr_list_dict[pop_name] = g_ampa_arr_list + g_gaba_arr_list_dict[pop_name] = g_gaba_arr_list + ### use value arrays + use_I_app_arr_list_dict[pop_name] = use_I_app_arr_list + use_g_ampa_arr_list_dict[pop_name] = use_g_ampa_arr_list + use_g_gaba_arr_list_dict[pop_name] = use_g_gaba_arr_list + + ### restructure the dict of lists into a list for networks of list for populations + I_app_list = [] + g_ampa_list = [] + g_gaba_list = [] + use_I_app_list = [] + use_g_ampa_list = [] + use_g_gaba_list = [] + for net_idx in range(self.nr_networks): + ### value arrays + I_app_list.append( + [ + I_app_arr_list_dict[pop_name][net_idx] + for pop_name in self.pop_name_list + ] + ) + g_ampa_list.append( + [ + g_ampa_arr_list_dict[pop_name][net_idx] + for pop_name in self.pop_name_list + ] + ) + g_gaba_list.append( + [ + g_gaba_arr_list_dict[pop_name][net_idx] + for pop_name in self.pop_name_list + ] + ) + ### use values arrays + use_I_app_list.append( + [ + use_I_app_arr_list_dict[pop_name][net_idx] + for pop_name in self.pop_name_list + ] + ) + use_g_ampa_list.append( + [ + use_g_ampa_arr_list_dict[pop_name][net_idx] + for pop_name in self.pop_name_list + ] + ) + use_g_gaba_list.append( + [ + use_g_gaba_arr_list_dict[pop_name][net_idx] + for pop_name in self.pop_name_list + ] + ) + + return { + "I_app_list": I_app_list, + "g_ampa_list": g_ampa_list, + "g_gaba_list": g_gaba_list, + "use_I_app_list": use_I_app_list, + "use_g_ampa_list": use_g_ampa_list, + "use_g_gaba_list": use_g_gaba_list, + "I_app_arr_dict": I_app_arr_dict, + "g_ampa_arr_dict": g_ampa_arr_dict, + "g_gaba_arr_dict": g_gaba_arr_dict, + } + + for pop_name in self.pop_name_list: + ### prepare grid for I, g_ampa and g_gaba + ### bounds + g_ampa_max = self.g_max_dict[pop_name]["ampa"] + g_gaba_max = 
self.g_max_dict[pop_name]["gaba"] + I_max = self.I_app_max_dict[pop_name] + ### number of points for individual value arrays: I, g_ampa and g_gaba + number_of_points = np.round( + self.nr_neurons_net_many_total ** (1 / 3), 0 + ).astype(int) + ### create value_arrays + I_app_value_array = np.linspace(-I_max, I_max, number_of_points) + g_ampa_value_array = np.linspace(0, g_ampa_max, number_of_points) + g_gaba_value_array = np.linspace(0, g_gaba_max, number_of_points) + ### get all combinations (grid) of value_arrays + I_g_arr = np.array( + list( + itertools.product( + *[I_app_value_array, g_ampa_value_array, g_gaba_value_array] + ) + ) + ) + ### individual value arrays from combinations + I_app_arr = I_g_arr[:, 0] + g_ampa_arr = I_g_arr[:, 1] + g_gaba_arr = I_g_arr[:, 2] + + ### split the arrays into the sizes of the many-neuron networks + split_idx_arr = np.cumsum(self.nr_many_neurons_list[pop_name])[:-1] + + I_app_arr_list = np.split(I_app_arr, split_idx_arr) + g_ampa_arr_list = np.split(g_ampa_arr, split_idx_arr) + g_gaba_arr_list = np.split(g_gaba_arr, split_idx_arr) + + class get_interp_3p: + def __init__( + self, values, model_conf_obj, var_name_dict, x=None, y=None, z=None + ) -> None: + """ + x, y, and z are the increasing gid steps on the interpolation grid + set z=None to get 2D interpiolation + set y and z = None to get 1D interpolation + """ + self.x = x + self.y = y + self.z = z + self.values = values + self.model_conf_obj = model_conf_obj + self.var_name_dict = var_name_dict + + if ( + isinstance(self.x, type(None)) + and isinstance(self.y, type(None)) + and isinstance(self.z, type(None)) + ): + error_msg = ( + "ERROR get_interp_3p: at least one of x,y,z has to be an array" + ) + model_conf_obj.log(error_msg) + raise AssertionError(error_msg) + + def __call__(self, x=None, y=None, z=None): + ### check x + if isinstance(x, type(None)): + if not isinstance(self.x, type(None)): + error_msg = f"ERROR get_interp_3p: interpolation values for 
{self.var_name_dict['x']} were given but sample points are missing!" + self.model_conf_obj.log(error_msg) + raise AssertionError(error_msg) + tmp_x = 0 + else: + if isinstance(self.x, type(None)): + warning_txt = f"WARNING get_interp_3p: sample points for {self.var_name_dict['x']} are given but no interpolation values for {self.var_name_dict['x']} were given!" + self.model_conf_obj.log(warning_txt) + x = None + tmp_x = 0 + else: + tmp_x = x + + ### check y + if isinstance(y, type(None)): + if not isinstance(self.y, type(None)): + error_msg = f"ERROR get_interp_3p: interpolation values for {self.var_name_dict['y']} were given but sample points are missing!" + self.model_conf_obj.log(error_msg) + raise AssertionError(error_msg) + tmp_y = 0 + else: + if isinstance(self.y, type(None)): + warning_txt = f"WARNING get_interp_3p: sample points for {self.var_name_dict['y']} are given but no interpolation values for {self.var_name_dict['y']} were given!" + self.model_conf_obj.log(warning_txt) + y = None + tmp_y = 0 + else: + tmp_y = y + + ### check z + if isinstance(z, type(None)): + if not isinstance(self.y, type(None)): + error_msg = f"ERROR get_interp_3p: interpolation values for {self.var_name_dict['z']} were given but sample points are missing!" + self.model_conf_obj.log(error_msg) + raise AssertionError(error_msg) + tmp_z = 0 + else: + if isinstance(self.z, type(None)): + warning_txt = f"WARNING get_interp_3p: sample points for {self.var_name_dict['z']} are given but no interpolation values for {self.var_name_dict['z']} were given!" 
+ self.model_conf_obj._p_w(warning_txt) + self.model_conf_obj.log(warning_txt) + z = None + tmp_z = 0 + else: + tmp_z = z + + ### get input arrays + input_arr_dict = { + "x": np.array(tmp_x).reshape(-1), + "y": np.array(tmp_y).reshape(-1), + "z": np.array(tmp_z).reshape(-1), + } + + ### check if the arrays with size larger 1 have same size + size_arr = np.array([val.size for val in input_arr_dict.values()]) + mask = size_arr > 1 + if True in mask: + input_size = size_arr[mask][0] + if not (input_size == size_arr[mask]).all(): + raise ValueError( + "ERROR model_configurator get_interp_3p: x,y,z sample points have to be either single values or arrays. All arrays have to have same size" + ) + + ### if there are inputs only consisting of a single value --> duplicate to increase size if there are also array inputs + for idx, larger_1 in enumerate(mask): + if not larger_1 and True in mask: + val = input_arr_dict[list(input_arr_dict.keys())[idx]][0] + input_arr_dict[list(input_arr_dict.keys())[idx]] = ( + np.ones(input_size) * val + ) + + ### get the sample points + use_variable_names_list = ["x", "y", "z"] + if isinstance(x, type(None)): + use_variable_names_list.remove("x") + if isinstance(y, type(None)): + use_variable_names_list.remove("y") + if isinstance(z, type(None)): + use_variable_names_list.remove("z") + point_arr = np.array( + [input_arr_dict[var_name] for var_name in use_variable_names_list] + ).T + + ### get the grid points, only use these which are not None + use_variable_names_list = ["x", "y", "z"] + if isinstance(self.x, type(None)): + use_variable_names_list.remove("x") + if isinstance(self.y, type(None)): + use_variable_names_list.remove("y") + if isinstance(self.z, type(None)): + use_variable_names_list.remove("z") + + interpolation_grid_arr_dict = { + "x": self.x, + "y": self.y, + "z": self.z, + } + points = tuple( + [ + interpolation_grid_arr_dict[var_name] + for var_name in use_variable_names_list + ] + ) + + ### get shape of values + values_shape 
= tuple( + [ + interpolation_grid_arr_dict[var_name].size + for var_name in use_variable_names_list + ] + ) + + return interpn( + points=points, + values=self.values.reshape(values_shape), + xi=point_arr, + ) + + def set_syn_load(self, synaptic_load_dict, synaptic_contribution_dict=None): + """ + Args: + synaptic_load_dict: dict or number + either a dictionary with keys = all population names the model_configurator should configure + or a single number between 0 and 1 + The dictionary values should be lists which contain either 2 values for ampa and gaba load, + only 1 value if the population has only ampa or gaba input. + For the strucutre of the dictionary check the print_guide + + synaptic_contribution_dict: dict, optional, default=None + by default the synaptic contributions of all afferent projections is equal + one can define other contributions in this dict + give for each affernt projection the contribution to the synaptic load of the target population + For the strucutre of the dictionary check the print_guide + """ + + ### set synaptic load + ### is dict --> replace internal dict values + if isinstance(synaptic_load_dict, dict): + ### check if correct number of population + if len(list(synaptic_load_dict.keys())) != len( + list(self.syn_load_dict.keys()) + ): + error_msg = f"ERROR set_syn_load: wrong number of populations given with 'synaptic_load_dict' given={len(list(synaptic_load_dict.keys()))}, expected={len(list(self.syn_load_dict.keys()))}" + self.log(error_msg) + raise ValueError(error_msg) + ### loop over all populations + for pop_name in synaptic_load_dict.keys(): + ### cehck pop name + if pop_name not in list(self.syn_load_dict.keys()): + error_msg = f"ERROR set_syn_load: the given population {pop_name} is not within the list of populations which should be configured {self.pop_name_list}" + self.log(error_msg) + raise ValueError(error_msg) + value_list = synaptic_load_dict[pop_name] + ### check value list + if len(value_list) != 
len(self.syn_load_dict[pop_name]): + error_msg = f"ERROR set_syn_load: for population {pop_name}, {len(self.syn_load_dict[pop_name])} syn load values should be given but {len(value_list)} were given" + self.log(error_msg) + raise ValueError(error_msg) + if not ( + (np.array(value_list) <= 1).all() + and (np.array(value_list) >= 0).all() + ): + error_msg = f"ERROR set_syn_load: the values for synaptic loads should be equal or smaller than 1, given for population {pop_name}: {value_list}" + self.log(error_msg) + raise ValueError(error_msg) + ### replace internal values with given values + self.syn_load_dict[pop_name] = value_list + else: + ### is not a dict --> check number + try: + synaptic_load = float(synaptic_load_dict) + except: + error_msg = "ERROR set_syn_load: if synaptic_load_dict is not a dictionary it should be a single number!" + self.log(error_msg) + raise ValueError(error_msg) + if not (synaptic_load <= 1 and synaptic_load >= 0): + error_msg = "ERROR set_syn_load: value for synaptic_loadshould be equal or smaller than 1" + self.log(error_msg) + raise ValueError(error_msg) + ### replace internal values with given value + for pop_name in self.syn_load_dict.keys(): + for idx in range(len(self.syn_load_dict[pop_name])): + self.syn_load_dict[pop_name][idx] = synaptic_load + ### transform syn load dict in correct form with projection target type keys + syn_load_dict = {} + for pop_name in self.pop_name_list: + syn_load_dict[pop_name] = {} + if ( + "ampa" in self.afferent_projection_dict[pop_name]["target"] + and "gaba" in self.afferent_projection_dict[pop_name]["target"] + ): + syn_load_dict[pop_name]["ampa"] = self.syn_load_dict[pop_name][0] + syn_load_dict[pop_name]["gaba"] = self.syn_load_dict[pop_name][1] + elif "ampa" in self.afferent_projection_dict[pop_name]["target"]: + syn_load_dict[pop_name]["ampa"] = self.syn_load_dict[pop_name][0] + syn_load_dict[pop_name]["gaba"] = 0 + elif "gaba" in self.afferent_projection_dict[pop_name]["target"]: + 
syn_load_dict[pop_name]["ampa"] = 0 + syn_load_dict[pop_name]["gaba"] = self.syn_load_dict[pop_name][0] + self.syn_load_dict = syn_load_dict + + ### set synaptic contribution + if not isinstance(synaptic_contribution_dict, type(None)): + ### loop over all given populations + for pop_name in synaptic_contribution_dict.keys(): + ### check pop_name + if pop_name not in list(self.syn_contr_dict.keys()): + error_msg = f"ERROR set_syn_load: the given population {pop_name} is not within the list of populations which should be configured {self.pop_name_list}" + self.log(error_msg) + raise ValueError(error_msg) + ### loop over given projection target type (ampa,gaba) + for given_proj_target_type in synaptic_contribution_dict[ + pop_name + ].keys(): + ### check given target type + if not ( + given_proj_target_type == "ampa" + or given_proj_target_type == "gaba" + ): + error_msg = f"ERROR set_syn_load: with the synaptic_contribution_dict for each given population a 'ampa' and/or 'gaba' dictionary contianing the corresponding afferent projections should be given, given key={given_proj_target_type}" + self.log(error_msg) + raise ValueError(error_msg) + ### check if for the projection target type the correct number of projections is given + given_proj_name_list = list( + synaptic_contribution_dict[pop_name][ + given_proj_target_type + ].keys() + ) + internal_proj_name_list = list( + self.syn_contr_dict[pop_name][given_proj_target_type].keys() + ) + if len(given_proj_name_list) != len(internal_proj_name_list): + error_msg = f"ERROR set_syn_load: in synaptic_contribution_dict for population {pop_name} and target_type {given_proj_target_type} wrong number of projections is given\ngiven={given_proj_name_list}, expected={internal_proj_name_list}" + self.log(error_msg) + raise ValueError(error_msg) + ### check if given contributions for the target type sum up to 1 + given_contribution_arr = np.array( + list( + synaptic_contribution_dict[pop_name][ + given_proj_target_type + ].values() 
+ ) + ) + if round(given_contribution_arr.sum(), 6) != 1: + error_msg = f"ERROR set_syn_load: given synaptic contributions for population {pop_name} and target_type {given_proj_target_type} do not sum up to 1: given={given_contribution_arr}-->{round(given_contribution_arr.sum(),6)}" + self.log(error_msg) + raise ValueError(error_msg) + ### loop over given afferent projections + for proj_name in given_proj_name_list: + ### check if projection name exists + if proj_name not in internal_proj_name_list: + error_msg = f"ERROR set_syn_load: given projection {proj_name} given with synaptic_contribution_dict no possible projection, possible={internal_proj_name_list}" + self.log(error_msg) + raise ValueError(error_msg) + ### replace internal value of the projection with given value + self.syn_contr_dict[pop_name][given_proj_target_type][ + proj_name + ] = synaptic_contribution_dict[pop_name][ + given_proj_target_type + ][ + proj_name + ] + + ### set the weights in the afferent_projection_dict based on the given synaptic contributions + for pop_name in self.pop_name_list: + weight_list = [] + for proj_name in self.afferent_projection_dict[pop_name][ + "projection_names" + ]: + ### get proj info + proj_dict = self.get_proj_dict(proj_name) + proj_target_type = proj_dict["proj_target_type"] + + ### obtain the weight using the given syn_contr_dict and the syn_contr_max_dict (assuming max weights) + target_type_contr_dict = self.syn_contr_dict[pop_name][proj_target_type] + target_type_contr_max_dict = self.get_syn_contr_dict( + pop_name=pop_name, + target_type=proj_target_type, + use_max_weights=True, + normalize=True, + ) + ### convert the synaptic contribution dicts to arrays + target_type_contr_arr = np.array(list(target_type_contr_dict.values())) + target_type_contr_max_arr = np.array( + list(target_type_contr_max_dict.values()) + ) + ### get the transformation from synaptic contributions assuming max weights to given synaptic contributions + contr_transform_arr = 
target_type_contr_max_arr / target_type_contr_arr + ### normalize the transform_arr by the largest scaling --> obtain the weight factors + contr_transform_arr /= contr_transform_arr.max() + ### get the weight of the current projection + weight = ( + self.g_max_dict[pop_name][proj_target_type] + * contr_transform_arr[ + list(target_type_contr_dict.keys()).index(proj_name) + ] + ) + ### append weight to weight list + weight_list.append(weight) + ### replace the weights in the afferent_projection_dict + self.afferent_projection_dict[pop_name]["weights"] = weight_list + + ### now scale the weights based on the synaptic load + for pop_name in self.pop_name_list: + for target_type in ["ampa", "gaba"]: + ### get the synaptic load based on the weights + syn_load = self.get_syn_load(pop_name=pop_name, target_type=target_type) + ### if the obtained syn load with the weights is smaller than the given target syn load + ### print warning because upscaling is not possible, syn load is smaller than the user wanted + print( + f"syn_load={syn_load}, target={self.syn_load_dict[pop_name][target_type]}" + ) + if syn_load < self.syn_load_dict[pop_name][target_type]: + ### the weights cannot be upscaled because syn_load was obtained with max weights + ### --> print a warning + warning_txt = f"WARNING set_syn_load: the synaptic load for population {pop_name} and target_type {target_type} cannot reach teh given synaptic load using the given synaptic contributions without scaling the weights over the maximum weights!\ngiven syn_load={self.syn_load_dict[pop_name][target_type]}, obtained syn_load={syn_load}" + self.log(warning_txt) + self._p_w(warning_txt) + ### update the syn_load_dict with the obtained syn_load + self.syn_load_dict[pop_name][target_type] = syn_load + elif syn_load > 0: + ### get the weights + weight_arr = np.array( + self.afferent_projection_dict[pop_name]["weights"] + ) + ### get the proj target type array + proj_target_type_arr = np.array( + 
self.afferent_projection_dict[pop_name]["target"] + ) + ### select the weights for the target type + weight_arr = weight_arr[proj_target_type_arr == target_type] + ### scale the weights + weight_arr *= self.syn_load_dict[pop_name][target_type] / syn_load + ### update the weights in the afferent_projection_dict + weight_idx_arr = np.where(proj_target_type_arr == target_type)[0] + for weight_idx_new, weight_idx_original in enumerate( + weight_idx_arr + ): + self.afferent_projection_dict[pop_name]["weights"][ + weight_idx_original + ] = weight_arr[weight_idx_new] + + ### print guide + self._p_g(_p_g_after_set_syn_load) + + def set_weights(self, weights): + for pop_name in self.pop_name_list: + self.afferent_projection_dict[pop_name]["weights"] = [] + for proj_name in self.afferent_projection_dict[pop_name][ + "projection_names" + ]: + self.afferent_projection_dict[pop_name]["weights"].append( + weights[pop_name][proj_name] + ) + + def get_syn_load(self, pop_name: str, target_type: str) -> float: + """ + Calculates the synaptic load of a population for a given target type for the given weights of the afferent_projection_dict + + Args: + pop_name: str + name of the population + + target_type: str + either 'ampa' or 'gaba' + + Returns: + syn_load: float + synaptic load of the population for the given target type + """ + ### get the proj target type array + proj_target_type_arr = np.array( + self.afferent_projection_dict[pop_name]["target"] + ) + if target_type in proj_target_type_arr: + ### get the weights + weight_arr = np.array(self.afferent_projection_dict[pop_name]["weights"]) + ### select the weights for the target type + weight_arr = weight_arr[proj_target_type_arr == target_type] + ### get the pre size + size_arr = np.array(self.afferent_projection_dict[pop_name]["size"]) + ### select the pre size for the target type + size_arr = size_arr[proj_target_type_arr == target_type] + ### get the probaility + prob_arr = 
np.array(self.afferent_projection_dict[pop_name]["probability"]) + ### select the probability for the target type + prob_arr = prob_arr[proj_target_type_arr == target_type] + ### get the firing rate + firing_rate_arr = np.array( + self.afferent_projection_dict[pop_name]["target firing rate"] + ) + ### select the firing rate for the target type + firing_rate_arr = firing_rate_arr[proj_target_type_arr == target_type] + + ### get the synaptic load based on weights, sizes, probabilities and max weights + syn_load = np.sum(weight_arr * size_arr * prob_arr * firing_rate_arr) / ( + self.g_max_dict[pop_name][target_type] + * np.sum(size_arr * prob_arr * firing_rate_arr) + ) + else: + syn_load = 0 + + return syn_load + + def get_template_synaptic_contribution_dict(self, given_dict): + """ + converts the full template dict with all keys for populations, target-types and projections into a reduced dict + which only contains the keys which lead to values smaller 1 + """ + + ret_dict = {} + for key in given_dict.keys(): + if isinstance(given_dict[key], dict): + rec_dict = self.get_template_synaptic_contribution_dict(given_dict[key]) + if len(rec_dict) > 0: + ret_dict[key] = self.get_template_synaptic_contribution_dict( + given_dict[key] + ) + else: + if given_dict[key] < 1: + ret_dict[key] = given_dict[key] + + return ret_dict + + def divide_almost_equal(self, number, num_parts): + # Calculate the quotient and remainder + quotient, remainder = divmod(number, num_parts) + + # Initialize a list to store the almost equal integers + result = [quotient] * num_parts + + # Distribute the remainder evenly among the integers + for i in range(remainder): + result[i] += 1 + + return result + + def compile_net_many_sequential(self): + network_list = [ + net_many_dict["net"] + for net_many_dict_list in self.net_many_dict.values() + for net_many_dict in net_many_dict_list + ] + for net in network_list: + self.compile_net_many(net=net) + + def compile_net_many_parallel(self): + 
nr_available_workers = int(multiprocessing.cpu_count() / 2) + network_list = [ + net_many_dict["net"] + for net_many_dict_list in self.net_many_dict.values() + for net_many_dict in net_many_dict_list + ] + with multiprocessing.Pool(nr_available_workers) as p: + p.map(self.compile_net_many, network_list) + + ### for each network have network idx + ### network 0 is base network + ### netork 1,2,3...N are the single neuron networks for the N populations + ### start idx = N+1 (inclusive), end_idx = number many networks + N (inclusive) + for net_idx in range( + len(self.pop_name_list) + 1, len(network_list) + len(self.pop_name_list) + 1 + ): + ### get the name of the run folder of the network + ### search for a folder which starts with run_ + ### there should only be 1 --> get run_folder_name as str + run_folder_name = _find_folder_with_prefix( + base_path=f"annarchy_folders/many_net_{net_idx}", prefix="run_" + ) + run_folder_name = f"/scratch/olmai/Projects/PhD/CompNeuroPy/CompNeuroPy/examples/model_configurator/annarchy_folders/many_net_{net_idx}//{run_folder_name}" + + print(run_folder_name) + ### import the ANNarchyCore.so module from this folder + spec = importlib.util.spec_from_file_location( + f"ANNarchyCore{net_idx}", f"{run_folder_name}/ANNarchyCore{net_idx}.so" + ) + foo = importlib.util.module_from_spec(spec) + spec.loader.exec_module(foo) + + ### overwrite the entries in the network manager + _network[net_idx]["instance"] = foo + _network[net_idx]["compiled"] = True + _network[net_idx]["directory"] = run_folder_name + + def get_max_syn_currents(self, pop_name: str) -> list: + """ + obtain I_app_max, g_ampa_max and g_gaba max. 
+ f_max = f_0 + f_t + 100 + I_app_max causes f_max (increases f from f_0 to f_max) + g_gaba_max causes max IPSP + g_ampa_max cancels out g_gaba_max IPSP + + Args: + pop_name: str + population name from original model + + return: + list containing [I_max, g_ampa_max, g_gaba_max] + + Abbreviations: + f_max: max firing rate + + f_0: firing rate without syn currents + + f_t: target firing rate + """ + + ### TODO: problem for g_gaba: what if resting potential is <=-90... + ### find g_gaba max using max IPSP + self.log("search g_gaba_max with y(X) = PSP(g_ampa=0, g_gaba=X)") + g_gaba_max = self.incremental_continuous_bound_search( + y_X=lambda X_val: self.get_ipsp( + net=self.net_single_dict[pop_name]["net"], + population=self.net_single_dict[pop_name]["population"], + variable_init_sampler=self.prepare_psp_dict[pop_name][ + "variable_init_sampler" + ], + monitor=self.net_single_dict[pop_name]["monitor"], + I_app_hold=self.prepare_psp_dict[pop_name]["I_app_hold"], + g_gaba=X_val, + ), + y_bound=self.max_psp_dict[pop_name], + X_0=0, + y_0=0, + alpha_abs=0.005, + X_increase=0.1, + ) + + ### for g_ampa EPSPs can lead to spiking + ### --> find g_ampa max by "overriding" IPSP of g_gaba max + self.log( + f"search g_ampa_max with y(X) = PSP(g_ampa=X, g_gaba=g_gaba_max={g_gaba_max})" + ) + g_ampa_max = self.incremental_continuous_bound_search( + y_X=lambda X_val: self.get_ipsp( + net=self.net_single_dict[pop_name]["net"], + population=self.net_single_dict[pop_name]["population"], + variable_init_sampler=self.prepare_psp_dict[pop_name][ + "variable_init_sampler" + ], + monitor=self.net_single_dict[pop_name]["monitor"], + I_app_hold=self.prepare_psp_dict[pop_name]["I_app_hold"], + g_ampa=X_val, + g_gaba=g_gaba_max, + ), + y_bound=0, + X_0=0, + y_0=self.get_ipsp( + net=self.net_single_dict[pop_name]["net"], + population=self.net_single_dict[pop_name]["population"], + variable_init_sampler=self.prepare_psp_dict[pop_name][ + "variable_init_sampler" + ], + 
monitor=self.net_single_dict[pop_name]["monitor"], + I_app_hold=self.prepare_psp_dict[pop_name]["I_app_hold"], + g_ampa=0, + g_gaba=g_gaba_max, + ), + alpha_abs=0.005, + X_increase=g_gaba_max / 10, + ) + + ### get f_0 and f_max + f_0 = self.get_rate( + net=self.net_single_dict[pop_name]["net"], + population=self.net_single_dict[pop_name]["population"], + variable_init_sampler=self.net_single_dict[pop_name][ + "variable_init_sampler" + ], + monitor=self.net_single_dict[pop_name]["monitor"], + )[0] + f_max = f_0 + self.target_firing_rate_dict[pop_name] + 100 + + ### find I_max with f_0, and f_max using incremental_continuous_bound_search + self.log("search I_app_max with y(X) = f(I_app=X, g_ampa=0, g_gaba=0)") + I_max = self.incremental_continuous_bound_search( + y_X=lambda X_val: self.get_rate( + net=self.net_single_dict[pop_name]["net"], + population=self.net_single_dict[pop_name]["population"], + variable_init_sampler=self.net_single_dict[pop_name][ + "variable_init_sampler" + ], + monitor=self.net_single_dict[pop_name]["monitor"], + I_app=X_val, + )[0], + y_bound=f_max, + X_0=0, + y_0=f_0, + alpha_abs=1, + ) + + return [I_max, g_ampa_max, g_gaba_max] + + def incremental_continuous_bound_search( + self, + y_X, + y_bound, + X_0, + y_0, + alpha_rel=0.01, + alpha_abs=None, + n_it_max=100, + X_increase=1, + saturation_thresh=10, + saturation_warning=True, + accept_non_dicontinuity=False, + bound_type="equal", + ): + """ + you have system X --> y + you want X for y=y_bound (either upper or lower bound) + if you increase X (from starting point) y gets closer to y_bound! 
+ + expectes a continuous funciton without from P_0(X_0,y_0) to P_bound(X_bound, y_bound) + if it finds a saturation or non-continuous "step" on the way to P_bound it will return + the X_bound for the end of the continuous part from P_0 to P_bound --> y_bound will not + be reached + + Args: + y_X: function + returns a single number given a single number, call like y = y_X(X) + increasing X should bring y closer to y_bound + + y_bound: number + the bound for y for which an X_bound should be found + + X_0: number + start value of X, from where the search should start + + y_0: number + start value of y which results from X_0 + + alpha_rel: number, optional, default=0.001 + allowed relative tolerance for deviations of y from y_bound + if alpha_abs is given it overrides alpha_rel + + alpha_abs: number, optional, default=None + allowed absolute tolerance for deviations of y from y_bound + if alpha_abs is given it overrides alpha_rel + + n_it_max: number, optional, default=100 + maximum of iterations to find X_bound + + X_increase: number, optional, default=1 + the first increase of X (starting from X_0) to obtain the first new y_val + i.e. 
first calculation is: y_val = y_X(X_0+X_increase) + + saturation_thresh: number, optional, default=5 + if y does not change while increasing X by X_increase the search will stop + after this number of trials + + saturation_warning: bool, optional, default=True + if you want to get a warning when the saturation is reached during search + + accept_non_dicontinuity: bool, optional, default=False + if you do not want to search only in the first continuous search space + + bound_type: str, optional, default="equal" + equal, greater or less + equal: result should be near bound within tolerance + greater: result should be at least larger bound within tolerance + less: result should be smaller bound within tolerance + + return: + X_bound: + X value which causes y=y_bound + """ + ### TODO catch difference to target goes up in both directions + ### then nothing new is predicted --> fails + + self.log( + f"find X_bound for: y_0(X_0={X_0})={y_0} --> y_bound(X_bound=??)={y_bound}" + ) + + ### get tolerance + tolerance = abs(y_bound - y_0) * alpha_rel + if not isinstance(alpha_abs, type(None)): + tolerance = alpha_abs + + ### define stop condition + if bound_type == "equal": + stop_condition = ( + lambda y_val, n_it: ( + ((y_bound - tolerance) <= y_val) + and (y_val <= (y_bound + tolerance)) + ) + or n_it >= n_it_max + ) + elif bound_type == "greater": + stop_condition = ( + lambda y_val, n_it: ( + ((y_bound - 0) <= y_val) and (y_val <= (y_bound + 2 * tolerance)) + ) + or n_it >= n_it_max + ) + elif bound_type == "less": + stop_condition = ( + lambda y_val, n_it: ( + ((y_bound - 2 * tolerance) <= y_val) and (y_val <= (y_bound + 0)) + ) + or n_it >= n_it_max + ) + + ### check if y(X) is increasing + is_increasing = y_bound > y_0 + + ### search for X_val + X_list_predict = [X_0] + y_list_predict = [y_0] + X_list_all = [X_0] + y_list_all = [y_0] + n_it_first_round = 0 + X_val = X_0 + X_increase + y_val = y_0 + y_not_changed_counter = 0 + X_change_predicted = X_increase + while not 
stop_condition(y_val, n_it_first_round): + ### get y_val for X + y_val_pre = y_val + y_val = y_X(X_val) + y_change = y_val_pre - y_val + + ### store search history + X_list_all.append(X_val) + y_list_all.append(y_val) + + ### get next X_val depending on if y_val changed or not + if abs(y_change) > 0: + ### append X_val and y_val to y_list/X_list + y_list_predict.append(y_val) + X_list_predict.append(X_val) + ### predict new X_val using y_bound as predictor + X_val_pre = X_val + X_val = self.predict_1d( + X=y_list_predict, y=X_list_predict, X_pred=y_bound + )[0] + X_change_predicted = X_val - X_val_pre + ### now actually update X_val + X_val = X_val_pre + X_change_predicted * (1 + y_not_changed_counter / 2) + else: + ### just increase X_val + X_val = X_val + X_change_predicted * (1 + y_not_changed_counter / 2) + + ### check saturation of y_val + if abs(y_change) < tolerance: + ### increase saturation counter + ### saturation counter also increases updates of X_val + y_not_changed_counter += 1 + else: + ### reset saturation counter + y_not_changed_counter = 0 + + ### break if y_val saturated + if y_not_changed_counter >= saturation_thresh: + break + + ### increase iterator + n_it_first_round += 1 + + ### catch the initial point already satisified stop condition + if len(X_list_all) == 1: + warning_txt = "WARNING incremental_continuous_bound_search: search did not start because initial point already satisfied stop condition!" 
+ self._p_w(warning_txt) + self.log(warning_txt) + return X_0 + + ### warning if search saturated + if (y_not_changed_counter >= saturation_thresh) and saturation_warning: + warning_txt = f"WARNING incremental_continuous_bound_search: search saturated at y={y_list_predict[-1]} while searching for X_val for y_bound={y_bound}" + self._p_w(warning_txt) + self.log(warning_txt) + self.log("initial search lists:") + self.log("all:") + self.log(np.array([X_list_all, y_list_all]).T) + self.log("predict:") + self.log(np.array([X_list_predict, y_list_predict]).T) + + ### if search saturated right at the begining --> search failed (i.e. y did not change while increasing X) + if (y_not_changed_counter >= saturation_thresh) and len(X_list_predict) == 1: + error_msg = "ERROR incremental_continuous_bound_search: search failed because changing X_val did not change y_val" + self.log(error_msg) + raise AssertionError(error_msg) + + ### get best X value for which y is closest to y_bound + idx_best = np.argmin(np.absolute(np.array(y_list_predict) - y_bound)) + X_bound = X_list_predict[idx_best] + + ### sort y_list_predict and corresponding X_list_predict + ### get value pair which is before bound and value pair which is behind bound + ### if this does not work... 
use previous X_0 and X_bound + sort_idx_arr = np.argsort(y_list_predict) + X_arr_predict_sort = np.array(X_list_predict)[sort_idx_arr] + y_arr_predict_sort = np.array(y_list_predict)[sort_idx_arr] + over_y_bound_arr = y_arr_predict_sort > y_bound + over_y_bound_changed_idx = np.where(np.diff(over_y_bound_arr))[0] + if len(over_y_bound_changed_idx) == 1: + if over_y_bound_changed_idx[0] < len(y_arr_predict_sort): + X_aside_change_list = [ + X_arr_predict_sort[over_y_bound_changed_idx[0]], + X_arr_predict_sort[over_y_bound_changed_idx[0] + 1], + ] + y_aside_change_list = [ + y_arr_predict_sort[over_y_bound_changed_idx[0]], + y_arr_predict_sort[over_y_bound_changed_idx[0] + 1], + ] + X_0 = min(X_aside_change_list) + X_bound = max(X_aside_change_list) + y_0 = min(y_aside_change_list) + self.log("predict sorted:") + self.log(np.array([X_arr_predict_sort, y_arr_predict_sort, over_y_bound_arr]).T) + self.log(over_y_bound_changed_idx) + + ### if y cannot get larger or smaller than y_bound one has to check if you not "overshoot" with X_bound + ### --> fine tune result by investigating the space between X_0 and X_bound and predict a new X_bound + self.log(f"X_0: {X_0}, X_bound:{X_bound} for final predict list") + X_space_arr = np.linspace(X_0, X_bound, 100) + y_val = y_0 - [-1, 1][int(is_increasing)] + X_list_predict = [] + y_list_predict = [] + X_list_all = [] + y_list_all = [] + did_break = False + n_it_second_round = 0 + for X_val in X_space_arr: + y_val_pre = y_val + y_val = y_X(X_val) + X_list_all.append(X_val) + y_list_all.append(y_val) + if y_val != y_val_pre: + ### if y_val changed + ### append X_val and y_val to y_list/X_list + y_list_predict.append(y_val) + X_list_predict.append(X_val) + ### if already over y_bound -> stop + if y_val > y_bound and is_increasing: + did_break = True + break + if y_val < y_bound and not is_increasing: + did_break = True + break + n_it_second_round += 1 + ### if did break early --> use again finer bounds + if did_break and 
n_it_second_round < 90: + X_space_arr = np.linspace( + X_list_predict[-2], X_list_predict[-1], 100 - n_it_second_round + ) + y_val = y_list_predict[-2] + for X_val in X_space_arr: + y_val_pre = y_val + y_val = y_X(X_val) + X_list_all.append(X_val) + y_list_all.append(y_val) + if y_val != y_val_pre: + ### if y_val changed + ### append X_val and y_val to y_list/X_list + y_list_predict.append(y_val) + X_list_predict.append(X_val) + ### if already over y_bound -> stop + if y_val > y_bound and is_increasing: + break + if y_val < y_bound and not is_increasing: + break + ### sort value lists + sort_idx_all_arr = np.argsort(X_list_all) + X_list_all = (np.array(X_list_all)[sort_idx_all_arr]).tolist() + y_list_all = (np.array(y_list_all)[sort_idx_all_arr]).tolist() + sort_idx_predict_arr = np.argsort(X_list_predict) + X_list_predict = (np.array(X_list_predict)[sort_idx_predict_arr]).tolist() + y_list_predict = (np.array(y_list_predict)[sort_idx_predict_arr]).tolist() + + ### log + self.log("final predict lists:") + self.log("all:") + self.log(np.array([X_list_all, y_list_all]).T) + self.log("predict:") + self.log(np.array([X_list_predict, y_list_predict]).T) + + ### check if there is a discontinuity in y_all, starting with the first used value in y_predict + ### update all values with first predict value + first_y_used_in_predict = y_list_predict[0] + idx_first_y_in_all = y_list_all.index(first_y_used_in_predict) + y_list_all = y_list_all[idx_first_y_in_all:] + ### get discontinuity + discontinuity_idx_list = self.get_discontinuity_idx_list(y_list_all) + self.log("discontinuity_idx_list") + self.log(f"{discontinuity_idx_list}") + if len(discontinuity_idx_list) > 0 and not accept_non_dicontinuity: + ### there is a discontinuity + discontinuity_idx = discontinuity_idx_list[0] + ### only use values until discontinuity + y_bound_new = y_list_all[discontinuity_idx] + idx_best = y_list_predict.index(y_bound_new) + X_val_best = X_list_predict[idx_best] + y_val_best = 
y_list_predict[idx_best] + ### print warning + warning_txt = f"WARNING incremental_continuous_bound_search: found discontinuity, only reached y={y_bound_new} while searching for y_bound={y_bound}" + self._p_w(warning_txt) + ### log + self.log(warning_txt) + self.log( + f"discontinuities detected --> only use last values until first discontinuity: X={X_val_best}, y={y_val_best}" + ) + else: + ### there is no discontinuity + ### there can still be duplicates in the y_list --> remove them + ### get arrays + X_arr_predict = np.array(X_list_predict) + y_arr_predict = np.array(y_list_predict) + ### get unique indices + _, unique_indices = np.unique(y_arr_predict, return_index=True) + ### get arrays without duplicates in y_list + X_arr_predict = X_arr_predict[unique_indices] + y_arr_predict = y_arr_predict[unique_indices] + + ### now predict final X_val using y_arr + X_val = self.predict_1d( + X=y_arr_predict, y=X_arr_predict, X_pred=y_bound, linear=False + )[0] + y_val = y_X(X_val) + + ### append it to lists + X_list_predict.append(X_val) + y_list_predict.append(y_val) + + ### find best + idx_best = np.argmin(np.absolute(np.array(y_list_predict) - y_bound)) + X_val_best = X_list_predict[idx_best] + y_val_best = y_list_predict[idx_best] + + ### log + self.log(f"final values: X={X_val_best}, y={y_val_best}") + + ### warning for max iteration search + if not (n_it_first_round < n_it_max): + warning_txt = f"WARNING incremental_continuous_bound_search: reached max iterations to find X_bound to get y_bound={y_bound}, found X_bound causes y={y_val_best}" + self._p_w(warning_txt) + self.log(warning_txt) + + return X_val_best + + def get_discontinuity_idx_list(self, arr): + """ + Args: + arr: array-like + array for which its checked if there are discontinuities + """ + arr = np.array(arr) + range_data = arr.max() - arr.min() + diff_arr = np.diff(arr) + diff_rel_range_arr = diff_arr / range_data + diff_rel_range_abs_arr = np.absolute(diff_rel_range_arr) + peaks = find_peaks( + 
diff_rel_range_abs_arr, prominence=10 * np.mean(diff_rel_range_abs_arr) + ) + peaks_idx_list = peaks[0] + + return peaks_idx_list + + def predict_1d(self, X, y, X_pred, linear=True): + """ + Args: + X: array-like + X values + + y: array-like + y values, same size as X_values + + X_pred: array-like or number + X value(s) for which new y value(s) are predicted + + linear: bool, optional, default=True + if interpolation is linear + + return: + Y_pred_arr: array + predicted y values for X_pred + """ + if not linear: + if len(X) >= 4: + y_X = interp1d(x=X, y=y, fill_value="extrapolate", kind="cubic") + elif len(X) >= 3: + y_X = interp1d(x=X, y=y, fill_value="extrapolate", kind="quadratic") + else: + y_X = interp1d(x=X, y=y, fill_value="extrapolate", kind="linear") + y_pred_arr = y_X(X_pred) + return y_pred_arr.reshape(1) + + def get_rate_dict( + self, + net, + population_dict, + variable_init_sampler_dict, + monitor_dict, + I_app_dict, + g_ampa_dict, + g_gaba_dict, + ): + """ + function to obtain the firing rates of the populations of + the network given with 'idx' for given I_app, g_ampa and g_gaba values + + Args: + idx: int + network index given by the parallel_run function + + net: object + network object given by the parallel_run function + + net_many_dict: dict + dictionary containing a population_dict and a monitor_dict + which contain for each population name the + - ANNarchy Population object of the magic network + - ANNarchy Monitor object of the magic network + + I_app_arr_dict: dict of arrays + dictionary containing for each population the array with input values for I_app + + g_ampa_arr_dict: dict of arrays + dictionary containing for each population the array with input values for g_ampa + + g_gaba_arr_dict: dict of arrays + dictionary containing for each population the array with input values for g_gaba + + variable_init_sampler_dict: dict + dictionary containing for each population the initial variables sampler object + with the function.sample() to get 
initial values of the neurons + + self: object + the model_configurator object + + return: + f_rec_arr_dict: dict of arrays + dictionary containing for each population the array with the firing rates for the given inputs + """ + ### reset and set init values + net.reset() + for pop_name, varaible_init_sampler in variable_init_sampler_dict.items(): + population = net.get(population_dict[pop_name]) + variable_init_arr = varaible_init_sampler.sample(len(population), seed=0) + for var_idx, var_name in enumerate(population.variables): + set_val = variable_init_arr[:, var_idx] + setattr(population, var_name, set_val) + + ### slow down conductances (i.e. make them constant) + for pop_name in population_dict.keys(): + population = net.get(population_dict[pop_name]) + population.tau_ampa = 1e20 + population.tau_gaba = 1e20 + ### apply given variables + for pop_name in population_dict.keys(): + population = net.get(population_dict[pop_name]) + population.I_app = I_app_dict[pop_name] + population.g_ampa = g_ampa_dict[pop_name] + population.g_gaba = g_gaba_dict[pop_name] + ### simulate 500 ms initial duration + X ms + net.simulate(500 + self.simulation_dur) + ### get rate for the last X ms + f_arr_dict = {} + for pop_name in population_dict.keys(): + population = net.get(population_dict[pop_name]) + monitor = net.get(monitor_dict[pop_name]) + spike_dict = monitor.get("spike") + f_arr = np.zeros(len(population)) + for idx_n, n in enumerate(spike_dict.keys()): + time_list = np.array(spike_dict[n]) + nbr_spks = np.sum((time_list > (500 / dt())).astype(int)) + rate = nbr_spks / (self.simulation_dur / 1000) + f_arr[idx_n] = rate + f_arr_dict[pop_name] = f_arr + + return f_arr_dict + + def get_rate( + self, + net, + population, + variable_init_sampler, + monitor, + I_app=0, + g_ampa=0, + g_gaba=0, + ): + """ + simulates a population for X+500 ms and returns the firing rate of each neuron for the last X ms + X is defined with self.simulation_dur + + Args: + net: ANNarchy network + 
network which contains the population and monitor + + population: ANNarchy population + population which is recorded and stimulated + + variable_init_sampler: object + containing the initial values of the population neuron, use .sample() to get values + + monitor: ANNarchy monitor + to record spikes from population + + I_app: number or arr, optional, default = 0 + applied current to the population neurons, has to have the same size as the population + + g_ampa: number or arr, optional, default = 0 + applied ampa conductance to the population neurons, has to have the same size as the population + + g_gaba: number or arr, optional, default = 0 + applied gaba conductance to the population neurons, has to have the same size as the population + """ + ### reset and set init values + net.reset() + self.set_init_variables(population, variable_init_sampler) + ### slow down conductances (i.e. make them constant) + population.tau_ampa = 1e20 + population.tau_gaba = 1e20 + ### apply given variables + population.I_app = I_app + population.g_ampa = g_ampa + population.g_gaba = g_gaba + ### simulate 500 ms initial duration + X ms + net.simulate(500 + self.simulation_dur) + ### get rate for the last X ms + spike_dict = monitor.get("spike") + f_arr = np.zeros(len(population)) + for idx_n, n in enumerate(spike_dict.keys()): + time_list = np.array(spike_dict[n]) + nbr_spks = np.sum((time_list > (500 / dt())).astype(int)) + rate = nbr_spks / (self.simulation_dur / 1000) + f_arr[idx_n] = rate + + return f_arr + + def get_ipsp( + self, + net: Network, + population: Population, + variable_init_sampler, + monitor, + I_app_hold, + g_ampa=0, + g_gaba=0, + do_plot=False, + ): + """ + simulates a single spike at t=50 ms and records the change of v within a voltage_clamp neuron + + Args: + net: ANNarchy network + network which contains the population and monitor + + population: ANNarchy population + population which is recorded and stimulated + + variable_init_sampler: object + containing the 
initial values of the population neuron, use .sample() to get values + + monitor: ANNarchy monitor + to record v_clamp_rec from population + + g_ampa: number, optional, default = 0 + applied ampa conductance to the population neuron at t=50 ms + + g_gaba: number, optional, default = 0 + applied gaba conductance to the population neurons at t=50 ms + """ + ### reset network and set initial values + net.reset() + self.set_init_variables(population, variable_init_sampler) + ### apply input + population.I_app = I_app_hold + ### simulate 50 ms initial duration + net.simulate(50) + ### apply given conductances --> changes v + v_rec_rest = population.v[0] + population.v_psp_thresh = v_rec_rest + population.g_ampa = g_ampa + population.g_gaba = g_gaba + ### simulate until v is near v_rec_rest again + net.simulate_until(max_duration=self.simulation_dur, population=population) + ### get the psp = maximum of difference of v_rec and v_rec_rest + v_rec = monitor.get("v")[:, 0] + spike_dict = monitor.get("spike") + spike_timestep_list = spike_dict[0] + [net.get_current_step()] + end_timestep = int(round(min(spike_timestep_list), 0)) + psp = float( + np.absolute(np.clip(v_rec[:end_timestep] - v_rec_rest, None, 0)).max() + ) + + if do_plot: + plt.figure() + plt.title( + f"g_ampa={g_ampa}\ng_gaba={g_gaba}\nv_rec_rest={v_rec_rest}\npsp={psp}" + ) + plt.plot(v_rec) + plt.savefig( + f"tmp_psp_{population.name}_{int(g_ampa*1000)}_{int(g_gaba*1000)}.png" + ) + plt.close("all") + + return psp + + def compile_net_many(self, net): + compile_in_folder( + folder_name=f"many_net_{net.id}", net=net, clean=True, silent=True + ) + + def create_many_neuron_network(self): + """ + creates a ANNarchy magic network with all popualtions which should be configured the size + of the populations is equal and is obtianed by dividing the number of the + interpolation values by the number of networks which will be used during run_parallel + + return: + net_many_dict: dict + contains + - population_dict: for 
all population names the created population in the magic network + - monitor_dict: for all population names the created monitors in the magic network + """ + self.log("create many neurons network") + + ### for each population of the given model which should be configured + ### create a population with a given size + ### create a monitor recording spikes + ### create a network containing the population and the monitor + many_neuron_population_list = [] + many_neuron_monitor_list = [] + many_neuron_network_list = [] + for pop_name in self.pop_name_list: + ### create the neuron model with poisson spike trains + ### get the initial arguments of the neuron + neuron_model = self.neuron_model_dict[pop_name] + ### names of arguments + init_arguments_name_list = list(Neuron.__init__.__code__.co_varnames) + init_arguments_name_list.remove("self") + init_arguments_name_list.remove("name") + init_arguments_name_list.remove("description") + ### arguments dict + init_arguments_dict = { + init_arguments_name: getattr(neuron_model, init_arguments_name) + for init_arguments_name in init_arguments_name_list + } + ### get the afferent populations + afferent_population_list = [] + proj_target_type_list = [] + for proj_name in self.afferent_projection_dict[pop_name][ + "projection_names" + ]: + proj_dict = self.get_proj_dict(proj_name) + pre_pop_name = proj_dict["pre_pop_name"] + afferent_population_list.append(pre_pop_name) + proj_target_type_list.append(proj_dict["proj_target_type"]) + + ### for each afferent population create a binomial spike train equation string + ### add it to the equations + ### and add the related parameters to the parameters + + ### split the equations and parameters string + equations_line_split_list = str( + init_arguments_dict["equations"] + ).splitlines() + + parameters_line_split_list = str( + init_arguments_dict["parameters"] + ).splitlines() + + ### add the binomial spike train equations and parameters + ( + equations_line_split_list, + 
parameters_line_split_list, + ) = self.add_binomial_input( + equations_line_split_list, + parameters_line_split_list, + afferent_population_list, + proj_target_type_list, + ) + + ### combine string lines to multiline strings again + init_arguments_dict["parameters"] = "\n".join(parameters_line_split_list) + init_arguments_dict["equations"] = "\n".join(equations_line_split_list) + + ### create neuron model with new equations + neuron_model_new = Neuron(**init_arguments_dict) + + # print("new neuron model:") + # print(neuron_model_new) + + ### create the many neuron population + my_pop = Population( + geometry=self.nr_neurons_per_net, + neuron=neuron_model_new, + name=f"many_neuron_{pop_name}", + ) + + ### set the attributes of the neurons + for attr_name, attr_val in self.neuron_model_parameters_dict[pop_name]: + setattr(my_pop, attr_name, attr_val) + + ### create Monitor for many neuron + my_mon = Monitor(my_pop, ["spike"]) + + ### create the network with population and monitor + my_net = Network() + my_net.add(my_pop) + my_net.add(my_mon) + + ### compile network + compile_in_folder(folder_name=f"many_neuron_{pop_name}", net=my_net) + + ### append the lists + many_neuron_network_list.append(my_net) + many_neuron_population_list.append(my_net.get(my_pop)) + many_neuron_monitor_list.append(my_net.get(my_mon)) + + net_many_dict = { + "network_list": many_neuron_network_list, + "population_list": many_neuron_population_list, + "monitor_list": many_neuron_monitor_list, + } + return net_many_dict + + def add_binomial_input( + self, + equations_line_split_list, + parameters_line_split_list, + afferent_population_list, + proj_target_type_list, + ): + ### loop over afferent populations to add the new equation lines and parameters + for pre_pop_name in afferent_population_list: + ### define the spike train of a pre population as a binomial process with number of trials = number of pre neurons and success probability = spike probability (taken from Poisson neurons) + ### the 
obtained value is the number of spikes at a time step times the weight + poisson_equation_str = f"{pre_pop_name}_spike_train = Binomial({pre_pop_name}_size, {pre_pop_name}_spike_prob)" + ### add the equation line + equations_line_split_list.insert(1, poisson_equation_str) + ### add the parameters + parameters_line_split_list.append(f"{pre_pop_name}_size = 0 : population") + parameters_line_split_list.append( + f"{pre_pop_name}_spike_prob = 0 : population" + ) + parameters_line_split_list.append(f"{pre_pop_name}_weight = 0 : population") + + ### change the g_ampa and g_gaba line, they additionally are the sum of the spike trains + for equation_line_idx, equation_line in enumerate(equations_line_split_list): + ### remove whitespaces + line = equation_line.replace(" ", "") + ### check if line contains g_ampa + if "dg_ampa/dt" in line: + ### get the right side of the equation + line_right = line.split("=")[1] + line_left = line.split("=")[0] + ### remove and store tags_str + tags_str = "" + if len(line_right.split(":")) > 1: + line_right, tags_str = line_right.split(":") + ### get the populations whose spike train should be appended in g_ampa + afferent_population_to_append_list = [] + for pre_pop_idx, pre_pop_name in enumerate(afferent_population_list): + if proj_target_type_list[pre_pop_idx] == "ampa": + afferent_population_to_append_list.append(pre_pop_name) + if len(afferent_population_to_append_list) > 0: + ### change right side, add the sum of the spike trains + line_right = f"{line_right} + {'+'.join([f'({pre_pop_name}_spike_train*{pre_pop_name}_weight)/dt' for pre_pop_name in afferent_population_to_append_list])}" + ### add tags_str again + if tags_str != "": + line_right = f"{line_right}:{tags_str}" + ### combine line again and replace the list entry in equations_line_split_list + line = f"{line_left}={line_right}" + equations_line_split_list[equation_line_idx] = line + + ### check if line contains g_gaba + if "dg_gaba/dt" in line: + ### get the right side of 
the equation + line_right = line.split("=")[1] + line_left = line.split("=")[0] + ### remove and store tags_str + tags_str = "" + if len(line_right.split(":")) > 1: + line_right, tags_str = line_right.split(":") + ### get the populations whose spike train should be appended in g_ampa + afferent_population_to_append_list = [] + for pre_pop_idx, pre_pop_name in enumerate(afferent_population_list): + if proj_target_type_list[pre_pop_idx] == "gaba": + afferent_population_to_append_list.append(pre_pop_name) + if len(afferent_population_to_append_list) > 0: + ### change right side, add the sum of the spike trains + line_right = f"{line_right} + {'+'.join([f'({pre_pop_name}_spike_train*{pre_pop_name}_weight)/dt' for pre_pop_name in afferent_population_to_append_list])}" + ### add tags_str again + if tags_str != "": + line_right = f"{line_right}:{tags_str}" + ### combine line again and replace the list entry in equations_line_split_list + line = f"{line_left}={line_right}" + equations_line_split_list[equation_line_idx] = line + + return (equations_line_split_list, parameters_line_split_list) + + def get_v_clamp_2000( + self, + net: Network, + population, + monitor=None, + v=None, + I_app=None, + variable_init_sampler=None, + pre_pop_name_list=[], + eff_size_list=[], + rate_list=[], + weight_list=[], + return_1000=False, + ): + """ + the returned values is dv/dt + --> to get the hypothetical change of v for a single time step multiply with dt! 
+ """ + ### reset network and set initial values + net.reset() + net.set_seed(0) + if not isinstance(variable_init_sampler, type(None)): + self.set_init_variables(population, variable_init_sampler) + ### set v and I_app + if not isinstance(v, type(None)): + population.v = v + if not isinstance(I_app, type(None)): + population.I_app = I_app + ### set the weights and rates of the binomial spike trains of the afferent populations + for pre_pop_idx, pre_pop_name in enumerate(pre_pop_name_list): + setattr(population, f"{pre_pop_name}_size", eff_size_list[pre_pop_idx]) + setattr( + population, + f"{pre_pop_name}_spike_prob", + (rate_list[pre_pop_idx] / 1000) * dt(), + ) + setattr(population, f"{pre_pop_name}_weight", weight_list[pre_pop_idx]) + ### simulate 2000 ms + net.simulate(2000) + + if return_1000: + v_clamp_rec_arr = monitor.get("v_clamp_rec_sign")[:, 0] + return np.mean(v_clamp_rec_arr[-int(round(1000 / dt(), 0)) :]) + return population.v_clamp_rec[0] + + def get_voltage_clamp_equations(self, init_arguments_dict, pop_name): + """ + works with + dv/dt = ... + v += ... + """ + ### get the dv/dt equation from equations + ### find the line with dv/dt= or v+= or v= + eq = str(init_arguments_dict["equations"]) + eq = eq.splitlines() + line_is_v_list = [False] * len(eq) + ### check in which lines v is defined + for line_idx, line in enumerate(eq): + line_is_v_list[line_idx] = self.get_line_is_v(line) + ### raise error if no v or multiple times v + if True not in line_is_v_list or sum(line_is_v_list) > 1: + raise ValueError( + f"ERROR model_configurator create_net_single_voltage_clamp: In the equations of the neurons has to be exactly a single line which defines dv/dt or v, not given for population {pop_name}" + ) + ### set the v equation + eq_v = eq[line_is_v_list.index(True)] + + ### if equation type is v += ... 
--> just take right side + if "+=" in eq_v: + ### create the new equations for the ANNarchy neuron + ### create two lines, the voltage clamp line v+=0 and the + ### right sight of v+=... separately + eq_new_0 = f"v_clamp_rec_sign = {eq_v.split('+=')[1]}" + eq_new_1 = f"v_clamp_rec = fabs({eq_v.split('+=')[1]})" + eq_new_2 = "v_clamp_rec_pre = v_clamp_rec" + eq_new_3 = "v+=0" + ### remove old v line and insert new lines + del eq[line_is_v_list.index(True)] + eq.insert(line_is_v_list.index(True), eq_new_0) + eq.insert(line_is_v_list.index(True), eq_new_1) + eq.insert(line_is_v_list.index(True), eq_new_2) + eq.insert(line_is_v_list.index(True), eq_new_3) + eq = "\n".join(eq) + ### return new neuron equations + return eq + + ### if equation type is dv/dt = ... --> get the right side of dv/dt=... + ### transform eq_v + ### remove whitespaces + ### remove tags and store them for later + ### TODO replace random distributions and mathematical expressions which may be on the left side + eq_v = eq_v.replace(" ", "") + eq_v = eq_v.replace("dv/dt", "delta_v") + eq_tags_list = eq_v.split(":") + eq_v = eq_tags_list[0] + if len(eq_tags_list) > 1: + tags = eq_tags_list[1] + else: + tags = None + + ### split the equation at "=" and move everything on one side (other side = 0) + eq_v_splitted = eq_v.split("=") + left_side = eq_v_splitted[0] + right_side = "right_side" + eq_v_one_side = f"{right_side}-({left_side})" + + ### prepare the sympy equation generation + attributes_name_list = self.neuron_model_attributes_dict[pop_name] + attributes_tuple = symbols(",".join(attributes_name_list)) + ### for each attribute of the neuron a sympy symbol + attributes_sympy_dict = { + key: attributes_tuple[attributes_name_list.index(key)] + for key in attributes_name_list + } + ### furhter create symbols for delta_v and right_side + attributes_sympy_dict["delta_v"] = Symbol("delta_v") + attributes_sympy_dict["right_side"] = Symbol("right_side") + + ### get the sympy equation expression by 
evaluating the string + eq_sympy = evaluate_expression_with_dict( + expression=eq_v_one_side, value_dict=attributes_sympy_dict + ) + + ### solve the equation to delta_v + result = solve(eq_sympy, attributes_sympy_dict["delta_v"], dict=True) + if len(result) != 1: + raise ValueError( + f"ERROR model_configurator create_net_single_voltage_clamp: Could not find solution for dv/dt for neuronmodel of population {pop_name}!" + ) + result = str(result[0][attributes_sympy_dict["delta_v"]]) + + ### replace right_side by the actual right side + result = result.replace("right_side", f"({eq_v_splitted[1]})") + + ### TODO replace mathematical expressions and random distributions back to previous + + ### now create the new equations for the ANNarchy neuron + ### create three lines, the voltage clamp line "dv/dt=0", + ### the obtained line which would be the right side of dv/dt, + ### and this right side sotred from the previous time step + ### v_clamp_rec should be an absolute value + eq_new_0 = f"v_clamp_rec_sign = {result}" + eq_new_1 = f"v_clamp_rec = fabs({result})" + eq_new_2 = "v_clamp_rec_pre = v_clamp_rec" + ### add stored tags to new dv/dt equation + if not isinstance(tags, type(None)): + eq_new_3 = f"dv/dt=0 : {tags}" + else: + eq_new_3 = "dv/dt=0" + ### remove old v line and insert new three lines + del eq[line_is_v_list.index(True)] + eq.insert(line_is_v_list.index(True), eq_new_0) + eq.insert(line_is_v_list.index(True), eq_new_1) + eq.insert(line_is_v_list.index(True), eq_new_2) + eq.insert(line_is_v_list.index(True), eq_new_3) + eq = "\n".join(eq) + ### return new neuron equations + return eq + + def get_line_is_v(self, line: str): + """ + check if a equation string contains dv/dt or v= or v+= + """ + if "v" not in line: + return False + + ### remove whitespaces + line = line.replace(" ", "") + + ### check for dv/dt + if "dv/dt" in line: + return True + + ### check for v update + if ("v=" in line or "v+=" in line) and line.startswith("v"): + return True + + return 
False + + def get_line_is_g_ampa(self, line: str): + """ + check if a equation string contains dg_ampa/dt + """ + + ### remove whitespaces + line = line.replace(" ", "") + + ### check for dv/dt + if "dv/dt" in line: + return True + + ### check for v update + if ("v=" in line or "v+=" in line) and line.startswith("v"): + return True + + return False + + def get_init_neuron_variables_for_psp(self, net, pop, v_rest, I_app_hold): + """ + get the variables of the given population after simulating 2000 ms + + Args: + net: ANNarchy network + the network which contains the pop + + pop: ANNarchy population + the population whose variables are obtained + + """ + ### reset neuron and deactivate input and set v_rest + net.reset() + pop.v = v_rest + pop.I_app = I_app_hold + + ### get the variables of the neuron after 5000 ms + net.simulate(5000) + var_name_list = list(pop.variables) + var_arr = np.zeros((1, len(var_name_list))) + get_arr = np.array([getattr(pop, var_name) for var_name in pop.variables]) + var_arr[0, :] = get_arr[:, 0] + + ### create a sampler with the one data sample + sampler = self.var_arr_sampler(var_arr, var_name_list) + return sampler + + class var_arr_sampler: + def __init__(self, var_arr, var_name_list) -> None: + self.var_arr_shape = var_arr.shape + self.is_const = ( + np.std(var_arr, axis=0) <= np.mean(np.absolute(var_arr), axis=0) / 1000 + ) + self.constant_arr = var_arr[0, self.is_const] + self.not_constant_val_arr = var_arr[:, np.logical_not(self.is_const)] + self.var_name_list = var_name_list + + def sample(self, n=1, seed=0): + """ + Args: + n: int, optional, default=1 + number of samples + + seed: int, optional, default=0 + seed for rng + """ + ### get random idx + rng = np.random.default_rng(seed=seed) + random_idx_arr = rng.integers(low=0, high=self.var_arr_shape[0], size=n) + ### sample with random idx + sample_arr = self.not_constant_val_arr[random_idx_arr] + ### create return array + ret_arr = np.zeros((n,) + self.var_arr_shape[1:]) + ### 
add samples to return array + ret_arr[:, np.logical_not(self.is_const)] = sample_arr + ### add constant values to return array + ret_arr[:, self.is_const] = self.constant_arr + + return ret_arr + + def get_nr_many_neurons(self, nr_neurons, nr_networks): + """ + Splits the number of neurons in almost equally sized parts. + + Args: + nr_neurons: int + number of neurons which should be splitted + + nr_networks: int + number of networks over which the neurons should be equally distributed + """ + return self.divide_almost_equal(number=nr_neurons, num_parts=nr_networks) + + def get_max_weight_dict_for_pop(self, pop_name): + """ + get the weight dict for a single population + + Args: + pop_name: str + population name + + return: dict + keys = afferent projection names, values = max weights + """ + + ### loop over afferent projections + max_w_list = [] + for proj_name in self.afferent_projection_dict[pop_name]["projection_names"]: + ### find max weight for projection + max_weight_of_proj = self.get_max_weight_of_proj(proj_name=proj_name) + max_w_list.append(max_weight_of_proj) + self.afferent_projection_dict[pop_name]["max_weight"] = max_w_list + + ### remove weight key from self.afferent_projection_dict[pop_name] which was added during the process + self.afferent_projection_dict[pop_name].pop("weights") + + ### now create the dictionary structure for return + # { + # "ampa": {"projection_name": "max_weight value"...}, + # "gaba": {"projection_name": "max_weight value"...}, + # } + max_weight_dict_for_pop = {"ampa": {}, "gaba": {}} + ### loop over all afferent projections + for proj_name in self.afferent_projection_dict[pop_name]["projection_names"]: + proj_dict = self.get_proj_dict(proj_name) + proj_target_type = proj_dict["proj_target_type"] + proj_max_weight = proj_dict["proj_max_weight"] + ### add max weight of projection to the corresponding target type in the return dict + max_weight_dict_for_pop[proj_target_type][proj_name] = proj_max_weight + + return 
max_weight_dict_for_pop + + def get_proj_dict(self, proj_name): + """ + get a dictionary for a specified projection which contains following information: + post_pop_name + proj_target_type + idx_proj + spike_frequency + proj_weight + g_max + + Args: + proj_name: str + projection name + + return: + proj_dict: dict + keys see above + """ + ### get pre_pop_name + pre_pop_name = self.pre_pop_name_dict[proj_name] + ### get pre_pop_name + pre_pop_size = self.pre_pop_size_dict[proj_name] + ### get post_pop_name + post_pop_name = self.post_pop_name_dict[proj_name] + ### get idx_proj and proj_target_type + idx_proj = self.afferent_projection_dict[post_pop_name][ + "projection_names" + ].index(proj_name) + proj_target_type = self.afferent_projection_dict[post_pop_name]["target"][ + idx_proj + ] + ### get spike frequency + f_t = self.afferent_projection_dict[post_pop_name]["target firing rate"][ + idx_proj + ] + p = self.afferent_projection_dict[post_pop_name]["probability"][idx_proj] + s = self.afferent_projection_dict[post_pop_name]["size"][idx_proj] + spike_frequency = f_t * p * s + ### get weight + try: + proj_weight = self.afferent_projection_dict[post_pop_name]["weights"][ + idx_proj + ] + except: + proj_weight = None + ### g_max + try: + g_max = self.g_max_dict[post_pop_name][proj_target_type] + except: + g_max = None + ### get max weight + try: + proj_max_weight = self.afferent_projection_dict[post_pop_name][ + "max_weight" + ][idx_proj] + except: + proj_max_weight = None + + return { + "pre_pop_name": pre_pop_name, + "pre_pop_size": pre_pop_size, + "post_pop_name": post_pop_name, + "proj_target_type": proj_target_type, + "idx_proj": idx_proj, + "spike_frequency": spike_frequency, + "proj_weight": proj_weight, + "g_max": g_max, + "proj_max_weight": proj_max_weight, + "proj_prob": p, + } + + def get_max_weight_of_proj(self, proj_name): + """ + find the max weight of a specified projection using incremental_continuous_bound_search + increasing weights of projection 
increases conductance g of projection --> increase + until g_max is found + + Args: + proj_name: str + projection name + + return: + w_max: number + """ + ### log task + self.log(f"get w_max for {proj_name}") + + ### g_max for projection + proj_dict = self.get_proj_dict(proj_name) + g_max = proj_dict["g_max"] + + ### find max weight with incremental_continuous_bound_search + ### increase weights until g_max is reached + self.log("search w_max with y(X) = g(w=X)") + w_max = self.incremental_continuous_bound_search( + y_X=lambda X_val: self.get_g_of_single_proj( + weight=X_val, + proj_name=proj_name, + ), + y_bound=g_max, + X_0=0, + y_0=0, + ) + + return w_max + + def get_g_of_single_proj(self, weight, proj_name): + """ + given a weight for a specified projection get the resulting conductance value g + in the target population + + Args: + weight: number + the weight of the projection + + proj_name: str + projection name + + return: + g_val: number + """ + ### get some projection infos + proj_dict = self.get_proj_dict(proj_name) + pop_name = proj_dict["post_pop_name"] + idx_proj = proj_dict["idx_proj"] + proj_target_type = proj_dict["proj_target_type"] + + ### set weights in the afferent_projection_dict + ### set all weights to zero except the weight of the current proj which is set to the given weight + weight_list = [0] * self.nr_afferent_proj_dict[pop_name] + weight_list[idx_proj] = weight + self.afferent_projection_dict[pop_name]["weights"] = weight_list + + ### get the g_ampa and g_gaba values based on the current afferent_projection_dict weights + mean_g = self.get_g_values_of_pop(pop_name) + + ### then return the conductance related to the specified projection + return mean_g[proj_target_type] + + def get_g_values_of_pop(self, pop_name): + """ + calculate the average g_ampa and g_gaba values of the specified population based on the weights + defined in the afferent_projection_dict + + Args: + pop_name: str + population name + """ + spike_times_dict = {"ampa": 
[np.array([])], "gaba": [np.array([])]} + spike_weights_dict = {"ampa": [np.array([])], "gaba": [np.array([])]} + ### loop over afferent projections + for proj_name in self.afferent_projection_dict[pop_name]["projection_names"]: + ### get projection infos + proj_dict = self.get_proj_dict(proj_name) + proj_weight = proj_dict["proj_weight"] + proj_target_type = proj_dict["proj_target_type"] + spike_frequency = proj_dict["spike_frequency"] + ### get spike times over the simulation duration for the spike frequency + if spike_frequency > 0: + spike_times_arr = self.get_spike_times_arr( + spike_frequency=spike_frequency + ) + else: + spike_times_arr = np.array([]) + ### get weights array + spike_weights_arr = np.ones(len(spike_times_arr)) * proj_weight + ### store spike times and weights for the target type of the projection + spike_times_dict[proj_target_type].append(spike_times_arr) + spike_weights_dict[proj_target_type].append(spike_weights_arr) + + mean_g = {} + for target_type in ["ampa", "gaba"]: + ### concatenate spike times and corresponding weights of different afferent projections + spike_times_arr = np.concatenate(spike_times_dict[target_type]) + spike_weights_arr = np.concatenate(spike_weights_dict[target_type]) + + ### sort the spike times and corresponding weights + sort_idx = np.argsort(spike_times_arr) + spike_times_arr = spike_times_arr[sort_idx] + spike_weights_arr = spike_weights_arr[sort_idx] + + ### calculate mean g values from the spike times and corresponding weights + mean_g[target_type] = self.get_mean_g( + spike_times_arr=spike_times_arr, + spike_weights_arr=spike_weights_arr, + tau=self.tau_dict[pop_name][target_type], + ) + + return mean_g + + def get_spike_times_arr(self, spike_frequency): + """ + get spike times for a given spike frequency + + Args: + spike_frequency: number + spike frequency in Hz + """ + expected_nr_spikes = int( + round((500 + self.simulation_dur) * (spike_frequency / 1000), 0) + ) + ### isi_arr in timesteps + isi_arr = 
poisson.rvs( + (1 / (spike_frequency * (dt() / 1000))), size=expected_nr_spikes + ) + ### convert to ms + isi_arr = isi_arr * dt() + + ### get spike times from isi_arr + spike_times_arr = np.cumsum(isi_arr) + + ### only use spikes which are in the simulation time + spike_times_arr = spike_times_arr[spike_times_arr < (self.simulation_dur + 500)] + + return spike_times_arr + + def get_mean_g(self, spike_times_arr, spike_weights_arr, tau): + """ + calculates the mean conductance g for given spike times, corresponding weights (increases of g) and time constant + + Args: + spike_times_arr: arr + 1d array containing spike times in ms + + spike_weights_arr: arr + 1d array containing the weights corresponding to the spike times + + tau: number + time constant of the exponential decay of the conductance g in ms + """ + ### TODO instead of calculating the mean, create a conductance trace for the simulation time + if np.sum(spike_weights_arr) > 0: + ### get inter spike interval array + isis_g_arr = np.diff(spike_times_arr) + ### calc mean g + mean_w = np.mean(spike_weights_arr) + mean_isi = np.mean(isis_g_arr) + mean_g = mean_w / ((1 / np.exp(-mean_isi / tau)) - 1) + else: + mean_g = 0 + + return mean_g + + +def get_rate_parallel( + idx, + net, + population: Population, + variable_init_sampler, + monitor: Monitor, + I_app_arr, + weight_list: list, + pre_pop_name_list: list, + rate_list: list, + eff_size_list: list, + simulation_dur: int, +): + """ + function to obtain the firing rates of the populations of + the network given with 'idx' for given I_app, g_ampa and g_gaba values + + Args: + idx: int + network index given by the parallel_run function + + net: object + network object given by the parallel_run function + + pop_name_list: list of str + list with population names of network + + population_list: list of ANNarchy Population object + list of population objets of magic network + + variable_init_sampler_list: list of sampler objects + for each population a sampler 
object with function .sample to get initial variable values + + monitor_list: list of ANNarchy Monitor objects + list of monitor objets of magic network recording spikes from the populations + + I_app_list: list of arrays + list containing for each population the array with input values for I_app + + g_ampa_list: list of arrays + list containing for each population the array with input values for g_ampa + + g_gaba_list: list of arrays + list containing for each population the array with input values for g_gaba + + simulation_dur: int + simulation duration + + return: + f_rec_arr_list: list of arrays + list containing for each population the array with the firing rates for the given inputs + """ + ### reset and set init values + net.reset() + ### sample init values, one could sample different values for multiple neurons + ### but here we sample a single sample and use it for all neurons + variable_init_arr = variable_init_sampler.sample(1, seed=0) + var_name_list = variable_init_sampler.var_name_list + variable_init_arr = np.array([variable_init_arr[0]] * len(population)) + for var_name in enumerate(population.variables): + if var_name in var_name_list: + set_val = variable_init_arr[:, var_name_list.index(var_name)] + setattr(population, var_name, set_val) + + ### set the weights and rates of the poisson spike traces of the afferent populations + for pre_pop_idx, pre_pop_name in enumerate(pre_pop_name_list): + setattr(population, f"{pre_pop_name}_size", eff_size_list[pre_pop_idx]) + setattr( + population, + f"{pre_pop_name}_spike_prob", + (rate_list[pre_pop_idx] / 1000) * dt(), + ) + setattr(population, f"{pre_pop_name}_weight", weight_list[pre_pop_idx]) + + ### set the I_app + population.I_app = I_app_arr + + ### simulate 500 ms initial duration + X ms + if "stn" in population.name and False: + net.simulate(500) + time_arr = np.arange(500, 500 + simulation_dur, dt()) + cor_spike_train_list = [] + gpe_spike_train_list = [] + g_ampa_list = [] + g_gaba_list = [] + 
I_app_list = [] + for time_ms in time_arr: + net.simulate(dt()) + if "cor_spike_train" in population.attributes: + cor_spike_train_list.append(population.cor_spike_train[0]) + else: + cor_spike_train_list.append(0) + if "gpe_spike_train" in population.attributes: + gpe_spike_train_list.append(population.gpe_spike_train[0]) + else: + gpe_spike_train_list.append(0) + g_ampa_list.append(population.g_ampa[0]) + g_gaba_list.append(population.g_gaba[0]) + I_app_list.append(population.I_app[0]) + plt.figure(figsize=(6.4, 4.8 * 2)) + plt.subplot(211) + plt.ylabel("g_ampa") + plt.plot(time_arr, g_ampa_list, "k.") + plt.subplot(212) + plt.ylabel("g_gaba") + plt.plot(time_arr, g_gaba_list, "k.") + plt.tight_layout() + plt.savefig("stn_input_configurator.png", dpi=300) + plt.close("all") + else: + net.simulate(500 + simulation_dur) + + ### get rate for the last X ms + spike_dict = monitor.get("spike") + f_arr = np.zeros(len(population)) + for idx_n, n in enumerate(spike_dict.keys()): + time_list = np.array(spike_dict[n]) + nbr_spks = np.sum((time_list > (500 / dt())).astype(int)) + rate = nbr_spks / (simulation_dur / 1000) + f_arr[idx_n] = rate + return f_arr + + +_p_g_1 = """First call get_max_syn. +This determines max synaptic conductances and weights of all afferent projections of the model populations and returns a dictionary with max weights.""" + +_p_g_after_get_weights = ( + lambda template_weight_dict, template_synaptic_load_dict, template_synaptic_contribution_dict: f"""Now either set the weights of all projections directly or first set the synaptic load of the populations and the synaptic contributions of the afferent projections. +You can set the weights using the function .set_weights() which requires a weight_dict as argument. +Use this template for the weight_dict: + +{template_weight_dict} + +The values within the template are the maximum weight values. 
+ + +You can set the synaptic load and contribution using the function .set_syn_load() which requires a synaptic_load_dict or a single number between 0 and 1 for the synaptic load of the populations and a synaptic_contribution_dict for the synaptic contributions to the synaptic load of the afferent projections. +Use this template for the synaptic_load_dict: + +{template_synaptic_load_dict} + +'ampa_load' and 'gaba_load' are placeholders, replace them with values between 0 and 1. + +Use this template for the synaptic_contribution_dict: + +{template_synaptic_contribution_dict} + +The shown contributions of the afferent projections are based on the assumption that the maximum weights are used. The contributions of all afferent projections of a single population have to sum up to 1! +""" +) + +_p_g_after_set_syn_load = """Synaptic loads and contributions, i.e. weights set. Now call .get_base to obtain the baseline currents for the model populations. With .set_base you can directly set these baselines and the current weights in the model and compile the model. 
+""" diff --git a/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py b/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py index 00fcaa6..5830cde 100644 --- a/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py +++ b/src/CompNeuroPy/examples/model_configurator/model_configurator_user.py @@ -9,10 +9,22 @@ ) from CompNeuroPy.neuron_models import ( poisson_neuron_up_down, - Izhikevich2003_flexible_noisy_I, + Izhikevich2003NoisyBaseSNR, +) +from CompNeuroPy import ( + generate_model, + Monitors, + plot_recordings, + my_raster_plot, + CompNeuroMonitors, + PlotRecordings, +) +from CompNeuroPy.examples.model_configurator.model_configurator_cnp import ( + ModelConfigurator, +) +from CompNeuroPy.examples.model_configurator.model_configurator_cnp_old import ( + model_configurator, ) -from CompNeuroPy import generate_model, Monitors, plot_recordings, my_raster_plot -from model_configurator_cnp import model_configurator import matplotlib.pyplot as plt import numpy as np @@ -27,6 +39,7 @@ def BGM_part_function(params): ) cor_exc.tau_up = params["cor_exc.tau_up"] cor_exc.tau_down = params["cor_exc.tau_down"] + cor_exc.rates = params["cor_exc.rates"] cor_inh = Population( params["cor_inh.size"], poisson_neuron_up_down, @@ -34,71 +47,92 @@ def BGM_part_function(params): ) cor_inh.tau_up = params["cor_inh.tau_up"] cor_inh.tau_down = params["cor_inh.tau_down"] + cor_inh.rates = params["cor_inh.rates"] ### BG Populations stn = Population( params["stn.size"], - Izhikevich2003_flexible_noisy_I, + Izhikevich2003NoisyBaseSNR( + a=params["stn.a"], + b=params["stn.b"], + c=params["stn.c"], + d=params["stn.d"], + n2=params["stn.n2"], + n1=params["stn.n1"], + n0=params["stn.n0"], + tau_ampa=params["stn.tau_ampa"], + tau_gaba=params["stn.tau_gaba"], + E_ampa=params["stn.E_ampa"], + E_gaba=params["stn.E_gaba"], + noise=0, + tau_power=10, + snr_target=4, + rate_noise=100, + ), name="stn", ) - stn.a = params["stn.a"] - stn.b = 
params["stn.b"] - stn.c = params["stn.c"] - stn.d = params["stn.d"] - stn.n2 = params["stn.n2"] - stn.n1 = params["stn.n1"] - stn.n0 = params["stn.n0"] - stn.tau_ampa = params["stn.tau_ampa"] - stn.tau_gaba = params["stn.tau_gaba"] - stn.E_ampa = params["stn.E_ampa"] - stn.E_gaba = params["stn.E_gaba"] snr = Population( params["snr.size"], - Izhikevich2003_flexible_noisy_I, + Izhikevich2003NoisyBaseSNR( + a=params["snr.a"], + b=params["snr.b"], + c=params["snr.c"], + d=params["snr.d"], + n2=params["snr.n2"], + n1=params["snr.n1"], + n0=params["snr.n0"], + tau_ampa=params["snr.tau_ampa"], + tau_gaba=params["snr.tau_gaba"], + E_ampa=params["snr.E_ampa"], + E_gaba=params["snr.E_gaba"], + noise=0, + tau_power=10, + snr_target=4, + rate_noise=100, + ), name="snr", ) - snr.a = params["snr.a"] - snr.b = params["snr.b"] - snr.c = params["snr.c"] - snr.d = params["snr.d"] - snr.n2 = params["snr.n2"] - snr.n1 = params["snr.n1"] - snr.n0 = params["snr.n0"] - snr.tau_ampa = params["snr.tau_ampa"] - snr.tau_gaba = params["snr.tau_gaba"] - snr.E_ampa = params["snr.E_ampa"] - snr.E_gaba = params["snr.E_gaba"] gpe = Population( params["gpe.size"], - Izhikevich2003_flexible_noisy_I, + Izhikevich2003NoisyBaseSNR( + a=params["gpe.a"], + b=params["gpe.b"], + c=params["gpe.c"], + d=params["gpe.d"], + n2=params["gpe.n2"], + n1=params["gpe.n1"], + n0=params["gpe.n0"], + tau_ampa=params["gpe.tau_ampa"], + tau_gaba=params["gpe.tau_gaba"], + E_ampa=params["gpe.E_ampa"], + E_gaba=params["gpe.E_gaba"], + noise=0, + tau_power=10, + snr_target=4, + rate_noise=100, + ), name="gpe", ) - gpe.a = params["gpe.a"] - gpe.b = params["gpe.b"] - gpe.c = params["gpe.c"] - gpe.d = params["gpe.d"] - gpe.n2 = params["gpe.n2"] - gpe.n1 = params["gpe.n1"] - gpe.n0 = params["gpe.n0"] - gpe.tau_ampa = params["gpe.tau_ampa"] - gpe.tau_gaba = params["gpe.tau_gaba"] - gpe.E_ampa = params["gpe.E_ampa"] - gpe.E_gaba = params["gpe.E_gaba"] thal = Population( params["thal.size"], - Izhikevich2003_flexible_noisy_I, + 
Izhikevich2003NoisyBaseSNR( + a=params["thal.a"], + b=params["thal.b"], + c=params["thal.c"], + d=params["thal.d"], + n2=params["thal.n2"], + n1=params["thal.n1"], + n0=params["thal.n0"], + tau_ampa=params["thal.tau_ampa"], + tau_gaba=params["thal.tau_gaba"], + E_ampa=params["thal.E_ampa"], + E_gaba=params["thal.E_gaba"], + noise=0, + tau_power=10, + snr_target=4, + rate_noise=100, + ), name="thal", ) - thal.a = params["thal.a"] - thal.b = params["thal.b"] - thal.c = params["thal.c"] - thal.d = params["thal.d"] - thal.n2 = params["thal.n2"] - thal.n1 = params["thal.n1"] - thal.n0 = params["thal.n0"] - thal.tau_ampa = params["thal.tau_ampa"] - thal.tau_gaba = params["thal.tau_gaba"] - thal.E_ampa = params["thal.E_ampa"] - thal.E_gaba = params["thal.E_gaba"] ###### PROJECTIONS ###### ### cortex go output @@ -109,7 +143,7 @@ def BGM_part_function(params): name="cor_exc__stn", ) cor_exc__stn.connect_fixed_probability( - probability=params["cor_exc__stn.probability"], weights=1 + probability=params["cor_exc__stn.probability"], weights=0 ) cor_inh__stn = Projection( @@ -119,7 +153,7 @@ def BGM_part_function(params): name="cor_inh__stn", ) cor_inh__stn.connect_fixed_probability( - probability=params["cor_inh__stn.probability"], weights=1 + probability=params["cor_inh__stn.probability"], weights=0 ) ### stn output @@ -130,7 +164,7 @@ def BGM_part_function(params): name="stn__snr", ) stn__snr.connect_fixed_probability( - probability=params["stn__snr.probability"], weights=1 + probability=params["stn__snr.probability"], weights=0 ) stn__gpe = Projection( pre=stn, @@ -139,7 +173,7 @@ def BGM_part_function(params): name="stn__gpe", ) stn__gpe.connect_fixed_probability( - probability=params["stn__gpe.probability"], weights=1 + probability=params["stn__gpe.probability"], weights=0 ) ### gpe proto output if params["general.more_complex"]: @@ -150,7 +184,7 @@ def BGM_part_function(params): # name="gpe__stn", # ) # gpe__stn.connect_fixed_probability( - # 
probability=params["gpe__stn.probability"], weights=1 + # probability=params["gpe__stn.probability"], weights=0 # ) gpe__snr = Projection( pre=gpe, @@ -159,7 +193,7 @@ def BGM_part_function(params): name="gpe__snr", ) gpe__snr.connect_fixed_probability( - probability=params["gpe__snr.probability"], weights=1 + probability=params["gpe__snr.probability"], weights=0 ) ### snr output snr__thal = Projection( @@ -169,7 +203,7 @@ def BGM_part_function(params): name="snr__thal", ) snr__thal.connect_fixed_probability( - probability=params["snr__thal.probability"], weights=1 + probability=params["snr__thal.probability"], weights=0 ) if params["general.more_complex"]: snr__snr = Projection( @@ -179,7 +213,7 @@ def BGM_part_function(params): name="snr__snr", ) snr__snr.connect_fixed_probability( - probability=params["snr__snr.probability"], weights=1 + probability=params["snr__snr.probability"], weights=0 ) @@ -191,9 +225,11 @@ def BGM_part_function(params): params["cor_exc.size"] = 100 params["cor_exc.tau_up"] = 10 params["cor_exc.tau_down"] = 30 + params["cor_exc.rates"] = 15 params["cor_inh.size"] = 100 params["cor_inh.tau_up"] = 10 params["cor_inh.tau_down"] = 30 + params["cor_inh.rates"] = 30 ### BG Populations params["snr.size"] = 100 params["snr.a"] = 0.005 @@ -255,7 +291,7 @@ def BGM_part_function(params): params["snr__snr.probability"] = 0.6 ### create model which should be configurated - ### if you create or compile has no effect + ### create or compile have no effect setup(dt=0.1) model = generate_model( model_creation_function=BGM_part_function, @@ -265,8 +301,59 @@ def BGM_part_function(params): do_create=False, ) + # model.create() + # mon = CompNeuroMonitors( + # { + # pop_name: [ + # "I_noise", + # "I_signal", + # "I", + # "power_I_signal", + # "spike", + # ] + # for pop_name in ["stn"] + # } + # ) + # mon.start() + + # simulate(500) + # get_population("stn").I_app = 10 + # simulate(500) + + # recordings = mon.get_recordings() + # recording_times = 
mon.get_recording_times() + + # PlotRecordings( + # recordings=recordings, + # recording_times=recording_times, + # chunk=0, + # shape=(5, 1), + # plan={ + # "position": list(range(1, 5 + 1)), + # "compartment": ["stn"] * 5, + # "variable": [ + # "I_noise", + # "I_signal", + # "I", + # "power_I_signal", + # "spike", + # ], + # "format": [ + # "line", + # "line", + # "line", + # "line", + # "hybrid", + # ], + # }, + # figname="model_recordings_noise.png", + # # time_lim=(495, 505), + # ) + # quit() + ### model configurator should get target resting-state firing rates for the ### model populations one wants to configure and their afferents as input + ### TODO allow for target range target_firing_rate_dict = { "cor_exc": 15, "cor_inh": 30, @@ -278,66 +365,65 @@ def BGM_part_function(params): do_not_config_list = ["cor_exc", "cor_inh"] ### initialize model_configurator - model_conf = model_configurator( - model, - target_firing_rate_dict, + model_conf = ModelConfigurator( + model=model, + target_firing_rate_dict=target_firing_rate_dict, + max_psp=0.7, do_not_config_list=do_not_config_list, print_guide=True, I_app_variable="I_app", - interpolation_grid_points=36, + cache=True, + clear_cache=False, + log_file="model_configurator.log", ) - ### obtain the maximum synaptic loads for the populations and the - ### maximum weights of their afferent projections - model_conf.get_max_syn(clear=False) - - ### now either set weights directly - ### or define synaptic load of populations - synaptic_load_dict = { - "stn": [0.3, 0.3], - "gpe": [0.4], - "snr": [0.5, 0.3], - "thal": [0.1], - } - ### and define the contributions of their afferent projections - synaptic_contribution_dict = {"snr": {"gaba": {"gpe__snr": 0.7, "snr__snr": 0.3}}} - synaptic_contribution_dict = model_conf.set_syn_load( - synaptic_load_dict, - synaptic_contribution_dict, + ### set syn load + model_conf.set_syn_load( + syn_load_dict={ + "stn": {"ampa": 1.0, "gaba": 1.0}, + "snr": {"ampa": 1.0, "gaba": 1.0}, + 
"gpe": {"ampa": 1.0}, + "thal": {"gaba": 1.0}, + }, + syn_contribution_dict={ + "stn": {"ampa": {"cor_exc__stn": 1.0}, "gaba": {"cor_inh__stn": 1.0}}, + "snr": { + "ampa": {"stn__snr": 1.0}, + "gaba": {"gpe__snr": 1.0, "snr__snr": 1.0}, + }, + "gpe": {"ampa": {"stn__gpe": 1.0}}, + "thal": {"gaba": {"snr__thal": 1.0}}, + }, ) + print(model_conf._weight_dicts.weight_dict) + # ### or set weights + # model_conf.set_weights( + # weight_dict={ + # "cor_exc__stn": 0.14017251511767667, + # "cor_inh__stn": 0.3185158233680059, + # "stn__snr": 0.1411802181516728, + # "gpe__snr": 0.3210042713120005, + # "snr__snr": 0.3210042713120005, + # "stn__gpe": 0.1411802181516728, + # "snr__thal": 1.169558816450918, + # } + # ) - ### after setting the weights i.e. the synaptic load/contributions - ### get the baseline currents - I_base_dict = model_conf.set_base(I_base_variable="base_mean") - print("user I_base:") + I_base_dict = model_conf.get_base() + print("I_base:") print(I_base_dict) - print("model cor_stn_weight:") - print(get_projection("cor_exc__stn").w) - for pop_name in model.populations: - if "cor" in pop_name: - continue - get_population(pop_name).rate_base_noise = 10 - get_population(pop_name).base_noise = max( - [ - model_conf.I_app_max_dict[pop_name] * 0.02, - abs(get_population(pop_name).base_mean[0]) * 0.02, - ] - ) - print(f"pop_name: {pop_name}") - print(f"base_mean: {get_population(pop_name).base_mean[0]}") - print(f"base_noise: {get_population(pop_name).base_noise[0]}") - print(f"rate_base_noise: {get_population(pop_name).rate_base_noise[0]}\n") + model_conf.set_base() ### do a test simulation mon = Monitors( { - "pop;cor_exc": ["spike"], - "pop;cor_inh": ["spike"], - "pop;stn": ["spike", "g_ampa", "g_gaba"], - "pop;gpe": ["spike", "g_ampa", "g_gaba"], - "pop;snr": ["spike", "g_ampa", "g_gaba"], - "pop;thal": ["spike", "g_ampa", "g_gaba"], + "cor_exc": ["spike"], + "cor_inh": ["spike"], + "stn": ["spike", "g_ampa", "g_gaba"], + "gpe": ["spike", "g_ampa", "g_gaba"], + 
"snr": ["spike", "g_ampa", "g_gaba"], + "thal": ["spike", "g_ampa", "g_gaba"], } ) get_population("cor_exc").rates = target_firing_rate_dict["cor_exc"] @@ -408,8 +494,3 @@ def BGM_part_function(params): "15;thal;g_gaba;line", ], ) - - # TODO - # it seems that there are problems with snr - # it even gets wotse if I deactivate the lateral inhib - # maybe check which g_ampa, g_gaba are expected based on weights and which actually are present diff --git a/src/CompNeuroPy/examples/model_configurator/reduce_model.py b/src/CompNeuroPy/examples/model_configurator/reduce_model.py new file mode 100644 index 0000000..ee0962d --- /dev/null +++ b/src/CompNeuroPy/examples/model_configurator/reduce_model.py @@ -0,0 +1,710 @@ +from ANNarchy import ( + Neuron, + Population, + dt, + add_function, + Projection, + get_population, + Constant, + Synapse, + projections, + populations, + get_projection, + Binomial, + CurrentInjection, +) +from ANNarchy.core import ConnectorMethods +import numpy as np +from CompNeuroPy import model_functions as mf +from CompNeuroPy.generate_model import generate_model +from typingchecker import check_types +import inspect +from CompNeuroPy import CompNeuroModel +import sympy as sp + +_connector_methods_dict = { + "One-to-One": ConnectorMethods.connect_one_to_one, + "All-to-All": ConnectorMethods.connect_all_to_all, + "Gaussian": ConnectorMethods.connect_gaussian, + "Difference-of-Gaussian": ConnectorMethods.connect_dog, + "Random": ConnectorMethods.connect_fixed_probability, + "Random Convergent": ConnectorMethods.connect_fixed_number_pre, + "Random Divergent": ConnectorMethods.connect_fixed_number_post, + "User-defined": ConnectorMethods.connect_with_func, + "MatrixMarket": ConnectorMethods.connect_from_matrix_market, + "Connectivity matrix": ConnectorMethods.connect_from_matrix, + "Sparse connectivity matrix": ConnectorMethods.connect_from_sparse, + "From File": ConnectorMethods.connect_from_file, +} + + +class _CreateReducedModel: + """ + Class to 
create a reduced model from the original model. It is accessible via the +    attribute model_reduced. + +    Attributes: +        model_reduced (CompNeuroModel): +            Reduced model, created but not compiled +    """ + +    def __init__( +        self, +        model: CompNeuroModel, +        reduced_size: int, +        do_create: bool = False, +        do_compile: bool = False, +        verbose: bool = False, +    ) -> None: +        """ +        Prepare model for reduction. + +        Args: +            model (CompNeuroModel): +                Model to be reduced +            reduced_size (int): +                Size of the reduced populations +        """ +        self.reduced_size = reduced_size +        self.verbose = verbose +        ### check if model is already created but not compiled, if not clear annarchy +        ### and create it +        if not model.created or model.compiled: +            mf.cnp_clear(functions=False, neurons=True, synapses=True, constants=False) +            model.create(do_compile=False) + +        ### analyze model to be able to recreate it +        self.analyze_model() + +        ### clear model +        mf.cnp_clear(functions=False, neurons=True, synapses=True, constants=False) + +        ### recreate model with reduced populations and projections +        self.model_reduced = CompNeuroModel( +            model_creation_function=self.recreate_model, +            name=f"{model.name}_reduced", +            description=f"{model.description}\nWith reduced populations and projections.", +            do_create=do_create, +            do_compile=do_compile, +            compile_folder_name=f"{model.compile_folder_name}_reduced", +        ) + +    def analyze_model( +        self, +    ): +        """ +        Analyze the model to be able to recreate it. +        """ +        ### get all population and projection names +        ( +            self.population_name_list, +            self.projection_name_list, +        ) = self.get_all_population_and_projection_names() + +        ### get population info (eq, params etc.) 
+        ( +            self.neuron_model_attr_dict, +            self.neuron_model_init_parameter_dict, +            self.pop_init_parameter_dict, +        ) = self.analyze_populations() + +        ### get projection info +        ( +            self.proj_init_parameter_dict, +            self.synapse_init_parameter_dict, +            self.synapse_model_attr_dict, +            self.connector_function_dict, +            self.connector_function_parameter_dict, +            self.pre_post_pop_name_dict, +        ) = self.analyze_projections() + +    def get_all_population_and_projection_names(self): +        """ +        Get all population and projection names. + +        Returns: +            population_name_list (list): +                List of all population names +            projection_name_list (list): +                List of all projection names +        """ +        population_name_list: list[str] = [pop.name for pop in populations()] +        projection_name_list: list[str] = [proj.name for proj in projections()] + +        return population_name_list, projection_name_list + +    def analyze_populations(self): +        """ +        Get info of each population +        """ +        ### values of the parameters and variables of the population's neurons, keys are the names of parameters and variables +        neuron_model_attr_dict: dict[str, dict] = {} +        ### parameters of the __init__ function of the Neuron class +        neuron_model_init_parameter_dict: dict[str, dict] = {} +        ### parameters of the __init__ function of the Population class +        pop_init_parameter_dict: dict[str, dict] = {} + +        ### for loop over all populations +        for pop_name in self.population_name_list: +            pop: Population = get_population(pop_name) +            ### get the neuron model attributes (parameters/variables) +            neuron_model_attr_dict[pop.name] = pop.init +            ### get a dict of all parameters of the __init__ function of the Neuron +            init_params = inspect.signature(Neuron.__init__).parameters +            neuron_model_init_parameter_dict[pop.name] = { +                param: getattr(pop.neuron_type, param) +                for param in init_params +                if param != "self" +            } +            ### get a dict of all parameters of the __init__ function of the Population +            init_params = inspect.signature(Population.__init__).parameters + 
pop_init_parameter_dict[pop.name] = { +                param: getattr(pop, param) +                for param in init_params +                if param != "self" and param != "storage_order" and param != "copied" +            } + +        return ( +            neuron_model_attr_dict, +            neuron_model_init_parameter_dict, +            pop_init_parameter_dict, +        ) + +    def analyze_projections(self): +        """ +        Get info of each projection +        """ +        ### parameters of the __init__ function of the Projection class +        proj_init_parameter_dict: dict[str, dict] = {} +        ### parameters of the __init__ function of the Synapse class +        synapse_init_parameter_dict: dict[str, dict] = {} +        ### values of the parameters and variables of the synapse, keys are the names of parameters and variables +        synapse_model_attr_dict: dict[str, dict] = {} +        ### connector functions of the projections +        connector_function_dict: dict = {} +        ### parameters of the connector functions of the projections +        connector_function_parameter_dict: dict = {} +        ### names of pre- and post-synaptic populations of the projections +        pre_post_pop_name_dict: dict[str, tuple] = {} + +        ### loop over all projections +        for proj_name in self.projection_name_list: +            proj: Projection = get_projection(proj_name) +            ### get the synapse model attributes (parameters/variables) +            synapse_model_attr_dict[proj.name] = proj.init +            ### get a dict of all parameters of the __init__ function of the Synapse +            init_params = inspect.signature(Synapse.__init__).parameters +            synapse_init_parameter_dict[proj.name] = { +                param: getattr(proj.synapse_type, param) +                for param in init_params +                if param != "self" +            } +            ### get a dict of all parameters of the __init__ function of the Projection +            init_params = inspect.signature(Projection.__init__).parameters +            proj_init_parameter_dict[proj_name] = { +                param: getattr(proj, param) +                for param in init_params +                if param != "self" and param != "synapse" and param != "copied" +            } + +            ### get the connector function of the projection and its parameters +            ### raise errors for not supported connector 
functions + if ( + proj.connector_name == "User-defined" + or proj.connector_name == "MatrixMarket" + or proj.connector_name == "From File" + ): + raise ValueError( + f"Connector function '{_connector_methods_dict[proj.connector_name].__name__}' not supported yet" + ) + + ### get the connector function + connector_function_dict[proj.name] = _connector_methods_dict[ + proj.connector_name + ] + + ### get the parameters of the connector function + connector_function_parameter_dict[proj.name] = ( + self.get_connector_parameters(proj) + ) + + ### get the names of the pre- and post-synaptic populations + pre_post_pop_name_dict[proj.name] = (proj.pre.name, proj.post.name) + + return ( + proj_init_parameter_dict, + synapse_init_parameter_dict, + synapse_model_attr_dict, + connector_function_dict, + connector_function_parameter_dict, + pre_post_pop_name_dict, + ) + + def get_connector_parameters(self, proj: Projection): + """ + Get the parameters of the given connector function. + + Args: + proj (Projection): + Projection for which the connector parameters are needed + + Returns: + connector_parameters_dict (dict): + Parameters of the given connector function + """ + + if proj.connector_name == "One-to-One": + return { + "weights": proj._connection_args[0], + "delays": proj._connection_args[1], + "force_multiple_weights": not (proj._single_constant_weight), + "storage_format": proj._storage_format, + "storage_order": proj._storage_order, + } + elif proj.connector_name == "All-to-All": + return { + "weights": proj._connection_args[0], + "delays": proj._connection_args[1], + "allow_self_connections": proj._connection_args[2], + "force_multiple_weights": not (proj._single_constant_weight), + "storage_format": proj._storage_format, + "storage_order": proj._storage_order, + } + elif proj.connector_name == "Gaussian": + return { + "amp": proj._connection_args[0], + "sigma": proj._connection_args[1], + "delays": proj._connection_args[2], + "limit": proj._connection_args[3], + 
"allow_self_connections": proj._connection_args[4], + "storage_format": proj._storage_format, + } + elif proj.connector_name == "Difference-of-Gaussian": + return { + "amp_pos": proj._connection_args[0], + "sigma_pos": proj._connection_args[1], + "amp_neg": proj._connection_args[2], + "sigma_neg": proj._connection_args[3], + "delays": proj._connection_args[4], + "limit": proj._connection_args[5], + "allow_self_connections": proj._connection_args[6], + "storage_format": proj._storage_format, + } + elif proj.connector_name == "Random": + return { + "probability": proj._connection_args[0], + "weights": proj._connection_args[1], + "delays": proj._connection_args[2], + "allow_self_connections": proj._connection_args[3], + "force_multiple_weights": not (proj._single_constant_weight), + "storage_format": proj._storage_format, + "storage_order": proj._storage_order, + } + elif proj.connector_name == "Random Convergent": + return { + "number": proj._connection_args[0], + "weights": proj._connection_args[1], + "delays": proj._connection_args[2], + "allow_self_connections": proj._connection_args[3], + "force_multiple_weights": not (proj._single_constant_weight), + "storage_format": proj._storage_format, + "storage_order": proj._storage_order, + } + elif proj.connector_name == "Random Divergent": + return { + "number": proj._connection_args[0], + "weights": proj._connection_args[1], + "delays": proj._connection_args[2], + "allow_self_connections": proj._connection_args[3], + "force_multiple_weights": not (proj._single_constant_weight), + "storage_format": proj._storage_format, + "storage_order": proj._storage_order, + } + elif proj.connector_name == "Connectivity matrix": + return { + "weights": proj._connection_args[0], + "delays": proj._connection_args[1], + "pre_post": proj._connection_args[2], + "storage_format": proj._storage_format, + "storage_order": proj._storage_order, + } + elif proj.connector_name == "Sparse connectivity matrix": + return { + "weights": 
proj._connection_args[0], + "delays": proj._connection_args[1], + "storage_format": proj._storage_format, + "storage_order": proj._storage_order, + } + + def recreate_model(self): + """ + Recreates the model with reduced populations and projections. + """ + ### 1st for each population create a reduced population + for pop_name in self.population_name_list: + self.create_reduced_pop(pop_name) + ### 2nd for each population which is a presynaptic population, create a spikes collecting aux population + for pop_name in self.population_name_list: + self.create_spike_collecting_aux_pop(pop_name) + ## 3rd for each population which has afferents create a population for incoming spikes for each target type + for pop_name in self.population_name_list: + self.create_conductance_aux_pop(pop_name, target="ampa") + self.create_conductance_aux_pop(pop_name, target="gaba") + + def create_reduced_pop(self, pop_name: str): + """ """ + if self.verbose: + print(f"create_reduced_pop for {pop_name}") + ### 1st check how the population is connected + is_presynaptic, is_postsynaptic, ampa, gaba = self.how_pop_is_connected( + pop_name + ) + + ### 2nd recreate neuron model + ### get the stored parameters of the __init__ function of the Neuron + neuron_model_init_parameter_dict = self.neuron_model_init_parameter_dict[ + pop_name + ].copy() + ### if the population is a postsynaptic population adjust the synaptic + ### conductance equations + if is_postsynaptic: + neuron_model_init_parameter_dict = self.adjust_neuron_model( + neuron_model_init_parameter_dict, ampa=ampa, gaba=gaba + ) + ### create the new neuron model + neuron_model_new = Neuron(**neuron_model_init_parameter_dict) + + ### 3rd recreate the population + ### get the stored parameters of the __init__ function of the Population + pop_init_parameter_dict = self.pop_init_parameter_dict[pop_name].copy() + ### replace the neuron model with the new neuron model + pop_init_parameter_dict["neuron"] = neuron_model_new + ### replace the size 
with the reduced size (if reduced size is smaller than the + ### original size) + ### TODO add model requirements somewhere, here requirements = geometry = int + pop_init_parameter_dict["geometry"] = min( + [pop_init_parameter_dict["geometry"][0], self.reduced_size] + ) + ### append _reduce to the name + pop_init_parameter_dict["name"] = f"{pop_name}_reduced" + ### create the new population + pop_new = Population(**pop_init_parameter_dict) + + ### 4th set the parameters and variables of the population's neurons + ### get the stored parameters and variables + neuron_model_attr_dict = self.neuron_model_attr_dict[pop_name] + ### set the parameters and variables + for attr_name, attr_val in neuron_model_attr_dict.items(): + setattr(pop_new, attr_name, attr_val) + + def create_spike_collecting_aux_pop(self, pop_name: str): + """ """ + ### get all efferent projections + efferent_projection_list = [ + proj_name + for proj_name, pre_post_pop_name_dict in self.pre_post_pop_name_dict.items() + if pre_post_pop_name_dict[0] == pop_name + ] + ### check if pop has efferent projections + if len(efferent_projection_list) == 0: + return + if self.verbose: + print(f"create_spike_collecting_aux_pop for {pop_name}") + ### create the spike collecting population + pop_aux = Population( + 1, + neuron=self.SpikeProbCalcNeuron( + reduced_size=min( + [ + self.pop_init_parameter_dict[pop_name]["geometry"][0], + self.reduced_size, + ] + ) + ), + name=f"{pop_name}_spike_collecting_aux", + ) + ### create the projection from reduced pop to spike collecting aux pop + proj = Projection( + pre=get_population(pop_name + "_reduced"), + post=pop_aux, + target="ampa", + name=f"proj_{pop_name}_spike_collecting_aux", + ) + proj.connect_all_to_all(weights=1) + + def create_conductance_aux_pop(self, pop_name: str, target: str): + """ """ + ### get all afferent projections + afferent_projection_list = [ + proj_name + for proj_name, pre_post_pop_name_dict in self.pre_post_pop_name_dict.items() + if 
pre_post_pop_name_dict[1] == pop_name + ] + ### check if pop has afferent projections + if len(afferent_projection_list) == 0: + return + ### get all afferent projections with target type + afferent_target_projection_list = [ + proj_name + for proj_name in afferent_projection_list + if self.proj_init_parameter_dict[proj_name]["target"] == target + ] + ### check if there are afferent projections with target type + if len(afferent_target_projection_list) == 0: + return + if self.verbose: + print(f"create_conductance_aux_pop for {pop_name} target {target}") + ### get projection informations TODO in ReduceModel class weights and probs not global constants + ### TODO somewhere add model requirements, here requirements = geometry = int and connection = fixed_probability i.e. random (with weights and probability) + projection_dict = { + proj_name: { + "pre_size": self.pop_init_parameter_dict[ + self.pre_post_pop_name_dict[proj_name][0] + ]["geometry"][0], + "connection_prob": self.connector_function_parameter_dict[proj_name][ + "probability" + ], + "weights": self.connector_function_parameter_dict[proj_name]["weights"], + "pre_name": self.pre_post_pop_name_dict[proj_name][0], + } + for proj_name in afferent_target_projection_list + } + ### create the conductance calculating population + pop_aux = Population( + self.pop_init_parameter_dict[pop_name]["geometry"][0], + neuron=self.InputCalcNeuron(projection_dict=projection_dict), + name=f"{pop_name}_{target}_aux", + ) + ### set number of synapses parameter for each projection + for proj_name, vals in projection_dict.items(): + number_synapses = Binomial( + n=vals["pre_size"], p=vals["connection_prob"] + ).get_values(self.pop_init_parameter_dict[pop_name]["geometry"][0]) + setattr(pop_aux, f"number_synapses_{proj_name}", number_synapses) + ### create the "current injection" projection from conductance calculating + ### population to the reduced post population + proj = CurrentInjection( + pre=pop_aux, + 
post=get_population(f"{pop_name}_reduced"), + target=f"incomingaux{target}", + name=f"proj_{pop_name}_{target}_aux", + ) + proj.connect_current() + ### create projection from spike_prob calculating aux neurons of presynaptic + ### populations to conductance calculating aux population + for proj_name, vals in projection_dict.items(): + pre_pop_name = vals["pre_name"] + pre_pop_spike_collecting_aux = get_population( + f"{pre_pop_name}_spike_collecting_aux" + ) + proj = Projection( + pre=pre_pop_spike_collecting_aux, + post=pop_aux, + target=f"spikeprob_{pre_pop_name}", + name=f"{proj_name}_spike_collecting_to_conductance", + ) + proj.connect_all_to_all(weights=1) + + def how_pop_is_connected(self, pop_name): + """ + Check how a population is connected. If the population is a postsynaptic and/or + presynaptic population, check if it gets ampa and/or gaba input. + + Args: + pop_name (str): + Name of the population to check + + Returns: + is_presynaptic (bool): + True if the population is a presynaptic population, False otherwise + is_postsynaptic (bool): + True if the population is a postsynaptic population, False otherwise + ampa (bool): + True if the population gets ampa input, False otherwise + gaba (bool): + True if the population gets gaba input, False otherwise + """ + is_presynaptic = False + is_postsynaptic = False + ampa = False + gaba = False + ### loop over all projections + for proj_name in self.projection_name_list: + ### check if the population is a presynaptic population in any projection + if self.pre_post_pop_name_dict[proj_name][0] == pop_name: + is_presynaptic = True + ### check if the population is a postsynaptic population in any projection + if self.pre_post_pop_name_dict[proj_name][1] == pop_name: + is_postsynaptic = True + ### check if the projection target is ampa or gaba + if self.proj_init_parameter_dict[proj_name]["target"] == "ampa": + ampa = True + elif self.proj_init_parameter_dict[proj_name]["target"] == "gaba": + gaba = True + + return 
is_presynaptic, is_postsynaptic, ampa, gaba + +    def adjust_neuron_model( +        self, neuron_model_init_parameter_dict, ampa=True, gaba=True +    ): +        """ +        Add the new synaptic input coming from the auxiliary population to the neuron +        model. + +        Args: +            neuron_model_init_parameter_dict (dict): +                Dictionary with the parameters of the __init__ function of the Neuron +            ampa (bool): +                True if the population gets ampa input and therefore the ampa conductance +                needs to be adjusted, False otherwise +            gaba (bool): +                True if the population gets gaba input and therefore the gaba conductance +                needs to be adjusted, False otherwise + +        Returns: +            neuron_model_init_parameter_dict (dict): +                Dictionary with the parameters of the __init__ function of the Neuron +                with the auxiliary population input added +        """ +        ### 1st adjust the conductance equations +        ### get the equations of the neuron model as a list of strings +        equations_line_split_list = str( +            neuron_model_init_parameter_dict["equations"] +        ).splitlines() +        ### search for equation with dg_ampa/dt and dg_gaba/dt +        for line_idx, line in enumerate(equations_line_split_list): +            if ( +                self.get_line_is_dvardt(line, var_name="g_ampa", tau_name="tau_ampa") +                and ampa +            ): +                ### add " + tau_ampa*g_incomingauxampa/dt" +                ### TODO add model requirements somewhere, here requirements = tau_ampa * dg_ampa/dt = -g_ampa +                equations_line_split_list[line_idx] = ( +                    "tau_ampa*dg_ampa/dt = -g_ampa + tau_ampa*g_incomingauxampa/dt" +                ) +            if ( +                self.get_line_is_dvardt(line, var_name="g_gaba", tau_name="tau_gaba") +                and gaba +            ): +                ### add " + tau_gaba*g_incomingauxgaba/dt" +                ### TODO add model requirements somewhere, here requirements = tau_gaba * dg_gaba/dt = -g_gaba +                equations_line_split_list[line_idx] = ( +                    "tau_gaba*dg_gaba/dt = -g_gaba + tau_gaba*g_incomingauxgaba/dt" +                ) +        ### join list to a string +        neuron_model_init_parameter_dict["equations"] = "\n".join( +            equations_line_split_list +        ) + +        ### 2nd extend description + 
neuron_model_init_parameter_dict["description"] = ( +            f"{neuron_model_init_parameter_dict['description']}\nWith incoming auxillary population input implemented." +        ) + +        return neuron_model_init_parameter_dict + +    def get_line_is_dvardt(self, line: str, var_name: str, tau_name: str): +        """ +        Check if an equation string has the form "tau*dvar/dt = -var". + +        Args: +            line (str): +                Equation string +            var_name (str): +                Name of the variable +            tau_name (str): +                Name of the time constant + +        Returns: +            is_solution_correct (bool): +                True if the equation is as expected, False otherwise +        """ +        if var_name not in line: +            return False + +        # Define the variables +        var, _, _, _ = sp.symbols(f"{var_name} d{var_name} dt {tau_name}") + +        # Given equation as a string +        equation_str = line + +        # Parse the equation string +        lhs, rhs = equation_str.split("=") +        lhs = sp.sympify(lhs) +        rhs = sp.sympify(rhs) + +        # Form the equation +        equation = sp.Eq(lhs, rhs) + +        # Solve the equation for var +        try: +            solution = sp.solve(equation, var) +        except: +            ### equation is not solvable with variables means it is not as expected +            return False + +        # Given solution to compare +        expected_solution_str = f"-{tau_name}*d{var_name}/dt" +        expected_solution = sp.sympify(expected_solution_str) + +        # Check if the solution is as expected +        is_solution_correct = solution[0] == expected_solution + +        return is_solution_correct + +    class SpikeProbCalcNeuron(Neuron): +        def __init__(self, reduced_size=1): +            parameters = f""" +                reduced_size = {reduced_size} : population +                tau= 1.0 : population +            """ +            equations = """ +                tau*dr/dt = g_ampa/reduced_size - r +                g_ampa = 0 +            """ +            super().__init__(parameters=parameters, equations=equations) + +    class InputCalcNeuron(Neuron): +        def __init__(self, projection_dict): +            """ +            These neurons get the spike probabilities of the pre neurons and calculate the +            incoming spikes for each projection. 
It accumulates the incoming spikes of all + projections (of the same target type) and calculates the conductance increase + for the post neuron. + + Args: + projection_dict (dict): + keys: names of afferent projections (of the same target type) + values: dict with keys "weights", "pre_name" + """ + + ### create parameters + parameters = [ + f""" + number_synapses_{proj_name} = 0 + weights_{proj_name} = {vals['weights']} + """ + for proj_name, vals in projection_dict.items() + ] + parameters = "\n".join(parameters) + + ### create equations + equations = [ + f""" + incoming_spikes_{proj_name} = number_synapses_{proj_name} * sum(spikeprob_{vals['pre_name']}) + Normal(0, 1)*sqrt(number_synapses_{proj_name} * sum(spikeprob_{vals['pre_name']}) * (1 - sum(spikeprob_{vals['pre_name']}))) : min=0, max=number_synapses_{proj_name} + """ + for proj_name, vals in projection_dict.items() + ] + equations = "\n".join(equations) + sum_of_conductance_increase = ( + "r = " + + "".join( + [ + f"incoming_spikes_{proj_name} * weights_{proj_name} + " + for proj_name in projection_dict.keys() + ] + )[:-3] + ) + equations = equations + "\n" + sum_of_conductance_increase + + super().__init__(parameters=parameters, equations=equations) diff --git a/src/CompNeuroPy/examples/model_configurator/test.py b/src/CompNeuroPy/examples/model_configurator/test.py new file mode 100644 index 0000000..dac9db5 --- /dev/null +++ b/src/CompNeuroPy/examples/model_configurator/test.py @@ -0,0 +1,266 @@ +from ANNarchy import ( + Neuron, + Population, + compile, + get_time, + setup, + dt, + Projection, + Synapse, + Binomial, + get_projection, + get_population, + CurrentInjection, + simulate, + projections, + populations, +) +from CompNeuroPy import ( + CompNeuroMonitors, + PlotRecordings, + interactive_plot, + timing_decorator, + annarchy_compiled, + CompNeuroModel, +) +from CompNeuroPy.neuron_models import PoissonNeuron +import numpy as np +from sklearn.neighbors import KernelDensity +import matplotlib.pyplot 
as plt +from scipy.stats import binom +from functools import wraps +import time +from collections.abc import Iterable +from tqdm import tqdm +from math import ceil +from CompNeuroPy.examples.model_configurator.reduce_model import _CreateReducedModel + +setup(dt=0.1) + + +CONNECTION_PROB = 0.01 +WEIGHTS = 0.1 +POP_PRE_SIZE = 1000 +POP_POST_SIZE = 100 +POP_REDUCED_SIZE = 100 + + +neuron_izh = Neuron( + parameters=""" + C = 100.0 : population + k = 0.7 : population + v_r = -60.0 : population + v_t = -40.0 : population + a = 0.03 : population + b = -2.0 : population + c = -50.0 : population + d = 100.0 : population + v_peak = 0.0 : population + I_app = 0.0 + E_ampa = 0.0 : population + tau_ampa = 10.0 : population + """, + equations=""" + ### synaptic current + tau_ampa * dg_ampa/dt = -g_ampa + I_ampa = -neg(g_ampa*(v - E_ampa)) + ### Izhikevich spiking + I_v = I_app + I_ampa + C * dv/dt = k*(v - v_r)*(v - v_t) - u + I_v + du/dt = a*(b*(v - v_r) - u) + """, + spike="v >= v_peak", + reset=""" + v = c + u = u + d + """, +) + + +neuron_izh_aux = Neuron( + parameters=""" + C = 100.0 : population + k = 0.7 : population + v_r = -60.0 : population + v_t = -40.0 : population + a = 0.03 : population + b = -2.0 : population + c = -50.0 : population + d = 100.0 : population + v_peak = 0.0 : population + I_app = 0.0 + E_ampa = 0.0 : population + tau_ampa = 10.0 : population + """, + equations=""" + ### synaptic current + tau_ampa * dg_ampa/dt = -g_ampa + tau_ampa*g_ampaaux/dt + I_ampa = -neg(g_ampa*(v - E_ampa)) + ### Izhikevich spiking + I_v = I_app + I_ampa + C * dv/dt = k*(v - v_r)*(v - v_t) - u + I_v + du/dt = a*(b*(v - v_r) - u) + """, + spike="v >= v_peak", + reset=""" + v = c + u = u + d + """, +) + + +def create_model(): + ### create not reduced model + ### pre + pop_pre1 = Population( + POP_PRE_SIZE, neuron=PoissonNeuron(rates=10.0), name="pop_pre1" + ) + pop_pre2 = Population( + POP_PRE_SIZE, neuron=PoissonNeuron(rates=10.0), name="pop_pre2" + ) + ### post + pop_post = 
Population(POP_POST_SIZE, neuron=neuron_izh, name="pop_post") + ### pre to post + proj_pre1__post = Projection( + pre=pop_pre1, post=pop_post, target="ampa", name="proj_pre1__post" + ) + proj_pre1__post.connect_fixed_probability( + weights=WEIGHTS, probability=CONNECTION_PROB + ) + proj_pre2__post = Projection( + pre=pop_pre2, post=pop_post, target="ampa", name="proj_pre2__post" + ) + proj_pre2__post.connect_fixed_probability( + weights=WEIGHTS, probability=CONNECTION_PROB + ) + + +if __name__ == "__main__": + ### run normal model + print("normal model") + ### create model + model = CompNeuroModel(model_creation_function=create_model) + ### create monitors + mon_dict = { + "pop_pre1": ["spike"], + "pop_pre2": ["spike"], + "pop_post": ["spike", "g_ampa"], + } + monitors = CompNeuroMonitors( + mon_dict=mon_dict, + ) + monitors.start() + ### run simulation + start = time.time() + simulate(50.0) + get_population("pop_pre1").rates = 1000.0 + simulate(50.0) + get_population("pop_pre2").rates = 1000.0 + simulate(100.0) + print("simulate time:", time.time() - start) + recordings_normal = monitors.get_recordings() + recording_times_normal = monitors.get_recording_times() + + ### run reduced model + print("reduced model") + ### create model + model = _CreateReducedModel( + model=model, reduced_size=POP_REDUCED_SIZE + ).model_reduced + model.compile(warn=False) + ### create monitors + mon_dict = { + "pop_pre1_reduced": ["spike"], + "pop_pre2_reduced": ["spike"], + "pop_post_reduced": ["spike", "g_ampa"], + "pop_pre1_spike_collecting_aux": ["r"], + "pop_pre2_spike_collecting_aux": ["r"], + "pop_post_ampa_aux": [ + "incoming_spikes_proj_pre1__post", + "incoming_spikes_proj_pre2__post", + "r", + ], + } + monitors = CompNeuroMonitors( + mon_dict=mon_dict, + ) + monitors.start() + ### run simulation + start = time.time() + simulate(50.0) + get_population("pop_pre1_reduced").rates = 1000.0 + simulate(50.0) + get_population("pop_pre2_reduced").rates = 1000.0 + simulate(100.0) + 
print("simulate time:", time.time() - start) + recordings_reduced = monitors.get_recordings() + recording_times_reduced = monitors.get_recording_times() + + ### plot + PlotRecordings( + figname="test_normal.png", + recordings=recordings_normal, + recording_times=recording_times_normal, + shape=(4, 3), + plan={ + "position": [1, 4, 7, 10], + "compartment": [ + "pop_pre1", + "pop_pre2", + "pop_post", + "pop_post", + ], + "variable": [ + "spike", + "spike", + "spike", + "g_ampa", + ], + "format": [ + "hybrid", + "hybrid", + "hybrid", + "line_mean", + ], + }, + ) + + PlotRecordings( + figname="test_reduced.png", + recordings=recordings_reduced, + recording_times=recording_times_reduced, + shape=(4, 3), + plan={ + "position": [2, 5, 8, 11, 3, 6, 9, 12], + "compartment": [ + "pop_pre1_reduced", + "pop_pre2_reduced", + "pop_post_reduced", + "pop_post_reduced", + "pop_pre1_spike_collecting_aux", + "pop_pre2_spike_collecting_aux", + "pop_post_ampa_aux", + "pop_post_ampa_aux", + ], + "variable": [ + "spike", + "spike", + "spike", + "g_ampa", + "r", + "r", + "incoming_spikes_proj_pre1__post", + "incoming_spikes_proj_pre2__post", + ], + "format": [ + "hybrid", + "hybrid", + "hybrid", + "line_mean", + "line_mean", + "line_mean", + "line_mean", + "line_mean", + ], + }, + ) diff --git a/src/CompNeuroPy/examples/model_configurator/test2.py b/src/CompNeuroPy/examples/model_configurator/test2.py new file mode 100644 index 0000000..7eddd1e --- /dev/null +++ b/src/CompNeuroPy/examples/model_configurator/test2.py @@ -0,0 +1,95 @@ +import numpy as np +import matplotlib.pyplot as plt +import scipy.stats as stats + +# Parameters +n = 15 # number of trials +p = 0.95 # probability of success +N = 10000 # number of samples + +# Generate data samples +binomial_sample = np.random.binomial(n, p, N) +mean = n * p +std_dev = np.sqrt(n * p * (1 - p)) +normal_sample = np.random.normal(mean, std_dev, N) + +### scale normal sample above mean and below mean +normal_sample_original = 
normal_sample.copy() +normal_sample[normal_sample_original >= mean] = ( + normal_sample_original[normal_sample_original >= mean] * 1.1 +) +normal_sample[normal_sample_original < mean] = ( + normal_sample_original[normal_sample_original < mean] * 0.9 +) + +### round and clip the normal sample +normal_sample = np.round(normal_sample) +normal_sample[normal_sample < 0] = 0 +normal_sample[normal_sample > n] = n + + +# Statistical comparison +# Calculate descriptive statistics +binomial_mean = np.mean(binomial_sample) +binomial_std = np.std(binomial_sample) +normal_mean = np.mean(normal_sample) +normal_std = np.std(normal_sample) + +print(f"Binomial Sample Mean: {binomial_mean}, Standard Deviation: {binomial_std}") +print(f"Normal Sample Mean: {normal_mean}, Standard Deviation: {normal_std}") + +# Perform a Kolmogorov-Smirnov test +ks_statistic, p_value = stats.ks_2samp(binomial_sample, normal_sample) +print(f"KS Statistic: {ks_statistic}, P-value: {p_value}") + +# Interpretation +if p_value > 0.05: + print("The two samples are similar (fail to reject H0).") +else: + print("The two samples are different (reject H0).") + + +# sort both samples and calculate the root mean squared difference +binomial_sample.sort() +normal_sample.sort() +rmsd = np.sqrt(np.mean((binomial_sample - normal_sample) ** 2)) +print(f"Root Mean Squared Difference: {rmsd}") + + +# Visual comparison +plt.figure(figsize=(12, 6)) + +# Histogram of binomial sample +plt.subplot(1, 2, 1) +plt.hist( + binomial_sample, + bins=n + 1, + range=(-0.5, n + 0.5), + density=True, + alpha=0.6, + color="b", + label="Binomial", +) +plt.xlim(-0.5, n + 0.5) +plt.title("Binomial Distribution") +plt.xlabel("Value") +plt.ylabel("Frequency") + +# Histogram of normal sample +plt.subplot(1, 2, 2) +plt.hist( + normal_sample, + bins=n + 1, + range=(-0.5, n + 0.5), + density=True, + alpha=0.6, + color="r", + label="Normal", +) +plt.xlim(-0.5, n + 0.5) +plt.title("Normal Distribution") +plt.xlabel("Value") 
+plt.ylabel("Frequency") + +plt.tight_layout() +plt.show() diff --git a/src/CompNeuroPy/experiment.py b/src/CompNeuroPy/experiment.py index 234b826..952c0ce 100644 --- a/src/CompNeuroPy/experiment.py +++ b/src/CompNeuroPy/experiment.py @@ -1,6 +1,6 @@ from ANNarchy import reset from CompNeuroPy.monitors import RecordingTimes -from CompNeuroPy import CompNeuroMonitors +from CompNeuroPy.monitors import CompNeuroMonitors from CompNeuroPy import model_functions as mf from copy import deepcopy @@ -20,7 +20,7 @@ class CompNeuroExp: data (dict): dict for storing optional data - Examples: + Example: ```python from CompNeuroPy import CompNeuroExp from ANNarchy import simulate @@ -220,7 +220,3 @@ def run(self) -> _ResultsCl: the experiment by calling the results function of the CompNeuroExp class. """ ) - - -### old name for backward compatibility, TODO remove -Experiment = CompNeuroExp diff --git a/src/CompNeuroPy/extra_functions.py b/src/CompNeuroPy/extra_functions.py index 4d74904..269b8b4 100644 --- a/src/CompNeuroPy/extra_functions.py +++ b/src/CompNeuroPy/extra_functions.py @@ -22,15 +22,18 @@ from ANNarchy import Neuron, Population, simulate, setup, get_population from sympy import symbols, Symbol, solve, sympify, Eq, lambdify, factor from scipy.interpolate import griddata +from scipy.optimize import brentq import re from typingchecker import check_types import warnings import json from matplotlib.widgets import Slider -from matplotlib.gridspec import GridSpec from screeninfo import get_monitors import cmaes import efel +import time +import threading +from matplotlib.animation import FuncAnimation def print_df(df: pd.DataFrame | dict, **kwargs): @@ -54,7 +57,7 @@ def flatten_list(lst): Retuns flattened list Args: - lst (list of lists or mixed: values and lists): + lst (list of lists or mixed values and lists): List to be flattened Returns: @@ -104,7 +107,7 @@ def suppress_stdout(): """ Suppresses the print output of a function - Examples: + Example: ```python with 
suppress_stdout(): print("this will not be printed") @@ -122,7 +125,7 @@ def suppress_stdout(): def sci(nr): """ Rounds a number to a single decimal. - If number is smaller than 0 it is converted to scientific notation with 1 decimal. + If number is smaller than 1 it is converted to scientific notation with 1 decimal. Args: nr (float or int): @@ -142,10 +145,10 @@ def sci(nr): >>> sci(177.22) '177.2' """ - if af.get_number_of_zero_decimals(nr) == 0: + if nr >= 1: return str(round(nr, 1)) else: - return f"{nr*10**af.get_number_of_zero_decimals(nr):.1f}e-{af.get_number_of_zero_decimals(nr)}" + return f"{nr:.1e}" class Cmap: @@ -220,10 +223,6 @@ def __getattribute__(self, __name: str): return super().__getattribute__(__name) -### keep old name for compatibility -data_obj = _DataCl - - def create_cm(colors, name="my_cmap", N=256, gamma=1.0, vmin=0, vmax=1): """ Create a `LinearSegmentedColormap` from a list of colors. @@ -367,10 +366,6 @@ def __call__(self, X, alpha=None, bytes=False): return super().__call__(X, alpha, bytes) -### keep old name for compatibility -my_linear_cmap_obj = _LinearColormapClass - - class DecisionTree: """ Class to create a decision tree. @@ -469,10 +464,6 @@ def _get_path_prod_rec(self, node): return [path_str + "/" + node.name, prob * node.prob] -### keep old name for compatibility -decision_tree = DecisionTree - - class DecisionTreeNode: """ Class to create a node in a decision tree. @@ -536,10 +527,6 @@ def get_path_prod(self): return self.tree._get_path_prod_rec(self) -### keep old name for compatibility -node_cl = DecisionTreeNode - - def evaluate_expression_with_dict(expression, value_dict): """ Evaluate a mathematical expression using values from a dictionary. @@ -682,13 +669,52 @@ class DeapCma: """ Class to run the deap Covariance Matrix Adaptation Evolution Strategy optimization. 
+ Using the [CMAES](https://deap.readthedocs.io/en/master/api/algo.html#module-deap.cma) algorithm from [deap](https://github.com/deap/deap) + + * Fortin, F. A., De Rainville, F. M., Gardner, M. A. G., Parizeau, M., & Gagné, C. (2012). DEAP: Evolutionary algorithms made easy. The Journal of Machine Learning Research, 13(1), 2171-2175. [pdf](https://www.jmlr.org/papers/volume13/fortin12a/fortin12a.pdf) + Attributes: deap_dict (dict): Dictionary containing the toolbox, the hall of fame, the statistics, the lower and upper bounds, the parameter names, the inverse scaler and the strategy. + + Example: + For complete example see [here](../examples/deap_cma.md) + ```python + from CompNeuroPy import DeapCma + import numpy as np + + + ### for DeapCma we need to define the evaluate_function + def evaluate_function(population): + loss_list = [] + ### the population is a list of individuals which are lists of parameters + for individual in population: + loss_of_individual = float(individual[0] + individual[1] + individual[2]) + loss_list.append((loss_of_individual,)) + return loss_list + + + ### define lower bounds of paramters to optimize + lb = np.array([0, 0, 0]) + + ### define upper bounds of paramters to optimize + ub = np.array([10, 10, 10]) + + ### create an "minimal" instance of the DeapCma class + deap_cma = DeapCma( + lower=lb, + upper=ub, + evaluate_function=evaluate_function, + ) + + ### run the optimization + deap_cma_result = deap_cma.run(max_evals=1000) + ``` """ + @check_types() def __init__( self, lower: np.ndarray, @@ -696,6 +722,7 @@ def __init__( evaluate_function: Callable, max_evals: None | int = None, p0: None | np.ndarray = None, + sig0: None | float = None, param_names: None | list[str] = None, learn_rate_factor: float = 1, damping_factor: float = 1, @@ -703,6 +730,7 @@ def __init__( plot_file: None | str = "logbook.png", cma_params_dict: dict = {}, source_solutions: list[tuple[np.ndarray, float]] = [], + hard_bounds: bool = False, ): """ @@ -720,8 
+748,13 @@ def __init__( p0 (None | np.ndarray, optional): Initial guess for the parameters. By default the mean of lower and upper bounds. + sig0 (None | float, optional): + Initial guess for the standard deviation of the parameters. It will be + scaled by the range of the parameters. By default 0.25, i.e. 25% of the + range (for each parameter). param_names (None | list[str], optional): - Names of the parameters. By default None. + Names of the parameters. By default None, i.e. the parameters are named + "param0", "param1", ... learn_rate_factor (float, optional): Learning rate factor (decrease -> slower). By default 1. damping_factor (float, optional): @@ -736,7 +769,13 @@ def __init__( details source_solutions (list[tuple[np.ndarray, float]], optional): List of tuples with the parameters and losses of source solutions. These - solutions are used to initialize the covariance matrix. By default []. + solutions are used to initialize the covariance matrix. Using source + solutions ignores the initial guess p0 and sets the cma parameter + 'cmatrix' (which will also be ignored if given in cma_params_dict). By + default []. + hard_bounds (bool, optional): + Whether or not to use hard bounds (parmeters are clipped to lower and + upper bounds). By default False. 
""" ### store attributes self.max_evals = max_evals @@ -744,6 +783,7 @@ def __init__( self.upper = upper self.evaluate_function = evaluate_function self.p0 = p0 + self.sig0 = sig0 self.param_names = param_names self.learn_rate_factor = learn_rate_factor self.damping_factor = damping_factor @@ -751,6 +791,7 @@ def __init__( self.plot_file = plot_file self.cma_params_dict = cma_params_dict self.source_solutions = source_solutions + self.hard_bounds = hard_bounds ### prepare the optimization self.deap_dict = self._prepare() @@ -771,6 +812,7 @@ def _prepare(self): upper = self.upper evaluate_function = self.evaluate_function p0 = self.p0 + sig0 = self.sig0 param_names = self.param_names learn_rate_factor = self.learn_rate_factor damping_factor = self.damping_factor @@ -781,12 +823,18 @@ def _prepare(self): upper_orig = deepcopy(upper) lower_orig = deepcopy(lower) - def scaler(x): - return (x - lower_orig) / (upper_orig - lower_orig) + def scaler(x, diff=False): + if not diff: + return (x - lower_orig) / (upper_orig - lower_orig) + else: + return x / (upper_orig - lower_orig) ### create inverse scaler to scale parameters back into original range [lower,upper] - def inv_scaler(x): - return x * (upper_orig - lower_orig) + lower_orig + def inv_scaler(x, diff=False): + if not diff: + return x * (upper_orig - lower_orig) + lower_orig + else: + return x * (upper_orig - lower_orig) ### scale upper and lower bounds lower = scaler(lower) @@ -817,9 +865,22 @@ def inv_scaler(x): gamma=1, ) cma_params_dict["cmatrix"] = cmatrix + + if self.hard_bounds: + ### clip centroid to [0,1] + centroid = np.clip(centroid, 0, 1) else: - centroid = (lower + upper) / 2 if isinstance(p0, type(None)) else scaler(p0) - sigma = (upper - lower) / 4 + ### lower + upper / 2 is always 0.5 since lower and upper are scaled + centroid = ( + (lower + upper) / 2 + if isinstance(p0, type(None)) + else ( + scaler(np.clip(p0, lower, upper)) + if self.hard_bounds + else scaler(p0) + ) + ) + sigma = 0.25 if 
isinstance(sig0, type(None)) else sig0 ### create the strategy strategy = cma.Strategy( @@ -828,6 +889,11 @@ def inv_scaler(x): **cma_params_dict, ) + if verbose: + print( + f"Starting optimization with:\ncentroid: {inv_scaler(strategy.centroid)}, (scaled: {strategy.centroid})\nsigma: {inv_scaler(strategy.sigma,diff=True)}, (scaled: {strategy.sigma})" + ) + ### slow down the learning rate and increase the damping strategy.ccov1 *= learn_rate_factor strategy.ccovmu *= learn_rate_factor @@ -868,6 +934,7 @@ def inv_scaler(x): "param_names": param_names, "inv_scaler": inv_scaler, "strategy": strategy, + "hard_bounds": self.hard_bounds, } def run( @@ -892,8 +959,10 @@ def run( Returns: best (dict): - Dictionary containing the best parameters, the logbook, the last population - of individuals and the best fitness. + Dictionary containing the best parameters (as key and value pairs), + the logbook of the optimization (key = 'logbook'), the last population + of individuals (key = 'deap_pop') and the best fitness (key = + 'best_fitness'). 
""" ### get attributes @@ -981,13 +1050,17 @@ def _deap_ea_generate_update( stats = deap_dict["stats"] halloffame = deap_dict["hof"] strategy = deap_dict["strategy"] + hard_bounds = deap_dict["hard_bounds"] ### init logbook logbook = tools.Logbook() logbook.header = ["gen", "nevals"] + (stats.fields if stats else []) ### define progress bar - progress_bar = tqdm(range(ngen), total=ngen, unit="gen") + if verbose: + progress_bar = range(ngen) + else: + progress_bar = tqdm(range(ngen), total=ngen, unit="gen") early_stop = False ### loop over generations @@ -996,9 +1069,10 @@ def _deap_ea_generate_update( population = toolbox.generate() ### clip individuals of population to variable bounds ### TODO only if bounds are hard - for ind in population: - for idx, val in enumerate(ind): - ind[idx] = np.clip(val, lower[idx], upper[idx]) + if hard_bounds: + for ind in population: + for idx, val in enumerate(ind): + ind[idx] = np.clip(val, lower[idx], upper[idx]) ### Evaluate the individuals (here whole population at once) ### scale parameters back into original range [lower,upper] population_inv_scaled = [inv_scaler(ind) for ind in deepcopy(population)] @@ -1028,12 +1102,23 @@ def _deap_ea_generate_update( record = stats.compile(population) if stats is not None else {} logbook.record(gen=gen, nevals=len(population), **record) if verbose: + ### print logbook print(logbook.stream) + ### print evaluated individuals and their fitnesses + print_dict = { + f"ind_{idx}": list(ind) + for idx, ind in enumerate(deepcopy(population_inv_scaled)) + } + for idx, key in enumerate(print_dict): + print_dict[key].append(fitnesses[idx][0]) + print_df(print_dict) + print("") ### update progress bar with current best loss - progress_bar.set_postfix_str( - f"best loss: {halloffame[0].fitness.values[0]:.5f}" - ) + if not verbose: + progress_bar.set_postfix_str( + f"best loss: {halloffame[0].fitness.values[0]:.5f}" + ) if early_stop and verbose: print("Stopping because convergence is reached.") @@ 
-1059,9 +1144,10 @@ def __init__( self, neuron_model: Neuron, equations: str = """ - C*dv/dt = k*(v - v_r)*(v - v_t) - u + C*dv/dt = k*(v - v_r)*(v - v_t) - u + I du/dt = a*(b*(v - v_r) - u) """, + external_current_var: str = "I", bounds: dict[str, tuple[float, float]] = { "C": (0.1, 100), "v_r": (-90, -40), @@ -1089,9 +1175,14 @@ def __init__( equations (str, optional): The equations whose parameters should be obtained. Default: Izhikevich 2007 neuron model + external_current_var (str, optional): + The name of the variable in the neuron model which is used as the + external current. Has to be used in the neuron model and the given + equations Default: "I" bounds (dict, optional): - The bounds for the parameters. For each parameter a bound should be - given! Default: Izhikevich 2007 neuron model + The bounds for the parameters. For each parameter of the equation a + bound should be given (except for the external current variable)! + Default: Izhikevich 2007 neuron model p0 (dict, optional): The initial guess for the parameters. Dict keys should be the same as the keys of bounds. 
The values can be either a single number for each @@ -1130,6 +1221,7 @@ def __init__( self._verbose_extreme = False ### store the given neuron model and a voltage clamp version of it self.neuron_model = neuron_model + self.external_current_var = external_current_var self._neuron_model = deepcopy(neuron_model) self._neuron_model_clamp = self._get_neuron_model_clamp() @@ -1159,9 +1251,9 @@ def __init__( if self.do_plot: sf.create_dir("/".join(plot_file.split("/")[:-1])) - ### create the functions for v_clamp_inst and v_clamp_hold using the given + ### create the functions for I_clamp_inst and I_clamp_hold using the given ### izhikevich equations - self._f_inst, self._f_hold, self._f_variables = self._create_v_clamp_functions() + self._f_inst, self._f_hold, self._f_variables = self._create_I_clamp_functions() ### create the voltage step arrays self._v_0_arr, self._v_step_arr = self._create_voltage_step_arrays() @@ -1172,25 +1264,25 @@ def __init__( mf.cnp_clear() self._model_normal, self._model_clamp = self._create_model() - ### perform resting state and voltage step simulations to obtain v_clamp_inst, - ### v_clamp_hold and v_rest - self._v_clamp_inst_arr = None - self._v_clamp_hold_arr = None + ### perform resting state and voltage step simulations to obtain I_clamp_inst, + ### I_clamp_hold and v_rest + self._I_clamp_inst_arr = None + self._I_clamp_hold_arr = None if self.verbose: print("Performing simulations...") ( self._v_rest, - self._v_clamp_inst_arr, - self._v_clamp_hold_arr, + self._I_clamp_inst_arr, + self._I_clamp_hold_arr, self._v_step_unique, - self._v_clamp_hold_unique, + self._I_clamp_hold_unique, ) = self._simulations() - ### tune the free paramters of the functions for v_clamp_inst and v_clamp_hold + ### tune the free paramters of the functions for I_clamp_inst and I_clamp_hold ### to fit the data if self.verbose: print("Tuning parameters...") - self._p_opt = self._tune_v_clamp_functions() + self._p_opt = self._tune_I_clamp_functions() self.p_opt = { 
param_name: self._p_opt.get(param_name, None) for param_name in self.bounds.keys() @@ -1217,7 +1309,8 @@ def __init__( ) ### create a neuron model with the tuned parameters and the given equations - ### then run the simulations again with this neuron model + ### then run the simulations again with this neuron model to do the plots + ### with the tuned parameters if self.verbose: print("Running simulations with tuned parameters...") mf.cnp_clear() @@ -1261,24 +1354,28 @@ def _create_neuron_model_with_tuned_parameters(self): the neuron model with the tuned parameters and the given equations """ ### create the neuron with the tuned parameters, if a parameter is not tuned - ### use the mid of the bounds (these parameters should not affect v_clamp_inst - ### and v_clamp_hold) + ### use the mid of the bounds (these parameters should not affect I_clamp_inst + ### and I_clamp_hold) parameters = "\n".join( [ f"{key} = {self._p_opt.get(key,sum(self.bounds[key])/2)}" for key in self.bounds.keys() ] ) + ### also add the external current variable + parameters = parameters + "\n" + f"{self.external_current_var} = 0" neuron_mondel = Neuron( parameters=parameters, equations=self.equations + "\nr=0", ) + if self.verbose: + print(f"Neuron model with tuned parameters:\n{neuron_mondel}") return neuron_mondel - def _tune_v_clamp_functions(self): + def _tune_I_clamp_functions(self): """ - Tune the free paramters of the functions for v_clamp_inst and v_clamp_hold + Tune the free paramters of the functions for I_clamp_inst and I_clamp_hold to fit the data. 
""" ### get the names of the free parameters which will be tuned @@ -1289,7 +1386,7 @@ def _tune_v_clamp_functions(self): sub_var_names_list.append(str(var)) ### target array for the error function below - target_arr = np.concatenate([self._v_clamp_inst_arr, self._v_clamp_hold_unique]) + target_arr = np.concatenate([self._I_clamp_inst_arr, self._I_clamp_hold_unique]) ### create a function for the error def error_function(x): @@ -1389,16 +1486,16 @@ def error_function_deap(population): return result_dict - def _create_v_clamp_functions(self): + def _create_I_clamp_functions(self): """ - Create the functions for v_clamp_inst and v_clamp_hold using the given + Create the functions for I_clamp_inst and I_clamp_hold using the given izhikevich equations. Returns: f_inst (Callable): - Function for v_clamp_inst + Function for I_clamp_inst f_hold (Callable): - Function for v_clamp_hold + Function for I_clamp_hold variables (list): List of variables used for the functions """ @@ -1415,8 +1512,7 @@ def _create_v_clamp_functions(self): ### values variables_sympy_dict = {key: Symbol(key) for key in variables_name_list} - ### also create sympy symbols for v_clamp, v_0 and v_step - variables_sympy_dict["v_clamp"] = Symbol("v_clamp") + ### also create sympy symbols for v_0 and v_step variables_sympy_dict["v_0"] = Symbol("v_0") variables_sympy_dict["v_step"] = Symbol("v_step") @@ -1427,11 +1523,11 @@ def _create_v_clamp_functions(self): for line_idx, line in enumerate(eq_line_list): left_side = line.split("=")[0] right_side = line.split("=")[1] - ### check if line contains dv/dt, replace it with v_clamp and add v_clamp - ### to variables_to_solve_for_list, also set instant_update to True + ### check if line contains dv/dt, replace it with 0 and add external current + ### variable to variables_to_solve_for_list, also set instant_update to True if "dv/dt" in line: - variables_to_solve_for_list.append("v_clamp") - left_side = left_side.replace("dv/dt", "v_clamp") + 
variables_to_solve_for_list.append(self.external_current_var) + left_side = left_side.replace("dv/dt", "0") instant_update_list.append(True) ### check if line contains any other derivative with syntax "d/dt" ### using re, replace it with 0 and add the variable to @@ -1464,8 +1560,8 @@ def _create_v_clamp_functions(self): eq_sympy_list_hold_v_0, variables_to_solve_for_list, "holding v_0" ) - ### 2nd for v_clamp_inst set v to v_step only in equaitons which are - ### updated instantaneously (v_clamp and all non-derivatives), for all + ### 2nd for I_clamp_inst set v to v_step only in equations which are + ### updated instantaneously (I_clamp and all non-derivatives), for all ### derivatives use the solution for holding v_0 eq_sympy_list_inst = deepcopy(eq_sympy_list) for line_idx, line in enumerate(eq_sympy_list_inst): @@ -1488,7 +1584,7 @@ def _create_v_clamp_functions(self): eq_sympy_list_inst, variables_to_solve_for_list, "step from v_0 to v_step" ) - ### 3rd for v_clamp_hold (i.e. holding v_step) set v to v_step in all + ### 3rd for I_clamp_hold (i.e. holding v_step) set v to v_step in all ### equations eq_sympy_list_hold = deepcopy(eq_sympy_list) for line_idx, line in enumerate(eq_sympy_list_hold): @@ -1500,21 +1596,22 @@ def _create_v_clamp_functions(self): eq_sympy_list_hold, variables_to_solve_for_list, "holding v_step" ) - ### get the equations for v_clamp_inst and v_clamp_hold - eq_v_clamp_inst = solution_inst[variables_sympy_dict["v_clamp"]] - eq_v_clamp_hold = solution_hold[variables_sympy_dict["v_clamp"]] + ### get the equations for I_clamp_inst and I_clamp_hold (i.e. 
the external + ### current variable) + eq_I_clamp_inst = solution_inst[variables_sympy_dict[self.external_current_var]] + eq_I_clamp_hold = solution_hold[variables_sympy_dict[self.external_current_var]] if self.verbose: - print(f"Equation for v_clamp_inst: {factor(eq_v_clamp_inst)}") - print(f"Equation for v_clamp_hold: {factor(eq_v_clamp_hold)}") + print(f"Equation for I_clamp_inst: {factor(eq_I_clamp_inst)}") + print(f"Equation for I_clamp_hold: {factor(eq_I_clamp_hold)}") - ### create functions for v_clamp_inst and v_clamp_hold - ### 1st obtain all variables from the equations for v_clamp_inst and v_clamp_hold + ### create functions for I_clamp_inst and I_clamp_hold + ### 1st obtain all variables from the equations for I_clamp_inst and I_clamp_hold f_variables = list( - set(list(eq_v_clamp_inst.free_symbols) + list(eq_v_clamp_hold.free_symbols)) + set(list(eq_I_clamp_inst.free_symbols) + list(eq_I_clamp_hold.free_symbols)) ) ### 2nd create a function for each equation - f_inst = lambdify(f_variables, eq_v_clamp_inst) - f_hold = lambdify(f_variables, eq_v_clamp_hold) + f_inst = lambdify(f_variables, eq_I_clamp_inst) + f_hold = lambdify(f_variables, eq_I_clamp_hold) return f_inst, f_hold, f_variables @@ -1571,15 +1668,15 @@ def _get_variables_from_eq(self, eq: str): def _simulations(self): """ - Perform the resting state and voltage step simulations to obtain v_clamp_inst, - v_clamp_hold and v_rest. + Perform the resting state and voltage step simulations to obtain I_clamp_inst, + I_clamp_hold and v_rest. 
Returns: v_rest (float): resting state voltage - v_clamp_inst (np.array): + I_clamp_inst (np.array): array of the voltage clamp values directly after the voltage step - v_clamp_hold (np.array): + I_clamp_hold (np.array): array of the voltage clamp values after the holding period """ @@ -1591,71 +1688,72 @@ def _simulations(self): simulate(duration) get_population("pop_clamp").v = self._v_step_arr simulate(self._timestep) - v_clamp_inst_arr = get_population("pop_clamp").v_clamp + I_clamp_inst_arr = get_population("pop_clamp").I_clamp simulate(duration - self._timestep) - v_clamp_hold_arr = get_population("pop_clamp").v_clamp + I_clamp_hold_arr = get_population("pop_clamp").I_clamp v_rest = get_population("pop_normal").v[0] ### get unique values of v_step and their indices v_step_unique, v_step_unique_idx = np.unique( self._v_step_arr, return_index=True ) - ### get the corresponding values of v_clamp_hold (because it does only depend om + ### get the corresponding values of I_clamp_hold (because it does only depend on ### v_step) - v_clamp_hold_unique = v_clamp_hold_arr[v_step_unique_idx] + I_clamp_hold_unique = I_clamp_hold_arr[v_step_unique_idx] - if self.do_plot and not isinstance(self._v_clamp_inst_arr, type(None)): + if self.do_plot and not isinstance(self._I_clamp_inst_arr, type(None)): + plt.close("all") plt.figure(figsize=(6.4 * 3, 4.8 * 2)) - ### create a 2D color-coded plot of the data for v_clamp_inst and v_clamp_hold + ### create a 2D color-coded plot of the data for I_clamp_inst and I_clamp_hold x = self._v_0_arr y = self._v_step_arr - ### create 2 subplots for original v_clamp_inst and v_clamp_hold + ### create 2 subplots for original I_clamp_inst and I_clamp_hold plt.subplot(231) - self._plot_v_clamp_subplot( + self._plot_I_clamp_subplot( x, y, - self._v_clamp_inst_arr, - "v_clamp_inst original", + self._I_clamp_inst_arr, + "I_clamp_inst original", ) plt.subplot(234) - self._plot_v_clamp_subplot( + self._plot_I_clamp_subplot( x, y, - 
self._v_clamp_hold_arr, - "v_clamp_hold original", + self._I_clamp_hold_arr, + "I_clamp_hold original", ) - ### create 2 subplots for tuned v_clamp_inst and v_clamp_hold + ### create 2 subplots for tuned I_clamp_inst and I_clamp_hold plt.subplot(232) - self._plot_v_clamp_subplot( + self._plot_I_clamp_subplot( x, y, - v_clamp_inst_arr, - "v_clamp_inst tuned", + I_clamp_inst_arr, + "I_clamp_inst tuned", ) plt.subplot(235) - self._plot_v_clamp_subplot( + self._plot_I_clamp_subplot( x, y, - v_clamp_hold_arr, - "v_clamp_hold tuned", + I_clamp_hold_arr, + "I_clamp_hold tuned", ) ### create 2 subplots for differences plt.subplot(233) - self._plot_v_clamp_subplot( + self._plot_I_clamp_subplot( x, y, - self._v_clamp_inst_arr - v_clamp_inst_arr, - "v_clamp_inst diff", + self._I_clamp_inst_arr - I_clamp_inst_arr, + "I_clamp_inst diff", ) plt.subplot(236) - self._plot_v_clamp_subplot( + self._plot_I_clamp_subplot( x, y, - self._v_clamp_hold_arr - v_clamp_hold_arr, - "v_clamp_hold diff", + self._I_clamp_hold_arr - I_clamp_hold_arr, + "I_clamp_hold diff", ) plt.tight_layout() @@ -1664,17 +1762,17 @@ def _simulations(self): self.plot_file.split(".")[0] + "_data." + self.plot_file.split(".")[1], dpi=300, ) - plt.close() + plt.close("all") return ( v_rest, - v_clamp_inst_arr, - v_clamp_hold_arr, + I_clamp_inst_arr, + I_clamp_hold_arr, v_step_unique, - v_clamp_hold_unique, + I_clamp_hold_unique, ) - def _plot_v_clamp_subplot(self, x, y, c, label): + def _plot_I_clamp_subplot(self, x, y, c, label): plt.title(label) ci = c @@ -1837,9 +1935,9 @@ def _get_neuron_model_clamp(self): def _adjust_equations_for_voltage_clamp(self, eq_line_list: list): """ - Replaces the 'dv/dt' or 'v+=' equation with a voltage clamp version in which the - new variable 'v_clamp' is calculated from the right side of the 'dv/dt' or 'v+=' - equation. 
+ Replaces the 'dv/dt' equation with a voltage clamp version (dv/dt=0) in which the + new variable 'I_clamp' is obtained by solving the 'dv/dt' equation for its + external current variable. Args: eq_line_list (list): @@ -1865,81 +1963,46 @@ def _adjust_equations_for_voltage_clamp(self, eq_line_list: list): ### remove whitespaces eq_v = eq_v.replace(" ", "") - ### split eqatuion at ":" to separate flags + ### split eqatuion at ":" to ignore flags eq_v_split = eq_v.split(":") eq_v = eq_v_split[0] - ### check if flags are present - if len(eq_v_split) == 1: - flags = "" - else: - flags = ":" + eq_v_split[1] ### adjust the equation for voltage clamp - if "+=" in eq_v: - eq_v, eq_v_clamp = self._adjust_equation_for_voltage_clamp_plus(eq_v, flags) - else: - eq_v, eq_v_clamp = self._adjust_equation_for_voltage_clamp_dvdt(eq_v, flags) + eq_v, eq_I_clamp = self._adjust_equation_for_voltage_clamp_dvdt(eq_v) ### delete old equation from equation list using the index of the equation eq_line_list.pop(line_is_v_list.index(True)) ### insert new equation at the same position eq_line_list.insert(line_is_v_list.index(True), eq_v) - ### insert new equation for "v_clamp" at the same position - eq_line_list.insert(line_is_v_list.index(True), eq_v_clamp) + ### insert new equation for "I_clamp" at the same position + eq_line_list.insert(line_is_v_list.index(True), eq_I_clamp) return eq_line_list - def _adjust_equation_for_voltage_clamp_plus(self, eq_v: str, flags: str): - """ - Convert the v-update equation using "v+=" into a voltage clamp version. 
- - Args: - eq_v (str): - the equation string for updating v (without flags) - flags (str): - the flags of the equation string - - Returns: - eq_v (str): - the adjusted equation string for updating v (without flags) - eq_v_clamp (str): - the equation string for "v_clamp" (with flags) - """ - ### split equations at "=" to separate left and right side - eq_v_left, eq_v_right = eq_v.split("=") - ### set right side to zero and combine equation again with "=" - eq_v = eq_v_left + "=" + "0" - ### create new equation for "v_clamp" with right side of original equation - eq_v_clamp = "v_clamp=" + eq_v_right + flags - - return eq_v, eq_v_clamp - - def _adjust_equation_for_voltage_clamp_dvdt(self, eq_v: str, flags: str): + def _adjust_equation_for_voltage_clamp_dvdt(self, eq_v: str): """ Convert the v-update equation using "dv/dt" into a voltage clamp version. + !!! warning + Equation needs to contain dv/dt and the external current variable. + Args: eq_v (str): - the equation string for updating v (without flags) - flags (str): - the flags of the equation string + the equation string for updating v (without flags and whitespace) Returns: eq_v (str): the adjusted equation string for updating v (without flags) - eq_v_clamp (str): - the equation string for "v_clamp" (with flags) + eq_I_clamp (str): + the equation string for "I_clamp" """ - ### if equation starts with "dv/dt=" do the same as for "v+=" - if eq_v.startswith("dv/dt="): - return self._adjust_equation_for_voltage_clamp_plus(eq_v, flags) ### if equation doesn't start with "dv/dt=" --> need to rearrange equation - ### i.e. 
solve the equation for dv/dt - eq_v = eq_v.replace("dv/dt", "delta_v") + ### set dv/dt to zero and solve the equation for the external current variable + ### (will be I_clamp) + eq_v = eq_v.replace("dv/dt", "0") ### split the equation at "=" and move everything on one side (other side = 0) - ### replace the whole right side with "right_side" making solving easier left_side, right_side = eq_v.split("=") - eq_v_one_side = f"(right_side) - {left_side}" + eq_v_one_side = f"{right_side} - {left_side}" ### prepare the sympy equation generation attributes_name_list = self._get_neuron_model_attributes(self._neuron_model) @@ -1950,30 +2013,31 @@ def _adjust_equation_for_voltage_clamp_dvdt(self, eq_v: str, flags: str): key: attributes_tuple[attributes_name_list.index(key)] for key in attributes_name_list } - ### further create symbols for delta_v and right_side - attributes_sympy_dict["delta_v"] = Symbol("delta_v") - attributes_sympy_dict["right_side"] = Symbol("right_side") ### now creating the sympy equation eq_sympy = sympify(eq_v_one_side) - ### solve the equation for delta_v - result = solve(eq_sympy, attributes_sympy_dict["delta_v"], dict=True) + ### solve the equation for the external current variable + if self.verbose: + print(f"attributes_sympy_dict: {attributes_sympy_dict}") + result = solve( + eq_sympy, attributes_sympy_dict[self.external_current_var], dict=True + ) if len(result) != 1: - raise ValueError("Could not solve equation of neuronmodel for dv/dt!") + raise ValueError( + f"Could not solve equation of neuronmodel for external current variable {self.external_current_var}!" 
+ ) ### convert result to string - result = str(result[0][attributes_sympy_dict["delta_v"]]) - - ### replace "right_side" by the actual right side in brackets - result = result.replace("right_side", f"({right_side})") + result = str(result[0][attributes_sympy_dict[self.external_current_var]]) ### create new equation for dv/dt eq_v = "dv/dt = 0" - ### create new equation for "v_clamp" with the equation solved for dv/dt - eq_v_clamp = "v_clamp=" + result + flags + ### create new equation for "I_clamp" with the equation solved for the external + ### current variable + eq_I_clamp = "I_clamp=" + result - return eq_v, eq_v_clamp + return eq_v, eq_I_clamp def _get_line_is_v(self, line: str): """ @@ -2004,120 +2068,142 @@ def _get_line_is_v(self, line: str): return False -def interactive_plot( - nrows: int, - ncols: int, - sliders: list[dict], - create_plot: Callable, -): - """ - Create an interactive plot with sliders. +class InteractivePlot: - Args: - nrows (int): - number of rows of subplots - ncols (int): - number of columns of subplots - sliders (list): - list of dictionaries with slider kwargs (see matplotlib.widgets.Slider), at - least the following keys have to be present: - - label (str): - label of the slider - - valmin (float): - minimum value of the slider - - valmax (float): - maximum value of the slider - create_plot (Callable): - function which fills the subplots, has to have the signature - create_plot(axs, sliders), where axs is a list of axes (for each subplot) - and sliders is the given sliders list with newly added keys "ax" (axes of - the slider) and "slider" (the Slider object itself, so that you can access - the slider values in the create_plot function using the .val attribute) - - Examples: - ```python - def create_plot(axs, sliders): - axs[0].axhline(sliders[0]["slider"].val, color="r") - axs[1].axvline(sliders[1]["slider"].val, color="r") - - interactive_plot( - nrows=2, - ncols=1, - sliders=[ - {"label": "a", "valmin": 0.0, "valmax": 1.0, 
"valinit": 0.3}, - {"label": "b", "valmin": 0.0, "valmax": 1.0, "valinit": 0.7}, - ], - create_plot=create_plot, - ) - ``` - """ + def __init__( + self, + nrows: int, + ncols: int, + sliders: list[dict], + create_plot: Callable, + update_loop: Callable | None = None, + figure_frequency: float = 20.0, + update_frequency: float = np.inf, + ): + """ + Create an interactive plot with sliders. - def update(axs, sliders): - ### remove everything from all axes except the sliders axes - for ax in axs: - if ax not in [slider["ax"] for slider in sliders]: - ax.cla() - ### recreate the plot - create_plot(axs, sliders) - ### redraw the canvas - fig.canvas.draw_idle() - - ### create the figure as large as the screen - screen_width, screen_height = get_monitors()[0].width, get_monitors()[0].height - figsize = (screen_width / 100, screen_height / 100) - fig = plt.figure(figsize=figsize) - - ### create the axes filled with the create_plot function - grid = GridSpec((nrows + 1) * len(sliders), ncols * len(sliders), figure=fig) - axs = [] - for row_idx in range(nrows): - for col_idx in range(ncols): - ax = fig.add_subplot( - grid[ - row_idx * len(sliders) : (row_idx + 1) * len(sliders), - col_idx * len(sliders) : (col_idx + 1) * len(sliders), - ] + Args: + nrows (int): + number of rows of subplots + ncols (int): + number of columns of subplots + sliders (list): + list of dictionaries with slider kwargs (see matplotlib.widgets.Slider), at + least the following keys have to be present: + - label (str): + label of the slider + - valmin (float): + minimum value of the slider + - valmax (float): + maximum value of the slider + create_plot (Callable): + function which fills the subplots, has to have the signature + create_plot(axs, sliders), where axs is a list of axes (for each subplot) + and sliders is the given sliders list with newly added keys "ax" (axes of + the slider) and "slider" (the Slider object itself, so that you can access + the slider values in the create_plot function 
using the .val attribute) + update_loop (Callable, optional): + Function which is called periodically. After each call the plot is updated. + If None, the plot is only updated when a slider is changed. Default is None. + figure_frequency (float, optional): + Frequency of the figure update in Hz. Default is 20.0. + update_frequency (float, optional): + Frequency of the update loop in Hz. Default is np.inf. + + Example: + ```python + def create_plot(axs, sliders): + axs[0].axhline(sliders[0]["slider"].val, color="r") + axs[1].axvline(sliders[1]["slider"].val, color="r") + + interactive_plot( + nrows=2, + ncols=1, + sliders=[ + {"label": "a", "valmin": 0.0, "valmax": 1.0, "valinit": 0.3}, + {"label": "b", "valmin": 0.0, "valmax": 1.0, "valinit": 0.7}, + ], + create_plot=create_plot, ) - axs.append(ax) - - ### create the sliders axes - for slider_idx, slider_kwargs in enumerate(sliders): - sliders[slider_idx]["ax"] = fig.add_subplot( - grid[nrows * len(sliders) + slider_idx, :] - ) - - ### initialize the sliders to their axes - for slider_idx, slider_kwargs in enumerate(sliders): - ### if init out of min max, change min max - if "valinit" in slider_kwargs: - if slider_kwargs["valinit"] < slider_kwargs["valmin"]: - slider_kwargs["valmin"] = slider_kwargs["valinit"] - elif slider_kwargs["valinit"] > slider_kwargs["valmax"]: - slider_kwargs["valmax"] = slider_kwargs["valinit"] - slider = Slider(**slider_kwargs) - slider.on_changed(lambda val: update(axs, sliders)) - sliders[slider_idx]["slider"] = slider - - ### create the plot - create_plot(axs, sliders) - ### arange subplots - plt.tight_layout() - new_right_border = 0.85 - new_left_border = 0.15 - for slider_idx, slider_kwargs in enumerate(sliders): - ax = sliders[slider_idx]["ax"] - ### set new borders - ax.set_position( - [ - new_left_border, - ax.get_position().y0, - new_right_border - new_left_border, - ax.get_position().height, - ] + ``` + """ + self.create_plot = create_plot + self._waiter = _Waiter(duration=2.0, 
on_finish=self._recreate_plot) + plt.close("all") + + ### create the figure as large as the screen + screen_width, screen_height = get_monitors()[0].width, get_monitors()[0].height + figsize = (screen_width / 100, screen_height / 100) + fig, axs = plt.subplots(nrows, ncols, figsize=figsize) + self.fig = fig + self.axs = axs + + ### create the sliders figure, set the axes for the sliders + fig_sliders, axs_sliders = plt.subplots( + len(sliders), 1, figsize=(6.4, 4.8 * len(sliders)) ) + if len(sliders) == 1: + axs_sliders = [axs_sliders] + for slider_idx in range(len(sliders)): + sliders[slider_idx]["ax"] = axs_sliders[slider_idx] + + ### initialize the sliders + for slider_idx, slider_kwargs in enumerate(sliders): + ### if init out of min max, change min max + if "valinit" in slider_kwargs: + if slider_kwargs["valinit"] < slider_kwargs["valmin"]: + slider_kwargs["valmin"] = slider_kwargs["valinit"] + elif slider_kwargs["valinit"] > slider_kwargs["valmax"]: + slider_kwargs["valmax"] = slider_kwargs["valinit"] + slider = Slider(**slider_kwargs) + slider.on_changed(lambda val: self._waiter.start()) + sliders[slider_idx]["slider"] = slider + + self.sliders = sliders + + ### create the plot + create_plot(axs, sliders) - ### show the plot - plt.show() + if update_loop is None: + ### show the plot + self.ani = FuncAnimation( + self.fig, + func=lambda frame: 0, + frames=10, + interval=(1.0 / figure_frequency) * 1000, + repeat=True, + ) + self.fig.tight_layout() + plt.show() + else: + ### run update loop until figure is closed + figure_pause = 1 / figure_frequency + max_updates_per_pause = update_frequency / figure_frequency + while plt.fignum_exists(fig.number): + ### update figure + self._recreate_plot + plt.pause(figure_pause) + ### in between do the update loop multiple times + start = time.time() + nr_updates = 0 + while ( + time.time() - start < figure_pause + and nr_updates < max_updates_per_pause + ): + update_loop() + nr_updates += 1 + + def _recreate_plot(self): + 
### pause the animation + self.ani.event_source.stop() + ### clear the axes + for ax in self.axs.flatten(): + ax.cla() + ### recreate the plot + self.create_plot(self.axs, self.sliders) + ### restart the animation + self.ani.event_source.start() def efel_loss(trace1, trace2, feature_list): @@ -2212,7 +2298,7 @@ def efel_loss(trace1, trace2, feature_list): return loss ### calculate and return the mean of the differences of the features - features_1, features_2 = efel.getFeatureValues( + features_1, features_2 = efel.get_feature_values( [trace1, trace2], feature_list, raise_warnings=False, @@ -2244,3 +2330,236 @@ def efel_loss(trace1, trace2, feature_list): if verbose: print(f"loss: {loss}") return loss + + +class _Waiter: + """ + Class that waits for a certain duration while the rest of the code continues to run. + + Attributes: + finished (bool): + True if the waiting is finished, False otherwise. + """ + + def __init__(self, duration=5, on_finish=None): + """ + Args: + duration (float): + The duration in seconds after which Waiter.finished will return True. + on_finish (callable): + A callable that will be called when the counter finishes. + """ + self.duration = duration + self.on_finish = on_finish + self._finished = False + self._running = False + self._lock = threading.Lock() + self._threads = {} + + def _start_waiting(self): + """ + The function that will be run in a separate thread to wait for the duration. It + will set finished to True when the duration is reached. It will also call the + on_finish callable if it is not None. 
+ """ + ### at the beginning of the thread set the stop flags for all other threads + for thread_id, thread in self._threads.items(): + if thread[0].ident != threading.get_ident(): + thread[1].set() + ### wait duration + time.sleep(self.duration) + ### check if the current thread was already stopped, if not set finished to True + if not (self._threads[threading.get_ident()][1].is_set()): + with self._lock: + ### set finished to True + self._finished = True + ### remove the current thread from the threads dict + self._threads.pop(threading.get_ident()) + ### call the on_finish callable in the main thread + if self.on_finish is not None: + threading.Timer(0.01, self.on_finish).start() + else: + with self._lock: + ### do not set finished to True and remove the current thread from the + ### threads dict + self._threads.pop(threading.get_ident()) + + def start(self): + """ + Start the waiting process in a separate thread. The waiting will last for the + duration specified in the constructor. If the waiting is already running, it + will be stopped and restarted. + """ + ### start new waiting thread + thread = threading.Thread(target=self._start_waiting, daemon=True) + stop_flag = threading.Event() + ### start the thread + thread.start() + ### store the thread and the stop flag + with self._lock: + self._threads[thread.ident] = [thread, stop_flag] + + @property + def finished(self): + with self._lock: + return self._finished + + +class RNG: + """ + Resettable random number generator. + + Attributes: + rng (np.random.Generator): + Random number generator. + + Example: + ```python + rng = RNG(seed=1234) + print(rng.rng.integers(0, 10, 5)) + rng.reset() + print(rng.rng.integers(0, 10, 5)) + ``` + """ + + def __init__(self, seed): + """ + Args: + seed (int): + Seed for the random number generator. + """ + self.rng = np.random.default_rng(seed=seed) + self._original_seed = seed + + def reset(self): + """ + Reset the random number generator to the original seed. 
+ """ + self.rng.bit_generator.state = np.random.default_rng( + seed=self._original_seed + ).bit_generator.state + + +def find_x_bound( + y: Callable[[float], float], + x0: float, + y_bound: float, + tolerance: float = 1e-5, + bound_type: str = "equal", +) -> float: + """ + Find the x value such that y(x) is closest to y_bound within a given tolerance. The + value y_bound should be reachable by y(x) by increasing x from the initial value x0. + + Args: + y (Callable[[float], float]): + A function that takes a single float argument and returns a single float + value. + x0 (float): + The initial value of x to start the search. + y_bound (float): + The target value of y. + tolerance (float, optional): + The tolerance for the difference between y(x) and y_bound. Defaults to 1e-5. + bound_type (str, optional): + The type of bound to find. Can be 'equal'(y(x) should be close to y_bound), + 'greater'(y(x) should be close to y_bound and greater), or 'less'(y(x) should + be close to y_bound and less). Defaults to 'equal'. + + Returns: + x_bound (float): + The x value such that y(x) is closest to y_bound within the tolerance. 
+ """ + # Catch invalid bound type + if bound_type not in ["equal", "greater", "less"]: + raise ValueError("bound_type should be 'equal', 'greater', or 'less'.") + + # Check if the initial value y(x0) is already y_bound + y0 = y(x0) + if np.isclose(y0, y_bound, atol=tolerance): + sf.Logger().log("Warning: The initial value is already equal to y_bound.") + return x0, x0 + + sf.Logger().log(f"x0: {x0}, y0: {y0}, y_bound: {bound_type} {y_bound}") + + # Define a helper function to find x such that y(x) - y_bound = 0 + def func(x): + return y(x) - y_bound + + # Exponential search to find an interval [a, b] where y(a) < y_bound < y(b) + a = x0 + b = x0 + 1 + while func(b) < 0: + a = b + b *= 2 + if b > 1e6: # Avoid infinite loop in case y_bound is not reachable + break + if b > 1e6: + raise ValueError( + "y_bound cannot be reached, the function saturates below y_bound." + ) + sf.Logger().log(f"a: {a}, b: {b}") + + # Use brentq to find the root within the interval [a, b] + x_root: float = brentq(func, a, b, full_output=False) + y_root = y(x_root) + sf.Logger().log(f"y(x_root={x_root}) = {y_root}") + + # check if y(x_root) is not within the tolerance of y_bound + if not np.isclose(y_root, y_bound, atol=tolerance): + sf.Logger().log( + f"Warning: y(x_root) is not within the tolerance of y_bound (y(x_root)={y_root}, y_bound={y_bound}, tolerance={tolerance})!" 
+ ) + + if bound_type == "equal": + # Return the x value such that y(x) = y_bound + sf.Logger().log(f"Returning y(x={x_root}) = {y_root}") + return x_root + + if bound_type == "greater" and y_root > y_bound: + # Return the x value such that y(x) > y_bound + sf.Logger().log(f"Returning y(x={x_root}) = {y_root}") + return x_root + + if bound_type == "less" and y_root < y_bound: + # Return the x value such that y(x) < y_bound + sf.Logger().log(f"Returning y(x={x_root}) = {y_root}") + return x_root + + # Calculate the gradient at x_root + dx = np.abs(x_root - x0) * 1e-3 + grad_y = (y(x_root + dx) - y(x_root - dx)) / (2 * dx) + + # Define epsilon based on the gradient + epsilon = tolerance / np.abs(grad_y) if grad_y != 0 else tolerance + + if bound_type == "greater": + # Find the x value such that y(x) > y_bound (thus maybe increase x) + # do this by incrementaly increasing x by epsilon until y(x) is greater than + # y_bound + # if y(x+epsilon)-y(x) is less than the tolerance, increase epsilon + x = x_root + y_val = y(x) + while y_val < y_bound: + y_val_prev = y_val + x += epsilon + y_val = y(x) + if y_val - y_val_prev < tolerance / 10: + epsilon *= 2 + sf.Logger().log(f"Returning y(x={x}) = {y_val}") + return x + elif bound_type == "less": + # Find the x value such that y(x) < y_bound (thus maybe decrease x) + # do this by incrementaly decreasing x by epsilon until y(x) is less than + # y_bound + # if y(x)-y(x-epsilon) is less than the tolerance, increase epsilon + x = x_root + y_val = y(x) + while y_val > y_bound: + y_val_prev = y_val + x -= epsilon + y_val = y(x) + if y_val_prev - y_val < tolerance / 10: + epsilon *= 2 + sf.Logger().log(f"Returning y(x={x}) = {y_val}") + return x diff --git a/src/CompNeuroPy/full_models/bgm_22/bgm.py b/src/CompNeuroPy/full_models/bgm_22/bgm.py index 6d260ce..d798ad1 100644 --- a/src/CompNeuroPy/full_models/bgm_22/bgm.py +++ b/src/CompNeuroPy/full_models/bgm_22/bgm.py @@ -62,8 +62,8 @@ def __init__( """ Args: name (str, optional): - 
name of the model, syntax: "BGM_v_p" - replace and with the versions you + name of the model, syntax: "BGM_v*model_version*_p*parameters_version*" + replace *model_version* and *parameters_version* with the versions you want to use, see CompNeuroPy.full_models.BGM_22.parameters for available versions. Default: "BGM_v01_p01" do_create (bool, optional): @@ -72,7 +72,7 @@ def __init__( if True, the model is compiled after creation. Default: True compile_folder_name (str, optional): name of the folder in which the compiled model is saved. Default: None, - i.e. "annarchy_BGM_v" is used + i.e. "annarchy_BGM_v*model_version*" is used seed (int, optional): the seed for the random number generator used during model creation. Default: None, i.e. random seed is used @@ -89,7 +89,7 @@ def __init__( and name.split("_")[2][0] == "p" ): raise ValueError( - "name has to be of the form 'BGM_v_p'" + "name has to be of the form 'BGM_v*model_version*_p*parameters_version*'" ) ### set attributes (except the ones which are set in the super().__init__()) diff --git a/src/CompNeuroPy/generate_model.py b/src/CompNeuroPy/generate_model.py index b8e6035..3bf969e 100644 --- a/src/CompNeuroPy/generate_model.py +++ b/src/CompNeuroPy/generate_model.py @@ -145,7 +145,7 @@ def _update_attribute_df_weights(self): ) self._attribute_df_compiled = True - def compile(self, compile_folder_name=None): + def compile(self, compile_folder_name=None, warn=True): """ Compiles a created model. @@ -153,6 +153,9 @@ def compile(self, compile_folder_name=None): compile_folder_name (str, optional): Name of the folder in which the model is compiled. Default: value from initialization. + warn (bool, optional): + If True a warning is printed if other models are initialized but not + created (they will not be compiled). Default: True. 
""" ### check if this model is created if self.created: @@ -160,15 +163,16 @@ def compile(self, compile_folder_name=None): compile_folder_name = self.compile_folder_name ### check if other models were initialized but not created --> warn that they are not compiled - not_created_model_list = self._check_if_models_created() - if len(not_created_model_list) > 0: - print( - "\nWARNING during compile of model " - + self.name - + ": There are initialized models which are not created, thus not compiled! models:\n" - + "\n".join(not_created_model_list) - + "\n" - ) + if warn: + not_created_model_list = self._check_if_models_created() + if len(not_created_model_list) > 0: + print( + "\nWARNING during compile of model " + + self.name + + ": There are initialized models which are not created, thus not compiled! models:\n" + + "\n".join(not_created_model_list) + + "\n" + ) mf.compile_in_folder(compile_folder_name, silent=True) self.compiled = True @@ -183,7 +187,7 @@ def compile(self, compile_folder_name=None): + ": Only compile the model after it has been created!" ) - def create(self, do_compile=True, compile_folder_name=None): + def create(self, do_compile=True, compile_folder_name=None, warn=True): """ Creates a model and optionally compiles it directly. @@ -193,6 +197,9 @@ def create(self, do_compile=True, compile_folder_name=None): compile_folder_name (str, optional): Name of the folder in which the model is compiled. Default: value from initialization. + warn (bool, optional): + If True a warning is printed during compilation if other models are + initialized but not created (they will not be compiled). Default: True. 
""" if self.created: print("model", self.name, "already created!") @@ -222,7 +229,7 @@ def create(self, do_compile=True, compile_folder_name=None): self._attribute_df = self._get_attribute_df() if do_compile: - self.compile(compile_folder_name) + self.compile(compile_folder_name, warn) def _check_if_models_created(self): """ @@ -403,7 +410,3 @@ def _get_attribute_df(self): ### return dataframe return pd.DataFrame(attribute_dict) - - -### old name for compatibility, TODO: remove -generate_model = CompNeuroModel diff --git a/src/CompNeuroPy/generate_simulation.py b/src/CompNeuroPy/generate_simulation.py index e23de0c..a420312 100644 --- a/src/CompNeuroPy/generate_simulation.py +++ b/src/CompNeuroPy/generate_simulation.py @@ -1,6 +1,6 @@ from ANNarchy import get_time from CompNeuroPy import extra_functions as ef -from CompNeuroPy import CompNeuroMonitors +from CompNeuroPy.monitors import CompNeuroMonitors import numpy as np from typing import Callable @@ -249,10 +249,6 @@ def simulation_info(self): return simulation_info_obj -### old name for backward compatibility, TODO: remove -generate_simulation = CompNeuroSim - - class SimInfo: """ Class for storing the simulation information. diff --git a/src/CompNeuroPy/model_functions.py b/src/CompNeuroPy/model_functions.py index 2b2cfd0..df2f3c3 100644 --- a/src/CompNeuroPy/model_functions.py +++ b/src/CompNeuroPy/model_functions.py @@ -4,10 +4,15 @@ projections, clear, ) +from ANNarchy import __version__ as ANNarchy_version import os from CompNeuroPy import system_functions as sf from CompNeuroPy.generate_model import CompNeuroModel -from ANNarchy.core import Global + +if ANNarchy_version >= "4.8": + from ANNarchy.intern.NetworkManager import NetworkManager +else: + from ANNarchy.core import Global def compile_in_folder(folder_name, net=None, clean=False, silent=False): @@ -43,7 +48,10 @@ def annarchy_compiled(net_id=0): net_id (int, optional): Network ID. Default: 0. 
""" - return Global._network[net_id]["compiled"] + if ANNarchy_version >= "4.8": + return NetworkManager().is_compiled(net_id) + else: + return Global._network[net_id]["compiled"] def get_full_model(): diff --git a/src/CompNeuroPy/monitors.py b/src/CompNeuroPy/monitors.py index 48046f5..d28504c 100644 --- a/src/CompNeuroPy/monitors.py +++ b/src/CompNeuroPy/monitors.py @@ -618,10 +618,6 @@ def _unpack_mon_dict_keys(self, s: str, warning: bool = False): return compartment_type, compartment_name, period -### old name for backwards compatibility, TODO: remove in future -Monitors = CompNeuroMonitors - - class RecordingTimes: def __init__(self, recording_times_list): """ @@ -696,7 +692,7 @@ def all(self): """ return self.recording_times_list - def nr_periods(self, chunk=None, compartment=None): + def nbr_periods(self, chunk=None, compartment=None): """ Get the number of recording periods (start-pause) of a specified chunk/model compartment. @@ -717,6 +713,8 @@ def nr_periods(self, chunk=None, compartment=None): compartment = self.__check_compartment__(compartment, chunk) return self._get_nr_periods(chunk, compartment) + nr_periods = nbr_periods + def combine_periods( self, recordings: list, @@ -752,9 +750,6 @@ def combine_periods( time_step = recordings[0]["dt"] time_list = [] - ### get data arr - data_arr = recordings[chunk][recording_data_str] - ### get time arr for period in range(nr_periods): start_time, end_time = self.time_lims( @@ -766,9 +761,16 @@ def combine_periods( time_list.append(times) time_arr = np.concatenate(time_list, 0) + ### get data arr + try: + data_arr = recordings[chunk][recording_data_str] + except: + ### create an nan array with the same length as time_arr + data_arr = np.full(time_arr.shape, np.nan) + ### fill gaps with nan or interpolate if fill == "nan": - time_arr, data_arr = af.time_data_add_nan( + time_arr, data_arr = af.time_data_fill_gaps( time_arr, data_arr, fill_time_step=period_time, @@ -799,7 +801,7 @@ def combine_chunks( considered 
and filled with nan values. !!! warning - If you use mode="consecutive": Missing recordings at the end of chunks + If you use mode="consecutive": Missing recordings AT THE END OF chunks (simulated but not recorded) are not considered, this leads to times which differ from the original simulation times (these time periods without recording are simply ignored)! @@ -891,7 +893,7 @@ def combine_chunks( ### check if there are gaps in the time array ### fill them with the corersponding times and ### the data array with nan values - time_arr, data_arr = af.time_data_add_nan( + time_arr, data_arr = af.time_data_fill_gaps( time_arr, data_arr, fill_time_step=period_time, diff --git a/src/CompNeuroPy/neuron_models/__init__.py b/src/CompNeuroPy/neuron_models/__init__.py index 3733d12..84aa27b 100644 --- a/src/CompNeuroPy/neuron_models/__init__.py +++ b/src/CompNeuroPy/neuron_models/__init__.py @@ -23,6 +23,7 @@ Izhikevich2003NoisyAmpaOscillating, Izhikevich2003NoisyBase, Izhikevich2003NoisyBaseNonlin, + Izhikevich2003NoisyBaseSNR, ) from .final_models.izhikevich_2007_like_nm import ( Izhikevich2007, @@ -44,6 +45,7 @@ Izhikevich2007CorbitFsiNoisyAmpa, Izhikevich2007CorbitFsiNoisyBase, Izhikevich2007NoisyAmpaOscillating, + IzhikevichGolomb, ) from .final_models.artificial_nm import ( integrator_neuron, diff --git a/src/CompNeuroPy/neuron_models/experimental_models/fit_Corbit_nm.py b/src/CompNeuroPy/neuron_models/experimental_models/fit_Corbit_nm.py index ec397b3..e13303a 100644 --- a/src/CompNeuroPy/neuron_models/experimental_models/fit_Corbit_nm.py +++ b/src/CompNeuroPy/neuron_models/experimental_models/fit_Corbit_nm.py @@ -432,3 +432,54 @@ name="_Izhikevich2007_Corbit12", description="Simple neuron model equations from Izhikevich (2007) adjusted version to fit the striatal FSI neuron model from Corbit et al. 
(2016) should be able to produce late spiking.", ) + +_Izhikevich2007_Corbit13 = Neuron( + parameters=""" + ### base parameters + C = 0 + k = 0 + v_r = 0 + v_t = 0 + a = 0 + b = 0 + c = 0 + d = 0 + v_peak = 30 + ### after spike current parameters + a_uu = 0 + dd = 0 + ### slow currents parameters + a_s = 0 + a_n = 0 + b_n = 0 + ### input current + I_app = 0 + ### synaptic current parameters + tau_ampa = 1 + tau_gaba = 1 + E_ampa = 0 + E_gaba = -90 + ### input current scaling + a_I = 1 + """, + equations=""" + dg_ampa/dt = -g_ampa/tau_ampa + dg_gaba/dt = -g_gaba/tau_gaba + I = a_I*(I_app - neg(g_ampa*(v - E_ampa)) - pos(g_gaba*(v - E_gaba))) + + C * dv/dt = k*(v - v_r)*(v - v_t) - u - pos(uu*(v - E_gaba)) - pos(n) + I + du/dt = a*(b*(v - v_r) - u) + duu/dt = -a_uu*uu + + ds/dt = a_s*(I - s) + dn/dt = a_n*(b_n*(I - s) - n) + """, + spike="v >= v_peak", + reset=""" + v = c + u = u + d + uu = uu + dd + """, + name="_Izhikevich2007_Corbit13", + description="Simple neuron model equations from Izhikevich (2007) adjusted version to fit the striatal FSI neuron model from Corbit et al. (2016) should be able to produce late spiking.", +) diff --git a/src/CompNeuroPy/neuron_models/final_models/artificial_nm.py b/src/CompNeuroPy/neuron_models/final_models/artificial_nm.py index 88e001d..9766a69 100644 --- a/src/CompNeuroPy/neuron_models/final_models/artificial_nm.py +++ b/src/CompNeuroPy/neuron_models/final_models/artificial_nm.py @@ -27,7 +27,7 @@ class IntegratorNeuron(Neuron): threshold (float, optional): Threshold for the decision g_ampa has to reach. Default: 1. - Examples: + Example: ```python from ANNarchy import Population, simulate_until from CompNeuroPy.neuron_models import Integrator @@ -116,7 +116,7 @@ class IntegratorNeuronSimple(Neuron): tau (float, optional): Time constant in ms of the neuron. Default: 1. 
- Examples: + Example: ```python from ANNarchy import Population, simulate_until from CompNeuroPy.neuron_models import Integrator diff --git a/src/CompNeuroPy/neuron_models/final_models/izhikevich_2003_like_nm.py b/src/CompNeuroPy/neuron_models/final_models/izhikevich_2003_like_nm.py index 8f80dd0..2647fa9 100644 --- a/src/CompNeuroPy/neuron_models/final_models/izhikevich_2003_like_nm.py +++ b/src/CompNeuroPy/neuron_models/final_models/izhikevich_2003_like_nm.py @@ -458,6 +458,151 @@ def __init__( self._instantiated.append(True) +class Izhikevich2003NoisyBaseSNR(Neuron): + """ + TEMPLATE + + [Izhikevich (2003)](https://doi.org/10.1109/TNN.2003.820440)-like neuron model with + additional conductance based synapses for AMPA and GABA currents and a noisy baseline + current defined by the signal-to-noise ratio (SNR). + + Parameters: + a (float, optional): + Time constant of the recovery variable u. + b (float, optional): + Sensitivity of the recovery variable u to the membrane potential v. + c (float, optional): + After-spike reset value of the membrane potential v. + d (float, optional): + After-spike change of the recovery variable u. + n2 (float, optional): + Factor of the quadratic equation of the membrane potential v. + n1 (float, optional): + Factor of the quadratic equation of the membrane potential v. + n0 (float, optional): + Factor of the quadratic equation of the membrane potential v. + tau_ampa (float, optional): + Time constant of the AMPA conductance. + tau_gaba (float, optional): + Time constant of the GABA conductance. + E_ampa (float, optional): + Reversal potential of the AMPA conductance. + E_gaba (float, optional): + Reversal potential of the GABA conductance. + I_app (float, optional): + External applied current. + I_base (float, optional): + Baseline current. + noise (float, optional): + Can be set to 0 to disable the noise and 1 to enable it. (For other values + the noise is scaled accordingly but the target snr is only reached for 1.) 
+ tau_power (float, optional): + Time constant of the power calculation. + snr_target (float, optional): + Target signal-to-noise ratio (SNR). + rate_noise (float, optional): + Rate of the Poisson distributed noise in the baseline current, i.e. how + often the baseline current is changed randomly. + + Variables to record: + - g_ampa + - g_gaba + - power_I_signal + - I_noise + - I_signal + - I + - v + - u + - r + """ + + # For reporting + _instantiated = [] + + def __init__( + self, + a: float = 0, + b: float = 0, + c: float = 0, + d: float = 0, + n2: float = 0, + n1: float = 0, + n0: float = 0, + tau_ampa: float = 1, + tau_gaba: float = 1, + E_ampa: float = 0, + E_gaba: float = 0, + I_app: float = 0, + I_base: float = 0, + noise: float = 1, + tau_power: float = 1, + snr_target: float = 1, + rate_noise: float = 0, + ): + # Create the arguments + parameters = f""" + ### izhikevich parameters + a = {a} : population + b = {b} : population + c = {c} : population + d = {d} : population + n2 = {n2} : population + n1 = {n1} : population + n0 = {n0} : population + ### synaptic currents + tau_ampa = {tau_ampa} : population + tau_gaba = {tau_gaba} : population + E_ampa = {E_ampa} : population + E_gaba = {E_gaba} : population + ### external currents + I_app = {I_app} + I_base = {I_base} + ### noise + noise = {noise} + tau_power = {tau_power} + snr_target = {snr_target} + rate_noise = {rate_noise} + """ + + super().__init__( + parameters=parameters, + equations=""" + ### input current + I_noise = noise*ite(Uniform(0, 1) * 1000.0 / dt > rate_noise, I_noise, Normal(0, 1)) + I_signal = I_base - neg(g_ampa*(v - E_ampa)) - pos(g_gaba*(v - E_gaba)) + I_app + ### scale noise to reach target snr, scale factor is: + ### scaling_factor = sqrt((power_I_signal/power_I_noise)/snr_target) + ### since power of N(0,1) is 1, we can scale the noise by: + ### scaling_factor = sqrt(power_I_signal/snr_target) + I = I_signal + I_noise * sqrt(power_I_signal/snr_target) + ### synaptic conductances + 
tau_ampa * dg_ampa/dt = -g_ampa + tau_gaba * dg_gaba/dt = -g_gaba + ### power of signal + tau_power * dpower_I_signal/dt = I_signal**2 - power_I_signal + ### membrane potential and recovery variable + dv/dt = n2 * v * v + n1 * v + n0 - u + I : min=-100, max=0 + du/dt = a * (b * v - u) + """, + spike=""" + v >= 0 + """, + reset=""" + v = c + u = u + d + """, + name="Izhikevich2003_noisy_I_snr", + description=""" + Neuron model from Izhikevich (2003). With additional conductance based + synapses for AMPA and GABA currents and a noisy baseline current with + a specified signal-to-noise ratio (SNR). + """, + ) + + # For reporting + self._instantiated.append(True) + + class Izhikevich2003NoisyBase(Neuron): """ TEMPLATE diff --git a/src/CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py b/src/CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py index 0f84d20..b495c00 100644 --- a/src/CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py +++ b/src/CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py @@ -1372,6 +1372,141 @@ def __init__( self._instantiated.append(True) +class IzhikevichGolomb(Neuron): + """ + PREDEFINED + + [Izhikevich (2007)](https://isbnsearch.org/isbn/9780262090438)-like neuron model + with conductance-based AMPA and GABA synapses, noise in the baseline current, a + separated after-spike hyperpolarization and an inductive-like current causing late + spiking. Mechanisms and parameters were adjusted to fit the striatal FSI neuron + model from [Golomb et al. (2007)](https://doi.org/10.1371/journal.pcbi.0030156) + also used by [Corbit et al. (2016)](https://doi.org/10.1523/JNEUROSCI.0339-16.2016) + as striatal FSI neuron. + + Parameters: + I_app (float, optional): + External applied input current. + tau_ampa (float, optional): + Time constant of the AMPA synapse. + tau_gaba (float, optional): + Time constant of the GABA synapse. + E_ampa (float, optional): + Reversal potential of the AMPA synapse. 
+ E_gaba (float, optional): + Reversal potential of the GABA synapse. + base_mean (float, optional): + Mean of the baseline current. + base_noise (float, optional): + Standard deviation of the baseline current noise. + rate_base_noise (float, optional): + Rate of the noise update (Poisson distributed) in the baseline current. + params_for_pop (bool, optional): + If True, the parameters are population-wide and not neuron-specific. + init (dict, optional): + Initial values for the variables. + + Variables to record: + - offset_base + - I_base + - g_ampa + - g_gaba + - I_v + - v + - u + - uu + - s + - n + """ + + # For reporting + _instantiated = [] + + def __init__( + self, + I_app: float = 0.0, + tau_ampa: float = 10.0, + tau_gaba: float = 10.0, + E_ampa: float = 0.0, + E_gaba: float = -90.0, + base_mean: float = 0.0, + base_noise: float = 0.0, + rate_base_noise: float = 0.0, + params_for_pop: bool = False, + init: dict = {}, + ): + # Create the arguments + parameters = f""" + ### base parameters + C = 100 + k = 2.3422021975590845 + v_r = -70 + v_t = -50 + a = 0.4077132173988824 + b = 37.027824808742196 + c = -50 + d = 0 + v_peak = 0 + ### after-spike current parameters + a_uu = 0.4077132173988824 + dd = 819.0218598481788 + ### slow currents parameters + a_s = 0.19087175635342485 + a_n = 0.008987424013380247 + b_n = 2.9609600149723434 + ### input current + I_app = {I_app} + ### synaptic current parameters + tau_ampa = {tau_ampa} {': population' if params_for_pop else ''} + tau_gaba = {tau_gaba} {': population' if params_for_pop else ''} + E_ampa = {E_ampa} {': population' if params_for_pop else ''} + E_gaba = {E_gaba} {': population' if params_for_pop else ''} + ### input current scaling + a_I = 223.0822501641062 + ### baseline current parameters + base_mean = {base_mean} + base_noise = {base_noise} + rate_base_noise = {rate_base_noise} + """ + + prefix = _I_base_noise + syn = _syn_default + i_v = f"a_I*(I_app {_I_syn} + I_base)" + dv = f"{_dv_default} - pos(uu*(v - 
E_gaba)) - pos(n)" + affix = """ + duu/dt = -a_uu*uu + ds/dt = a_s*(I_v - s) + dn/dt = a_n*(b_n*(I_v - s) - n) + """ + + # get equations + equations = _get_equation_izhikevich_2007( + syn=syn, i_v=i_v, dv=dv, prefix=prefix, affix=affix + ) + + # set initial values + equations = _set_init(equations, init) + + super().__init__( + parameters=parameters, + equations=equations, + spike="v >= v_peak", + reset=""" + v = c + u = u + d + uu = uu + dd + """, + name="IzhikevichGolomb", + description=""" + Izhikevich (2007)-like neuron model fitted to the FSI neuron model + from Golomb et al. (2007) and Corbit et al. (2016). + """, + ) + + # For reporting + self._instantiated.append(True) + + ### create objects for backwards compatibility Izhikevich2007_record_currents = Izhikevich2007RecCur() Izhikevich2007_voltage_clamp = Izhikevich2007VoltageClamp() diff --git a/src/CompNeuroPy/opt_neuron.py b/src/CompNeuroPy/opt_neuron.py index 88cece8..8100ded 100644 --- a/src/CompNeuroPy/opt_neuron.py +++ b/src/CompNeuroPy/opt_neuron.py @@ -183,7 +183,9 @@ def __init__( self.bads_params_dict = bads_params_dict self.loss_history = [] self.start_time = time() - self.recording_period = recording_period + self.recording_period_str = self._get_recording_period_string( + recording_period + ) ### if using deap pop size is the number of individuals for the optimization if method == "deap": @@ -230,6 +232,28 @@ def __init__( self.monitors = monitors self.experiment = experiment(monitors=monitors) + def _get_recording_period_string(self, recording_period: float | None): + """ + Get the recording period string for the CompNeuroMonitors. If there is no + recording period or if there is only the variable "spike" recorded, the + recording period string is empty. + + Args: + recording_period (float, optional): + The recording period for the simulation in ms. Default: None. + + Returns: + recording_period_str (str): + The recording period string for the CompNeuroMonitors. 
+ """ + recording_period_str = ( + f";{recording_period}" + if recording_period is not None + and ("spike" not in self.record or len(self.record) > 1) + else "" + ) + return recording_period_str + def _get_lower_upper_p0(self): """ Returns the lower and upper bounds and the initial values for the cma @@ -401,15 +425,9 @@ def _generate_models(self, popsize=1): ### create monitors if len(self.record) > 0: - recording_period_str = ( - f";{self.recording_period}" - if self.recording_period is not None - and ("spike" not in self.record or len(self.record) > 1) - else "" - ) monitors = CompNeuroMonitors( { - f"{pop_name}{recording_period_str}": self.record + f"{pop_name}{self.recording_period_str}": self.record for pop_name in [ model.populations[0], target_model.populations[0], @@ -438,14 +456,10 @@ def _generate_models(self, popsize=1): ) ### create monitors if len(self.record) > 0: - recording_period_str = ( - f";{self.recording_period}" - if self.recording_period is not None - and ("spike" not in self.record or len(self.record) > 1) - else "" - ) monitors = CompNeuroMonitors( - {f"{model.populations[0]}{recording_period_str}": self.record} + { + f"{model.populations[0]}{self.recording_period_str}": self.record + } ) return model, target_model, monitors @@ -1304,12 +1318,16 @@ def run( best (dict): dictionary containing the optimized parameters (as keys) and: - - "loss": the loss - - "all_loss": the individual losses of the get_loss_function - - "std": the SD of the loss (in case of noisy models with multiple - runs per loss calculation) - - "results": the results generated by the experiment - - "results_soll": the target results + - "loss" (float): the loss (of best run) + - "all_loss" (list): the individual losses of the get_loss_function + - "std" (float): the SD of the loss (in case of noisy models with + multiple runs per loss calculation) + - "results" (CompNeuroExp._ResultsCl): the results generated by the + experiment + - "results_soll" (as given by the user 
or CompNeuroExp._ResultsCl): the + target results + - "parameters" (dict): all parameters given in the variable bounds and + their optimized values """ self.verbose = False self.verbose_run = verbose @@ -1342,6 +1360,7 @@ def run( best["std"] = fit["std"] best["results"] = fit["results"] best["results_soll"] = self.results_soll + best["parameters"] = self._get_final_parameters(best) self.results = best ### create loss history array @@ -1370,6 +1389,36 @@ def run( return best + def _get_final_parameters(self, best): + """ + Returns the final parameters as dictionary. + + Args: + best (dict): + dictionary containing the optimized parameters (as keys) and other keys. + + Returns: + final_parameters (dict): + dictionary containing all parameters of the variable bounds (as keys) + and their optimized values. + """ + final_parameters = {} + ### first all optimized variables (bounds=list) + for param_name, param_bounds in self.variables_bounds.items(): + if isinstance(param_bounds, list): + final_parameters[param_name] = best[param_name] + else: + final_parameters[param_name] = param_bounds + + ### now all string variables (bounds=str) + for param_name, param_bounds in self.variables_bounds.items(): + if isinstance(param_bounds, str): + final_parameters[param_name] = ef.evaluate_expression_with_dict( + param_bounds, final_parameters + ) + + return final_parameters + def _run_with_deap(self, max_evals, deap_plot_file): """ Runs the optimization with deap. @@ -1380,6 +1429,10 @@ def _run_with_deap(self, max_evals, deap_plot_file): deap_plot_file (str): the name of the figure which will be saved and shows the logbook + + Returns: + Dictionary containing the best parameters, the logbook, the last population + of individuals and the best fitness. 
""" return self._deap_cma.run( diff --git a/src/CompNeuroPy/simulation_functions.py b/src/CompNeuroPy/simulation_functions.py index 1f8c2da..a674150 100644 --- a/src/CompNeuroPy/simulation_functions.py +++ b/src/CompNeuroPy/simulation_functions.py @@ -1,4 +1,14 @@ -from ANNarchy import simulate, get_population, dt +from ANNarchy import ( + simulate, + get_population, + dt, + simulate_until, + get_current_step, + get_time, +) +from CompNeuroPy import analysis_functions as af +import numpy as np +from typing import Callable def attr_sim(pop: str, attr_dict, t=500): @@ -265,3 +275,497 @@ def increasing_current(pop: str, a0, da, nr_steps, dur_step): """ increasing_attr_return = increasing_attr(pop, "I_app", a0, da, nr_steps, dur_step) return {"current_list": increasing_attr_return["attr_list"]} + + +class SimulationEvents: + """ + Class to create a Simulation consiting of multiple events. Add the effects + (functions) of the events in a class which inherits from SimulationEvents. Within + the effect functions you can use the attributes of the class which inherits from + SimulationEvents. Do never simulate within the effect functions of the events. The + simulation is done between the events. 
+ + Example: + ```python + from CompNeuroPy import SimulationEvents + + ### define a class which inherits from SimulationEvents + ### define the effects of the events in the class + class MySim(SimulationEvents): + + def __init__( + self, + p=0.8, + verbose=False, + ): + ### set attributes which should be used in the effect functions + self.p = p + super().__init__(verbose=verbose) + + def effect1(self): + ### set the parameter of a population to the value of p + pop.parameter = self.p + + def effect2(self): + ### set the parameter of a population to 0 + pop.parameter = 0 + + ### create the simulation object + my_sim = MySim() + + ### add events to the simulation + ### start event right at the beginning which triggers event1 after 100 ms + my_sim.add_event(name="start", trigger={"event1": 100}) + ### event1 causes effect1 and triggers event2 after 200 ms + my_sim.add_event(name="event1", effect=my_sim.effect1, trigger={"event2": 200}) + ### event2 causes effect2 and triggers end event after 300 ms + my_sim.add_event(name="event2", effect=my_sim.effect2, trigger={"end": 300}) + + ### run the simulation + my_sim.run() + ``` + """ + + def __init__(self, verbose=False): + """ + Args: + verbose (bool): + if True, additional information is printed during simulation + """ + ### set verbose + self.verbose = verbose + ### initialize events + self._initialize() + ### list for storing added events, without changing them + self.stored_event_list = [] + self.called_during_restore = False + ### add the end event + self.add_event(name="end", effect=self._end_sim) + + def _initialize(self): + """ + initialize locals + """ + if self.verbose: + print("initialize locals") + ### list of events + self.event_list = [] + self.event_name_list = [] + ### as long as end == False simulation runs + self.end = False + ### if events occur depends on happened events + self.happened_event_list = [] + ### initialize model triggers empty, before first simulation, there should not be 
model_trigger_events + ### model_trigger_list = name of populations of which the decision should be checked + self.model_trigger_list = [] + self.past_model_trigger_list = [] + + def add_event( + self, + name: str, + onset: int = None, + model_trigger: str = None, + requirement_string: str = None, + effect: Callable = None, + trigger: dict[str, int | Callable[[], int]] = None, + ): + """ + Adds an event to the simulation. You always have to trigger the end event to end + the simulation. + + Args: + name (str): + name of the event + onset (int): + time in simulation steps when the event should occur + model_trigger (str): + name of population which can trigger the event (by setting variable + decision to -1) + requirement_string (str): + string containing the requirements for the event to occur TODO: replace with function + effect (function): + Function which is executed during the event. Within the effect function + you can use the attributes of the class which inherits from + SimulationEvents. + trigger (dict): + dictionary containing the names of other events as keys and the + relative time in simulation steps to the onset of the current event as + values. The values can also be callable functions which return the + time (without any aruments). They are called when this event is triggered. + """ + self.event_list.append( + self._Event( + trial_procedure=self, + name=name, + onset=onset, + model_trigger=model_trigger, + requirement_string=requirement_string, + effect=effect, + trigger=trigger, + ) + ) + self.event_name_list.append(name) + + if not self.called_during_restore: + self.stored_event_list.append( + { + "name": name, + "onset": onset, + "model_trigger": model_trigger, + "requirement_string": requirement_string, + "effect": effect, + "trigger": trigger, + } + ) + + def _restore_event_list(self): + """ + Restore the event list after simulation to the state before the first call of + run. To be able to run the simulation multiple times. 
+ """ + self.called_during_restore = True + for event in self.stored_event_list: + self.add_event(**event) + self.called_during_restore = False + + def run(self): + """ + Run the simulation. The simulation runs until the end event is triggered. The + simulation can be run multiple times by calling this function multiple times. + """ + ### check if there are events which have no onset and are not triggered by other + ### events and have no model_trigger --> they would never start + ### --> set their onset to current step --> they ar run directly after calling run + triggered_events = [] + for event in self.event_list: + if event.trigger is not None: + triggered_events.extend(list(event.trigger.keys())) + for event in self.event_list: + if ( + event.onset is None + and event.model_trigger is None + and event.name not in triggered_events + ): + event.onset = get_current_step() + if self.verbose: + print(event.name, "set onset to start of run") + + ### run simulation + while not (self.end): + ### check if model triggers were activated --> if yes run the corresponding events, model_trigger events can trigger other events (with onset) --> run current_step events after model trigger events + ### if that's the case --> model trigger event would run twice (because during first run it gets an onset) --> define here run_event_list which prevents events run twice + self.run_event_list = [] + self._run_model_trigger_events() + ### run the events of the current time, based on mode and happened events + self._run_current_events() + ### if event triggered end --> end simulation / skip rest + if self.end: + if self.verbose: + print("end event triggered --> end simulation") + continue + ### check then next events occur + next_events_time = self._get_next_events_time() + ### check if there are model triggers + self.model_trigger_list = self._get_model_trigger_list() + ### simulate until next event(s) or model triggers + if self.verbose: + print("check_triggers:", 
self.model_trigger_list) + if len(self.model_trigger_list) > 1: + ### multiple model triggers + simulate_until( + max_duration=next_events_time, + population=[ + get_population(pop_name) for pop_name in self.model_trigger_list + ], + operator="or", + ) + elif len(self.model_trigger_list) > 0: + ### a single model trigger + simulate_until( + max_duration=next_events_time, + population=get_population(self.model_trigger_list[0]), + ) + else: + ### no model_triggers + simulate(next_events_time) + + ### after run finishes initialize again + self._initialize() + + ### restore event_list + self._restore_event_list() + + def _run_current_events(self): + """ + Run all events with start == current step + """ + ### run all events of the current step + ### repeat this until no event was run, because events can set the onset of other events to the current step + ### due to repeat --> prevent that same event is run twice + event_run = True + while event_run: + event_run = False + for event in self.event_list: + if ( + event.onset == get_current_step() + and not (event.name in self.run_event_list) + and event._check_requirements() + ): + event.run() + event_run = True + self.run_event_list.append(event.name) + + def _run_model_trigger_events(self): + """ + check the current model triggers stored in self.model_trigger_list + if they are activated --> run corresponding events + prevent that these model triggers are stored again in self.model_trigger_list + """ + ### loop to check if model trigger got active + for model_trigger in self.model_trigger_list: + if ( + int(get_population(model_trigger).decision[0]) == -1 + ): ### TODO this is not generalized yet, only works if the model_trigger populations have the variable decision which is set to -1 if the model trigger is active + ### -1 means got active + ### find the events triggerd by the model_trigger and run them + for event in self.event_list: + if event.model_trigger == model_trigger: + event.run() + 
self.run_event_list.append(event.name) + ### prevent that these model_triggers are used again + self.past_model_trigger_list.append(model_trigger) + + def _get_next_events_time(self): + """ + go through all events and get onsets + get onset which are > current_step + return smallest diff in ms (ms value = full timesteps!) + + Returns: + time (float): + time in ms until the next event, rounded to full timesteps + """ + next_event_time = np.inf + for event in self.event_list: + ### skip events without onset + if event.onset == None: + continue + ### check if onset in the future and nearest + if ( + event.onset > get_current_step() + and (event.onset - get_current_step()) < next_event_time + ): + next_event_time = event.onset - get_current_step() + ### return difference (simulation duration until nearest next event) in ms, round to full timesteps + return round(next_event_time * dt(), af.get_number_of_decimals(dt())) + + def _get_model_trigger_list(self): + """ + check if there are events with model_triggers + check if these model triggers already happened + check if the requirements of the events are met + not happend + requirements met --> add model_trigger to model_trigger_list + returns the (new) model_trigger_list + + Returns: + model_trigger_list (list): + list of model triggers which are not in past_model_trigger_list and + have their requirements met + """ + ret = [] + for event in self.event_list: + if event.model_trigger != None: + if ( + not (event.model_trigger in self.past_model_trigger_list) + and event._check_requirements() + ): + ret.append(event.model_trigger) + return ret + + def _end_sim(self): + """ + Event to end the simulation + """ + self.end = True + + class _Event: + """ + Class for events in the simulation + """ + + def __init__( + self, + trial_procedure, + name, + onset=None, + model_trigger=None, + requirement_string=None, + effect=None, + trigger=None, + ): + """ + Args: + trial_procedure (SimulationEvents): + SimulationEvents object + 
name (str): + name of the event + onset (int): + time in simulation steps when the event should occur + model_trigger (str): + name of population which can trigger the event (by setting variable + decision to -1) + requirement_string (str): + string containing the requirements for the event to occur TODO: replace with function + effect (function): + function which is executed during the event + trigger (dict): + dictionary containing the names of other events as keys and the + relative time in simulation steps to the onset of the current event as + values. The values can also be callable functions which return the + time (without any aruments). They are called when this event is triggered. + """ + self.trial_procedure = trial_procedure + self.name = name + self.onset = onset + self.model_trigger = model_trigger + self.requirement_string = requirement_string + self.effect = effect + self.trigger = trigger + + def run(self): + """ + Run the event i.e. execute the effect of the event and trigger other events + """ + ### check requirements + if self._check_requirements(): + ### run the event + if self.trial_procedure.verbose: + print("run event:", self.name, get_time()) + ### for events which are triggered by model --> set onset + if self.onset == None: + self.onset = get_current_step() + ### run the effect + if self.effect is not None: + self.effect() + ### trigger other events + if self.trigger is not None: + ### loop over all triggered events + for name, delay in self.trigger.items(): + ### get the other event + event_idx = self.trial_procedure.event_name_list.index(name) + ### set onset of other event + if callable(delay): + add = delay() + self.trial_procedure.event_list[event_idx].onset = ( + self.onset + add + ) + else: + self.trial_procedure.event_list[event_idx].onset = ( + self.onset + delay + ) + ### store event in happened events + self.trial_procedure.happened_event_list.append(self.name) + + ### TODO replace requirement_string with a function (which has 
access to the + ### attributes) checking the requirements + def _check_requirements(self): + """ + Check if the requirements for the event are met + + Returns: + met (bool): + True if requirements are met, False otherwise + """ + if self.requirement_string != None: + ### check requirement with requirement string + return self._eval_requirement_string() + else: + ### no requirement + return True + + def _eval_requirement_string(self): + """ + evaluates a condition string in format like 'XXX==XXX and (XXX==XXX or + XXX==XXX)' + + Returns: + met (bool): + True if requirements are met, False otherwise + """ + ### split condition string + string = self.requirement_string + string = string.split(" and ") + string = [sub_string.split(" or ") for sub_string in string] + + ### loop over string splitted string parts + final_string = [] + for sub_idx, sub_string in enumerate(string): + ### combine outer list eelemts with and + ### and combine inner list elements with or + if len(sub_string) == 1: + if sub_idx < len(string) - 1: + final_string.append( + self._get_condition_part(sub_string[0]) + " and " + ) + else: + final_string.append(self._get_condition_part(sub_string[0])) + else: + for sub_sub_idx, sub_sub_string in enumerate(sub_string): + if sub_sub_idx < len(sub_string) - 1: + final_string.append( + self._get_condition_part(sub_sub_string) + " or " + ) + elif sub_idx < len(string) - 1: + final_string.append( + self._get_condition_part(sub_sub_string) + " and " + ) + else: + final_string.append( + self._get_condition_part(sub_sub_string) + ) + return eval("".join(final_string)) + + def _get_condition_part(self, string): + """ + converts a string in format like '((XXX==XXX)' into '((True)' + """ + ### remove spaces from string + string = string.strip() + string = string.split() + string = "".join(string) + + ### recursively remove brackets + ### at the end evaluate term (without brackets) and then return the evaluated value with the former brackets + if string[0] == "(": 
+ return "(" + self._get_condition_part(string[1:]) + elif string[-1] == ")": + return self._get_condition_part(string[:-1]) + ")" + else: + return str(self._eval_condition_part(string)) + + def _eval_condition_part(self, string): + """ + gets string in format 'XXX==XXX' + + evaluates the term for mode and happened events + + returns True/False + """ + + var = string.split("==")[0] + val = string.split("==")[1] + if var == "mode": + test = self.trial_procedure.mode == val + elif var == "happened_event_list": + ### remove brackets + val = val.strip("[]") + ### split entries + val = val.split(",") + ### remove spaces from entries + happened_event_list_from_string = [val_val.strip() for val_val in val] + ### check if all events are in happened_event_list, if not --> return False + test = True + for event in happened_event_list_from_string: + if not (event in self.trial_procedure.happened_event_list): + test = False + return test diff --git a/src/CompNeuroPy/system_functions.py b/src/CompNeuroPy/system_functions.py index 799efae..125147d 100644 --- a/src/CompNeuroPy/system_functions.py +++ b/src/CompNeuroPy/system_functions.py @@ -7,6 +7,7 @@ from joblib import Parallel, delayed import inspect import subprocess +import textwrap def clear_dir(path): @@ -93,7 +94,7 @@ def save_variables(variable_list: list, name_list: list, path: str | list = "./" save path for all variables, or save path for each variable of the variable_list. Default: "./" - Examples: + Example: ```python import numpy as np from CompNeuroPy import save_variables, load_variables @@ -146,7 +147,7 @@ def load_variables(name_list: list, path: str | list = "./"): dictionary with the loaded variables, keys are the names of the files, values are the loaded variables - Examples: + Example: ```python import numpy as np from CompNeuroPy import save_variables, load_variables @@ -292,7 +293,7 @@ def create_data_raw_folder( **kwargs (Any, optional): Global variables of the caller script. 
- Examples: + Example: ```python from CompNeuroPy import create_data_raw_folder @@ -507,3 +508,120 @@ def create_data_raw_folder( f.write("# CompNeuroPy was installed locally with commit:\n") compneuropy_commit = compneuropy_git_log[0].replace("\n", "") f.write(f"# {compneuropy_commit}") + + +def _find_folder_with_prefix(base_path, prefix): + """ + Find a folder with a specified prefix in the given base path. + + Args: + base_path (str): + Path to the base directory to search in. + prefix (str): + Prefix of the folder to find. + + Returns: + str or None: + Name of the folder with the specified prefix if found, otherwise None. + """ + # List all items (files and directories) in the base_path + items = os.listdir(base_path) + + # Iterate through the items to find a folder with the specified prefix + for item in items: + item_path = os.path.join(base_path, item) + + # Check if the item is a directory and its name starts with the given prefix + if os.path.isdir(item_path) and item.startswith(prefix): + return item + + # If no folder with the specified prefix is found, return None + return None + + +class Logger: + """ + Logger singleton class to log the progress of the model configuration. Has to be + initialized with the path to the log file once.""" + + _instance = None + _log_file: str | None + _call_stack = "" + + def __new__(cls, log_file: str | None = None): + """ + Args: + log_file (str): + Path to the log file + """ + if cls._instance is None: + cls._instance = super(Logger, cls).__new__(cls) + cls._log_file = log_file + if log_file is not None: + with open(log_file, "w") as f: + print("Logger file:", file=f) + return cls._instance + + def log(self, txt): + """ + Log the given text to the log file. Only if the log file was given during + the first initialization. 
+ + Args: + txt (str): + Text to be logged + """ + if self._log_file is None: + return + + _, call_stack = self._trace_calls() + + if call_stack == self._call_stack: + txt = f"{textwrap.indent(str(txt), ' ')}" + else: + txt = f"\n[{call_stack}]:\n{textwrap.indent(str(txt), ' ')}" + + self._call_stack = call_stack + + with open(self._log_file, "a") as f: + print(txt, file=f) + + def _trace_calls(self): + # Get the call stack + stack = inspect.stack() + + call_stack = [] + for frame in stack: + # Get the function name + function_name = frame.function + # Check if it's a method of a class by looking for 'self' or 'cls' + locals = frame.frame.f_locals + if "self" in locals: + class_name = locals["self"].__class__.__name__ + full_name = f"{class_name}.{function_name}" + elif "cls" in locals: + class_name = locals["cls"].__name__ + full_name = f"{class_name}.{function_name}" + else: + # If function_name is '', replace it with the module name + if function_name == "": + module_name = frame.frame.f_globals["__name__"] + full_name = f"{module_name}" + else: + full_name = function_name + call_stack.append(full_name) + + # Remove the first two elements of the call stack, which are the functions of + # the Logger class + call_stack = call_stack[2:] + + # Get the name of the current function + current_function_name = call_stack[0] + + # Reverse the call stack to get the order of the calls + call_stack = call_stack[::-1] + + # Convert the call stack to a string + call_stack = " -> ".join(call_stack) + + return current_function_name, call_stack