From b54c0b9571f3e1f17b9c3707e8abf0255664637b Mon Sep 17 00:00:00 2001 From: olimaol Date: Fri, 12 Jan 2024 15:38:26 +0100 Subject: [PATCH 01/44] updated site --- docs/index.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/index.md b/docs/index.md index ae58f2d..121d329 100644 --- a/docs/index.md +++ b/docs/index.md @@ -1,4 +1,6 @@ # Documentation for CompNeuroPy CompNeuroPy is an assisting Python package for working with ANNarchy ([GitHub](https://github.com/ANNarchy/ANNarchy), [documentation](https://annarchy.github.io/), [DOI](https://doi.org/10.5281/zenodo.6415039)). It is intended to help structure simulations with computational neuroscience models in a modular way and to make them more easily replicable. -People who want to start working with ANNarchy are strongly recommended to first learn exclusively the functionality of ANNarchy. CompNeuroPy uses very few features of ANNarchy at this time. But also adds various special features. \ No newline at end of file +People who want to start working with ANNarchy are strongly recommended to first learn exclusively the functionality of ANNarchy. CompNeuroPy uses very few features of ANNarchy at this time. But also adds various special features. 
+ +- v1.0.0: [![DOI](https://zenodo.org/badge/422217136.svg)](https://zenodo.org/doi/10.5281/zenodo.10497610) \ No newline at end of file From f159e7d90d2e841fd978f3f557791a8ae4374ea8 Mon Sep 17 00:00:00 2001 From: olimaol Date: Fri, 12 Jan 2024 15:38:54 +0100 Subject: [PATCH 02/44] updated site --- site/additional/analysis_functions/index.html | 34 ++++++------ site/additional/extra_functions/index.html | 38 ++++++------- site/additional/model_functions/index.html | 8 +-- .../simulation_functions/index.html | 8 +-- .../simulation_requirements/index.html | 6 +- site/additional/system_functions/index.html | 10 ++-- site/built_in/models/index.html | 14 ++--- site/built_in/neuron_models/index.html | 52 +++++++++--------- site/built_in/synapse_models/index.html | 2 +- site/index.html | 3 + site/main/dbs_stimulator/index.html | 10 ++-- site/main/define_experiment/index.html | 10 ++-- site/main/generate_models/index.html | 10 ++-- site/main/generate_simulations/index.html | 16 +++--- site/main/monitors_recordings/index.html | 32 +++++------ site/main/optimize_neuron/index.html | 6 +- site/search/search_index.json | 2 +- site/sitemap.xml.gz | Bin 127 -> 127 bytes 18 files changed, 132 insertions(+), 129 deletions(-) diff --git a/site/additional/analysis_functions/index.html b/site/additional/analysis_functions/index.html index ef5fb8d..f98a77f 100644 --- a/site/additional/analysis_functions/index.html +++ b/site/additional/analysis_functions/index.html @@ -1394,7 +1394,7 @@

TODO: CHeck if there are memory issues with large recordings or many subplots.

- Source code in src/CompNeuroPy/analysis_functions.py + Source code in CompNeuroPy/analysis_functions.py
1787
 1788
 1789
@@ -3255,7 +3255,7 @@ 

- Source code in src/CompNeuroPy/analysis_functions.py + Source code in CompNeuroPy/analysis_functions.py
1794
 1795
 1796
@@ -3523,7 +3523,7 @@ 

- Source code in src/CompNeuroPy/analysis_functions.py + Source code in CompNeuroPy/analysis_functions.py
15
 16
 17
@@ -3676,7 +3676,7 @@ 

- Source code in src/CompNeuroPy/analysis_functions.py + Source code in CompNeuroPy/analysis_functions.py
37
 38
 39
@@ -3848,7 +3848,7 @@ 

- Source code in src/CompNeuroPy/analysis_functions.py + Source code in CompNeuroPy/analysis_functions.py
69
 70
 71
@@ -4060,7 +4060,7 @@ 

- Source code in src/CompNeuroPy/analysis_functions.py + Source code in CompNeuroPy/analysis_functions.py
136
 137
 138
@@ -4496,7 +4496,7 @@ 

- Source code in src/CompNeuroPy/analysis_functions.py + Source code in CompNeuroPy/analysis_functions.py
275
 276
 277
@@ -4753,7 +4753,7 @@ 

- Source code in src/CompNeuroPy/analysis_functions.py + Source code in CompNeuroPy/analysis_functions.py
628
 629
 630
@@ -5108,7 +5108,7 @@ 

- Source code in src/CompNeuroPy/analysis_functions.py + Source code in CompNeuroPy/analysis_functions.py
726
 727
 728
@@ -5301,7 +5301,7 @@ 

- Source code in src/CompNeuroPy/analysis_functions.py + Source code in CompNeuroPy/analysis_functions.py
1508
 1509
 1510
@@ -5445,7 +5445,7 @@ 

- Source code in src/CompNeuroPy/analysis_functions.py + Source code in CompNeuroPy/analysis_functions.py
1538
 1539
 1540
@@ -5614,7 +5614,7 @@ 

- Source code in src/CompNeuroPy/analysis_functions.py + Source code in CompNeuroPy/analysis_functions.py
1567
 1568
 1569
@@ -5815,7 +5815,7 @@ 

- Source code in src/CompNeuroPy/analysis_functions.py + Source code in CompNeuroPy/analysis_functions.py
1602
 1603
 1604
@@ -6072,7 +6072,7 @@ 

- Source code in src/CompNeuroPy/analysis_functions.py + Source code in CompNeuroPy/analysis_functions.py
1687
 1688
 1689
@@ -6195,7 +6195,7 @@ 

- Source code in src/CompNeuroPy/analysis_functions.py + Source code in CompNeuroPy/analysis_functions.py
1705
 1706
 1707
@@ -6304,7 +6304,7 @@ 

- Source code in src/CompNeuroPy/analysis_functions.py + Source code in CompNeuroPy/analysis_functions.py
1723
 1724
 1725
@@ -6431,7 +6431,7 @@ 

- Source code in src/CompNeuroPy/analysis_functions.py + Source code in CompNeuroPy/analysis_functions.py
1750
 1751
 1752
diff --git a/site/additional/extra_functions/index.html b/site/additional/extra_functions/index.html
index 3f2820b..84e9dc4 100644
--- a/site/additional/extra_functions/index.html
+++ b/site/additional/extra_functions/index.html
@@ -1454,7 +1454,7 @@ 

with a value between 0 and 1 to get the corresponding rgb value.

- Source code in src/CompNeuroPy/extra_functions.py + Source code in CompNeuroPy/extra_functions.py
127
 128
 129
@@ -1653,7 +1653,7 @@ 

- Source code in src/CompNeuroPy/extra_functions.py + Source code in CompNeuroPy/extra_functions.py
133
 134
 135
@@ -1772,7 +1772,7 @@ 

- Source code in src/CompNeuroPy/extra_functions.py + Source code in CompNeuroPy/extra_functions.py
148
 149
 150
@@ -1889,7 +1889,7 @@ 

- Source code in src/CompNeuroPy/extra_functions.py + Source code in CompNeuroPy/extra_functions.py
169
 170
 171
@@ -1947,7 +1947,7 @@ 

Class to create a decision tree.

- Source code in src/CompNeuroPy/extra_functions.py + Source code in CompNeuroPy/extra_functions.py
350
 351
 352
@@ -2171,7 +2171,7 @@ 

- Source code in src/CompNeuroPy/extra_functions.py + Source code in CompNeuroPy/extra_functions.py
355
 356
 357
@@ -2292,7 +2292,7 @@ 

- Source code in src/CompNeuroPy/extra_functions.py + Source code in CompNeuroPy/extra_functions.py
364
 365
 366
@@ -2429,7 +2429,7 @@ 

- Source code in src/CompNeuroPy/extra_functions.py + Source code in CompNeuroPy/extra_functions.py
390
 391
 392
@@ -2529,7 +2529,7 @@ 

Class to create a node in a decision tree.

- Source code in src/CompNeuroPy/extra_functions.py + Source code in CompNeuroPy/extra_functions.py
452
 453
 454
@@ -2755,7 +2755,7 @@ 

- Source code in src/CompNeuroPy/extra_functions.py + Source code in CompNeuroPy/extra_functions.py
459
 460
 461
@@ -2896,7 +2896,7 @@ 

- Source code in src/CompNeuroPy/extra_functions.py + Source code in CompNeuroPy/extra_functions.py
485
 486
 487
@@ -2985,7 +2985,7 @@ 

- Source code in src/CompNeuroPy/extra_functions.py + Source code in CompNeuroPy/extra_functions.py
502
 503
 504
@@ -3069,7 +3069,7 @@ 

- Source code in src/CompNeuroPy/extra_functions.py + Source code in CompNeuroPy/extra_functions.py
14
 15
 16
@@ -3171,7 +3171,7 @@ 

- Source code in src/CompNeuroPy/extra_functions.py + Source code in CompNeuroPy/extra_functions.py
28
 29
 30
@@ -3320,7 +3320,7 @@ 

- Source code in src/CompNeuroPy/extra_functions.py + Source code in CompNeuroPy/extra_functions.py
59
 60
 61
@@ -3384,7 +3384,7 @@ 

- Source code in src/CompNeuroPy/extra_functions.py + Source code in CompNeuroPy/extra_functions.py
78
 79
 80
@@ -3511,7 +3511,7 @@ 

- Source code in src/CompNeuroPy/extra_functions.py + Source code in CompNeuroPy/extra_functions.py
 98
  99
 100
@@ -3716,7 +3716,7 @@ 

- Source code in src/CompNeuroPy/extra_functions.py + Source code in CompNeuroPy/extra_functions.py
203
 204
 205
@@ -4000,7 +4000,7 @@ 

- Source code in src/CompNeuroPy/extra_functions.py + Source code in CompNeuroPy/extra_functions.py
519
 520
 521
diff --git a/site/additional/model_functions/index.html b/site/additional/model_functions/index.html
index 8a42686..57f6809 100644
--- a/site/additional/model_functions/index.html
+++ b/site/additional/model_functions/index.html
@@ -1221,7 +1221,7 @@ 

- Source code in src/CompNeuroPy/model_functions.py + Source code in CompNeuroPy/model_functions.py
13
 14
 15
@@ -1320,7 +1320,7 @@ 

- Source code in src/CompNeuroPy/model_functions.py + Source code in CompNeuroPy/model_functions.py
38
 39
 40
@@ -1386,7 +1386,7 @@ 

- Source code in src/CompNeuroPy/model_functions.py + Source code in CompNeuroPy/model_functions.py
49
 50
 51
@@ -1507,7 +1507,7 @@ 

- Source code in src/CompNeuroPy/model_functions.py + Source code in CompNeuroPy/model_functions.py
64
 65
 66
diff --git a/site/additional/simulation_functions/index.html b/site/additional/simulation_functions/index.html
index bc2facb..98abd9d 100644
--- a/site/additional/simulation_functions/index.html
+++ b/site/additional/simulation_functions/index.html
@@ -1261,7 +1261,7 @@ 

- Source code in src/CompNeuroPy/simulation_functions.py + Source code in CompNeuroPy/simulation_functions.py
 4
  5
  6
@@ -1425,7 +1425,7 @@ 

- Source code in src/CompNeuroPy/simulation_functions.py + Source code in CompNeuroPy/simulation_functions.py
46
 47
 48
@@ -1628,7 +1628,7 @@ 

- Source code in src/CompNeuroPy/simulation_functions.py + Source code in CompNeuroPy/simulation_functions.py
 65
  66
  67
@@ -1869,7 +1869,7 @@ 

- Source code in src/CompNeuroPy/simulation_functions.py + Source code in CompNeuroPy/simulation_functions.py
118
 119
 120
diff --git a/site/additional/simulation_requirements/index.html b/site/additional/simulation_requirements/index.html
index 48511b6..4dba76c 100644
--- a/site/additional/simulation_requirements/index.html
+++ b/site/additional/simulation_requirements/index.html
@@ -1141,7 +1141,7 @@ 

- Source code in src/CompNeuroPy/simulation_requirements.py + Source code in CompNeuroPy/simulation_requirements.py
 4
  5
  6
@@ -1294,7 +1294,7 @@ 

- Source code in src/CompNeuroPy/simulation_requirements.py + Source code in CompNeuroPy/simulation_requirements.py
 9
 10
 11
@@ -1371,7 +1371,7 @@ 

- Source code in src/CompNeuroPy/simulation_requirements.py + Source code in CompNeuroPy/simulation_requirements.py
25
 26
 27
diff --git a/site/additional/system_functions/index.html b/site/additional/system_functions/index.html
index 1bba35d..039bc76 100644
--- a/site/additional/system_functions/index.html
+++ b/site/additional/system_functions/index.html
@@ -1195,7 +1195,7 @@ 

- Source code in src/CompNeuroPy/system_functions.py + Source code in CompNeuroPy/system_functions.py
 9
 10
 11
@@ -1328,7 +1328,7 @@ 

- Source code in src/CompNeuroPy/system_functions.py + Source code in CompNeuroPy/system_functions.py
37
 38
 39
@@ -1516,7 +1516,7 @@ 

- Source code in src/CompNeuroPy/system_functions.py + Source code in CompNeuroPy/system_functions.py
 82
  83
  84
@@ -1725,7 +1725,7 @@ 

- Source code in src/CompNeuroPy/system_functions.py + Source code in CompNeuroPy/system_functions.py
132
 133
 134
@@ -1883,7 +1883,7 @@ 

- Source code in src/CompNeuroPy/system_functions.py + Source code in CompNeuroPy/system_functions.py
186
 187
 188
diff --git a/site/built_in/models/index.html b/site/built_in/models/index.html
index 40eb514..e84dad9 100644
--- a/site/built_in/models/index.html
+++ b/site/built_in/models/index.html
@@ -1350,7 +1350,7 @@ 

- Source code in src/CompNeuroPy/full_models/bgm_22/bgm.py + Source code in CompNeuroPy/full_models/bgm_22/bgm.py
 21
  22
  23
@@ -2406,7 +2406,7 @@ 

- Source code in src/CompNeuroPy/full_models/bgm_22/bgm.py + Source code in CompNeuroPy/full_models/bgm_22/bgm.py
 52
  53
  54
@@ -2636,7 +2636,7 @@ 

- Source code in src/CompNeuroPy/full_models/bgm_22/bgm.py + Source code in CompNeuroPy/full_models/bgm_22/bgm.py
183
 184
 185
@@ -2724,7 +2724,7 @@ 

creates/compiles the network.

- Source code in src/CompNeuroPy/full_models/hodgkin_huxley_single_pop.py + Source code in CompNeuroPy/full_models/hodgkin_huxley_single_pop.py
11
 12
 13
@@ -2969,7 +2969,7 @@ 

- Source code in src/CompNeuroPy/full_models/hodgkin_huxley_single_pop.py + Source code in CompNeuroPy/full_models/hodgkin_huxley_single_pop.py
18
 19
 20
@@ -3091,7 +3091,7 @@ 

optionally creates/compiles the network.

- Source code in src/CompNeuroPy/full_models/hodgkin_huxley_single_pop.py + Source code in CompNeuroPy/full_models/hodgkin_huxley_single_pop.py
 69
  70
  71
@@ -3336,7 +3336,7 @@ 

- Source code in src/CompNeuroPy/full_models/hodgkin_huxley_single_pop.py + Source code in CompNeuroPy/full_models/hodgkin_huxley_single_pop.py
 76
  77
  78
diff --git a/site/built_in/neuron_models/index.html b/site/built_in/neuron_models/index.html
index 92dc1dc..eacaf8b 100644
--- a/site/built_in/neuron_models/index.html
+++ b/site/built_in/neuron_models/index.html
@@ -1726,7 +1726,7 @@ 

- Source code in src/CompNeuroPy/neuron_models/final_models/artificial_nm.py + Source code in CompNeuroPy/neuron_models/final_models/artificial_nm.py
 7
  8
  9
@@ -2027,7 +2027,7 @@ 

- Source code in src/CompNeuroPy/neuron_models/final_models/artificial_nm.py + Source code in CompNeuroPy/neuron_models/final_models/artificial_nm.py
 99
 100
 101
@@ -2268,7 +2268,7 @@ 

- Source code in src/CompNeuroPy/neuron_models/final_models/artificial_nm.py + Source code in CompNeuroPy/neuron_models/final_models/artificial_nm.py
180
 181
 182
@@ -2451,7 +2451,7 @@ 

- Source code in src/CompNeuroPy/neuron_models/final_models/artificial_nm.py + Source code in CompNeuroPy/neuron_models/final_models/artificial_nm.py
227
 228
 229
@@ -2676,7 +2676,7 @@ 

- Source code in src/CompNeuroPy/neuron_models/final_models/artificial_nm.py + Source code in CompNeuroPy/neuron_models/final_models/artificial_nm.py
279
 280
 281
@@ -2883,7 +2883,7 @@ 

- Source code in src/CompNeuroPy/neuron_models/final_models/H_and_H_like_nm.py + Source code in CompNeuroPy/neuron_models/final_models/H_and_H_like_nm.py
332
 333
 334
@@ -3106,7 +3106,7 @@ 

- Source code in src/CompNeuroPy/neuron_models/final_models/H_and_H_like_nm.py + Source code in CompNeuroPy/neuron_models/final_models/H_and_H_like_nm.py
400
 401
 402
@@ -3310,7 +3310,7 @@ 

- Source code in src/CompNeuroPy/neuron_models/final_models/H_and_H_like_nm.py + Source code in CompNeuroPy/neuron_models/final_models/H_and_H_like_nm.py
472
 473
 474
@@ -3457,7 +3457,7 @@ 

- Source code in src/CompNeuroPy/neuron_models/final_models/H_and_H_like_nm.py + Source code in CompNeuroPy/neuron_models/final_models/H_and_H_like_nm.py
561
 562
 563
@@ -3615,7 +3615,7 @@ 

- Source code in src/CompNeuroPy/neuron_models/final_models/H_and_H_like_nm.py + Source code in CompNeuroPy/neuron_models/final_models/H_and_H_like_nm.py
514
 515
 516
@@ -3933,7 +3933,7 @@ 

- Source code in src/CompNeuroPy/neuron_models/final_models/izhikevich_2003_like_nm.py + Source code in CompNeuroPy/neuron_models/final_models/izhikevich_2003_like_nm.py
  7
   8
   9
@@ -4398,7 +4398,7 @@ 

- Source code in src/CompNeuroPy/neuron_models/final_models/izhikevich_2003_like_nm.py + Source code in CompNeuroPy/neuron_models/final_models/izhikevich_2003_like_nm.py
107
 108
 109
@@ -4900,7 +4900,7 @@ 

- Source code in src/CompNeuroPy/neuron_models/final_models/izhikevich_2003_like_nm.py + Source code in CompNeuroPy/neuron_models/final_models/izhikevich_2003_like_nm.py
218
 219
 220
@@ -5436,7 +5436,7 @@ 

- Source code in src/CompNeuroPy/neuron_models/final_models/izhikevich_2003_like_nm.py + Source code in CompNeuroPy/neuron_models/final_models/izhikevich_2003_like_nm.py
339
 340
 341
@@ -5962,7 +5962,7 @@ 

- Source code in src/CompNeuroPy/neuron_models/final_models/izhikevich_2003_like_nm.py + Source code in CompNeuroPy/neuron_models/final_models/izhikevich_2003_like_nm.py
461
 462
 463
@@ -6500,7 +6500,7 @@ 

- Source code in src/CompNeuroPy/neuron_models/final_models/izhikevich_2003_like_nm.py + Source code in CompNeuroPy/neuron_models/final_models/izhikevich_2003_like_nm.py
582
 583
 584
@@ -6964,7 +6964,7 @@ 

- Source code in src/CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py + Source code in CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py
 41
  42
  43
@@ -7335,7 +7335,7 @@ 

- Source code in src/CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py + Source code in CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py
122
 123
 124
@@ -7730,7 +7730,7 @@ 

- Source code in src/CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py + Source code in CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py
216
 217
 218
@@ -8172,7 +8172,7 @@ 

- Source code in src/CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py + Source code in CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py
305
 306
 307
@@ -8677,7 +8677,7 @@ 

- Source code in src/CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py + Source code in CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py
411
 412
 413
@@ -9217,7 +9217,7 @@ 

- Source code in src/CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py + Source code in CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py
527
 528
 529
@@ -9769,7 +9769,7 @@ 

- Source code in src/CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py + Source code in CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py
649
 650
 651
@@ -10368,7 +10368,7 @@ 

- Source code in src/CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py + Source code in CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py
771
 772
 773
@@ -11030,7 +11030,7 @@ 

- Source code in src/CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py + Source code in CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py
 917
  918
  919
@@ -11661,7 +11661,7 @@ 

- Source code in src/CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py + Source code in CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py
1071
 1072
 1073
diff --git a/site/built_in/synapse_models/index.html b/site/built_in/synapse_models/index.html
index 90250d8..d267c9c 100644
--- a/site/built_in/synapse_models/index.html
+++ b/site/built_in/synapse_models/index.html
@@ -1104,7 +1104,7 @@ 

- Source code in src/CompNeuroPy/synapse_models/synapse_models.py + Source code in CompNeuroPy/synapse_models/synapse_models.py
 4
  5
  6
diff --git a/site/index.html b/site/index.html
index 5976fac..f7b32af 100644
--- a/site/index.html
+++ b/site/index.html
@@ -986,6 +986,9 @@
 

Documentation for CompNeuroPy#

CompNeuroPy is an assisting Python package for working with ANNarchy (GitHub, documentation, DOI). It is intended to help structure simulations with computational neuroscience models in a modular way and to make them more easily replicable. People who want to start working with ANNarchy are strongly recommended to first learn exclusively the functionality of ANNarchy. CompNeuroPy uses very few features of ANNarchy at this time. But also adds various special features.

+
    +
  • v1.0.0: DOI
  • +
diff --git a/site/main/dbs_stimulator/index.html b/site/main/dbs_stimulator/index.html index af672b3..2f818a8 100644 --- a/site/main/dbs_stimulator/index.html +++ b/site/main/dbs_stimulator/index.html @@ -1188,7 +1188,7 @@

- Source code in src/CompNeuroPy/dbs.py + Source code in CompNeuroPy/dbs.py
 944
  945
  946
@@ -3223,7 +3223,7 @@ 

- Source code in src/CompNeuroPy/dbs.py + Source code in CompNeuroPy/dbs.py
 994
  995
  996
@@ -3776,7 +3776,7 @@ 

- Source code in src/CompNeuroPy/dbs.py + Source code in CompNeuroPy/dbs.py
1618
 1619
 1620
@@ -3985,7 +3985,7 @@ 

Deactivate DBS.

- Source code in src/CompNeuroPy/dbs.py + Source code in CompNeuroPy/dbs.py
1750
 1751
 1752
@@ -4086,7 +4086,7 @@ 

- Source code in src/CompNeuroPy/dbs.py + Source code in CompNeuroPy/dbs.py
1763
 1764
 1765
diff --git a/site/main/define_experiment/index.html b/site/main/define_experiment/index.html
index 7732733..fb5c198 100644
--- a/site/main/define_experiment/index.html
+++ b/site/main/define_experiment/index.html
@@ -1201,7 +1201,7 @@ 

- Source code in src/CompNeuroPy/experiment.py + Source code in CompNeuroPy/experiment.py
  7
   8
   9
@@ -1613,7 +1613,7 @@ 

- Source code in src/CompNeuroPy/experiment.py + Source code in CompNeuroPy/experiment.py
41
 42
 43
@@ -1761,7 +1761,7 @@ 

- Source code in src/CompNeuroPy/experiment.py + Source code in CompNeuroPy/experiment.py
 56
  57
  58
@@ -1931,7 +1931,7 @@ 

- Source code in src/CompNeuroPy/experiment.py + Source code in CompNeuroPy/experiment.py
108
 109
 110
@@ -2037,7 +2037,7 @@ 

- Source code in src/CompNeuroPy/experiment.py + Source code in CompNeuroPy/experiment.py
170
 171
 172
diff --git a/site/main/generate_models/index.html b/site/main/generate_models/index.html
index c9b6aa5..adce4be 100644
--- a/site/main/generate_models/index.html
+++ b/site/main/generate_models/index.html
@@ -1385,7 +1385,7 @@ 

- Source code in src/CompNeuroPy/generate_model.py + Source code in CompNeuroPy/generate_model.py
 10
  11
  12
@@ -2391,7 +2391,7 @@ 

- Source code in src/CompNeuroPy/generate_model.py + Source code in CompNeuroPy/generate_model.py
41
 42
 43
@@ -2535,7 +2535,7 @@ 

- Source code in src/CompNeuroPy/generate_model.py + Source code in CompNeuroPy/generate_model.py
148
 149
 150
@@ -2677,7 +2677,7 @@ 

- Source code in src/CompNeuroPy/generate_model.py + Source code in CompNeuroPy/generate_model.py
186
 187
 188
@@ -2873,7 +2873,7 @@ 

- Source code in src/CompNeuroPy/generate_model.py + Source code in CompNeuroPy/generate_model.py
252
 253
 254
diff --git a/site/main/generate_simulations/index.html b/site/main/generate_simulations/index.html
index 8403b3b..e7361ea 100644
--- a/site/main/generate_simulations/index.html
+++ b/site/main/generate_simulations/index.html
@@ -1408,7 +1408,7 @@ 

Class for generating a CompNeuroPy simulation.

- Source code in src/CompNeuroPy/generate_simulation.py + Source code in CompNeuroPy/generate_simulation.py
  8
   9
  10
@@ -2043,7 +2043,7 @@ 

- Source code in src/CompNeuroPy/generate_simulation.py + Source code in CompNeuroPy/generate_simulation.py
15
 16
 17
@@ -2221,7 +2221,7 @@ 

- Source code in src/CompNeuroPy/generate_simulation.py + Source code in CompNeuroPy/generate_simulation.py
 77
  78
  79
@@ -2419,7 +2419,7 @@ 

- Source code in src/CompNeuroPy/generate_simulation.py + Source code in CompNeuroPy/generate_simulation.py
174
 175
 176
@@ -2574,7 +2574,7 @@ 

- Source code in src/CompNeuroPy/generate_simulation.py + Source code in CompNeuroPy/generate_simulation.py
229
 230
 231
@@ -2751,7 +2751,7 @@ 

- Source code in src/CompNeuroPy/generate_simulation.py + Source code in CompNeuroPy/generate_simulation.py
256
 257
 258
@@ -3215,7 +3215,7 @@ 

- Source code in src/CompNeuroPy/generate_simulation.py + Source code in CompNeuroPy/generate_simulation.py
279
 280
 281
@@ -3398,7 +3398,7 @@ 

- Source code in src/CompNeuroPy/generate_simulation.py + Source code in CompNeuroPy/generate_simulation.py
322
 323
 324
diff --git a/site/main/monitors_recordings/index.html b/site/main/monitors_recordings/index.html
index e2f8d85..1541b30 100644
--- a/site/main/monitors_recordings/index.html
+++ b/site/main/monitors_recordings/index.html
@@ -1610,7 +1610,7 @@ 

Class to bring together ANNarchy monitors into one object.

- Source code in src/CompNeuroPy/monitors.py + Source code in CompNeuroPy/monitors.py
 17
  18
  19
@@ -2875,7 +2875,7 @@ 

- Source code in src/CompNeuroPy/monitors.py + Source code in CompNeuroPy/monitors.py
22
 23
 24
@@ -2953,7 +2953,7 @@ 

- Source code in src/CompNeuroPy/monitors.py + Source code in CompNeuroPy/monitors.py
78
 79
 80
@@ -3041,7 +3041,7 @@ 

- Source code in src/CompNeuroPy/monitors.py + Source code in CompNeuroPy/monitors.py
 96
  97
  98
@@ -3215,7 +3215,7 @@ 

- Source code in src/CompNeuroPy/monitors.py + Source code in CompNeuroPy/monitors.py
114
 115
 116
@@ -3401,7 +3401,7 @@ 

- Source code in src/CompNeuroPy/monitors.py + Source code in CompNeuroPy/monitors.py
184
 185
 186
@@ -3496,7 +3496,7 @@ 

- Source code in src/CompNeuroPy/monitors.py + Source code in CompNeuroPy/monitors.py
209
 210
 211
@@ -3605,7 +3605,7 @@ 

- Source code in src/CompNeuroPy/monitors.py + Source code in CompNeuroPy/monitors.py
241
 242
 243
@@ -3731,7 +3731,7 @@ 

- Source code in src/CompNeuroPy/monitors.py + Source code in CompNeuroPy/monitors.py
272
 273
 274
@@ -3807,7 +3807,7 @@ 

- Source code in src/CompNeuroPy/monitors.py + Source code in CompNeuroPy/monitors.py
624
 625
 626
@@ -4575,7 +4575,7 @@ 

- Source code in src/CompNeuroPy/monitors.py + Source code in CompNeuroPy/monitors.py
625
 626
 627
@@ -4647,7 +4647,7 @@ 

- Source code in src/CompNeuroPy/monitors.py + Source code in CompNeuroPy/monitors.py
635
 636
 637
@@ -4752,7 +4752,7 @@ 

- Source code in src/CompNeuroPy/monitors.py + Source code in CompNeuroPy/monitors.py
661
 662
 663
@@ -4851,7 +4851,7 @@ 

- Source code in src/CompNeuroPy/monitors.py + Source code in CompNeuroPy/monitors.py
688
 689
 690
@@ -4963,7 +4963,7 @@ 

- Source code in src/CompNeuroPy/monitors.py + Source code in CompNeuroPy/monitors.py
698
 699
 700
@@ -5125,7 +5125,7 @@ 

- Source code in src/CompNeuroPy/monitors.py + Source code in CompNeuroPy/monitors.py
719
 720
 721
diff --git a/site/main/optimize_neuron/index.html b/site/main/optimize_neuron/index.html
index 904026a..4a09d93 100644
--- a/site/main/optimize_neuron/index.html
+++ b/site/main/optimize_neuron/index.html
@@ -1429,7 +1429,7 @@ 

This class is used to optimize neuron models with ANNarchy.

- Source code in src/CompNeuroPy/opt_neuron.py + Source code in CompNeuroPy/opt_neuron.py
 36
  37
  38
@@ -3331,7 +3331,7 @@ 

- Source code in src/CompNeuroPy/opt_neuron.py + Source code in CompNeuroPy/opt_neuron.py
 43
  44
  45
@@ -3746,7 +3746,7 @@ 

- Source code in src/CompNeuroPy/opt_neuron.py + Source code in CompNeuroPy/opt_neuron.py
796
 797
 798
diff --git a/site/search/search_index.json b/site/search/search_index.json
index 24f0fd7..121035e 100644
--- a/site/search/search_index.json
+++ b/site/search/search_index.json
@@ -1 +1 @@
-{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Documentation for CompNeuroPy","text":"

CompNeuroPy is an assisting Python package for working with ANNarchy (GitHub, documentation, DOI). It is intended to help structure simulations with computational neuroscience models in a modular way and to make them more easily replicable. People who want to start working with ANNarchy are strongly recommended to first learn exclusively the functionality of ANNarchy. CompNeuroPy uses very few features of ANNarchy at this time. But also adds various special features.

"},{"location":"installation/","title":"Installation","text":"

From PyPI using pip:

pip install CompNeuroPy\n

With downloaded source code; using pip in the top-level directory of the downloaded source code:

pip install .\n

or in development mode:

pip install -e .\n

You must install ANNarchy separately, best after CompNeuroPy.

git clone https://github.com/ANNarchy/ANNarchy\ncd ANNarchy\ngit checkout develop\npip install .\ncd ..\nrm -rf ANNarchy\n

Optional install torch, sbi, and hyperopt to be able to use OptNeuron

pip install torch sbi hyperopt\n

"},{"location":"license/","title":"License","text":""},{"location":"license/#mit-license","title":"MIT License","text":"

Copyright (c) 2022 Oliver Maith

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

"},{"location":"additional/analysis_functions/","title":"Analysis Functions","text":""},{"location":"additional/analysis_functions/#CompNeuroPy.analysis_functions.PlotRecordings","title":"PlotRecordings","text":"

Plot recordings from CompNeuroMonitors.

TODO: CHeck if there are memory issues with large recordings or many subplots.

Source code in src/CompNeuroPy/analysis_functions.py
class PlotRecordings:\n    \"\"\"\n    Plot recordings from CompNeuroMonitors.\n\n    TODO: CHeck if there are memory issues with large recordings or many subplots.\n    \"\"\"\n\n    @check_types()\n    def __init__(\n        self,\n        figname: str,\n        recordings: list[dict],\n        recording_times: RecordingTimes,\n        shape: tuple[int, int],\n        plan: dict,\n        chunk: int = 0,\n        time_lim: None | tuple[float, float] = None,\n        dpi: int = 300,\n    ) -> None:\n        \"\"\"\n        Create and save the plot.\n\n        Args:\n            figname (str):\n                The name of the figure to be saved.\n            recordings (list):\n                A recordings list obtained from CompNeuroMonitors.\n            recording_times (RecordingTimes):\n                The RecordingTimes object containing the recording times obtained from\n                CompNeuroMonitors.\n            shape (tuple):\n                The shape of the figure. (number of rows, number of columns)\n            plan (dict):\n                Defines which recordings are plotted in which subplot and how. The plan\n                has to contain the following keys: \"position\", \"compartment\",\n                \"variable\", \"format\". The values of the keys have to be lists of the\n                same length. The values of the key \"position\" have to be integers\n                between 1 and the number of subplots (defined by shape). The values of\n                the key \"compartment\" have to be the names of the model compartments as\n                strings. The values of the key \"variable\" have to be strings containing\n                the names of the recorded variables or equations using the recorded\n                variables. The values of the key \"format\" have to be strings defining\n                how the recordings are plotted. 
The following formats are available for\n                spike recordings: \"raster\", \"mean\", \"hybrid\", \"interspike\". The\n                following formats are available for other recordings: \"line\",\n                \"line_mean\", \"matrix\", \"matrix_mean\".\n            chunk (int, optional):\n                The chunk of the recordings to be plotted. Default: 0.\n            time_lim (tuple, optional):\n                Defines the x-axis for all subplots. The tuple contains two\n                numbers: start and end time in ms. The times have to be\n                within the chunk. Default: None, i.e., the whole chunk is plotted.\n            dpi (int, optional):\n                The dpi of the saved figure. Default: 300.\n        \"\"\"\n        ### print start message\n        print(f\"Generate fig {figname}\", end=\"... \", flush=True)\n\n        ### set attributes\n        self.figname = figname\n        self.recordings = recordings\n        self.recording_times = recording_times\n        self.shape = shape\n        self.plan = plan\n        self.chunk = chunk\n        self.time_lim = time_lim\n        self.dpi = dpi\n\n        ### get available compartments (from recordings) and recorded variables for each\n        ### compartment\n        (\n            self._compartment_list,\n            self._compartment_recordings_dict,\n        ) = self._get_compartment_recordings()\n\n        ### check plan keys and values\n        self._check_plan()\n\n        ### get start and end time for plotting and timestep\n        self._start_time, self._end_time, self._time_step = self._get_start_end_time()\n\n        ### get compbined time array for recordings of each compartment\n        self._time_arr_list = self._get_time_arr_list()\n\n        ### get data from recordings for each subplot\n        self._raw_data_list = self._get_raw_data_list()\n\n        ### create plot\n        self._plot()\n\n        ### print end message\n        print(\"Done\\n\")\n\n   
 def _get_compartment_recordings(self):\n        \"\"\"\n        Get available compartment names from recordings.\n        Get recorded variables (names) for each compartment.\n\n        Returns:\n            compartment_list (list):\n                List of compartment names.\n            compartment_recordings_dict (dict):\n                Dictionary with compartment names as keys and list of recorded variables\n                as values.\n        \"\"\"\n        ### check if chunk is valid\n        if self.chunk >= len(self.recordings) or self.chunk < 0:\n            print(\n                f\"\\nERROR PlotRecordings: chunk {self.chunk} is not valid.\\n\"\n                f\"Number of chunks: {len(self.recordings)}\\n\"\n            )\n            quit()\n\n        ### get compartment names and recorded variables for each compartment\n        compartment_list = []\n        compartment_recordings_dict = {}\n        for recordings_key in self.recordings[self.chunk].keys():\n            if \";\" not in recordings_key:\n                continue\n\n            ### get compartment\n            compartment, recorded_variable = recordings_key.split(\";\")\n            if compartment not in compartment_list:\n                compartment_list.append(compartment)\n                compartment_recordings_dict[compartment] = []\n\n            ### get recordings for compartment\n            if recorded_variable != \"period\" and recorded_variable != \"parameter_dict\":\n                compartment_recordings_dict[compartment].append(recorded_variable)\n\n        return compartment_list, compartment_recordings_dict\n\n    def _check_plan(self):\n        \"\"\"\n        Check if plan is valid.\n        \"\"\"\n\n        ### check if plan keys are valid\n        valid_keys = [\"position\", \"compartment\", \"variable\", \"format\"]\n        for key in self.plan.keys():\n            if key not in valid_keys:\n                print(\n                    f\"\\nERROR PlotRecordings: 
plan key {key} is not valid.\\n\"\n                    f\"Valid keys are {valid_keys}.\\n\"\n                )\n                quit()\n\n        ### check if plan values are valid (have same length)\n        for key in self.plan.keys():\n            if len(self.plan[key]) != len(self.plan[\"position\"]):\n                print(\n                    f\"\\nERROR PlotRecordings: plan value of key '{key}' has not the same length as plan value of key 'position'.\\n\"\n                )\n                quit()\n\n        ### check if plan positions are valid\n        ### check if min and max are valid\n        if get_minimum(self.plan[\"position\"]) < 1:\n            print(\n                f\"\\nERROR PlotRecordings: plan position has to be >= 1.\\n\"\n                f\"plan position: {self.plan['position']}\\n\"\n            )\n            quit()\n        if get_maximum(self.plan[\"position\"]) > self.shape[0] * self.shape[1]:\n            print(\n                f\"\\nERROR PlotRecordings: plan position has to be <= shape[0] * shape[1].\\n\"\n                f\"plan position: {self.plan['position']}\\n\"\n                f\"shape: {self.shape}\\n\"\n            )\n            quit()\n        ### check if plan positions are unique\n        if len(np.unique(self.plan[\"position\"])) != len(self.plan[\"position\"]):\n            print(\n                f\"\\nERROR PlotRecordings: plan position has to be unique.\\n\"\n                f\"plan position: {self.plan['position']}\\n\"\n            )\n            quit()\n\n        ### check if plan compartments are valid\n        for compartment in self.plan[\"compartment\"]:\n            if compartment not in self._compartment_list:\n                print(\n                    f\"\\nERROR PlotRecordings: plan compartment {compartment} is not valid.\\n\"\n                    f\"Valid compartments are {self._compartment_list}.\\n\"\n                )\n                quit()\n\n        ### check if plan variables are valid\n    
    for plot_idx in range(len(self.plan[\"variable\"])):\n            compartment = self.plan[\"compartment\"][plot_idx]\n            variable: str = self.plan[\"variable\"][plot_idx]\n            ### check if variable contains a mathematical expression\n            if \"+\" in variable or \"-\" in variable or \"*\" in variable or \"/\" in variable:\n                ### separate variables\n                variable = variable.replace(\" \", \"\")\n                variable = variable.replace(\"+\", \" \")\n                variable = variable.replace(\"-\", \" \")\n                variable = variable.replace(\"*\", \" \")\n                variable = variable.replace(\"/\", \" \")\n                variables_list = variable.split(\" \")\n                ### remove numbers\n                variables_list = [var for var in variables_list if not var.isdigit()]\n                ### spike and axon_spike are not allowed in equations\n                if \"spike\" in variables_list or \"axon_spike\" in variables_list:\n                    print(\n                        f\"\\nERROR PlotRecordings: plan variable {variable} is not valid.\\n\"\n                        f\"Variables 'spike' and 'axon_spike' are not allowed in equations.\\n\"\n                    )\n                    quit()\n            else:\n                variables_list = [variable]\n            ### check if variables are valid\n            for var in variables_list:\n                if var not in self._compartment_recordings_dict[compartment]:\n                    print(\n                        f\"\\nERROR PlotRecordings: plan variable {var} is not valid for compartment {compartment}.\\n\"\n                        f\"Valid variables are {self._compartment_recordings_dict[compartment]}.\\n\"\n                    )\n                    quit()\n\n        ### check if plan formats are valid\n        valid_formats_spike = [\"raster\", \"mean\", \"hybrid\", \"interspike\", \"cv\"]\n        valid_formats_other = 
[\"line\", \"line_mean\", \"matrix\", \"matrix_mean\"]\n        for plot_idx in range(len(self.plan[\"format\"])):\n            variable = self.plan[\"variable\"][plot_idx]\n            format = self.plan[\"format\"][plot_idx]\n            ### check if format is valid\n            if variable == \"spike\" or variable == \"axon_spike\":\n                if format not in valid_formats_spike:\n                    print(\n                        f\"\\nERROR PlotRecordings: plan format {format} is not valid for variable {variable}.\\n\"\n                        f\"Valid formats are {valid_formats_spike}.\\n\"\n                    )\n                    quit()\n            else:\n                if format not in valid_formats_other:\n                    print(\n                        f\"\\nERROR PlotRecordings: plan format {format} is not valid for variable {variable}.\\n\"\n                        f\"Valid formats are {valid_formats_other}.\\n\"\n                    )\n                    quit()\n\n    def _get_start_end_time(self):\n        \"\"\"\n        Check if time_lim is given and valid. 
If it's not given get it from recordings.\n        Get timestep from recordings.\n\n        Returns:\n            start_time (float):\n                The start time of the recordings.\n            end_time (float):\n                The end time of the recordings.\n            time_step (float):\n                The timestep of the recordings.\n\n        Raises:\n            ValueError: If given time_lim is not within the chunk.\n        \"\"\"\n\n        chunk_time_lims = self.recording_times.time_lims(chunk=self.chunk)\n        ### check if time_lim is given\n        if isinstance(self.time_lim, type(None)):\n            ### get start and end time from recording_times\n            start_time, end_time = chunk_time_lims\n        else:\n            ### check if time_lim is within chunk\n            if (\n                self.time_lim[0] < chunk_time_lims[0]\n                or self.time_lim[1] > chunk_time_lims[1]\n            ):\n                raise ValueError(\n                    f\"\\nERROR PlotRecordings: time_lim {self.time_lim} is not within chunk.\\n\"\n                    f\"chunk time lims: {chunk_time_lims[0]} - {chunk_time_lims[1]}\\n\"\n                )\n            start_time, end_time = self.time_lim\n\n        ### get timestep\n        time_step = self.recordings[self.chunk][\"dt\"]\n\n        return start_time, end_time, time_step\n\n    def _get_time_arr_list(self):\n        \"\"\"\n        Get combined time array for each subplot of plan.\n\n        Returns:\n            time_arr_list (list):\n                List with time arrays for each subplot of plan.\n        \"\"\"\n        ### loop over compartments of plan\n        time_arr_dict = {}\n        for compartment in np.unique(self.plan[\"compartment\"]):\n            actual_period = self.recordings[self.chunk][f\"{compartment};period\"]\n\n            ### get time array for each recording period of the chunk\n            time_arr_period_list = []\n            nr_periods = 
self.recording_times._get_nr_periods(\n                chunk=self.chunk, compartment=compartment\n            )\n            for period in range(nr_periods):\n                time_lims = self.recording_times.time_lims(\n                    chunk=self.chunk, compartment=compartment, period=period\n                )\n                start_time_preiod = time_lims[0]\n                end_time_period = round(\n                    time_lims[1] + actual_period, get_number_of_decimals(actual_period)\n                )\n                time_arr_period_list.append(\n                    np.arange(start_time_preiod, end_time_period, actual_period)\n                )\n\n            ### combine time arrays of periods\n            time_arr_dict[compartment] = np.concatenate(time_arr_period_list)\n\n        ### get time array for each subplot of plan\n        time_arr_list = []\n        for plot_idx in range(len(self.plan[\"position\"])):\n            compartment = self.plan[\"compartment\"][plot_idx]\n            time_arr_list.append(time_arr_dict[compartment])\n\n        return time_arr_list\n\n    def _get_raw_data_list(self):\n        \"\"\"\n        Get raw data for each subplot of plan.\n\n        Returns:\n            data_list (dict):\n                List with data for each subplot of plan.\n        \"\"\"\n        data_list = []\n        ### loop over subplots of plan\n        for plot_idx in range(len(self.plan[\"position\"])):\n            compartment = self.plan[\"compartment\"][plot_idx]\n            variable: str = self.plan[\"variable\"][plot_idx]\n            ### check if variable is equation\n            if \"+\" in variable or \"-\" in variable or \"*\" in variable or \"/\" in variable:\n                ### get the values of the recorded variables of the compartment, store\n                ### them in dict\n                value_dict = {\n                    rec_var_name: self.recordings[self.chunk][\n                        f\"{compartment};{rec_var_name}\"\n   
                 ]\n                    for rec_var_name in self._compartment_recordings_dict[compartment]\n                }\n                ### evaluate equation with these values\n                variable_data = ef.evaluate_expression_with_dict(\n                    expression=variable, value_dict=value_dict\n                )\n            else:\n                ### get data from recordings\n                variable_data = self.recordings[self.chunk][f\"{compartment};{variable}\"]\n            ### append data to data_list\n            data_list.append(variable_data)\n\n        return data_list\n\n    def _plot(self):\n        \"\"\"\n        Create plot.\n        \"\"\"\n        ### create figure\n        plt.figure(figsize=([6.4 * self.shape[1], 4.8 * self.shape[0]]))\n\n        ### loop over subplots of plan\n        for plot_idx in range(len(self.plan[\"position\"])):\n            ### create subplot\n            plt.subplot(self.shape[0], self.shape[1], self.plan[\"position\"][plot_idx])\n\n            ### fill subplot\n            self._fill_subplot(plot_idx)\n\n        ### save figure\n        plt.tight_layout()\n        figname_parts = self.figname.split(\"/\")\n        if len(figname_parts) > 1:\n            save_dir = \"/\".join(figname_parts[:-1])\n            sf.create_dir(save_dir)\n        plt.savefig(self.figname, dpi=self.dpi)\n        plt.close()\n\n    def _fill_subplot(self, plot_idx):\n        \"\"\"\n        Fill subplot with data.\n\n        Args:\n            plot_idx (int):\n                The index of the subplot in the plan.\n        \"\"\"\n        variable: str = self.plan[\"variable\"][plot_idx]\n\n        ### general subplot settings\n        plt.xlabel(\"time [ms]\")\n        plt.xlim(self._start_time, self._end_time)\n\n        if variable == \"spike\" or variable == \"axon_spike\":\n            ### spike recordings\n            self._fill_subplot_spike(plot_idx)\n        else:\n            ### other (array) recordings\n           
 self._fill_subplot_other(plot_idx)\n\n    def _fill_subplot_spike(self, plot_idx):\n        \"\"\"\n        Fill subplot with spike data.\n\n        Args:\n            plot_idx (int):\n                The index of the subplot in the plan.\n        \"\"\"\n        ### get data\n        compartment = self.plan[\"compartment\"][plot_idx]\n        format: str = self.plan[\"format\"][plot_idx]\n        data = self._raw_data_list[plot_idx]\n\n        ### get spike times and ranks\n        spike_times, spike_ranks = my_raster_plot(data)\n        spike_times = spike_times * self._time_step\n\n        ### get spikes within time_lims\n        mask: np.ndarray = (\n            (spike_times >= self._start_time).astype(int)\n            * (spike_times <= self._end_time).astype(int)\n        ).astype(bool)\n\n        ### check if there are no spikes\n        if mask.size == 0:\n            ### set title\n            plt.title(f\"Spikes {compartment}\")\n            ### print warning\n            print(\n                f\"\\n  WARNING PlotRecordings: {compartment} does not contain any spikes in the given time interval.\"\n            )\n            ### plot text\n            plt.text(\n                0.5,\n                0.5,\n                f\"{compartment} does not contain any spikes.\",\n                va=\"center\",\n                ha=\"center\",\n            )\n            plt.xticks([])\n            plt.yticks([])\n            plt.xlim(0, 1)\n            plt.xlabel(\"\")\n            return\n\n        ### plot raster plot\n        if format == \"raster\" or format == \"hybrid\":\n            self._raster_plot(compartment, spike_ranks, spike_times, mask)\n\n        ### plot mean firing rate\n        if format == \"mean\" or format == \"hybrid\":\n            self._mean_firing_rate_plot(compartment, data, format)\n\n        ### plot interspike interval histogram\n        if format == \"interspike\":\n            self._interspike_interval_plot(compartment, data)\n\n     
   ### plot coefficient of variation histogram\n        if format == \"cv\":\n            self._coefficient_of_variation_plot(compartment, data)\n\n    def _raster_plot(self, compartment, spike_ranks, spike_times, mask):\n        \"\"\"\n        Plot raster plot.\n\n        Args:\n            compartment (str):\n                The name of the compartment.\n            spike_ranks (array):\n                The spike ranks.\n            spike_times (array):\n                The spike times.\n            mask (array):\n                The mask for the spike times.\n        \"\"\"\n        ### set title\n        plt.title(f\"Spikes {compartment} ({spike_ranks.max() + 1})\")\n        ### check if there is only one neuron\n        if spike_ranks.max() == 0:\n            marker, size = [\"|\", 3000]\n        else:\n            marker, size = [\".\", 3]\n        ### plot spikes\n        plt.scatter(\n            spike_times[mask],\n            spike_ranks[mask],\n            color=\"k\",\n            marker=marker,\n            s=size,\n            linewidth=0.1,\n        )\n        ### set limits\n        plt.ylim(-0.5, spike_ranks.max() + 0.5)\n        ### set ylabel\n        plt.ylabel(\"# neurons\")\n        ### set yticks\n        if spike_ranks.max() == 0:\n            plt.yticks([0])\n        else:\n            plt.gca().yaxis.set_major_locator(MaxNLocator(integer=True))\n\n    def _mean_firing_rate_plot(self, compartment, data, format):\n        \"\"\"\n        Plot mean firing rate.\n\n        Args:\n            compartment (str):\n                The name of the compartment.\n            data (array):\n                The spike data.\n            format (str):\n                The format of the plot.\n        \"\"\"\n        ### set title\n        plt.title(f\"Activity {compartment} ({len(data)})\")\n        ### set axis\n        ax = plt.gca()\n        color = \"k\"\n        ### for hybrid format plot mean firing rate in second y-axis\n        if format == 
\"hybrid\":\n            ax = plt.gca().twinx()\n            color = \"r\"\n        ### get mean firing rate\n        time_arr, firing_rate = get_pop_rate(\n            spikes=data,\n            t_start=self._start_time,\n            t_end=self._end_time,\n            time_step=self._time_step,\n        )\n        ### plot mean firing rate\n        ax.plot(time_arr, firing_rate, color=color)\n        ### set limits\n        ax.set_xlim(self._start_time, self._end_time)\n        ### set ylabel\n        ax.set_ylabel(\"Mean firing rate [Hz]\", color=color)\n        ax.tick_params(axis=\"y\", colors=color)\n\n    def _interspike_interval_plot(self, compartment, data):\n        \"\"\"\n        Plot interspike interval histogram.\n\n        Args:\n            compartment (str):\n                The name of the compartment.\n            data (dict):\n                The spike data.\n        \"\"\"\n        ### set title\n        plt.title(f\"Interspike interval histogram {compartment} ({len(data)})\")\n        ### get interspike intervals\n        interspike_intervals_list = inter_spike_interval(spikes=data)\n        ### plot histogram\n        plt.hist(\n            interspike_intervals_list,\n            bins=100,\n            range=(0, 200),\n            density=True,\n            color=\"k\",\n        )\n        ### set limits\n        plt.xlim(0, 200)\n        ### set ylabel\n        plt.ylabel(\"Probability\")\n        plt.xlabel(\"Interspike interval [ms]\")\n\n    def _coefficient_of_variation_plot(self, compartment, data):\n        \"\"\"\n        Plot coefficient of variation histogram.\n\n        Args:\n            compartment (str):\n                The name of the compartment.\n            data (dict):\n                The spike data.\n        \"\"\"\n        ### set title\n        plt.title(f\"Coefficient of variation histogram {compartment} ({len(data)})\")\n        ### get coefficient of variation\n        coefficient_of_variation_dict = 
coefficient_of_variation(\n            spikes=data,\n            per_neuron=True,\n        )\n        coefficient_of_variation_list = list(coefficient_of_variation_dict.values())\n        ### plot histogram\n        plt.hist(\n            coefficient_of_variation_list,\n            bins=100,\n            range=(0, 2),\n            density=True,\n            color=\"k\",\n        )\n        ### set limits\n        plt.xlim(0, 2)\n        ### set ylabel\n        plt.ylabel(\"Probability\")\n        plt.xlabel(\"Coefficient of variation\")\n\n    def _fill_subplot_other(self, plot_idx):\n        \"\"\"\n        Fill subplot with array data.\n\n        Args:\n            plot_idx (int):\n                The index of the subplot in the plan.\n        \"\"\"\n        ### get data\n        compartment = self.plan[\"compartment\"][plot_idx]\n        variable: str = self.plan[\"variable\"][plot_idx]\n        format: str = self.plan[\"format\"][plot_idx]\n        data_arr = self._raw_data_list[plot_idx]\n        time_arr = self._time_arr_list[plot_idx]\n\n        ### get data within time_lims\n        mask: np.ndarray = (\n            (time_arr >= self._start_time).astype(int)\n            * (time_arr <= self._end_time).astype(int)\n        ).astype(bool)\n\n        ### fill gaps in time_arr and data_arr with nan\n        time_arr, data_arr = time_data_add_nan(\n            time_arr=time_arr[mask], data_arr=data_arr[mask], axis=0\n        )\n\n        ### plot line plot\n        if \"line\" in format:\n            self._line_plot(\n                compartment,\n                variable,\n                time_arr,\n                data_arr,\n                plot_idx,\n                mean=\"mean\" in format,\n            )\n\n        ### plot matrix plot\n        if \"matrix\" in format:\n            self._matrix_plot(\n                compartment,\n                variable,\n                time_arr,\n                data_arr,\n                plot_idx,\n                
mean=\"mean\" in format,\n            )\n\n    def _line_plot(self, compartment, variable, time_arr, data_arr, plot_idx, mean):\n        \"\"\"\n        Plot line plot.\n\n        Args:\n            compartment (str):\n                The name of the compartment.\n            variable (str):\n                The name of the variable.\n            time_arr (array):\n                The time array.\n            data_arr (array):\n                The data array.\n            plot_idx (int):\n                The index of the subplot in the plan.\n            mean (bool):\n                If True, plot the mean of the data. Population: average over neurons.\n                Projection: average over preneurons (results in one line for each\n                postneuron).\n        \"\"\"\n\n        ### set title\n        plt.title(f\"Variable {variable} of {compartment} ({data_arr.shape[1]})\")\n\n        ### Shape of data defines how to plot\n        ### 2D array where elements are no lists\n        ### = population data [time, neurons]\n        ### --> plot line for each neuron\n        if len(data_arr.shape) == 2 and isinstance(data_arr[0, 0], list) is not True:\n            ### mean -> average over neurons\n            if mean:\n                data_arr = np.mean(data_arr, 1, keepdims=True)\n            ### plot line for each neuron\n            for neuron in range(data_arr.shape[1]):\n                plt.plot(\n                    time_arr,\n                    data_arr[:, neuron],\n                    color=\"k\",\n                )\n\n        ### 2D array where elements are lists\n        ### = projection data [time, postneurons][preneurons]\n        ### 3D array\n        ### = projection data [time, postneurons, preneurons]\n        ### --> plot line for each preneuron postneuron pair\n        elif len(data_arr.shape) == 3 or (\n            len(data_arr.shape) == 2 and isinstance(data_arr[0, 0], list) is True\n        ):\n            ### plot line for each preneuron 
postneuron pair\n            for post_neuron in range(data_arr.shape[1]):\n                ### the post_neuron has a constant number of preneurons\n                ### --> create array with preneuron indices [time, preneurons]\n                post_neuron_data = np.array(data_arr[:, post_neuron])\n                ### mean -> average over preneurons\n                if mean:\n                    post_neuron_data = np.mean(post_neuron_data, 1, keepdims=True)\n                for pre_neuron in range(post_neuron_data.shape[1]):\n                    plt.plot(\n                        time_arr,\n                        post_neuron_data[:, pre_neuron],\n                        color=\"k\",\n                    )\n        else:\n            print(\n                f\"\\nERROR PlotRecordings: shape of data not supported, {compartment}, {variable} in plot {plot_idx}.\\n\"\n            )\n\n    def _matrix_plot(self, compartment, variable, time_arr, data_arr, plot_idx, mean):\n        \"\"\"\n        Plot matrix plot.\n\n        Args:\n            compartment (str):\n                The name of the compartment.\n            variable (str):\n                The name of the variable.\n            time_arr (array):\n                The time array.\n            data_arr (array):\n                The data array.\n            plot_idx (int):\n                The index of the subplot in the plan.\n            mean (bool):\n                If True, plot the mean of the data. Population: average over neurons.\n                Projection: average over preneurons (results in one line for each\n                postneuron).\n        \"\"\"\n        ### number of neurons i.e. 
postneurons\n        nr_neurons = data_arr.shape[1]\n\n        ### Shape of data defines how to plot\n        ### 2D array where elements are no lists\n        ### = population data [time, neurons]\n        ### --> plot matrix row for each neuron\n        ### mean -> average over neurons\n        if len(data_arr.shape) == 2 and isinstance(data_arr[0, 0], list) is not True:\n            ### mean -> average over neurons\n            if mean:\n                data_arr = np.mean(data_arr, 1, keepdims=True)\n\n        ### 2D array where elements are lists\n        ### = projection data [time, postneurons][preneurons]\n        ### 3D array\n        ### = projection data [time, postneurons, preneurons]\n        ### --> plot matrix row for each preneuron postneuron pair (has to reshape to 2D array [time, neuron pair])\n        ### mean -> average over preneurons\n        elif len(data_arr.shape) == 3 or (\n            len(data_arr.shape) == 2 and isinstance(data_arr[0, 0], list) is True\n        ):\n            array_2D_list = []\n            ### loop over postneurons\n            for post_neuron in range(data_arr.shape[1]):\n                ### the post_neuron has a constant number of preneurons\n                ### --> create array with preneuron indices [time, preneurons]\n                post_neuron_data = np.array(data_arr[:, post_neuron])\n                ### mean --> average over preneurons\n                if mean:\n                    post_neuron_data = np.mean(post_neuron_data, 1, keepdims=True)\n                ### append all preneurons arrays to array_2D_list\n                for pre_neuron in range(post_neuron_data.shape[1]):\n                    array_2D_list.append(post_neuron_data[:, pre_neuron])\n                ### append a None array to array_2D_list to separate postneurons\n                array_2D_list.append(np.empty(post_neuron_data.shape[0]) * np.nan)\n\n            ### convert array_2D_list to 2D array, not use last None array\n            data_arr 
= np.array(array_2D_list[:-1]).T\n\n        ### some other shape not supported\n        else:\n            print(\n                f\"\\nERROR PlotRecordings: shape of data not supported, {compartment}, {variable} in plot {plot_idx}.\\n\"\n            )\n\n        ### plot matrix row for each neuron or preneuron postneuron pair\n        plt.imshow(\n            data_arr.T,\n            aspect=\"auto\",\n            vmin=np.nanmin(data_arr),\n            vmax=np.nanmax(data_arr),\n            extent=[\n                time_arr.min()\n                - self.recordings[self.chunk][f\"{compartment};period\"] / 2,\n                time_arr.max()\n                + self.recordings[self.chunk][f\"{compartment};period\"] / 2,\n                data_arr.shape[1] - 0.5,\n                -0.5,\n            ],\n            cmap=\"viridis\",\n            interpolation=\"none\",\n        )\n        if data_arr.shape[1] == 1:\n            plt.yticks([0])\n        else:\n            ### all y ticks\n            y_tick_positions_all_arr = np.arange(data_arr.shape[1])\n            ### boolean array of valid y ticks\n            valid_y_ticks = np.logical_not(np.isnan(data_arr).any(axis=0))\n            ### get y tick labels\n            if False in valid_y_ticks:\n                ### there are nan entries\n                ### split at nan entries\n                y_tick_positions_split_list = np.array_split(\n                    y_tick_positions_all_arr, np.where(np.logical_not(valid_y_ticks))[0]\n                )\n                ### decrease by 1 after each nan entry\n                y_tick_positions_split_list = [\n                    y_tick_positions_split - idx_split\n                    for idx_split, y_tick_positions_split in enumerate(\n                        y_tick_positions_split_list\n                    )\n                ]\n                ### join split arrays\n                y_tick_labels_all_arr = np.concatenate(y_tick_positions_split_list)\n            else:\n     
           y_tick_labels_all_arr = y_tick_positions_all_arr\n\n            valid_y_ticks_selected_idx_arr = np.linspace(\n                0,\n                np.sum(valid_y_ticks),\n                num=min([10, np.sum(valid_y_ticks)]),\n                dtype=int,\n                endpoint=False,\n            )\n            valid_y_ticks_selected_arr = y_tick_positions_all_arr[valid_y_ticks][\n                valid_y_ticks_selected_idx_arr\n            ]\n            valid_y_ticks_labels_selected_arr = y_tick_labels_all_arr[valid_y_ticks][\n                valid_y_ticks_selected_idx_arr\n            ]\n\n            plt.yticks(valid_y_ticks_selected_arr, valid_y_ticks_labels_selected_arr)\n\n        ### set title\n        plt.title(\n            f\"Variable {variable} of {compartment} ({nr_neurons}) [{ef.sci(np.nanmin(data_arr))}, {ef.sci(np.nanmax(data_arr))}]\"\n        )\n
"},{"location":"additional/analysis_functions/#CompNeuroPy.analysis_functions.PlotRecordings.__init__","title":"__init__(figname, recordings, recording_times, shape, plan, chunk=0, time_lim=None, dpi=300)","text":"

Create and save the plot.

Parameters:

Name Type Description Default figname str

The name of the figure to be saved.

required recordings list

A recordings list obtained from CompNeuroMonitors.

required recording_times RecordingTimes

The RecordingTimes object containing the recording times obtained from CompNeuroMonitors.

required shape tuple

The shape of the figure. (number of rows, number of columns)

required plan dict

Defines which recordings are plotted in which subplot and how. The plan has to contain the following keys: \"position\", \"compartment\", \"variable\", \"format\". The values of the keys have to be lists of the same length. The values of the key \"position\" have to be integers between 1 and the number of subplots (defined by shape). The values of the key \"compartment\" have to be the names of the model compartments as strings. The values of the key \"variable\" have to be strings containing the names of the recorded variables or equations using the recorded variables. The values of the key \"format\" have to be strings defining how the recordings are plotted. The following formats are available for spike recordings: \"raster\", \"mean\", \"hybrid\", \"interspike\". The following formats are available for other recordings: \"line\", \"line_mean\", \"matrix\", \"matrix_mean\".

required chunk int

The chunk of the recordings to be plotted. Default: 0.

0 time_lim tuple

Defines the x-axis for all subplots. The tuple contains two numbers: start and end time in ms. The times have to be within the chunk. Default: None, i.e., the whole chunk is plotted.

None dpi int

The dpi of the saved figure. Default: 300.

300 Source code in src/CompNeuroPy/analysis_functions.py
@check_types()\ndef __init__(\n    self,\n    figname: str,\n    recordings: list[dict],\n    recording_times: RecordingTimes,\n    shape: tuple[int, int],\n    plan: dict,\n    chunk: int = 0,\n    time_lim: None | tuple[float, float] = None,\n    dpi: int = 300,\n) -> None:\n    \"\"\"\n    Create and save the plot.\n\n    Args:\n        figname (str):\n            The name of the figure to be saved.\n        recordings (list):\n            A recordings list obtained from CompNeuroMonitors.\n        recording_times (RecordingTimes):\n            The RecordingTimes object containing the recording times obtained from\n            CompNeuroMonitors.\n        shape (tuple):\n            The shape of the figure. (number of rows, number of columns)\n        plan (dict):\n            Defines which recordings are plotted in which subplot and how. The plan\n            has to contain the following keys: \"position\", \"compartment\",\n            \"variable\", \"format\". The values of the keys have to be lists of the\n            same length. The values of the key \"position\" have to be integers\n            between 1 and the number of subplots (defined by shape). The values of\n            the key \"compartment\" have to be the names of the model compartments as\n            strings. The values of the key \"variable\" have to be strings containing\n            the names of the recorded variables or equations using the recorded\n            variables. The values of the key \"format\" have to be strings defining\n            how the recordings are plotted. The following formats are available for\n            spike recordings: \"raster\", \"mean\", \"hybrid\", \"interspike\". The\n            following formats are available for other recordings: \"line\",\n            \"line_mean\", \"matrix\", \"matrix_mean\".\n        chunk (int, optional):\n            The chunk of the recordings to be plotted. 
Default: 0.\n        time_lim (tuple, optional):\n            Defines the x-axis for all subplots. The tuple contains two\n            numbers: start and end time in ms. The times have to be\n            within the chunk. Default: None, i.e., the whole chunk is plotted.\n        dpi (int, optional):\n            The dpi of the saved figure. Default: 300.\n    \"\"\"\n    ### print start message\n    print(f\"Generate fig {figname}\", end=\"... \", flush=True)\n\n    ### set attributes\n    self.figname = figname\n    self.recordings = recordings\n    self.recording_times = recording_times\n    self.shape = shape\n    self.plan = plan\n    self.chunk = chunk\n    self.time_lim = time_lim\n    self.dpi = dpi\n\n    ### get available compartments (from recordings) and recorded variables for each\n    ### compartment\n    (\n        self._compartment_list,\n        self._compartment_recordings_dict,\n    ) = self._get_compartment_recordings()\n\n    ### check plan keys and values\n    self._check_plan()\n\n    ### get start and end time for plotting and timestep\n    self._start_time, self._end_time, self._time_step = self._get_start_end_time()\n\n    ### get compbined time array for recordings of each compartment\n    self._time_arr_list = self._get_time_arr_list()\n\n    ### get data from recordings for each subplot\n    self._raw_data_list = self._get_raw_data_list()\n\n    ### create plot\n    self._plot()\n\n    ### print end message\n    print(\"Done\\n\")\n
"},{"location":"additional/analysis_functions/#CompNeuroPy.analysis_functions.my_raster_plot","title":"my_raster_plot(spikes)","text":"

Returns two vectors representing for each recorded spike 1) the spike times and 2) the ranks of the neurons. The spike times are always in simulation steps (in contrast to default ANNarchy raster_plot).

Parameters:

Name Type Description Default spikes dict

ANNarchy spike dict of one population

required

Returns:

Name Type Description t array

spike times in simulation steps

n array

ranks of the neurons

Source code in src/CompNeuroPy/analysis_functions.py
def my_raster_plot(spikes: dict):\n    \"\"\"\n    Returns two vectors representing for each recorded spike 1) the spike times and 2)\n    the ranks of the neurons. The spike times are always in simulation steps (in\n    contrast to default ANNarchy raster_plot).\n\n    Args:\n        spikes (dict):\n            ANNarchy spike dict of one population\n\n    Returns:\n        t (array):\n            spike times in simulation steps\n        n (array):\n            ranks of the neurons\n    \"\"\"\n    t, n = raster_plot(spikes)\n    np.zeros(10)\n    t = np.round(t / dt(), 0).astype(int)\n    return t, n\n
"},{"location":"additional/analysis_functions/#CompNeuroPy.analysis_functions.get_nanmean","title":"get_nanmean(a, axis=None, dtype=None)","text":"

Same as np.nanmean but without printing warnings.

Parameters:

Name Type Description Default a array_like

Array containing numbers whose mean is desired. If a is not an array, a conversion is attempted.

required axis None or int or tuple of ints

Axis or axes along which the means are computed. The default is to compute the mean of the flattened array.

.. numpy versionadded:: 1.7.0

If this is a tuple of ints, a mean is performed over multiple axes, instead of a single axis or all the axes as before.

None dtype data - type

Type to use in computing the mean. For integer inputs, the default is float64; for floating point inputs, it is the same as the input dtype.

None

Returns:

Name Type Description m ndarray, see dtype parameter above

If out=None, returns a new array containing the mean values, otherwise a reference to the output array is returned.

Source code in src/CompNeuroPy/analysis_functions.py
def get_nanmean(a, axis=None, dtype=None):\n    \"\"\"\n    Same as np.nanmean but without printing warnings.\n\n    Args:\n        a (array_like):\n            Array containing numbers whose mean is desired. If `a` is not an\n            array, a conversion is attempted.\n        axis (None or int or tuple of ints, optional):\n            Axis or axes along which the means are computed. The default is to\n            compute the mean of the flattened array.\n\n            .. numpy versionadded:: 1.7.0\n\n            If this is a tuple of ints, a mean is performed over multiple axes,\n            instead of a single axis or all the axes as before.\n        dtype (data-type, optional):\n            Type to use in computing the mean.  For integer inputs, the default\n            is `float64`; for floating point inputs, it is the same as the\n            input dtype.\n\n    Returns:\n        m (ndarray, see dtype parameter above):\n            If `out=None`, returns a new array containing the mean values,\n            otherwise a reference to the output array is returned.\n    \"\"\"\n    with warnings.catch_warnings():\n        warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n        ret = np.nanmean(a, axis=axis, dtype=dtype)\n    return ret\n
"},{"location":"additional/analysis_functions/#CompNeuroPy.analysis_functions.get_nanstd","title":"get_nanstd(a, axis=None, dtype=None)","text":"

Same as np.nanstd but without printing warnings.

Parameters:

Name Type Description Default a array_like

Calculate the standard deviation of these values.

required axis None or int or tuple of ints

Axis or axes along which the standard deviation is computed. The default is to compute the standard deviation of the flattened array.

.. numpy versionadded:: 1.7.0

If this is a tuple of ints, a standard deviation is performed over multiple axes, instead of a single axis or all the axes as before.

None dtype dtype

Type to use in computing the standard deviation. For arrays of integer type the default is float64, for arrays of float types it is the same as the array type.

None

Returns:

Name Type Description standard_deviation ndarray, see dtype parameter above

If out is None, return a new array containing the standard deviation, otherwise return a reference to the output array.

Source code in src/CompNeuroPy/analysis_functions.py
def get_nanstd(a, axis=None, dtype=None):\n    \"\"\"\n    Same as np.nanstd but without printing warnings.\n\n    Args:\n        a (array_like):\n            Calculate the standard deviation of these values.\n        axis (None or int or tuple of ints, optional):\n            Axis or axes along which the standard deviation is computed. The\n            default is to compute the standard deviation of the flattened array.\n\n            .. numpy versionadded:: 1.7.0\n\n            If this is a tuple of ints, a standard deviation is performed over\n            multiple axes, instead of a single axis or all the axes as before.\n        dtype (dtype, optional):\n            Type to use in computing the standard deviation. For arrays of\n            integer type the default is float64, for arrays of float types it is\n            the same as the array type.\n\n    Returns:\n        standard_deviation (ndarray, see dtype parameter above):\n            If `out` is None, return a new array containing the standard deviation,\n            otherwise return a reference to the output array.\n    \"\"\"\n    with warnings.catch_warnings():\n        warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n        ret = np.nanstd(a, axis=axis, dtype=dtype)\n    return ret\n
"},{"location":"additional/analysis_functions/#CompNeuroPy.analysis_functions.get_population_power_spectrum","title":"get_population_power_spectrum(spikes, time_step, t_start=None, t_end=None, fft_size=None)","text":"

Generates power spectrum of population spikes, returns frequency_arr and power_spectrum_arr. Using the Welch method from: Welch, P. (1967). The use of fast Fourier transform for the estimation of power spectra: a method based on time averaging over short, modified periodograms. IEEE Transactions on audio and electroacoustics, 15(2), 70-73.

The spike arrays are split into multiple arrays and then multiple FFTs are performed and the results are averaged.

Size of splitted signals and the time step of the simulation determine the frequency resolution and the maximum frequency: maximum frequency [Hz] = 500 / time_step frequency resolution [Hz] = 1000 / (time_step * fftSize)

Parameters:

Name Type Description Default spikes dictionary

ANNarchy spike dict of one population

required time_step float

time step of the simulation in ms

required t_start float or int

start time of analyzed data in ms. Default: time of first spike

None t_end float or int

end time of analyzed data in ms. Default: time of last spike

None fft_size int

signal size for the FFT (size of splitted arrays) has to be a power of 2. Default: maximum

None

Returns:

Name Type Description frequency_arr array

array with frequencies

spectrum array

array with power spectrum

Source code in src/CompNeuroPy/analysis_functions.py
def get_population_power_spectrum(\n    spikes,\n    time_step,\n    t_start=None,\n    t_end=None,\n    fft_size=None,\n):\n    \"\"\"\n    Generates power spectrum of population spikes, returns frequency_arr and\n    power_spectrum_arr. Using the Welch methode from: Welch, P. (1967). The use of fast\n    Fourier transform for the estimation of power spectra: a method based on time\n    averaging over short, modified periodograms. IEEE Transactions on audio and\n    electroacoustics, 15(2), 70-73.\n\n    The spike arrays are splitted into multiple arrays and then multiple FFTs are\n    performed and the results are averaged.\n\n    Size of splitted signals and the time step of the simulation determine the frequency\n    resolution and the maximum frequency:\n        maximum frequency [Hz] = 500 / time_step\n        frequency resolution [Hz] = 1000 / (time_step * fftSize)\n\n    Args:\n        spikes (dicitonary):\n            ANNarchy spike dict of one population\n        time_step (float):\n            time step of the simulation in ms\n        t_start (float or int, optional):\n            start time of analyzed data in ms. Default: time of first spike\n        t_end (float or int, optional):\n            end time of analyzed data in ms. Default: time of last spike\n        fft_size (int, optional):\n            signal size for the FFT (size of splitted arrays)\n            has to be a power of 2. 
Default: maximum\n\n    Returns:\n        frequency_arr (array):\n            array with frequencies\n        spectrum (array):\n            array with power spectrum\n    \"\"\"\n\n    def ms_to_s(x):\n        return x / 1000\n\n    ### get population_size / sampling_frequency\n    populations_size = len(list(spikes.keys()))\n    sampling_frequency = 1 / ms_to_s(time_step)  # in Hz\n\n    ### check if there are spikes in data\n    t, _ = my_raster_plot(spikes)\n    if len(t) < 2:\n        ### there are no 2 spikes\n        print(\"WARNING: get_population_power_spectrum: <2 spikes!\")\n        ### --> return None or zeros\n        if fft_size == None:\n            print(\n                \"ERROR: get_population_power_spectrum: <2 spikes and no fft_size given!\"\n            )\n            quit()\n        else:\n            frequency_arr = np.fft.fftfreq(fft_size, 1.0 / sampling_frequency)\n            frequency_arr_ret = frequency_arr[2 : int(fft_size / 2)]\n            spectrum_ret = np.zeros(frequency_arr_ret.shape)\n            return [frequency_arr_ret, spectrum_ret]\n\n    ### check if t_start / t_end are None\n    if t_start == None:\n        t_start = round(t.min() * time_step, get_number_of_decimals(time_step))\n    if t_end == None:\n        t_end = round(t.max() * time_step, get_number_of_decimals(time_step))\n\n    ### calculate time\n    simulation_time = round(t_end - t_start, get_number_of_decimals(time_step))  # in ms\n\n    ### get fft_size\n    ### if None --> as large as possible\n    if fft_size is None:\n        pow = 1\n        while (2 ** (pow + 1)) / sampling_frequency < ms_to_s(simulation_time):\n            pow = pow + 1\n        fft_size = 2**pow\n\n    if ms_to_s(simulation_time) < (fft_size / sampling_frequency):\n        ### catch a too large fft_size\n        print(\n            f\"Too large fft_size {fft_size} for duration {simulation_time} ms. 
FFT_size has to be smaller than {int(ms_to_s(simulation_time)*sampling_frequency)}!\"\n        )\n        return [np.zeros(int(fft_size / 2 - 2)), np.zeros(int(fft_size / 2 - 2))]\n    elif (np.log2(fft_size) - int(np.log2(fft_size))) != 0:\n        ### catch fft_size if its not power of 2\n        print(\"FFT_size hast to be power of 2!\")\n        return [np.zeros(int(fft_size / 2 - 2)), np.zeros(int(fft_size / 2 - 2))]\n    else:\n        print(\n            f\"power sepctrum, min = {1000 / (time_step * fft_size)}, max = {500 / time_step}\"\n        )\n        ### calculate frequency powers\n        spectrum = np.zeros((populations_size, fft_size))\n        for neuron in range(populations_size):\n            ### sampling steps array\n            spiketrain = np.zeros(\n                int(np.round(ms_to_s(simulation_time) * sampling_frequency))\n            )\n            ### spike times as sampling steps\n            idx = (\n                np.round(\n                    ms_to_s((np.array(spikes[neuron]) * time_step)) * sampling_frequency\n                )\n            ).astype(np.int32)\n            ### cut the spikes before t_start and after t_end\n            idx_start = ms_to_s(t_start) * sampling_frequency\n            idx_end = ms_to_s(t_end) * sampling_frequency\n            mask = ((idx > idx_start).astype(int) * (idx < idx_end).astype(int)).astype(\n                bool\n            )\n            idx = (idx[mask] - idx_start).astype(np.int32)\n\n            ### set spiketrain array to one if there was a spike at sampling step\n            spiketrain[idx] = 1\n\n            ### generate multiple overlapping sequences out of the spike trains\n            spiketrain_sequences = _hanning_split_overlap(\n                spiketrain, fft_size, int(fft_size / 2)\n            )\n\n            ### generate power spectrum\n            spectrum[neuron] = get_nanmean(\n                np.abs(np.fft.fft(spiketrain_sequences)) ** 2, 0\n            )\n\n        ### 
mean spectrum over all neurons\n        spectrum = get_nanmean(spectrum, 0)\n\n        frequency_arr = np.fft.fftfreq(fft_size, 1.0 / sampling_frequency)\n\n        return (frequency_arr[2 : int(fft_size / 2)], spectrum[2 : int(fft_size / 2)])\n
"},{"location":"additional/analysis_functions/#CompNeuroPy.analysis_functions.get_power_spektrum_from_time_array","title":"get_power_spektrum_from_time_array(arr, presimulationTime, simulationTime, simulation_dt, samplingfrequency=250, fftSize=1024)","text":"

Generates power spectrum of time signal (returns frequencies_arr and power_arr). Using the Welch method (Welch, 1967).

samplingfrequency: to sample the arr, in Hz --> max frequency = samplingfrequency / 2 fftSize: signal size for FFT, duration (in s) = fftSize / samplingfrequency --> frequency resolution = samplingfrequency / fftSize

Parameters:

Name Type Description Default arr array

time array, value for each timestep

required presimulationTime float or int

simulation time which will not be analyzed

required simulationTime float or int

analyzed simulation time

required simulation_dt float or int

simulation timestep

required samplingfrequency float or int

sampling frequency for sampling the time array. Default: 250

250 fftSize int

signal size for the FFT (size of splitted arrays) has to be a power of 2. Default: 1024

1024

Returns:

Name Type Description frequency_arr array

array with frequencies

spectrum array

array with power spectrum

Source code in src/CompNeuroPy/analysis_functions.py
def get_power_spektrum_from_time_array(\n    arr,\n    presimulationTime,\n    simulationTime,\n    simulation_dt,\n    samplingfrequency=250,\n    fftSize=1024,\n):\n    \"\"\"\n    Generates power spectrum of time signal (returns frequencies_arr and power_arr).\n    Using the Welch methode (Welch,1967).\n\n    amplingfrequency: to sample the arr, in Hz --> max frequency = samplingfrequency / 2\n    fftSize: signal size for FFT, duration (in s) = fftSize / samplingfrequency\n    --> frequency resolution = samplingfrequency / fftSize\n\n    Args:\n        arr (array):\n            time array, value for each timestep\n        presimulationTime (float or int):\n            simulation time which will not be analyzed\n        simulationTime (float or int):\n            analyzed simulation time\n        simulation_dt (float or int):\n            simulation timestep\n        samplingfrequency (float or int, optional):\n            sampling frequency for sampling the time array. Default: 250\n        fftSize (int, optional):\n            signal size for the FFT (size of splitted arrays)\n            has to be a power of 2. 
Default: 1024\n\n    Returns:\n        frequency_arr (array):\n            array with frequencies\n        spectrum (array):\n            array with power spectrum\n    \"\"\"\n\n    if (simulationTime / 1000) < (fftSize / samplingfrequency):\n        print(\"Simulation time has to be >=\", fftSize / samplingfrequency, \"s for FFT!\")\n        return [np.zeros(int(fftSize / 2 - 2)), np.zeros(int(fftSize / 2 - 2))]\n    else:\n        ### sampling steps array\n        sampling_arr = arr[0 :: int((1 / samplingfrequency) * 1000 / simulation_dt)]\n\n        ### generate multiple overlapping sequences\n        sampling_arr_sequences = _hanning_split_overlap(\n            sampling_arr, fftSize, int(fftSize / 2)\n        )\n\n        ### generate power spectrum\n        spektrum = get_nanmean(np.abs(np.fft.fft(sampling_arr_sequences)) ** 2, 0)\n\n        frequenzen = np.fft.fftfreq(fftSize, 1.0 / samplingfrequency)\n\n        return (frequenzen[2 : int(fftSize / 2)], spektrum[2 : int(fftSize / 2)])\n
"},{"location":"additional/analysis_functions/#CompNeuroPy.analysis_functions.get_pop_rate","title":"get_pop_rate(spikes, t_start=None, t_end=None, time_step=1, t_smooth_ms=-1)","text":"

Generates a smoothed population firing rate. Returns a time array and a firing rate array.

Parameters:

Name Type Description Default spikes dictionary

ANNarchy spike dict of one population

required t_start float or int

start time of analyzed data in ms. Default: time of first spike

None t_end float or int

end time of analyzed data in ms. Default: time of last spike

None time_step float or int

time step of the simulation in ms. Default: 1

1 t_smooth_ms float or int

time window for firing rate calculation in ms, if -1 --> time window sizes are automatically detected. Default: -1

-1

Returns:

Name Type Description time_arr array

array with time steps in ms

rate array

array with population rate in Hz for each time step

Source code in src/CompNeuroPy/analysis_functions.py
def get_pop_rate(\n    spikes: dict,\n    t_start: float | int | None = None,\n    t_end: float | int | None = None,\n    time_step: float | int = 1,\n    t_smooth_ms: float | int = -1,\n):\n    \"\"\"\n    Generates a smoothed population firing rate. Returns a time array and a firing rate\n    array.\n\n    Args:\n        spikes (dictionary):\n            ANNarchy spike dict of one population\n        t_start (float or int, optional):\n            start time of analyzed data in ms. Default: time of first spike\n        t_end (float or int, optional):\n            end time of analyzed data in ms. Default: time of last spike\n        time_step (float or int, optional):\n            time step of the simulation in ms. Default: 1\n        t_smooth_ms (float or int, optional):\n            time window for firing rate calculation in ms, if -1 --> time window sizes\n            are automatically detected. Default: -1\n\n    Returns:\n        time_arr (array):\n            array with time steps in ms\n        rate (array):\n            array with population rate in Hz for each time step\n    \"\"\"\n    dt = time_step\n\n    t, _ = my_raster_plot(spikes)\n\n    ### check if there are spikes in population at all\n    if len(t) > 1:\n        if t_start == None:\n            t_start = round(t.min() * time_step, get_number_of_decimals(time_step))\n        if t_end == None:\n            t_end = round(t.max() * time_step, get_number_of_decimals(time_step))\n\n        duration = round(t_end - t_start, get_number_of_decimals(time_step))\n\n        ### if t_smooth is given --> use classic time_window method\n        if t_smooth_ms > 0:\n            return _get_pop_rate_old(\n                spikes, duration, dt=dt, t_start=t_start, t_smooth_ms=t_smooth_ms\n            )\n        else:\n            ### concatenate all spike times and sort them\n            spike_arr = dt * np.sort(\n                np.concatenate(\n                    [np.array(spikes[neuron]).astype(int) for neuron 
in spikes.keys()]\n                )\n            )\n            nr_neurons = len(list(spikes.keys()))\n            nr_spikes = spike_arr.size\n\n            ### use _recursive_rate to get firing rate\n            ### spike array is splitted in time bins\n            ### time bins widths are automatically found\n            time_population_rate, population_rate = _recursive_rate(\n                spike_arr / 1000.0,\n                t0=t_start / 1000.0,\n                t1=(t_start + duration) / 1000.0,\n                duration_init=duration / 1000.0,\n                nr_neurons=nr_neurons,\n                nr_spikes=nr_spikes,\n            )\n            ### time_population_rate was returned in s --> transform it into ms\n            time_population_rate = time_population_rate * 1000\n            time_arr0 = np.arange(t_start, t_start + duration, dt)\n            if len(time_population_rate) > 1:\n                ### interpolate\n                interpolate_func = interp1d(\n                    time_population_rate,\n                    population_rate,\n                    kind=\"linear\",\n                    bounds_error=False,\n                    fill_value=(population_rate[0], population_rate[-1]),\n                )\n                population_rate_arr = interpolate_func(time_arr0)\n            else:\n                population_rate_arr = np.zeros(len(time_arr0))\n                mask = time_arr0 == time_population_rate[0]\n                population_rate_arr[mask] = population_rate[0]\n\n            ret = population_rate_arr\n    else:\n        if t_start == None or t_end == None:\n            return None\n        else:\n            duration = t_end - t_start\n            ret = np.zeros(int(duration / dt))\n\n    return (np.arange(t_start, t_start + duration, dt), ret)\n
"},{"location":"additional/analysis_functions/#CompNeuroPy.analysis_functions.plot_recordings","title":"plot_recordings(figname, recordings, recording_times, chunk, shape, plan, time_lim=None, dpi=300)","text":"

Plots the recordings of a single chunk from recordings. Plotted variables are specified in plan.

Parameters:

Name Type Description Default figname str

path + name of figure (e.g. \"figures/my_figure.png\")

required recordings list

a recordings list from CompNeuroPy obtained with the function get_recordings() from a CompNeuroMonitors object.

required recording_times object

recording_times object from CompNeuroPy obtained with the function get_recording_times() from a CompNeuroMonitors object.

required chunk int

which chunk of recordings should be used (the index of chunk)

required shape tuple

Defines the subplot arrangement e.g. (3,2) = 3 rows, 2 columns

required plan list of strings

Defines which recordings are plotted in which subplot and how. Entries of the list have the structure: \"subplot_nr;model_component_name;variable_to_plot;format\", e.g. \"1,my_pop1;v;line\". mode: defines how the data is plotted, available modes: - for spike data: raster, mean, hybrid - for other data: line, mean, matrix - only for projection data: matrix_mean

required time_lim tuple

Defines the x-axis for all subplots. The list contains two numbers: start and end time in ms. The times have to be within the chunk. Default: None, i.e., time lims of chunk

None dpi int

The dpi of the saved figure. Default: 300

300 Source code in src/CompNeuroPy/analysis_functions.py
@check_types()\ndef plot_recordings(\n    figname: str,\n    recordings: list,\n    recording_times: RecordingTimes,\n    chunk: int,\n    shape: tuple,\n    plan: list[str],\n    time_lim: None | tuple = None,\n    dpi: int = 300,\n):\n    \"\"\"\n    Plots the recordings of a single chunk from recordings. Plotted variables are\n    specified in plan.\n\n    Args:\n        figname (str):\n            path + name of figure (e.g. \"figures/my_figure.png\")\n        recordings (list):\n            a recordings list from CompNeuroPy obtained with the function\n            get_recordings() from a CompNeuroMonitors object.\n        recording_times (object):\n            recording_times object from CompNeuroPy obtained with the\n            function get_recording_times() from a CompNeuroMonitors object.\n        chunk (int):\n            which chunk of recordings should be used (the index of chunk)\n        shape (tuple):\n            Defines the subplot arrangement e.g. (3,2) = 3 rows, 2 columns\n        plan (list of strings):\n            Defines which recordings are plotted in which subplot and how.\n            Entries of the list have the structure:\n                \"subplot_nr;model_component_name;variable_to_plot;format\",\n                e.g. \"1,my_pop1;v;line\".\n                mode: defines how the data is plotted, available modes:\n                    - for spike data: raster, mean, hybrid\n                    - for other data: line, mean, matrix\n                    - only for projection data: matrix_mean\n        time_lim (tuple, optional):\n            Defines the x-axis for all subplots. The list contains two\n            numbers: start and end time in ms. The times have to be\n            within the chunk. Default: None, i.e., time lims of chunk\n        dpi (int, optional):\n            The dpi of the saved figure. 
Default: 300\n    \"\"\"\n    proc = Process(\n        target=_plot_recordings,\n        args=(figname, recordings, recording_times, chunk, shape, plan, time_lim, dpi),\n    )\n    proc.start()\n    proc.join()\n    if proc.exitcode != 0:\n        quit()\n
"},{"location":"additional/analysis_functions/#CompNeuroPy.analysis_functions.get_number_of_zero_decimals","title":"get_number_of_zero_decimals(nr)","text":"

For numbers which are smaller than one, get the number of digits after the decimal point which are zero (plus 1). For the number 0 or numbers >=1 return zero, e.g.:

Parameters:

Name Type Description Default nr float or int

the number from which the number of digits are obtained

required

Returns:

Name Type Description decimals int

number of digits after the decimal point which are zero (plus 1)

Examples:

>>> get_number_of_zero_decimals(0.12)\n1\n>>> get_number_of_zero_decimals(0.012)\n2\n>>> get_number_of_zero_decimals(1.012)\n0\n
Source code in src/CompNeuroPy/analysis_functions.py
def get_number_of_zero_decimals(nr):\n    \"\"\"\n    For numbers which are smaller than zero get the number of digits after the decimal\n    point which are zero (plus 1). For the number 0 or numbers >=1 return zero, e.g.:\n\n    Args:\n        nr (float or int):\n            the number from which the number of digits are obtained\n\n    Returns:\n        decimals (int):\n            number of digits after the decimal point which are zero (plus 1)\n\n    Examples:\n        >>> get_number_of_zero_decimals(0.12)\n        1\n        >>> get_number_of_zero_decimals(0.012)\n        2\n        >>> get_number_of_zero_decimals(1.012)\n        0\n    \"\"\"\n    decimals = 0\n    if nr != 0:\n        while abs(nr) < 1:\n            nr = nr * 10\n            decimals = decimals + 1\n\n    return decimals\n
"},{"location":"additional/analysis_functions/#CompNeuroPy.analysis_functions.get_number_of_decimals","title":"get_number_of_decimals(nr)","text":"

Get number of digits after the decimal point.

Parameters:

Name Type Description Default nr float or int

the number from which the number of digits are obtained

required

Returns:

Name Type Description decimals int

number of digits after the decimal point

Examples:

>>> get_number_of_decimals(5)\n0\n>>> get_number_of_decimals(5.1)\n1\n>>> get_number_of_decimals(0.0101)\n4\n
Source code in src/CompNeuroPy/analysis_functions.py
def get_number_of_decimals(nr):\n    \"\"\"\n    Get number of digits after the decimal point.\n\n    Args:\n        nr (float or int):\n            the number from which the number of digits are obtained\n\n    Returns:\n        decimals (int):\n            number of digits after the decimal point\n\n    Examples:\n        >>> get_number_of_decimals(5)\n        0\n        >>> get_number_of_decimals(5.1)\n        1\n        >>> get_number_of_decimals(0.0101)\n        4\n    \"\"\"\n\n    if nr != int(nr):\n        decimals = len(str(nr).split(\".\")[1])\n    else:\n        decimals = 0\n\n    return decimals\n
"},{"location":"additional/analysis_functions/#CompNeuroPy.analysis_functions.sample_data_with_timestep","title":"sample_data_with_timestep(time_arr, data_arr, timestep)","text":"

Samples a data array each timestep using interpolation

Parameters:

Name Type Description Default time_arr array

times of data_arr in ms

required data_arr array

array with data values from which will be sampled

required timestep float or int

timestep in ms for sampling

required

Returns:

Name Type Description time_arr array

sampled time array

data_arr array

sampled data array

Source code in src/CompNeuroPy/analysis_functions.py
def sample_data_with_timestep(time_arr, data_arr, timestep):\n    \"\"\"\n    Samples a data array each timestep using interpolation\n\n    Args:\n        time_arr (array):\n            times of data_arr in ms\n        data_arr (array):\n            array with data values from which will be sampled\n        timestep (float or int):\n            timestep in ms for sampling\n\n    Returns:\n        time_arr (array):\n            sampled time array\n        data_arr (array):\n            sampled data array\n    \"\"\"\n    interpolate_func = interp1d(\n        time_arr, data_arr, bounds_error=False, fill_value=\"extrapolate\"\n    )\n    min_time = round(\n        round(time_arr[0] / timestep, 0) * timestep,\n        get_number_of_decimals(timestep),\n    )\n    max_time = round(\n        round(time_arr[-1] / timestep, 0) * timestep,\n        get_number_of_decimals(timestep),\n    )\n    new_time_arr = np.arange(min_time, max_time + timestep, timestep)\n    new_data_arr = interpolate_func(new_time_arr)\n\n    return (new_time_arr, new_data_arr)\n
"},{"location":"additional/analysis_functions/#CompNeuroPy.analysis_functions.time_data_add_nan","title":"time_data_add_nan(time_arr, data_arr, fill_time_step=None, axis=0)","text":"

If there are gaps in time_arr --> fill them with respective time values. Fill the corresponding data_arr values with nan.

By default it is tried to fill the time array with continuously increasing times based on the smallest time difference found there can still be discontinuities after filling the arrays (because existing time values are not changed).

But one can also give a fixed fill time step.

Parameters:

Name Type Description Default time_arr 1D array

times of data_arr in ms

required data_arr nD array

the size of the specified dimension of data array must have the same length as time_arr

required fill_time_step number, optional, default=None

if there are gaps they are filled with this time step

None axis int

which dimension of the data_arr belongs to the time_arr

0

Returns:

Name Type Description time_arr 1D array

time array with gaps filled

data_arr nD array

data array with gaps filled

Source code in src/CompNeuroPy/analysis_functions.py
def time_data_add_nan(time_arr, data_arr, fill_time_step=None, axis=0):\n    \"\"\"\n    If there are gaps in time_arr --> fill them with respective time values.\n    Fill the corresponding data_arr values with nan.\n\n    By default it is tried to fill the time array with continuously increasing times\n    based on the smallest time difference found there can still be discontinuities after\n    filling the arrays (because existing time values are not changed).\n\n    But one can also give a fixed fill time step.\n\n    Args:\n        time_arr (1D array):\n            times of data_arr in ms\n        data_arr (nD array):\n            the size of the specified dimension of data array must have the same length\n            as time_arr\n        fill_time_step (number, optional, default=None):\n            if there are gaps they are filled with this time step\n        axis (int):\n            which dimension of the data_arr belongs to the time_arr\n\n    Returns:\n        time_arr (1D array):\n            time array with gaps filled\n        data_arr (nD array):\n            data array with gaps filled\n    \"\"\"\n    time_arr = time_arr.astype(float)\n    data_arr = data_arr.astype(float)\n    data_arr_shape = data_arr.shape\n\n    if data_arr_shape[axis] != time_arr.size:\n        print(\n            \"ERROR time_data_add_nan: time_arr must have same length as specified axis (default=0) of data_arr!\"\n        )\n        quit()\n\n    ### find gaps\n    time_diff_arr = np.round(np.diff(time_arr), 6)\n    if isinstance(fill_time_step, type(None)):\n        time_diff_min = time_diff_arr.min()\n    else:\n        time_diff_min = fill_time_step\n    gaps_arr = time_diff_arr > time_diff_min\n\n    ### split arrays at gaps\n    time_arr_split = np.split(\n        time_arr, indices_or_sections=np.where(gaps_arr)[0] + 1, axis=0\n    )\n    data_arr_split = np.split(\n        data_arr, indices_or_sections=np.where(gaps_arr)[0] + 1, axis=axis\n    )\n\n    ### fill gaps 
between splits\n    data_arr_append_shape = list(data_arr_shape)\n    for split_arr_idx in range(len(time_arr_split) - 1):\n        ### get gaps boundaries\n        current_end = time_arr_split[split_arr_idx][-1]\n        next_start = time_arr_split[split_arr_idx + 1][0]\n        ### create gap filling arrays\n        time_arr_append = np.arange(\n            current_end + time_diff_min, next_start, time_diff_min\n        )\n        data_arr_append_shape[axis] = time_arr_append.size\n        data_arr_append = np.zeros(tuple(data_arr_append_shape)) * np.nan\n        ### append gap filling arrays to splitted arrays\n        time_arr_split[split_arr_idx] = np.append(\n            arr=time_arr_split[split_arr_idx],\n            values=time_arr_append,\n            axis=0,\n        )\n        data_arr_split[split_arr_idx] = np.append(\n            arr=data_arr_split[split_arr_idx],\n            values=data_arr_append,\n            axis=axis,\n        )\n\n    ### combine splitted arrays again\n    time_arr = np.concatenate(time_arr_split, axis=0)\n    data_arr = np.concatenate(data_arr_split, axis=axis)\n\n    return (time_arr, data_arr)\n
"},{"location":"additional/analysis_functions/#CompNeuroPy.analysis_functions.rmse","title":"rmse(a, b)","text":"

Calculates the root-mean-square error between two arrays.

Parameters:

Name Type Description Default a array

first array

required b array

second array

required

Returns:

Name Type Description rmse float

root-mean-square error

Source code in src/CompNeuroPy/analysis_functions.py
def rmse(a, b):\n    \"\"\"\n    Calculates the root-mean-square error between two arrays.\n\n    Args:\n        a (array):\n            first array\n        b (array):\n            second array\n\n    Returns:\n        rmse (float):\n            root-mean-square error\n    \"\"\"\n\n    return np.sqrt(np.mean((a - b) ** 2))\n
"},{"location":"additional/analysis_functions/#CompNeuroPy.analysis_functions.rsse","title":"rsse(a, b)","text":"

Calculates the root-sum-square error between two arrays.

Parameters:

Name Type Description Default a array

first array

required b array

second array

required

Returns:

Name Type Description rsse float

root-sum-square error

Source code in src/CompNeuroPy/analysis_functions.py
def rsse(a, b):\n    \"\"\"\n    Calculates the root-sum-square error between two arrays.\n\n    Args:\n        a (array):\n            first array\n        b (array):\n            second array\n\n    Returns:\n        rsse (float):\n            root-sum-square error\n    \"\"\"\n\n    return np.sqrt(np.sum((a - b) ** 2))\n
"},{"location":"additional/analysis_functions/#CompNeuroPy.analysis_functions.get_minimum","title":"get_minimum(input_data)","text":"

Returns the minimum of the input data.

Parameters:

Name Type Description Default input_data list, np.ndarray, tuple, or float

The input data from which the minimum is to be obtained.

required

Returns:

Name Type Description minimum float

The minimum of the input data.

Source code in src/CompNeuroPy/analysis_functions.py
def get_minimum(input_data: list | np.ndarray | tuple | float):\n    \"\"\"\n    Returns the minimum of the input data.\n\n    Args:\n        input_data (list, np.ndarray, tuple, or float):\n            The input data from which the minimum is to be obtained.\n\n    Returns:\n        minimum (float):\n            The minimum of the input data.\n    \"\"\"\n    if isinstance(input_data, (list, np.ndarray, tuple)):\n        # If the input is a list, numpy array, or tuple, we handle them as follows\n        flattened_list = [\n            item\n            for sublist in input_data\n            for item in (\n                sublist if isinstance(sublist, (list, np.ndarray, tuple)) else [sublist]\n            )\n        ]\n        return float(min(flattened_list))\n    else:\n        # If the input is a single value, return it as the minimum\n        return float(input_data)\n
"},{"location":"additional/analysis_functions/#CompNeuroPy.analysis_functions.get_maximum","title":"get_maximum(input_data)","text":"

Returns the maximum of the input data.

Parameters:

Name Type Description Default input_data list, np.ndarray, tuple, or float

The input data from which the maximum is to be obtained.

required

Returns:

Name Type Description maximum float

The maximum of the input data.

Source code in src/CompNeuroPy/analysis_functions.py
def get_maximum(input_data: list | np.ndarray | tuple | float):\n    \"\"\"\n    Returns the maximum of the input data.\n\n    Args:\n        input_data (list, np.ndarray, tuple, or float):\n            The input data from which the maximum is to be obtained.\n\n    Returns:\n        maximum (float):\n            The maximum of the input data.\n    \"\"\"\n\n    if isinstance(input_data, (list, np.ndarray, tuple)):\n        # If the input is a list, numpy array, or tuple, we handle them as follows\n        flattened_list = [\n            item\n            for sublist in input_data\n            for item in (\n                sublist if isinstance(sublist, (list, np.ndarray, tuple)) else [sublist]\n            )\n        ]\n        return float(max(flattened_list))\n    else:\n        # If the input is a single value, return it as the maximum\n        return float(input_data)\n
"},{"location":"additional/extra_functions/","title":"Extra Functions","text":""},{"location":"additional/extra_functions/#CompNeuroPy.extra_functions.Cmap","title":"Cmap","text":"

Class to create a colormap with a given name and range. The colormap can be called with a value between 0 and 1 to get the corresponding rgb value.

Source code in src/CompNeuroPy/extra_functions.py
class Cmap:\n    \"\"\"\n    Class to create a colormap with a given name and range. The colormap can be called\n    with a value between 0 and 1 to get the corresponding rgb value.\n    \"\"\"\n\n    def __init__(self, cmap_name, vmin, vmax):\n        \"\"\"\n        Args:\n            cmap_name (str):\n                Name of the colormap\n            vmin (float):\n                Lower limit of the colormap\n            vmax (float):\n                Upper limit of the colormap\n        \"\"\"\n        self.cmap_name = cmap_name\n        self.cmap = plt.get_cmap(cmap_name)\n        self.norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)\n        self.scalarMap = cm.ScalarMappable(norm=self.norm, cmap=self.cmap)\n\n    def __call__(self, x, alpha=1):\n        \"\"\"\n        Returns the rgba value of the colormap at the given value.\n\n        Args:\n            x (float):\n                Value between 0 and 1\n            alpha (float):\n                Alpha value of the rgba value\n\n        Returns:\n            rgba (tuple):\n                RGBA value of the colormap at the given value\n        \"\"\"\n        vals = self.get_rgb(x)\n        if isinstance(vals, tuple):\n            vals = vals[:3] + (alpha,)\n        else:\n            vals[:, -1] = alpha\n        return vals\n\n    def get_rgb(self, val):\n        \"\"\"\n        Returns the rgb value of the colormap at the given value.\n\n        Args:\n            val (float):\n                Value between 0 and 1\n\n        Returns:\n            rgb (tuple):\n                RGB value of the colormap at the given value\n        \"\"\"\n        return self.scalarMap.to_rgba(val)\n
"},{"location":"additional/extra_functions/#CompNeuroPy.extra_functions.Cmap.__init__","title":"__init__(cmap_name, vmin, vmax)","text":"

Parameters:

Name Type Description Default cmap_name str

Name of the colormap

required vmin float

Lower limit of the colormap

required vmax float

Upper limit of the colormap

required Source code in src/CompNeuroPy/extra_functions.py
def __init__(self, cmap_name, vmin, vmax):\n    \"\"\"\n    Args:\n        cmap_name (str):\n            Name of the colormap\n        vmin (float):\n            Lower limit of the colormap\n        vmax (float):\n            Upper limit of the colormap\n    \"\"\"\n    self.cmap_name = cmap_name\n    self.cmap = plt.get_cmap(cmap_name)\n    self.norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)\n    self.scalarMap = cm.ScalarMappable(norm=self.norm, cmap=self.cmap)\n
"},{"location":"additional/extra_functions/#CompNeuroPy.extra_functions.Cmap.__call__","title":"__call__(x, alpha=1)","text":"

Returns the rgba value of the colormap at the given value.

Parameters:

Name Type Description Default x float

Value between 0 and 1

required alpha float

Alpha value of the rgba value

1

Returns:

Name Type Description rgba tuple

RGBA value of the colormap at the given value

Source code in src/CompNeuroPy/extra_functions.py
def __call__(self, x, alpha=1):\n    \"\"\"\n    Returns the rgba value of the colormap at the given value.\n\n    Args:\n        x (float):\n            Value between 0 and 1\n        alpha (float):\n            Alpha value of the rgba value\n\n    Returns:\n        rgba (tuple):\n            RGBA value of the colormap at the given value\n    \"\"\"\n    vals = self.get_rgb(x)\n    if isinstance(vals, tuple):\n        vals = vals[:3] + (alpha,)\n    else:\n        vals[:, -1] = alpha\n    return vals\n
"},{"location":"additional/extra_functions/#CompNeuroPy.extra_functions.Cmap.get_rgb","title":"get_rgb(val)","text":"

Returns the rgb value of the colormap at the given value.

Parameters:

Name Type Description Default val float

Value between 0 and 1

required

Returns:

Name Type Description rgb tuple

RGB value of the colormap at the given value

Source code in src/CompNeuroPy/extra_functions.py
def get_rgb(self, val):\n    \"\"\"\n    Returns the rgb value of the colormap at the given value.\n\n    Args:\n        val (float):\n            Value between 0 and 1\n\n    Returns:\n        rgb (tuple):\n            RGB value of the colormap at the given value\n    \"\"\"\n    return self.scalarMap.to_rgba(val)\n
"},{"location":"additional/extra_functions/#CompNeuroPy.extra_functions.DecisionTree","title":"DecisionTree","text":"

Class to create a decision tree.

Source code in src/CompNeuroPy/extra_functions.py
class DecisionTree:\n    \"\"\"\n    Class to create a decision tree.\n    \"\"\"\n\n    def __init__(self):\n        \"\"\"\n        Create a new empty decision tree.\n        \"\"\"\n        ### node list is a list of lists\n        ### first idx = level of tree\n        ### second idx = all nodes in the level\n        self.node_list = [[]]\n\n    def node(self, parent=None, prob=0, name=None):\n        \"\"\"\n        Create a new node in the decision tree.\n\n        Args:\n            parent (node object):\n                Parent node of the new node\n            prob (float):\n                Probability of the new node\n            name (str):\n                Name of the new node\n\n        Returns:\n            new_node (node object):\n                The new node\n        \"\"\"\n\n        ### create new node\n        new_node = DecisionTreeNode(tree=self, parent=parent, prob=prob, name=name)\n        ### add it to node_list\n        if len(self.node_list) == new_node.level:\n            self.node_list.append([])\n        self.node_list[new_node.level].append(new_node)\n        ### return the node object\n        return new_node\n\n    def get_path_prod(self, name):\n        \"\"\"\n        Get the path and path product of a node with a given name.\n\n        Args:\n            name (str):\n                Name of the node\n\n        Returns:\n            path (str):\n                Path to the node\n            path_prod (float):\n                Path product of the node\n        \"\"\"\n\n        ### search for all nodes with name\n        ### start from behind\n        search_node_list = []\n        path_list = []\n        path_prod_list = []\n        for level in range(len(self.node_list) - 1, -1, -1):\n            for node in self.node_list[level]:\n                if node.name == name:\n                    search_node_list.append(node)\n        ### get the paths and path products for the found nodes\n        for node in search_node_list:\n          
  path, path_prod = self._get_path_prod_rec(node)\n            path_list.append(path)\n            path_prod_list.append(path_prod)\n        ### return the paths and path products\n        return [\n            [path_list[idx], path_prod_list[idx]]\n            for idx in range(len(search_node_list))\n        ]\n\n    def _get_path_prod_rec(self, node):\n        \"\"\"\n        Recursive function to get the path and path product of a node.\n\n        Args:\n            node (node object):\n                Node to get the path and path product of\n\n        Returns:\n            path_str (str):\n                Path to the node\n            prob (float):\n                Path product of the node\n        \"\"\"\n        node: DecisionTreeNode = node\n\n        if node.parent == None:\n            return [\"/\" + node.name, node.prob]\n        else:\n            path_str, prob = self._get_path_prod_rec(node.parent)\n            return [path_str + \"/\" + node.name, prob * node.prob]\n
"},{"location":"additional/extra_functions/#CompNeuroPy.extra_functions.DecisionTree.__init__","title":"__init__()","text":"

Create a new empty decision tree.

Source code in src/CompNeuroPy/extra_functions.py
def __init__(self):\n    \"\"\"\n    Create a new empty decision tree.\n    \"\"\"\n    ### node list is a list of lists\n    ### first idx = level of tree\n    ### second idx = all nodes in the level\n    self.node_list = [[]]\n
"},{"location":"additional/extra_functions/#CompNeuroPy.extra_functions.DecisionTree.node","title":"node(parent=None, prob=0, name=None)","text":"

Create a new node in the decision tree.

Parameters:

Name Type Description Default parent node object

Parent node of the new node

None prob float

Probability of the new node

0 name str

Name of the new node

None

Returns:

Name Type Description new_node node object

The new node

Source code in src/CompNeuroPy/extra_functions.py
def node(self, parent=None, prob=0, name=None):\n    \"\"\"\n    Create a new node in the decision tree.\n\n    Args:\n        parent (node object):\n            Parent node of the new node\n        prob (float):\n            Probability of the new node\n        name (str):\n            Name of the new node\n\n    Returns:\n        new_node (node object):\n            The new node\n    \"\"\"\n\n    ### create new node\n    new_node = DecisionTreeNode(tree=self, parent=parent, prob=prob, name=name)\n    ### add it to node_list\n    if len(self.node_list) == new_node.level:\n        self.node_list.append([])\n    self.node_list[new_node.level].append(new_node)\n    ### return the node object\n    return new_node\n
"},{"location":"additional/extra_functions/#CompNeuroPy.extra_functions.DecisionTree.get_path_prod","title":"get_path_prod(name)","text":"

Get the path and path product of a node with a given name.

Parameters:

Name Type Description Default name str

Name of the node

required

Returns:

Name Type Description path str

Path to the node

path_prod float

Path product of the node

Source code in src/CompNeuroPy/extra_functions.py
def get_path_prod(self, name):\n    \"\"\"\n    Get the path and path product of a node with a given name.\n\n    Args:\n        name (str):\n            Name of the node\n\n    Returns:\n        path (str):\n            Path to the node\n        path_prod (float):\n            Path product of the node\n    \"\"\"\n\n    ### search for all nodes with name\n    ### start from behind\n    search_node_list = []\n    path_list = []\n    path_prod_list = []\n    for level in range(len(self.node_list) - 1, -1, -1):\n        for node in self.node_list[level]:\n            if node.name == name:\n                search_node_list.append(node)\n    ### get the paths and path products for the found nodes\n    for node in search_node_list:\n        path, path_prod = self._get_path_prod_rec(node)\n        path_list.append(path)\n        path_prod_list.append(path_prod)\n    ### return the paths and path products\n    return [\n        [path_list[idx], path_prod_list[idx]]\n        for idx in range(len(search_node_list))\n    ]\n
"},{"location":"additional/extra_functions/#CompNeuroPy.extra_functions.DecisionTreeNode","title":"DecisionTreeNode","text":"

Class to create a node in a decision tree.

Source code in src/CompNeuroPy/extra_functions.py
class DecisionTreeNode:\n    \"\"\"\n    Class to create a node in a decision tree.\n    \"\"\"\n\n    id_counter = 0\n\n    def __init__(self, tree: DecisionTree, parent=None, prob=0, name=\"\"):\n        \"\"\"\n        Create a new node in a decision tree.\n\n        Args:\n            tree (DecisionTree object):\n                Decision tree the node belongs to\n            parent (node object):\n                Parent node of the new node\n            prob (float):\n                Probability of the new node\n            name (str):\n                Name of the new node\n        \"\"\"\n        self.tree = tree\n        parent: DecisionTreeNode = parent\n        self.parent = parent\n        self.prob = prob\n        self.name = name\n        self.id = int(self.id_counter)\n        self.id_counter += 1\n        if parent != None:\n            self.level = int(parent.level + 1)\n        else:\n            self.level = int(0)\n\n    def add(self, name, prob):\n        \"\"\"\n        Add a child node to the node.\n\n        Args:\n            name (str):\n                Name of the new node\n            prob (float):\n                Probability of the new node\n\n        Returns:\n            new_node (node object):\n                The new node\n        \"\"\"\n\n        return self.tree.node(parent=self, prob=prob, name=name)\n\n    def get_path_prod(self):\n        \"\"\"\n        Get the path and path product of the node.\n\n        Returns:\n            path (str):\n                Path to the node\n            path_prod (float):\n                Path product of the node\n        \"\"\"\n        return self.tree._get_path_prod_rec(self)\n
"},{"location":"additional/extra_functions/#CompNeuroPy.extra_functions.DecisionTreeNode.__init__","title":"__init__(tree, parent=None, prob=0, name='')","text":"

Create a new node in a decision tree.

Parameters:

Name Type Description Default tree DecisionTree object

Decision tree the node belongs to

required parent node object

Parent node of the new node

None prob float

Probability of the new node

0 name str

Name of the new node

'' Source code in src/CompNeuroPy/extra_functions.py
def __init__(self, tree: DecisionTree, parent=None, prob=0, name=\"\"):\n    \"\"\"\n    Create a new node in a decision tree.\n\n    Args:\n        tree (DecisionTree object):\n            Decision tree the node belongs to\n        parent (node object):\n            Parent node of the new node\n        prob (float):\n            Probability of the new node\n        name (str):\n            Name of the new node\n    \"\"\"\n    self.tree = tree\n    parent: DecisionTreeNode = parent\n    self.parent = parent\n    self.prob = prob\n    self.name = name\n    self.id = int(self.id_counter)\n    self.id_counter += 1\n    if parent != None:\n        self.level = int(parent.level + 1)\n    else:\n        self.level = int(0)\n
"},{"location":"additional/extra_functions/#CompNeuroPy.extra_functions.DecisionTreeNode.add","title":"add(name, prob)","text":"

Add a child node to the node.

Parameters:

Name Type Description Default name str

Name of the new node

required prob float

Probability of the new node

required

Returns:

Name Type Description new_node node object

The new node

Source code in src/CompNeuroPy/extra_functions.py
def add(self, name, prob):\n    \"\"\"\n    Add a child node to the node.\n\n    Args:\n        name (str):\n            Name of the new node\n        prob (float):\n            Probability of the new node\n\n    Returns:\n        new_node (node object):\n            The new node\n    \"\"\"\n\n    return self.tree.node(parent=self, prob=prob, name=name)\n
"},{"location":"additional/extra_functions/#CompNeuroPy.extra_functions.DecisionTreeNode.get_path_prod","title":"get_path_prod()","text":"

Get the path and path product of the node.

Returns:

Name Type Description path str

Path to the node

path_prod float

Path product of the node

Source code in src/CompNeuroPy/extra_functions.py
def get_path_prod(self):\n    \"\"\"\n    Get the path and path product of the node.\n\n    Returns:\n        path (str):\n            Path to the node\n        path_prod (float):\n            Path product of the node\n    \"\"\"\n    return self.tree._get_path_prod_rec(self)\n
"},{"location":"additional/extra_functions/#CompNeuroPy.extra_functions.print_df","title":"print_df(df)","text":"

Prints the complete dataframe df

Parameters:

Name Type Description Default df pandas dataframe

Dataframe to be printed

required Source code in src/CompNeuroPy/extra_functions.py
def print_df(df):\n    \"\"\"\n    Prints the complete dataframe df\n\n    Args:\n        df (pandas dataframe):\n            Dataframe to be printed\n    \"\"\"\n    with pd.option_context(\n        \"display.max_rows\", None\n    ):  # more options can be specified also\n        print(df)\n
"},{"location":"additional/extra_functions/#CompNeuroPy.extra_functions.flatten_list","title":"flatten_list(lst)","text":"

Retuns flattened list

Parameters:

Name Type Description Default lst list of lists or mixed

values and lists): List to be flattened

required

Returns:

Name Type Description new_list list

Flattened list

Source code in src/CompNeuroPy/extra_functions.py
def flatten_list(lst):\n    \"\"\"\n    Retuns flattened list\n\n    Args:\n        lst (list of lists or mixed: values and lists):\n            List to be flattened\n\n    Returns:\n        new_list (list):\n            Flattened list\n    \"\"\"\n\n    ### if lists in lst --> upack them and retunr flatten_list of new list\n    new_lst = []\n    list_in_lst = False\n    for val in lst:\n        if isinstance(val, list):\n            list_in_lst = True\n            for sub_val in val:\n                new_lst.append(sub_val)\n        else:\n            new_lst.append(val)\n\n    if list_in_lst:\n        return flatten_list(new_lst)\n    ### else return lst\n    else:\n        return lst\n
"},{"location":"additional/extra_functions/#CompNeuroPy.extra_functions.remove_key","title":"remove_key(d, key)","text":"

Removes an element from a dict, returns the new dict

Parameters:

Name Type Description Default d dict

Dict to be modified

required key str

Key to be removed

required

Returns:

Name Type Description r dict

Modified dict

Source code in src/CompNeuroPy/extra_functions.py
def remove_key(d, key):\n    \"\"\"\n    Removes an element from a dict, returns the new dict\n\n    Args:\n        d (dict):\n            Dict to be modified\n        key (str):\n            Key to be removed\n\n    Returns:\n        r (dict):\n            Modified dict\n    \"\"\"\n    r = dict(d)\n    del r[key]\n    return r\n
"},{"location":"additional/extra_functions/#CompNeuroPy.extra_functions.suppress_stdout","title":"suppress_stdout()","text":"

Suppresses the print output of a function

Examples:

with suppress_stdout():\n    print(\"this will not be printed\")\n
Source code in src/CompNeuroPy/extra_functions.py
@contextmanager\ndef suppress_stdout():\n    \"\"\"\n    Suppresses the print output of a function\n\n    Examples:\n        ```python\n        with suppress_stdout():\n            print(\"this will not be printed\")\n        ```\n    \"\"\"\n    with open(os.devnull, \"w\") as devnull:\n        old_stdout = sys.stdout\n        sys.stdout = devnull\n        try:\n            yield\n        finally:\n            sys.stdout = old_stdout\n
"},{"location":"additional/extra_functions/#CompNeuroPy.extra_functions.sci","title":"sci(nr)","text":"

Rounds a number to a single decimal. If number is smaller than 0 it is converted to scientific notation with 1 decimal.

Parameters:

Name Type Description Default nr float or int

Number to be converted

required

Returns:

Name Type Description str str

String of the number in scientific notation

Examples:

>>> sci(0.0001)\n'1.0e-4'\n>>> sci(1.77)\n'1.8'\n>>> sci(1.77e-5)\n'1.8e-5'\n>>> sci(177.22)\n'177.2'\n
Source code in src/CompNeuroPy/extra_functions.py
def sci(nr):\n    \"\"\"\n    Rounds a number to a single decimal.\n    If number is smaller than 0 it is converted to scientific notation with 1 decimal.\n\n    Args:\n        nr (float or int):\n            Number to be converted\n\n    Returns:\n        str (str):\n            String of the number in scientific notation\n\n    Examples:\n        >>> sci(0.0001)\n        '1.0e-4'\n        >>> sci(1.77)\n        '1.8'\n        >>> sci(1.77e-5)\n        '1.8e-5'\n        >>> sci(177.22)\n        '177.2'\n    \"\"\"\n    if af.get_number_of_zero_decimals(nr) == 0:\n        return str(round(nr, 1))\n    else:\n        return f\"{nr*10**af.get_number_of_zero_decimals(nr):.1f}e-{af.get_number_of_zero_decimals(nr)}\"\n
"},{"location":"additional/extra_functions/#CompNeuroPy.extra_functions.create_cm","title":"create_cm(colors, name='my_cmap', N=256, gamma=1.0, vmin=0, vmax=1)","text":"

Create a LinearSegmentedColormap from a list of colors.

Parameters:

Name Type Description Default colors array-like of colors or array-like of (value, color

If only colors are given, they are equidistantly mapped from the range :math:[0, 1]; i.e. 0 maps to colors[0] and 1 maps to colors[-1]. If (value, color) pairs are given, the mapping is from value to color. This can be used to divide the range unevenly.

required name str

The name of the colormap, by default 'my_cmap'.

'my_cmap' N int

The number of rgb quantization levels, by default 256.

256 gamma float

Gamma correction value, by default 1.0.

1.0 vmin float

The minimum value of the colormap, by default 0.

0 vmax float

The maximum value of the colormap, by default 1.

1

Returns:

Name Type Description linear_colormap _LinearColormapClass

The colormap object

Source code in src/CompNeuroPy/extra_functions.py
def create_cm(colors, name=\"my_cmap\", N=256, gamma=1.0, vmin=0, vmax=1):\n    \"\"\"\n    Create a `LinearSegmentedColormap` from a list of colors.\n\n    Args:\n        colors (array-like of colors or array-like of (value, color)):\n            If only colors are given, they are equidistantly mapped from the\n            range :math:`[0, 1]`; i.e. 0 maps to ``colors[0]`` and 1 maps to\n            ``colors[-1]``.\n            If (value, color) pairs are given, the mapping is from *value*\n            to *color*. This can be used to divide the range unevenly.\n        name (str, optional):\n            The name of the colormap, by default 'my_cmap'.\n        N (int, optional):\n            The number of rgb quantization levels, by default 256.\n        gamma (float, optional):\n            Gamma correction value, by default 1.0.\n        vmin (float, optional):\n            The minimum value of the colormap, by default 0.\n        vmax (float, optional):\n            The maximum value of the colormap, by default 1.\n\n    Returns:\n        linear_colormap (_LinearColormapClass):\n            The colormap object\n    \"\"\"\n    if not np.iterable(colors):\n        raise ValueError(\"colors must be iterable\")\n\n    if (\n        isinstance(colors[0], Sized)\n        and len(colors[0]) == 2\n        and not isinstance(colors[0], str)\n    ):\n        # List of value, color pairs\n        vals, colors = zip(*colors)\n        vals = np.array(vals).astype(float)\n        colors = list(colors)\n        ### insert values for 0 and 1 if not given\n        ### they equal the colors of the borders of the given range\n        if vals.min() != 0.0:\n            colors = [colors[np.argmin(vals)]] + colors\n            vals = np.insert(vals, 0, 0.0)\n        if vals.max() != 1.0:\n            colors = colors + [colors[np.argmax(vals)]]\n            vals = np.insert(vals, len(vals), 1.0)\n    else:\n        vals = np.linspace(0, 1, len(colors))\n\n    ### sort values and 
colors, they have to increase\n    sort_idx = np.argsort(vals)\n    vals = vals[sort_idx]\n    colors = [colors[idx] for idx in sort_idx]\n\n    r_g_b_a = np.zeros((len(colors), 4))\n    for color_idx, color in enumerate(colors):\n        if isinstance(color, str):\n            ### color given by name\n            r_g_b_a[color_idx] = to_rgba_array(color)\n        else:\n            ### color given by rgb(maybe a) value\n            color = np.array(color).astype(float)\n            ### check color size\n            if len(color) != 3 and len(color) != 4:\n                raise ValueError(\n                    \"colors must be names or consist of 3 (rgb) or 4 (rgba) numbers\"\n                )\n            if color.max() > 1:\n                ### assume that max value is 255\n                color[:3] = color[:3] / 255\n            if len(color) == 4:\n                ### gamma already given\n                r_g_b_a[color_idx] = color\n            else:\n                ### add gamma\n                r_g_b_a[color_idx] = np.concatenate([color, np.array([gamma])])\n    r = r_g_b_a[:, 0]\n    g = r_g_b_a[:, 1]\n    b = r_g_b_a[:, 2]\n    a = r_g_b_a[:, 3]\n\n    cdict = {\n        \"red\": np.column_stack([vals, r, r]),\n        \"green\": np.column_stack([vals, g, g]),\n        \"blue\": np.column_stack([vals, b, b]),\n        \"alpha\": np.column_stack([vals, a, a]),\n    }\n\n    return _LinearColormapClass(name, cdict, N, gamma, vmin, vmax)\n
"},{"location":"additional/extra_functions/#CompNeuroPy.extra_functions.evaluate_expression_with_dict","title":"evaluate_expression_with_dict(expression, value_dict)","text":"

Evaluate a mathematical expression using values from a dictionary.

This function takes a mathematical expression as a string and a dictionary containing variable names as keys and corresponding values as numpy arrays. It replaces the variable names in the expression with their corresponding values from the dictionary and evaluates the expression.

Parameters:

Name Type Description Default expression str

A mathematical expression to be evaluated. Variable names in the expression should match the keys in the value_dict.

required value_dict dict

A dictionary containing variable names (strings) as keys and corresponding numpy arrays or numbers as values.

required

Returns:

Name Type Description result value or array

The result of evaluating the expression using the provided values.

Examples:

>>> my_dict = {\"a\": np.ones(10), \"b\": np.arange(10)}\n>>> my_string = \"a*2-b+10\"\n>>> evaluate_expression_with_dict(my_string, my_dict)\narray([12., 11., 10.,  9.,  8.,  7.,  6.,  5.,  4.,  3.])\n
Source code in src/CompNeuroPy/extra_functions.py
def evaluate_expression_with_dict(expression, value_dict):\n    \"\"\"\n    Evaluate a mathematical expression using values from a dictionary.\n\n    This function takes a mathematical expression as a string and a dictionary\n    containing variable names as keys and corresponding values as numpy arrays.\n    It replaces the variable names in the expression with their corresponding\n    values from the dictionary and evaluates the expression.\n\n    Args:\n        expression (str):\n            A mathematical expression to be evaluated. Variable\n            names in the expression should match the keys in the value_dict.\n        value_dict (dict):\n            A dictionary containing variable names (strings) as\n            keys and corresponding numpy arrays or numbers as values.\n\n    Returns:\n        result (value or array):\n            The result of evaluating the expression using the provided values.\n\n    Examples:\n        >>> my_dict = {\"a\": np.ones(10), \"b\": np.arange(10)}\n        >>> my_string = \"a*2-b+10\"\n        >>> evaluate_expression_with_dict(my_string, my_dict)\n        array([12., 11., 10.,  9.,  8.,  7.,  6.,  5.,  4.,  3.])\n    \"\"\"\n    # Replace dictionary keys in the expression with their corresponding values\n    ### replace names with dict entries\n    expression = _replace_names_with_dict(\n        expression=expression, name_of_dict=\"value_dict\", dictionary=value_dict\n    )\n\n    ### evaluate the new expression\n    try:\n        result = eval(expression)\n        return result\n    except Exception as e:\n        raise ValueError(f\"Error while evaluating expression: {str(e)}\")\n
"},{"location":"additional/model_functions/","title":"Model Functions","text":""},{"location":"additional/model_functions/#CompNeuroPy.model_functions.compile_in_folder","title":"compile_in_folder(folder_name, net=None, clean=False, silent=False)","text":"

Creates the compilation folder in annarchy_folders/ or uses existing ones. Compiles the current network.

Parameters:

Name Type Description Default folder_name str

Name of the folder within annarchy_folders/

required net ANNarchy network

ANNarchy network. Default: None.

None clean bool

If True, the library is recompiled entirely, else only the changes since last compilation are compiled. Default: False.

False silent bool

Suppress output. Defaults to False.

False Source code in src/CompNeuroPy/model_functions.py
def compile_in_folder(folder_name, net=None, clean=False, silent=False):\n    \"\"\"\n    Creates the compilation folder in annarchy_folders/ or uses existing ones. Compiles\n    the current network.\n\n    Args:\n        folder_name (str):\n            Name of the folder within annarchy_folders/\n        net (ANNarchy network, optional):\n            ANNarchy network. Default: None.\n        clean (bool, optional):\n            If True, the library is recompiled entirely, else only the changes since\n            last compilation are compiled. Default: False.\n        silent (bool, optional):\n            Suppress output. Defaults to False.\n    \"\"\"\n    sf.create_dir(\"annarchy_folders/\" + folder_name, print_info=False)\n    if isinstance(net, type(None)):\n        compile(\"annarchy_folders/\" + folder_name, clean=clean, silent=silent)\n    else:\n        net.compile(\"annarchy_folders/\" + folder_name, clean=clean, silent=silent)\n    if os.getcwd().split(\"/\")[-1] == \"annarchy_folders\":\n        os.chdir(\"../\")\n
"},{"location":"additional/model_functions/#CompNeuroPy.model_functions.annarchy_compiled","title":"annarchy_compiled(net_id=0)","text":"

Check if ANNarchy network was compiled.

Parameters:

Name Type Description Default net_id int

Network ID. Default: 0.

0 Source code in src/CompNeuroPy/model_functions.py
def annarchy_compiled(net_id=0):\n    \"\"\"\n    Check if ANNarchy network was compiled.\n\n    Args:\n        net_id (int, optional):\n            Network ID. Default: 0.\n    \"\"\"\n    return Global._network[net_id][\"compiled\"]\n
"},{"location":"additional/model_functions/#CompNeuroPy.model_functions.get_full_model","title":"get_full_model()","text":"

Return all current population and projection names.

Returns:

Name Type Description model_dict dict

Dictionary with keys \"populations\" and \"projections\" and values lists of population and projection names, respectively.

Source code in src/CompNeuroPy/model_functions.py
def get_full_model():\n    \"\"\"\n    Return all current population and projection names.\n\n    Returns:\n        model_dict (dict):\n            Dictionary with keys \"populations\" and \"projections\" and values lists of\n            population and projection names, respectively.\n    \"\"\"\n    return {\n        \"populations\": [pop.name for pop in populations()],\n        \"projections\": [proj.name for proj in projections()],\n    }\n
"},{"location":"additional/model_functions/#CompNeuroPy.model_functions.cnp_clear","title":"cnp_clear(functions=True, neurons=True, synapses=True, constants=True)","text":"

Like clear with ANNarchy, but CompNeuroModel objects are also cleared.

Parameters:

Name Type Description Default functions bool

If True, all functions are cleared. Default: True.

True neurons bool

If True, all neurons are cleared. Default: True.

True synapses bool

If True, all synapses are cleared. Default: True.

True constants bool

If True, all constants are cleared. Default: True.

True Source code in src/CompNeuroPy/model_functions.py
def cnp_clear(functions=True, neurons=True, synapses=True, constants=True):\n    \"\"\"\n    Like clear with ANNarchy, but CompNeuroModel objects are also cleared.\n\n    Args:\n        functions (bool, optional):\n            If True, all functions are cleared. Default: True.\n        neurons (bool, optional):\n            If True, all neurons are cleared. Default: True.\n        synapses (bool, optional):\n            If True, all synapses are cleared. Default: True.\n        constants (bool, optional):\n            If True, all constants are cleared. Default: True.\n    \"\"\"\n    clear(functions=functions, neurons=neurons, synapses=synapses, constants=constants)\n    for model_name in CompNeuroModel._initialized_models.keys():\n        CompNeuroModel._initialized_models[model_name] = False\n    for model_name in CompNeuroModel._compiled_models.keys():\n        CompNeuroModel._compiled_models[model_name] = False\n
"},{"location":"additional/simulation_functions/","title":"Simulation Functions","text":""},{"location":"additional/simulation_functions/#CompNeuroPy.simulation_functions.current_step","title":"current_step(pop, t1=500, t2=500, a1=0, a2=100)","text":"

Stimulates a given population in two periods with two input currents.

Parameters:

Name Type Description Default pop str

population name of population, which should be stimulated with input current neuron model of population has to contain \"I_app\" as input current

required t1 int

time in ms before current step

500 t2 int

time in ms after current step

500 a1 int

current amplitude before current step

0 a2 int

current amplitude after current step

100

Returns:

Name Type Description return_dict dict

dictionary containing:

  • duration (int): duration of the simulation
Source code in src/CompNeuroPy/simulation_functions.py
def current_step(pop, t1=500, t2=500, a1=0, a2=100):\n    \"\"\"\n    Stimulates a given population in two periods with two input currents.\n\n    Args:\n        pop (str):\n            population name of population, which should be stimulated with input current\n            neuron model of population has to contain \"I_app\" as input current\n        t1 (int):\n            time in ms before current step\n        t2 (int):\n            time in ms after current step\n        a1 (int):\n            current amplitude before current step\n        a2 (int):\n            current amplitude after current step\n\n    Returns:\n        return_dict (dict):\n            dictionary containing:\n\n            - duration (int): duration of the simulation\n    \"\"\"\n\n    ### save prev input current\n    I_prev = get_population(pop).I_app\n\n    ### first/pre current step simulation\n    get_population(pop).I_app = a1\n    simulate(t1)\n\n    ### second/post current step simulation\n    get_population(pop).I_app = a2\n    simulate(t2)\n\n    ### reset input current to previous value\n    get_population(pop).I_app = I_prev\n\n    ### return some additional information which could be usefull\n    return {\"duration\": t1 + t2}\n
"},{"location":"additional/simulation_functions/#CompNeuroPy.simulation_functions.current_stim","title":"current_stim(pop, t=500, a=100)","text":"

Stimulates a given population during specified period 't' with input current with amplitude 'a', after this stimulation the current is reset to initial value (before stimulation).

Parameters:

Name Type Description Default pop str

population name of population, which should be stimulated with input current neuron model of population has to contain \"I_app\" as input current

required t int

duration in ms

500 a int

current amplitude

100 Source code in src/CompNeuroPy/simulation_functions.py
def current_stim(pop, t=500, a=100):\n    \"\"\"\n    Stimulates a given population during specified period 't' with input current with\n    amplitude 'a', after this stimulation the current is reset to initial value\n    (before stimulation).\n\n    Args:\n        pop (str):\n            population name of population, which should be stimulated with input current\n            neuron model of population has to contain \"I_app\" as input current\n        t (int):\n            duration in ms\n        a (int):\n            current amplitude\n    \"\"\"\n\n    return current_step(pop, t1=t, t2=0, a1=a, a2=0)\n
"},{"location":"additional/simulation_functions/#CompNeuroPy.simulation_functions.current_ramp","title":"current_ramp(pop, a0, a1, dur, n)","text":"

Conducts multiple current stimulations with constantly changing current inputs. After this current_ramp stimulation the current amplitude is reset to the initial value (before current ramp).

Parameters:

Name Type Description Default pop str

population name of population, which should be stimulated with input current neuron model of population has to contain \"I_app\" as input current

required a0 int

initial current amplitude (of first stimulation)

required a1 int

final current amplitude (of last stimulation)

required dur int

duration of the complete current ramp (all stimulations)

required n int

number of stimulations

required

Warning

dur/n should be divisible by the simulation time step without remainder

Returns:

Name Type Description return_dict dict

dictionary containing:

  • da (int): current step size
  • dur_stim (int): duration of one stimulation

Raises:

Type Description AssertionError

if resulting duration of one stimulation is not divisible by the simulation time step without remainder

Source code in src/CompNeuroPy/simulation_functions.py
def current_ramp(pop, a0, a1, dur, n):\n    \"\"\"\n    Conducts multiple current stimulations with constantly changing current inputs.\n    After this current_ramp stimulation the current amplitude is reset to the initial\n    value (before current ramp).\n\n\n    Args:\n        pop (str):\n            population name of population, which should be stimulated with input current\n            neuron model of population has to contain \"I_app\" as input current\n        a0 (int):\n            initial current amplitude (of first stimulation)\n        a1 (int):\n            final current amplitude (of last stimulation)\n        dur (int):\n            duration of the complete current ramp (all stimulations)\n        n (int):\n            number of stimulations\n\n    !!! warning\n        dur/n should be divisible by the simulation time step without remainder\n\n    Returns:\n        return_dict (dict):\n            dictionary containing:\n\n            - da (int): current step size\n            - dur_stim (int): duration of one stimulation\n\n    Raises:\n        AssertionError: if resulting duration of one stimulation is not divisible by the\n            simulation time step without remainder\n    \"\"\"\n\n    assert (dur / n) / dt() % 1 == 0, (\n        \"ERROR current_ramp: dur/n should result in a duration (for a single stimulation) which is divisible by the simulation time step (without remainder)\\ncurrent duration = \"\n        + str(dur / n)\n        + \", timestep = \"\n        + str(dt())\n        + \"!\\n\"\n    )\n\n    da = (a1 - a0) / (n - 1)  # for n stimulations only n-1 steps occur\n    dur_stim = dur / n\n    amp = a0\n    for _ in range(n):\n        current_stim(pop, t=dur_stim, a=amp)\n        amp = amp + da\n\n    return {\"da\": da, \"dur_stim\": dur_stim}\n
"},{"location":"additional/simulation_functions/#CompNeuroPy.simulation_functions.increasing_current","title":"increasing_current(pop, a0, da, nr_steps, dur_step)","text":"

Conducts multiple current stimulations with constantly increasing current inputs. After this increasing_current stimulation the current amplitude is reset to the initial value (before increasing_current).

Parameters:

Name Type Description Default pop str

population name of population, which should be stimulated with input current neuron model of population has to contain \"I_app\" as input current

required a0 int

initial current amplitude (of first stimulation)

required da int

current step size

required nr_steps int

number of stimulations

required dur_step int

duration of one stimulation

required

Returns:

Name Type Description return_dict dict

dictionary containing:

  • current_list (list): list of current amplitudes for each stimulation
Source code in src/CompNeuroPy/simulation_functions.py
def increasing_current(pop, a0, da, nr_steps, dur_step):\n    \"\"\"\n    Conducts multiple current stimulations with constantly increasing current inputs.\n    After this increasing_current stimulation the current amplitude is reset to the\n    initial value (before increasing_current).\n\n    Args:\n        pop (str):\n            population name of population, which should be stimulated with input current\n            neuron model of population has to contain \"I_app\" as input current\n        a0 (int):\n            initial current amplitude (of first stimulation)\n        da (int):\n            current step size\n        nr_steps (int):\n            number of stimulations\n        dur_step (int):\n            duration of one stimulation\n\n    Returns:\n        return_dict (dict):\n            dictionary containing:\n\n            - current_list (list): list of current amplitudes for each stimulation\n    \"\"\"\n    current_list = []\n    a = a0\n    for _ in range(nr_steps):\n        current_list.append(a)\n        current_stim(pop, t=dur_step, a=a)\n        a += da\n\n    return {\"current_list\": current_list}\n
"},{"location":"additional/simulation_requirements/","title":"Simulation Requirements","text":""},{"location":"additional/simulation_requirements/#CompNeuroPy.simulation_requirements.ReqPopHasAttr","title":"ReqPopHasAttr","text":"

Checks if population(s) contains the attribute(s) (parameters or variables)

Source code in src/CompNeuroPy/simulation_requirements.py
class ReqPopHasAttr:\n    \"\"\"\n    Checks if population(s) contains the attribute(s) (parameters or variables)\n    \"\"\"\n\n    def __init__(self, pop, attr):\n        \"\"\"\n        Args:\n            pop (str or list of strings):\n                population name(s)\n            attr (str or list of strings):\n                attribute name(s)\n        \"\"\"\n        self.pop_name_list = pop\n        self.attr_name_list = attr\n        ### convert single strings into list\n        if not (isinstance(pop, list)):\n            self.pop_name_list = [pop]\n        if not (isinstance(attr, list)):\n            self.attr_name_list = [attr]\n\n    def run(self):\n        \"\"\"\n        Checks if population(s) contains the attribute(s) (parameters or variables)\n\n        Raises:\n            ValueError: if population(s) does not contain the attribute(s)\n        \"\"\"\n        for attr_name in self.attr_name_list:\n            for pop_name in self.pop_name_list:\n                pop: Population = get_population(pop_name)\n                if not (attr_name in pop.attributes):\n                    raise ValueError(\n                        \"Population \"\n                        + pop_name\n                        + \" does not contain attribute \"\n                        + attr_name\n                        + \"!\\n\"\n                    )\n
"},{"location":"additional/simulation_requirements/#CompNeuroPy.simulation_requirements.ReqPopHasAttr.__init__","title":"__init__(pop, attr)","text":"

Parameters:

Name Type Description Default pop str or list of strings

population name(s)

required attr str or list of strings

attribute name(s)

required Source code in src/CompNeuroPy/simulation_requirements.py
def __init__(self, pop, attr):\n    \"\"\"\n    Args:\n        pop (str or list of strings):\n            population name(s)\n        attr (str or list of strings):\n            attribute name(s)\n    \"\"\"\n    self.pop_name_list = pop\n    self.attr_name_list = attr\n    ### convert single strings into list\n    if not (isinstance(pop, list)):\n        self.pop_name_list = [pop]\n    if not (isinstance(attr, list)):\n        self.attr_name_list = [attr]\n
"},{"location":"additional/simulation_requirements/#CompNeuroPy.simulation_requirements.ReqPopHasAttr.run","title":"run()","text":"

Checks if population(s) contains the attribute(s) (parameters or variables)

Raises:

Type Description ValueError

if population(s) does not contain the attribute(s)

Source code in src/CompNeuroPy/simulation_requirements.py
def run(self):\n    \"\"\"\n    Checks if population(s) contains the attribute(s) (parameters or variables)\n\n    Raises:\n        ValueError: if population(s) does not contain the attribute(s)\n    \"\"\"\n    for attr_name in self.attr_name_list:\n        for pop_name in self.pop_name_list:\n            pop: Population = get_population(pop_name)\n            if not (attr_name in pop.attributes):\n                raise ValueError(\n                    \"Population \"\n                    + pop_name\n                    + \" does not contain attribute \"\n                    + attr_name\n                    + \"!\\n\"\n                )\n
"},{"location":"additional/system_functions/","title":"System Functions","text":""},{"location":"additional/system_functions/#CompNeuroPy.system_functions.clear_dir","title":"clear_dir(path)","text":"

Deletes all files and subdirectories in the specified folder.

Parameters:

Name Type Description Default path str

Path to the folder to clear.

required Source code in src/CompNeuroPy/system_functions.py
def clear_dir(path):\n    \"\"\"\n    Deletes all files and subdirectories in the specified folder.\n\n    Args:\n        path (str):\n            Path to the folder to clear.\n    \"\"\"\n    try:\n        if not os.path.exists(path):\n            print(f\"The folder '{path}' does not exist.\")\n            return\n\n        for filename in os.listdir(path):\n            file_path = os.path.join(path, filename)\n            try:\n                if os.path.isfile(file_path) or os.path.islink(file_path):\n                    os.unlink(file_path)\n                elif os.path.isdir(file_path):\n                    shutil.rmtree(file_path)\n            except Exception:\n                print(traceback.format_exc())\n                print(f\"Failed to delete {file_path}\")\n    except Exception:\n        print(traceback.format_exc())\n        print(f\"Failed to clear {path}\")\n
"},{"location":"additional/system_functions/#CompNeuroPy.system_functions.create_dir","title":"create_dir(path, print_info=False, clear=False)","text":"

Creates a directory.

Parameters:

Name Type Description Default path str

Path to the directory to create.

required print_info bool

Whether to print information about the directory creation. Default: False.

False clear bool

Whether to clear the directory if it already exists. Default: False.

False Source code in src/CompNeuroPy/system_functions.py
def create_dir(path, print_info=False, clear=False):\n    \"\"\"\n    Creates a directory.\n\n    Args:\n        path (str):\n            Path to the directory to create.\n\n        print_info (bool, optional):\n            Whether to print information about the directory creation. Default: False.\n\n        clear (bool, optional):\n            Whether to clear the directory if it already exists. Default: False.\n    \"\"\"\n    try:\n        if isinstance(path, str):\n            if len(path) > 0:\n                os.makedirs(path)\n        else:\n            print(\"create_dir, ERROR: path is no str\")\n    except Exception:\n        if os.path.isdir(path):\n            if print_info:\n                print(path + \" already exists\")\n            if clear:\n                ### clear folder\n                ### do you really want?\n                answer = input(f\"Do you really want to clear {path} (y/n):\")\n                while answer != \"y\" and answer != \"n\":\n                    print(\"please enter y or n\")\n                    answer = input(f\"Do you really want to clear {path} (y/n):\")\n                ### clear or not depending on answer\n                if answer == \"y\":\n                    clear_dir(path)\n                    if print_info:\n                        print(path + \" already exists and was cleared.\")\n                else:\n                    if print_info:\n                        print(path + \" already exists and was not cleared.\")\n        else:\n            print(traceback.format_exc())\n            print(\"could not create \" + path + \" folder\")\n            quit()\n
"},{"location":"additional/system_functions/#CompNeuroPy.system_functions.save_variables","title":"save_variables(variable_list, name_list, path='./')","text":"

Parameters:

Name Type Description Default variable_list list

variables to save

required name_list list

names of the save files of the variables

required path str or list

save path for all variables, or save path for each variable of the variable_list. Default: \"./\"

'./'

Examples:

import numpy as np\nfrom CompNeuroPy import save_variables, load_variables\n\n### create variables\nvar1 = np.random.rand(10)\nvar2 = np.random.rand(10)\n\n### save variables\nsave_variables([var1, var2], [\"var1_file\", \"var2_file\"], \"my_variables_folder\")\n\n### load variables\nloaded_variables = load_variables([\"var1\", \"var2\"], \"my_variables_folder\")\n\n### use loaded variables\nprint(loaded_variables[\"var1_file\"])\nprint(loaded_variables[\"var2_file\"])\n
Source code in src/CompNeuroPy/system_functions.py
def save_variables(variable_list: list, name_list: list, path: str | list = \"./\"):\n    \"\"\"\n    Args:\n        variable_list (list):\n            variables to save\n        name_list (list):\n            names of the save files of the variables\n        path (str or list):\n            save path for all variables, or save path for each variable of the\n            variable_list. Default: \"./\"\n\n    Examples:\n        ```python\n        import numpy as np\n        from CompNeuroPy import save_variables, load_variables\n\n        ### create variables\n        var1 = np.random.rand(10)\n        var2 = np.random.rand(10)\n\n        ### save variables\n        save_variables([var1, var2], [\"var1_file\", \"var2_file\"], \"my_variables_folder\")\n\n        ### load variables\n        loaded_variables = load_variables([\"var1\", \"var2\"], \"my_variables_folder\")\n\n        ### use loaded variables\n        print(loaded_variables[\"var1_file\"])\n        print(loaded_variables[\"var2_file\"])\n        ```\n    \"\"\"\n    for idx in range(len(variable_list)):\n        ### set save path\n        if isinstance(path, str):\n            save_path = path\n        else:\n            save_path = path[idx]\n        if save_path.endswith(\"/\"):\n            save_path = save_path[:-1]\n        ### set file name\n        file_name = f\"{name_list[idx]}.pkl\"\n        ### set variable\n        variable = variable_list[idx]\n        ### generate save folder\n        create_dir(save_path)\n        ### Saving a variable to a file\n        with open(f\"{save_path}/{file_name}\", \"wb\") as file:\n            pickle.dump(variable, file)\n
"},{"location":"additional/system_functions/#CompNeuroPy.system_functions.load_variables","title":"load_variables(name_list, path='./')","text":"

Parameters:

Name Type Description Default name_list list

names of the save files of the variables

required path str or list

save path for all variables, or save path for each variable of the variable_list. Default: \"./\"

'./'

Returns:

Name Type Description variable_dict dict

dictionary with the loaded variables, keys are the names of the files, values are the loaded variables

Examples:

import numpy as np\nfrom CompNeuroPy import save_variables, load_variables\n\n### create variables\nvar1 = np.random.rand(10)\nvar2 = np.random.rand(10)\n\n### save variables\nsave_variables([var1, var2], [\"var1_file\", \"var2_file\"], \"my_variables_folder\")\n\n### load variables\nloaded_variables = load_variables([\"var1\", \"var2\"], \"my_variables_folder\")\n\n### use loaded variables\nprint(loaded_variables[\"var1_file\"])\nprint(loaded_variables[\"var2_file\"])\n
Source code in src/CompNeuroPy/system_functions.py
def load_variables(name_list: list, path: str | list = \"./\"):\n    \"\"\"\n    Args:\n        name_list (list):\n            names of the save files of the variables\n        path (str or list, optional):\n            save path for all variables, or save path for each variable of the\n            variable_list. Default: \"./\"\n\n    Returns:\n        variable_dict (dict):\n            dictionary with the loaded variables, keys are the names of the\n            files, values are the loaded variables\n\n    Examples:\n        ```python\n        import numpy as np\n        from CompNeuroPy import save_variables, load_variables\n\n        ### create variables\n        var1 = np.random.rand(10)\n        var2 = np.random.rand(10)\n\n        ### save variables\n        save_variables([var1, var2], [\"var1_file\", \"var2_file\"], \"my_variables_folder\")\n\n        ### load variables\n        loaded_variables = load_variables([\"var1\", \"var2\"], \"my_variables_folder\")\n\n        ### use loaded variables\n        print(loaded_variables[\"var1_file\"])\n        print(loaded_variables[\"var2_file\"])\n        ```\n    \"\"\"\n    variable_dict = {}\n    for idx in range(len(name_list)):\n        ### set save path\n        if isinstance(path, str):\n            save_path = path\n        else:\n            save_path = path[idx]\n        if save_path.endswith(\"/\"):\n            save_path = save_path[:-1]\n        ### set file name\n        file_name = f\"{name_list[idx]}.pkl\"\n        ### Loading the variable from the file\n        with open(f\"{save_path}/{file_name}\", \"rb\") as file:\n            loaded_variable = pickle.load(file)\n        ### store variable in variable_dict\n        variable_dict[name_list[idx]] = loaded_variable\n\n    return variable_dict\n
"},{"location":"additional/system_functions/#CompNeuroPy.system_functions.timing_decorator","title":"timing_decorator(threshold=0.1)","text":"

Decorator to measure the execution time of a function.

Parameters:

Name Type Description Default threshold float

Threshold in seconds. If the execution time of the function is larger than this threshold, the execution time is printed. Default: 0.1.

0.1 Source code in src/CompNeuroPy/system_functions.py
def timing_decorator(threshold=0.1):\n    \"\"\"\n    Decorator to measure the execution time of a function.\n\n    Args:\n        threshold (float, optional):\n            Threshold in seconds. If the execution time of the function is\n            larger than this threshold, the execution time is printed. Default: 0.1.\n    \"\"\"\n\n    def decorator(func):\n        @wraps(func)\n        def wrapper(*args, **kwargs):\n            start_time = time()\n            result = func(*args, **kwargs)\n            end_time = time()\n            execution_time = end_time - start_time\n            if execution_time >= threshold:\n                print(f\"{func.__name__} took {execution_time:.4f} seconds\")\n            return result\n\n        return wrapper\n\n    return decorator\n
"},{"location":"built_in/models/","title":"Full Models","text":""},{"location":"built_in/models/#CompNeuroPy.full_models.BGM","title":"CompNeuroPy.full_models.BGM","text":"

Bases: CompNeuroModel

The basal ganglia model based on the model from Goenner et al. (2021).

Attributes:

Name Type Description name str

name of the model

description str

description of the model

model_creation_function function

function which creates the model

compile_folder_name str

name of the folder in which the model is compiled

model_kwargs dict

keyword arguments for model_creation_function

populations list

list of names of all populations of the model

projections list

list of names of all projections of the model

created bool

True if the model is created

compiled bool

True if the model is compiled

attribute_df pandas dataframe

dataframe containing all attributes of the model compartments

params dict

dictionary containing all parameters of the model

name_appendix str

string which is appended to all model compartments and parameters

Source code in src/CompNeuroPy/full_models/bgm_22/bgm.py
class BGM(CompNeuroModel):\n    \"\"\"\n    The basal ganglia model based on the model from [Goenner et al. (2021)](https://doi.org/10.1111/ejn.15082).\n\n    Attributes:\n        name (str):\n            name of the model\n        description (str):\n            description of the model\n        model_creation_function (function):\n            function which creates the model\n        compile_folder_name (str):\n            name of the folder in which the model is compiled\n        model_kwargs (dict):\n            keyword arguments for model_creation_function\n        populations (list):\n            list of names of all populations of the model\n        projections (list):\n            list of names of all projections of the model\n        created (bool):\n            True if the model is created\n        compiled (bool):\n            True if the model is compiled\n        attribute_df (pandas dataframe):\n            dataframe containing all attributes of the model compartments\n        params (dict):\n            dictionary containing all parameters of the model\n        name_appendix (str):\n            string which is appended to all model compartments and parameters\n    \"\"\"\n\n    @check_types()\n    def __init__(\n        self,\n        name: str = \"BGM_v01_p01\",\n        do_create: bool = True,\n        do_compile: bool = True,\n        compile_folder_name: str | None = None,\n        seed: int | None = None,\n        name_appendix: str = \"\",\n    ):\n        \"\"\"\n        Args:\n            name (str, optional):\n                name of the model, syntax: \"BGM_v<model_version>_p<parameters_version>\"\n                replace <model_version> and <parameters_version> with the versions you\n                want to use, see CompNeuroPy.full_models.BGM_22.parameters for available\n                versions. Default: \"BGM_v01_p01\"\n            do_create (bool, optional):\n                if True, the model is created after initialization. 
Default: True\n            do_compile (bool, optional):\n                if True, the model is compiled after creation. Default: True\n            compile_folder_name (str, optional):\n                name of the folder in which the compiled model is saved. Default: None,\n                i.e. \"annarchy_BGM_v<model_version>\" is used\n            seed (int, optional):\n                the seed for the random number generator used during model creation.\n                Default: None, i.e. random seed is used\n            name_appendix (str, optional):\n                string which is appended to all model compartments and parameters.\n                Allows to create multiple models with the same name and keep names of\n                compartments and parameters unique. Default: \"\"\n        \"\"\"\n        ### check if name is correct, otherwise raise ValueError\n        if not (\n            len(name.split(\"_\")) == 3\n            and name.split(\"_\")[0] == \"BGM\"\n            and name.split(\"_\")[1][0] == \"v\"\n            and name.split(\"_\")[2][0] == \"p\"\n        ):\n            raise ValueError(\n                \"name has to be of the form 'BGM_v<model_version>_p<parameters_version>'\"\n            )\n\n        ### set attributes (except the ones which are set in the super().__init__())\n        self.name_appendix = name_appendix\n        self.seed = seed\n        if len(self.name_appendix) > 0:\n            self._name_appendix_to_add = \":\" + name_appendix\n        else:\n            self._name_appendix_to_add = \"\"\n\n        ### set model_version_name\n        self._model_version_name = \"_\".join(name.split(\"_\")[:2])\n\n        ### update name with name_appendix\n        name = name + self._name_appendix_to_add\n\n        ### init default compile_folder_name\n        if compile_folder_name == None:\n            compile_folder_name = \"annarchy_\" + self._model_version_name\n\n        ### set description\n        description = (\n           
 \"The basal ganglia model based on the model from Goenner et al. (2021)\"\n        )\n\n        ### init random number generator\n        self._rng = np.random.default_rng(seed)\n\n        ### get model parameters before init, ignore name_appendix\n        self.params = self._get_params(name.split(\":\")[0])\n\n        ### init\n        super().__init__(\n            model_creation_function=self._model_creation_function,\n            name=name,\n            description=description,\n            do_create=do_create,\n            do_compile=do_compile,\n            compile_folder_name=compile_folder_name,\n        )\n\n    def _add_name_appendix(self):\n        \"\"\"\n        Rename all model compartments, keys (except general) in params dict and\n        names in attribute_df by appending the name_appendix to the original name.\n        \"\"\"\n\n        ### update the attribute_df of the model object (it still contains the original\n        ### names of the model creation)\n        self.attribute_df[\"compartment_name\"] = (\n            self.attribute_df[\"compartment_name\"] + self._name_appendix_to_add\n        )\n        ### rename populations and projections\n        populations_new = []\n        for pop_name in self.populations:\n            populations_new.append(pop_name + self._name_appendix_to_add)\n            get_population(pop_name).name = pop_name + self._name_appendix_to_add\n        self.populations = populations_new\n        projections_new = []\n        for proj_name in self.projections:\n            projections_new.append(proj_name + self._name_appendix_to_add)\n            get_projection(proj_name).name = proj_name + self._name_appendix_to_add\n        self.projections = projections_new\n        ### rename parameter keys except general\n        params_new = {}\n        for key, param_val in self.params.items():\n            param_object = key.split(\".\")[0]\n            param_name = key.split(\".\")[1]\n\n            if param_object == 
\"general\":\n                params_new[key] = param_val\n                continue\n\n            param_object = param_object + self._name_appendix_to_add\n            key_new = param_object + \".\" + param_name\n            params_new[key_new] = param_val\n        self.params = params_new\n\n    def _model_creation_function(self):\n        \"\"\"\n        Creates the model using the model_creation_function from the\n        model_creation_functions.py file. The function is defined by the\n        model_version_name.\n        \"\"\"\n        model_creation_function = eval(\n            \"importlib.import_module('CompNeuroPy.full_models.bgm_22.model_creation_functions').\"\n            + self._model_version_name\n        )\n        model_creation_function(self)\n\n    def create(self, do_compile=True, compile_folder_name=None):\n        \"\"\"\n        Creates the model and optionally compiles it directly.\n\n        Args:\n            do_compile (bool, optional):\n                If True the model is compiled directly. Default: True.\n            compile_folder_name (str, optional):\n                Name of the folder in which the model is compiled. 
Default: value from\n                initialization.\n        \"\"\"\n        ### create the model, but do not compile to set parameters before compilation\n        super().create(do_compile=False, compile_folder_name=compile_folder_name)\n\n        ### update names of compartments and parameters\n        self._add_name_appendix()\n\n        ### set parameters and connectivity of projections\n        ### for each projection the connectivity has to be defined in the params\n        self._set_params()\n        self._set_noise_values()\n        self._set_connections()\n\n        ### compile the model, after setting all parameters (included in compile state)\n        if do_compile:\n            self.compile(compile_folder_name)\n\n    def _set_params(self):\n        \"\"\"\n        sets params of all populations\n        \"\"\"\n\n        ### loop over all params\n        for key, param_val in self.params.items():\n            ### split key in param object and param name\n            param_object = key.split(\".\")[0]\n            param_name = key.split(\".\")[1]\n\n            ### if param is a noise param --> skip (separate function)\n            if param_name.split(\"_\")[-1] == \"noise\":\n                continue\n\n            ### if param name ends with init --> actual param_name (in pop) is without init\n            if param_name.split(\"_\")[-1] == \"init\":\n                param_name = \"_\".join(param_name.split(\"_\")[:-1])\n\n            ### if param_object is a pop in network\n            if param_object in self.populations:\n                ### and the param_name is an attribute of the pop --> set param of pop\n                if param_name in vars(get_population(param_object))[\"attributes\"]:\n                    ### if parameter values are given as distribution --> get numpy array\n                    if isinstance(param_val, str):\n                        if (\n                            \"Uniform\" in param_val\n                            or 
\"DiscreteUniform\" in param_val\n                            or \"Normal\" in param_val\n                            or \"LogNormal\" in param_val\n                            or \"Exponential\" in param_val\n                            or \"Gamma\" in param_val\n                        ):\n                            distribution = eval(param_val)\n                            param_val = distribution.get_values(\n                                shape=get_population(param_object).geometry\n                            )\n                    self.set_param(\n                        compartment=param_object,\n                        parameter_name=param_name,\n                        parameter_value=param_val,\n                    )\n                    ### if parameter base_mean --> also set I_base\n                    if param_name == \"base_mean\":\n                        self.set_param(\n                            compartment=param_object,\n                            parameter_name=\"I_base\",\n                            parameter_value=param_val,\n                        )\n\n    def _set_noise_values(self):\n        \"\"\"\n        sets noise params of all populations\n        \"\"\"\n\n        ### loop over all params\n        for key, param_val in self.params.items():\n            ### split key in param object and param name\n            param_object = key.split(\".\")[0]\n            param_name = key.split(\".\")[1]\n\n            ### if param_object is a pop in network and param_name ends with noise --> set noise param of pop\n            if (\n                param_object in self.populations\n                and param_name.split(\"_\")[-1] == \"noise\"\n            ):\n                if param_name == \"mean_rate_noise\":\n                    ### for mean and sd the actual parameter of the pop has to be calculated\n                    mean = param_val\n                    try:\n                        ### noise values defined by mean and sd\n           
             sd = self.params[param_object + \".rate_sd_noise\"]\n                    except:\n                        ### if only mean is available, only set mean\n                        sd = 0\n                    if sd != 0:\n                        self.set_param(\n                            compartment=param_object,\n                            parameter_name=\"rates_noise\",\n                            parameter_value=self._rng.normal(\n                                mean, sd, get_population(param_object).size\n                            ),\n                        )\n                    else:\n                        self.set_param(\n                            compartment=param_object,\n                            parameter_name=\"rates_noise\",\n                            parameter_value=mean,\n                        )\n                elif param_name in vars(get_population(param_object))[\"attributes\"]:\n                    ### noise parameters which are actual attributes of the pop are simply set\n                    self.set_param(\n                        compartment=param_object,\n                        parameter_name=param_name,\n                        parameter_value=param_val,\n                    )\n                else:\n                    continue\n\n    def _set_connections(self):\n        \"\"\"\n        sets the connectivity and parameters of all projections\n        \"\"\"\n\n        ### dict for each projection, which params were already set during connectivity definition\n        already_set_params = {}\n\n        ### set connectivity\n        ### loop over all projections\n        set_con_failed = False\n        error_message_list = []\n        for proj_name in self.projections:\n            ### get the type of connectivity for projection\n            try:\n                connectivity = self.params[proj_name + \".connectivity\"]\n            except:\n                print(\n                    \"\\nERROR: missing connectivity 
parameter for\",\n                    proj_name,\n                    \"\\n\",\n                    proj_name + \".connectivity\",\n                    \"needed!\\n\",\n                    \"parameters id:\",\n                    self.params[\"general.id\"],\n                    \"\\n\",\n                )\n                quit()\n\n            possible_con_list = [\n                \"connect_fixed_number_pre\",\n                \"connect_all_to_all\",\n                \"connect_one_to_one\",\n                \"connect_fixed_probability\",\n            ]\n            if connectivity in possible_con_list:\n                try:\n                    # get all possible parameters of the connectivity function\n                    con_func = eval(f\"get_projection(proj_name).{connectivity}\")\n                    possible_con_params_list = list(\n                        inspect.signature(con_func).parameters.keys()\n                    )\n                    # check if paramters are given in the params dict and create the kwargs for the connectivity function\n                    con_kwargs = {}\n                    for con_param_key in possible_con_params_list:\n                        if proj_name + \".\" + con_param_key in self.params:\n                            con_kwargs[con_param_key] = eval(\n                                str(self.params[proj_name + \".\" + con_param_key])\n                            )\n                    # call the connectivity function with the obtained kwargs\n                    con_func(**con_kwargs)\n                    # store which parameters have been set\n                    already_set_params[proj_name] = list(con_kwargs.keys())\n                except:\n                    exc_type, exc_value, exc_traceback = sys.exc_info()\n                    error_message = traceback.format_exception_only(exc_type, exc_value)\n                    error_message_list.append([f\"ERROR: {proj_name}\"] + error_message)\n                    
set_con_failed = True\n            else:\n                print(\n                    \"\\nERROR: wrong connectivity parameter for\",\n                    proj_name + \".connectivity!\\n\",\n                    \"parameters id:\",\n                    self.params[\"general.id\"],\n                    \"possible:\",\n                    possible_con_list,\n                    \"\\n\",\n                )\n                quit()\n        if set_con_failed:\n            print(\"\\n\")\n            for error_message in error_message_list:\n                print(\" \".join(error_message))\n            raise TypeError(\"Setting connectivities failed\")\n\n        ### set parameters\n        ### loop over all params\n        for key, param_val in self.params.items():\n            ### split key in param object and param name\n            param_object = key.split(\".\")[0]\n            param_name = key.split(\".\")[1]\n\n            if param_object == \"general\":\n                continue\n\n            ### if param_object is proj in network and param not already used and param is an attribute of proj --> set param of proj\n            if (\n                param_object in self.projections\n                and not (param_name in already_set_params[param_object])\n                and param_name in vars(get_projection(param_object))[\"attributes\"]\n            ):\n                self.set_param(\n                    compartment=param_object,\n                    parameter_name=param_name,\n                    parameter_value=param_val,\n                )\n\n    def _get_params(self, name):\n        \"\"\"\n        read all parameters for specified model name\n\n        Args:\n            name (str):\n                name of the model, specifies which column in the csv file is used\n        \"\"\"\n\n        csvPath = os.path.dirname(os.path.realpath(__file__)) + \"/parameters.csv\"\n        csvfile = open(csvPath, newline=\"\")\n\n        params = {}\n        reader = 
csv.reader(csvfile, delimiter=\",\")\n        fileRows = []\n        idx = -1\n        ### check if name is in the .csv file\n        for row in reader:\n            if row[0] == \"\":\n                continue\n            fileRows.append(row)\n            if \"general.id\" == row[0] and True in [\n                name == row[i] for i in range(1, len(row))\n            ]:\n                idx = [name == row[i] for i in range(1, len(row))].index(True) + 1\n            elif \"general.id\" == row[0]:\n                print(\n                    \"No Parameters available for given model name \"\n                    + name\n                    + \"! (file \"\n                    + csvPath\n                    + \")\"\n                )\n                quit()\n        if idx == -1:\n            print(\"No general.id in parameter csv file!\")\n            quit()\n        ### read the column corresponding to name\n        for row in fileRows:\n            if \"###\" in row[0]:\n                continue\n            if row[idx] == \"\":\n                continue\n\n            value = row[idx]\n            try:\n                ### if float(value) works value is a number --> check if it is int\n                if float(value) - int(float(value)) == 0:\n                    params[row[0]] = int(float(value))\n                else:\n                    params[row[0]] = float(value)\n            except:\n                ### value is a string\n                if value[0] == \"$\" and value[-1] == \"$\":\n                    ### value is a formula\n                    params[row[0]] = float(eval(value[1:-1]))\n                else:\n                    ### value is some other string\n                    params[row[0]] = value\n        csvfile.close()\n\n        return params\n\n    def _needed_imports(self):\n        for import_val in [\n            Uniform,\n            DiscreteUniform,\n            Normal,\n            LogNormal,\n            Exponential,\n            
Gamma,\n            importlib,\n        ]:\n            print(import_val)\n
"},{"location":"built_in/models/#CompNeuroPy.full_models.BGM.__init__","title":"__init__(name='BGM_v01_p01', do_create=True, do_compile=True, compile_folder_name=None, seed=None, name_appendix='')","text":"

Parameters:

Name Type Description Default name str

name of the model, syntax: \"BGM_v_p\" replace and with the versions you want to use, see CompNeuroPy.full_models.BGM_22.parameters for available versions. Default: \"BGM_v01_p01\" 'BGM_v01_p01' do_create bool

if True, the model is created after initialization. Default: True

True do_compile bool

if True, the model is compiled after creation. Default: True

True compile_folder_name str

name of the folder in which the compiled model is saved. Default: None, i.e. \"annarchy_BGM_v\" is used None seed int

the seed for the random number generator used during model creation. Default: None, i.e. random seed is used

None name_appendix str

string which is appended to all model compartments and parameters. Allows to create multiple models with the same name and keep names of compartments and parameters unique. Default: \"\"

'' Source code in src/CompNeuroPy/full_models/bgm_22/bgm.py
@check_types()\ndef __init__(\n    self,\n    name: str = \"BGM_v01_p01\",\n    do_create: bool = True,\n    do_compile: bool = True,\n    compile_folder_name: str | None = None,\n    seed: int | None = None,\n    name_appendix: str = \"\",\n):\n    \"\"\"\n    Args:\n        name (str, optional):\n            name of the model, syntax: \"BGM_v<model_version>_p<parameters_version>\"\n            replace <model_version> and <parameters_version> with the versions you\n            want to use, see CompNeuroPy.full_models.BGM_22.parameters for available\n            versions. Default: \"BGM_v01_p01\"\n        do_create (bool, optional):\n            if True, the model is created after initialization. Default: True\n        do_compile (bool, optional):\n            if True, the model is compiled after creation. Default: True\n        compile_folder_name (str, optional):\n            name of the folder in which the compiled model is saved. Default: None,\n            i.e. \"annarchy_BGM_v<model_version>\" is used\n        seed (int, optional):\n            the seed for the random number generator used during model creation.\n            Default: None, i.e. random seed is used\n        name_appendix (str, optional):\n            string which is appended to all model compartments and parameters.\n            Allows to create multiple models with the same name and keep names of\n            compartments and parameters unique. 
Default: \"\"\n    \"\"\"\n    ### check if name is correct, otherwise raise ValueError\n    if not (\n        len(name.split(\"_\")) == 3\n        and name.split(\"_\")[0] == \"BGM\"\n        and name.split(\"_\")[1][0] == \"v\"\n        and name.split(\"_\")[2][0] == \"p\"\n    ):\n        raise ValueError(\n            \"name has to be of the form 'BGM_v<model_version>_p<parameters_version>'\"\n        )\n\n    ### set attributes (except the ones which are set in the super().__init__())\n    self.name_appendix = name_appendix\n    self.seed = seed\n    if len(self.name_appendix) > 0:\n        self._name_appendix_to_add = \":\" + name_appendix\n    else:\n        self._name_appendix_to_add = \"\"\n\n    ### set model_version_name\n    self._model_version_name = \"_\".join(name.split(\"_\")[:2])\n\n    ### update name with name_appendix\n    name = name + self._name_appendix_to_add\n\n    ### init default compile_folder_name\n    if compile_folder_name == None:\n        compile_folder_name = \"annarchy_\" + self._model_version_name\n\n    ### set description\n    description = (\n        \"The basal ganglia model based on the model from Goenner et al. (2021)\"\n    )\n\n    ### init random number generator\n    self._rng = np.random.default_rng(seed)\n\n    ### get model parameters before init, ignore name_appendix\n    self.params = self._get_params(name.split(\":\")[0])\n\n    ### init\n    super().__init__(\n        model_creation_function=self._model_creation_function,\n        name=name,\n        description=description,\n        do_create=do_create,\n        do_compile=do_compile,\n        compile_folder_name=compile_folder_name,\n    )\n
"},{"location":"built_in/models/#CompNeuroPy.full_models.BGM.create","title":"create(do_compile=True, compile_folder_name=None)","text":"

Creates the model and optionally compiles it directly.

Parameters:

Name Type Description Default do_compile bool

If True the model is compiled directly. Default: True.

True compile_folder_name str

Name of the folder in which the model is compiled. Default: value from initialization.

None Source code in src/CompNeuroPy/full_models/bgm_22/bgm.py
def create(self, do_compile=True, compile_folder_name=None):\n    \"\"\"\n    Creates the model and optionally compiles it directly.\n\n    Args:\n        do_compile (bool, optional):\n            If True the model is compiled directly. Default: True.\n        compile_folder_name (str, optional):\n            Name of the folder in which the model is compiled. Default: value from\n            initialization.\n    \"\"\"\n    ### create the model, but do not compile to set parameters before compilation\n    super().create(do_compile=False, compile_folder_name=compile_folder_name)\n\n    ### update names of compartments and parameters\n    self._add_name_appendix()\n\n    ### set parameters and connectivity of projections\n    ### for each projection the connectivity has to be defined in the params\n    self._set_params()\n    self._set_noise_values()\n    self._set_connections()\n\n    ### compile the model, after setting all parameters (included in compile state)\n    if do_compile:\n        self.compile(compile_folder_name)\n
"},{"location":"built_in/models/#CompNeuroPy.full_models.HHmodelBischop","title":"CompNeuroPy.full_models.HHmodelBischop","text":"

Bases: CompNeuroModel

Generates a single population of the Hodgkin & Huxley neuron model of Bischop et al. (2012) and optionally creates/compiles the network.

Source code in src/CompNeuroPy/full_models/hodgkin_huxley_single_pop.py
class HHmodelBischop(CompNeuroModel):\n    \"\"\"\n    Generates a single population of the Hodgkin & Huxley neuron model of\n    [Bischop et al. (2012)](https://doi.org/10.3389/fnmol.2012.00078) and optionally\n    creates/compiles the network.\n    \"\"\"\n\n    def __init__(\n        self,\n        pop_size=1,\n        conductance_based_synapses=False,\n        name=\"single_HH_Bischop\",\n        do_create=True,\n        do_compile=True,\n        compile_folder_name=\"annarchy_single_HH_Bischop\",\n    ):\n        \"\"\"\n        Args:\n            pop_size (int, optional):\n                Number of neurons in the population. Default: 1.\n            conductance_based_synapses (bool, optional):\n                Whether the equations contain conductance based synapses for AMPA and\n                GABA. Default: False.\n            name (str, optional):\n                Name of the model. Default: \"single_HH_Bischop\".\n            do_create (bool, optional):\n                Whether to create the model. Default: True.\n            do_compile (bool, optional):\n                Whether to compile the model. Default: True.\n            compile_folder_name (str, optional):\n                Name of the folder for the compiled model.\n                Default: \"annarchy_single_HH_Bischop\".\n        \"\"\"\n        ### set attributes\n        self.pop_size = pop_size\n        self.conductance_based_synapses = conductance_based_synapses\n        # define description\n        description = \"\"\"\n            One population \"HH_Bischop\" with a single neuron of the Hodgkin\n            & Huxley neuron model of Bischop et al. 
(2012).\n        \"\"\"\n        # initialize CompNeuroModel\n        super().__init__(\n            model_creation_function=self._bischop_2012_creation_function,\n            name=name,\n            description=description,\n            do_create=do_create,\n            do_compile=do_compile,\n            compile_folder_name=compile_folder_name,\n        )\n\n    def _bischop_2012_creation_function(self):\n        if self.conductance_based_synapses:\n            Population(self.pop_size, neuron=HHneuronBischopSyn, name=\"HH_Bischop_syn\")\n        else:\n            Population(self.pop_size, neuron=HHneuronBischop, name=\"HH_Bischop\")\n
"},{"location":"built_in/models/#CompNeuroPy.full_models.HHmodelBischop.__init__","title":"__init__(pop_size=1, conductance_based_synapses=False, name='single_HH_Bischop', do_create=True, do_compile=True, compile_folder_name='annarchy_single_HH_Bischop')","text":"

Parameters:

Name Type Description Default pop_size int

Number of neurons in the population. Default: 1.

1 conductance_based_synapses bool

Whether the equations contain conductance based synapses for AMPA and GABA. Default: False.

False name str

Name of the model. Default: \"single_HH_Bischop\".

'single_HH_Bischop' do_create bool

Whether to create the model. Default: True.

True do_compile bool

Whether to compile the model. Default: True.

True compile_folder_name str

Name of the folder for the compiled model. Default: \"annarchy_single_HH_Bischop\".

'annarchy_single_HH_Bischop' Source code in src/CompNeuroPy/full_models/hodgkin_huxley_single_pop.py
def __init__(\n    self,\n    pop_size=1,\n    conductance_based_synapses=False,\n    name=\"single_HH_Bischop\",\n    do_create=True,\n    do_compile=True,\n    compile_folder_name=\"annarchy_single_HH_Bischop\",\n):\n    \"\"\"\n    Args:\n        pop_size (int, optional):\n            Number of neurons in the population. Default: 1.\n        conductance_based_synapses (bool, optional):\n            Whether the equations contain conductance based synapses for AMPA and\n            GABA. Default: False.\n        name (str, optional):\n            Name of the model. Default: \"single_HH_Bischop\".\n        do_create (bool, optional):\n            Whether to create the model. Default: True.\n        do_compile (bool, optional):\n            Whether to compile the model. Default: True.\n        compile_folder_name (str, optional):\n            Name of the folder for the compiled model.\n            Default: \"annarchy_single_HH_Bischop\".\n    \"\"\"\n    ### set attributes\n    self.pop_size = pop_size\n    self.conductance_based_synapses = conductance_based_synapses\n    # define description\n    description = \"\"\"\n        One population \"HH_Bischop\" with a single neuron of the Hodgkin\n        & Huxley neuron model of Bischop et al. (2012).\n    \"\"\"\n    # initialize CompNeuroModel\n    super().__init__(\n        model_creation_function=self._bischop_2012_creation_function,\n        name=name,\n        description=description,\n        do_create=do_create,\n        do_compile=do_compile,\n        compile_folder_name=compile_folder_name,\n    )\n
"},{"location":"built_in/models/#CompNeuroPy.full_models.HHmodelCorbit","title":"CompNeuroPy.full_models.HHmodelCorbit","text":"

Bases: CompNeuroModel

Generates a single population of the Hodgkin & Huxley neuron model of Corbit et al. (2016) and optionally creates/compiles the network.

Source code in src/CompNeuroPy/full_models/hodgkin_huxley_single_pop.py
class HHmodelCorbit(CompNeuroModel):\n    \"\"\"\n    Generates a single population of the Hodgkin & Huxley neuron model of\n    [Corbit et al. (2016)](https://doi.org/10.1523/JNEUROSCI.0339-16.2016) and\n    optionally creates/compiles the network.\n    \"\"\"\n\n    def __init__(\n        self,\n        pop_size=1,\n        conductance_based_synapses=False,\n        name=\"single_HH_Corbit\",\n        do_create=True,\n        do_compile=True,\n        compile_folder_name=\"annarchy_single_HH_Corbit\",\n    ):\n        \"\"\"\n        Args:\n            pop_size (int, optional):\n                Number of neurons in the population. Default: 1.\n            conductance_based_synapses (bool, optional):\n                Whether the equations contain conductance based synapses for AMPA and\n                GABA. Default: False.\n            name (str, optional):\n                Name of the model. Default: \"single_HH_Corbit\".\n            do_create (bool, optional):\n                Whether to create the model. Default: True.\n            do_compile (bool, optional):\n                Whether to compile the model. Default: True.\n            compile_folder_name (str, optional):\n                Name of the folder for the compiled model.\n                Default: \"annarchy_single_HH_Corbit\".\n        \"\"\"\n        ### set attributes\n        self.pop_size = pop_size\n        self.conductance_based_synapses = conductance_based_synapses\n        # define description\n        description = \"\"\"\n            One population \"HH_Bischop\" with a single neuron of the Hodgkin\n            & Huxley neuron model of Bischop et al. 
(2012).\n        \"\"\"\n        # initialize CompNeuroModel\n        super().__init__(\n            model_creation_function=self._model_creation_function,\n            name=name,\n            description=description,\n            do_create=do_create,\n            do_compile=do_compile,\n            compile_folder_name=compile_folder_name,\n        )\n\n    def _model_creation_function(self):\n        if self.conductance_based_synapses:\n            Population(self.pop_size, neuron=HHneuronCorbitSyn, name=\"HH_Corbit_syn\")\n        else:\n            Population(self.pop_size, neuron=HHneuronCorbit, name=\"HH_Corbit\")\n
"},{"location":"built_in/models/#CompNeuroPy.full_models.HHmodelCorbit.__init__","title":"__init__(pop_size=1, conductance_based_synapses=False, name='single_HH_Corbit', do_create=True, do_compile=True, compile_folder_name='annarchy_single_HH_Corbit')","text":"

Parameters:

Name Type Description Default pop_size int

Number of neurons in the population. Default: 1.

1 conductance_based_synapses bool

Whether the equations contain conductance based synapses for AMPA and GABA. Default: False.

False name str

Name of the model. Default: \"single_HH_Corbit\".

'single_HH_Corbit' do_create bool

Whether to create the model. Default: True.

True do_compile bool

Whether to compile the model. Default: True.

True compile_folder_name str

Name of the folder for the compiled model. Default: \"annarchy_single_HH_Corbit\".

'annarchy_single_HH_Corbit' Source code in src/CompNeuroPy/full_models/hodgkin_huxley_single_pop.py
def __init__(\n    self,\n    pop_size=1,\n    conductance_based_synapses=False,\n    name=\"single_HH_Corbit\",\n    do_create=True,\n    do_compile=True,\n    compile_folder_name=\"annarchy_single_HH_Corbit\",\n):\n    \"\"\"\n    Args:\n        pop_size (int, optional):\n            Number of neurons in the population. Default: 1.\n        conductance_based_synapses (bool, optional):\n            Whether the equations contain conductance based synapses for AMPA and\n            GABA. Default: False.\n        name (str, optional):\n            Name of the model. Default: \"single_HH_Corbit\".\n        do_create (bool, optional):\n            Whether to create the model. Default: True.\n        do_compile (bool, optional):\n            Whether to compile the model. Default: True.\n        compile_folder_name (str, optional):\n            Name of the folder for the compiled model.\n            Default: \"annarchy_single_HH_Corbit\".\n    \"\"\"\n    ### set attributes\n    self.pop_size = pop_size\n    self.conductance_based_synapses = conductance_based_synapses\n    # define description\n    description = \"\"\"\n        One population \"HH_Bischop\" with a single neuron of the Hodgkin\n        & Huxley neuron model of Bischop et al. (2012).\n    \"\"\"\n    # initialize CompNeuroModel\n    super().__init__(\n        model_creation_function=self._model_creation_function,\n        name=name,\n        description=description,\n        do_create=do_create,\n        do_compile=do_compile,\n        compile_folder_name=compile_folder_name,\n    )\n
"},{"location":"built_in/neuron_models/","title":"Neuron Models","text":""},{"location":"built_in/neuron_models/#artificial-neurons","title":"Artificial Neurons","text":""},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.artificial_nm.IntegratorNeuron","title":"IntegratorNeuron","text":"

Bases: Neuron

TEMPLATE

Integrator Neuron for stop_condition in spiking models.

The variable g_ampa increases for incoming spikes (target ampa) and decreases exponentially with time constant tau. If g_ampa reaches a threshold, the neuron's variable decision, which is by default -1, changes to the neuron_id. This can be used to cause the stop_condition of ANNarchy's simulate_until() function (stop_condition=\"decision>=0 : any\"). In case of multiple integrator neurons, the neuron_id can be used to identify the neuron that reached the threshold.

Warning

You have to define the variable neuron_id for each neuron in the Integrator population.

Parameters:

Name Type Description Default tau float

Time constant in ms of the neuron. Default: 1.

1 threshold float

Threshold for the decision g_ampa has to reach. Default: 1.

1

Examples:

from ANNarchy import Population, simulate_until\nfrom CompNeuroPy.neuron_models import Integrator\n\n# Create a population of 10 integrator neurons\nintegrator_neurons = Population(\n    geometry=10,\n    neuron=IntegratorNeuron(tau=1, threshold=1),\n    stop_condition=\"decision>=0 : any\",\n    name=\"integrator_neurons\",)\n\n# set the neuron_id for each neuron\nintegrator_neurons.neuron_id = range(10)\n\n# simulate until one neuron reaches the threshold\nsimulate_until(max_duration=1000, population=integrator_neurons)\n\n# check if simulation stop due to stop_codnition and which neuron reached the\n# threshold\nif (integrator_neurons.decision >= 0).any():\n    neurons_reached_thresh = integrator_neurons.neuron_id[\n        integrator_neurons.decision >= 0\n    ]\n    print(f\"Neuron(s) {neurons_reached_thresh} reached threshold.\")\nelse:\n    print(\"No neuron reached threshold.\")\n
Variables to record
  • g_ampa
  • decision
  • r
Source code in src/CompNeuroPy/neuron_models/final_models/artificial_nm.py
class IntegratorNeuron(Neuron):\n    \"\"\"\n    TEMPLATE\n\n    Integrator Neuron for stop_condition in spiking models.\n\n    The variable g_ampa increases for incoming spikes (target ampa) and decreases\n    exponentially with time constant tau. If g_ampa reaches a threshold, the neuron's\n    variable decision, which is by default -1, changes to the neuron_id. This can be\n    used to cause the stop_condition of ANNarchy's simulate_until() function\n    (stop_codnition=\"decision>=0 : any\"). In case of multiple integrator neurons,\n    the neuron_id can be used to identify the neuron that reached the threshold.\n\n    !!! warning\n        You have to define the variable neuron_id for each neuron in the Integrator\n        population.\n\n    Parameters:\n        tau (float, optional):\n            Time constant in ms of the neuron. Default: 1.\n        threshold (float, optional):\n            Threshold for the decision g_ampa has to reach. Default: 1.\n\n    Examples:\n        ```python\n        from ANNarchy import Population, simulate_until\n        from CompNeuroPy.neuron_models import Integrator\n\n        # Create a population of 10 integrator neurons\n        integrator_neurons = Population(\n            geometry=10,\n            neuron=IntegratorNeuron(tau=1, threshold=1),\n            stop_condition=\"decision>=0 : any\",\n            name=\"integrator_neurons\",)\n\n        # set the neuron_id for each neuron\n        integrator_neurons.neuron_id = range(10)\n\n        # simulate until one neuron reaches the threshold\n        simulate_until(max_duration=1000, population=integrator_neurons)\n\n        # check if simulation stop due to stop_codnition and which neuron reached the\n        # threshold\n        if (integrator_neurons.decision >= 0).any():\n            neurons_reached_thresh = integrator_neurons.neuron_id[\n                integrator_neurons.decision >= 0\n            ]\n            print(f\"Neuron(s) {neurons_reached_thresh} reached 
threshold.\")\n        else:\n            print(\"No neuron reached threshold.\")\n        ```\n\n    Variables to record:\n        - g_ampa\n        - decision\n        - r\n    \"\"\"\n\n    # For reporting\n    _instantiated = []\n\n    def __init__(self, tau: float = 1, threshold: float = 1):\n        # Create the arguments\n        parameters = f\"\"\"\n            tau = {tau} : population\n            threshold = {threshold} : population\n            neuron_id = 0\n        \"\"\"\n\n        super().__init__(\n            parameters=parameters,\n            equations=\"\"\"\n                dg_ampa/dt = - g_ampa / tau\n                ddecision/dt = 0 : init = -1\n            \"\"\",\n            spike=\"\"\"\n                g_ampa >= threshold\n            \"\"\",\n            reset=\"\"\"\n                decision = neuron_id\n            \"\"\",\n            name=\"integrator_neuron\",\n            description=\"\"\"\n                Integrator Neuron, which integrates incoming spikes with value g_ampa\n                and emits a spike when reaching a threshold. After spike decision\n                changes, which can be used as for stop condition\"\"\",\n        )\n\n        # For reporting\n        self._instantiated.append(True)\n
"},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.artificial_nm.IntegratorNeuronSimple","title":"IntegratorNeuronSimple","text":"

Bases: Neuron

TEMPLATE

Integrator Neuron for stop_condition in spiking models.

The variable g_ampa increases for incoming spikes (target ampa) and decreases exponentially with time constant tau. You can check g_ampa and use it for the stop_condition of ANNarchy's simulate_until() function (stop_condition=\"g_ampa>=some_value : any\"). In case of multiple integrator neurons, the neuron_id can be used to identify the neuron that reached the threshold.

Warning

You have to define the variable neuron_id for each neuron in the Integrator population.

Parameters:

Name Type Description Default tau float

Time constant in ms of the neuron. Default: 1.

1

Examples:

from ANNarchy import Population, simulate_until\nfrom CompNeuroPy.neuron_models import Integrator\n\n# Create a population of 10 integrator neurons\nintegrator_neurons = Population(\n    geometry=10,\n    neuron=IntegratorNeuronSimple(tau=1),\n    stop_condition=\"g_ampa>=5 : any\",\n    name=\"integrator_neurons\",)\n\n# set the neuron_id for each neuron\nintegrator_neurons.neuron_id = range(10)\n\n# simulate until one neuron reaches the threshold\nsimulate_until(max_duration=1000, population=integrator_neurons)\n\n# check if simulation stop due to stop_codnition and which neuron reached the\n# threshold\nif (integrator_neurons.g_ampa >= 5).any():\n    neurons_reached_thresh = integrator_neurons.neuron_id[\n        integrator_neurons.g_ampa >= 5\n    ]\n    print(f\"Neuron(s) {neurons_reached_thresh} reached threshold.\")\nelse:\n    print(\"No neuron reached threshold.\")\n
Variables to record
  • g_ampa
  • r
Source code in src/CompNeuroPy/neuron_models/final_models/artificial_nm.py
class IntegratorNeuronSimple(Neuron):\n    \"\"\"\n    TEMPLATE\n\n    Integrator Neuron for stop_condition in spiking models.\n\n    The variable g_ampa increases for incoming spikes (target ampa) and decreases\n    exponentially with time constant tau. You can check g_ampa and use it for the\n    stop_condition of ANNarchy's simulate_until() function\n    (stop_codnition=\"g_ampa>=some_value : any\"). In case of multiple integrator neurons,\n    the neuron_id can be used to identify the neuron that reached the threshold.\n\n    !!! warning\n        You have to define the variable neuron_id for each neuron in the Integrator\n        population.\n\n    Parameters:\n        tau (float, optional):\n            Time constant in ms of the neuron. Default: 1.\n\n    Examples:\n        ```python\n        from ANNarchy import Population, simulate_until\n        from CompNeuroPy.neuron_models import Integrator\n\n        # Create a population of 10 integrator neurons\n        integrator_neurons = Population(\n            geometry=10,\n            neuron=IntegratorNeuronSimple(tau=1),\n            stop_condition=\"g_ampa>=5 : any\",\n            name=\"integrator_neurons\",)\n\n        # set the neuron_id for each neuron\n        integrator_neurons.neuron_id = range(10)\n\n        # simulate until one neuron reaches the threshold\n        simulate_until(max_duration=1000, population=integrator_neurons)\n\n        # check if simulation stop due to stop_codnition and which neuron reached the\n        # threshold\n        if (integrator_neurons.g_ampa >= 5).any():\n            neurons_reached_thresh = integrator_neurons.neuron_id[\n                integrator_neurons.g_ampa >= 5\n            ]\n            print(f\"Neuron(s) {neurons_reached_thresh} reached threshold.\")\n        else:\n            print(\"No neuron reached threshold.\")\n        ```\n\n    Variables to record:\n        - g_ampa\n        - r\n    \"\"\"\n\n    # For reporting\n    _instantiated = []\n\n    def 
__init__(self, tau: float = 1):\n        # Create the arguments\n        parameters = f\"\"\"\n            tau = {tau} : population\n            neuron_id = 0\n        \"\"\"\n\n        super().__init__(\n            parameters=parameters,\n            equations=\"\"\"\n                dg_ampa/dt = - g_ampa / tau\n                r = 0\n            \"\"\",\n            name=\"integrator_neuron_simple\",\n            description=\"\"\"\n                Integrator Neuron, which integrates incoming spikes with value g_ampa,\n                which can be used as a stop condition\n            \"\"\",\n        )\n\n        # For reporting\n        self._instantiated.append(True)\n
"},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.artificial_nm.PoissonNeuron","title":"PoissonNeuron","text":"

Bases: Neuron

TEMPLATE

Poisson neuron whose rate can be specified and is reached instantaneously. The neuron emits spikes following a Poisson distribution, the average firing rate is given by the parameter rates.

Parameters:

Name Type Description Default rates float

The average firing rate of the neuron in Hz. Default: 0.

0 Variables to record
  • p
  • r
Source code in src/CompNeuroPy/neuron_models/final_models/artificial_nm.py
class PoissonNeuron(Neuron):\n    \"\"\"\n    TEMPLATE\n\n    Poisson neuron whose rate can be specified and is reached instantaneous. The\n    neuron emits spikes following a Poisson distribution, the average firing rate\n    is given by the parameter rates.\n\n    Parameters:\n        rates (float, optional):\n            The average firing rate of the neuron in Hz. Default: 0.\n\n    Variables to record:\n        - p\n        - r\n    \"\"\"\n\n    # For reporting\n    _instantiated = []\n\n    def __init__(self, rates: float = 0):\n        # Create the arguments\n        parameters = f\"\"\"\n            rates = {rates}\n        \"\"\"\n\n        super().__init__(\n            parameters=parameters,\n            equations=\"\"\"\n                p = Uniform(0.0, 1.0) * 1000.0 / dt\n            \"\"\",\n            spike=\"\"\"\n                p <= rates\n            \"\"\",\n            reset=\"\"\"\n                p = 0.0\n            \"\"\",\n            name=\"poisson_neuron\",\n            description=\"\"\"\n                Poisson neuron whose rate can be specified and is reached instantaneous.\n            \"\"\",\n        )\n\n        # For reporting\n        self._instantiated.append(True)\n
"},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.artificial_nm.PoissonNeuronUpDown","title":"PoissonNeuronUpDown","text":"

Bases: Neuron

TEMPLATE

The neuron emits spikes following a Poisson distribution, the average firing rate is given by the parameter rates and is reached with time constants tau_up and tau_down.

Attributes:

Name Type Description rates float

The average firing rate of the neuron in Hz. Default: 0.

tau_up float

Time constant in ms for increasing the firing rate. Default: 1.

tau_down float

Time constant in ms for decreasing the firing rate. Default: 1.

Source code in src/CompNeuroPy/neuron_models/final_models/artificial_nm.py
class PoissonNeuronUpDown(Neuron):\n    \"\"\"\n    TEMPLATE\n\n    The neuron emits spikes following a Poisson distribution, the average firing rate is\n    given by the parameter rates and is reached with time constants tau_up and tau_down.\n\n    Attributes:\n        rates (float, optional):\n            The average firing rate of the neuron in Hz. Default: 0.\n        tau_up (float, optional):\n            Time constant in ms for increasing the firing rate. Default: 1.\n        tau_down (float, optional):\n            Time constant in ms for decreasing the firing rate. Default: 1.\n    \"\"\"\n\n    # For reporting\n    _instantiated = []\n\n    def __init__(self, rates: float = 0, tau_up: float = 1, tau_down: float = 1):\n        # Create the arguments\n        parameters = f\"\"\"\n            rates = {rates}\n            tau_up = {tau_up}\n            tau_down = {tau_down}\n        \"\"\"\n\n        super().__init__(\n            parameters=parameters,\n            equations=\"\"\"\n                p = Uniform(0.0, 1.0) * 1000.0 / dt\n                dact/dt = if (rates - act) > 0:\n                              (rates - act) / tau_up\n                          else:\n                              (rates - act) / tau_down\n            \"\"\",\n            spike=\"\"\"\n                p <= act\n            \"\"\",\n            reset=\"\"\"\n                p = 0.0\n            \"\"\",\n            name=\"poisson_neuron_up_down\",\n            description=\"\"\"Poisson neuron whose rate can be specified and is reached\n                with time constants tau_up and tau_down.\n            \"\"\",\n        )\n\n        # For reporting\n        self._instantiated.append(True)\n
"},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.artificial_nm.PoissonNeuronSin","title":"PoissonNeuronSin","text":"

Bases: Neuron

TEMPLATE

Neuron emitting spikes following a Poisson distribution, the average firing rate is given by a sinus function.

Parameters:

Name Type Description Default amplitude float

Amplitude of the sinus function. Default: 0.

0 base float

Base (offset) of the sinus function. Default: 0.

0 frequency float

Frequency of the sinus function. Default: 0.

0 phase float

Phase of the sinus function. Default: 0.

0 Variables to record
  • rates
  • p
  • r
Source code in src/CompNeuroPy/neuron_models/final_models/artificial_nm.py
class PoissonNeuronSin(Neuron):\n    \"\"\"\n    TEMPLATE\n\n    Neuron emitting spikes following a Poisson distribution, the average firing rate\n    is given by a sinus function.\n\n    Parameters:\n        amplitude (float, optional):\n            Amplitude of the sinus function. Default: 0.\n        base (float, optional):\n            Base (offset) of the sinus function. Default: 0.\n        frequency (float, optional):\n            Frequency of the sinus function. Default: 0.\n        phase (float, optional):\n            Phase of the sinus function. Default: 0.\n\n    Variables to record:\n        - rates\n        - p\n        - r\n    \"\"\"\n\n    # For reporting\n    _instantiated = []\n\n    def __init__(\n        self,\n        amplitude: float = 0,\n        base: float = 0,\n        frequency: float = 0,\n        phase: float = 0,\n    ):\n        # Create the arguments\n        parameters = f\"\"\"\n            amplitude = {amplitude}\n            base = {base}\n            frequency = {frequency}\n            phase = {phase}\n        \"\"\"\n\n        super().__init__(\n            parameters=parameters,\n            equations=\"\"\"\n                rates = amplitude * sin((2*pi*frequency)*(t/1000-phase)) + base\n                p     = Uniform(0.0, 1.0) * 1000.0 / dt\n            \"\"\",\n            spike=\"\"\"\n                p <= rates\n            \"\"\",\n            reset=\"\"\"\n                p = 0.0\n            \"\"\",\n            name=\"poisson_neuron_sin\",\n            description=\"Poisson neuron whose rate varies with a sinus function.\",\n        )\n\n        # For reporting\n        self._instantiated.append(True)\n
"},{"location":"built_in/neuron_models/#hodgkin-huxley-neurons","title":"Hodgkin Huxley Neurons","text":""},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.H_and_H_like_nm.HHneuronBischop","title":"HHneuronBischop","text":"

Bases: _HHneuron

PREDEFINED

Hodgkin Huxley neuron model for striatal FSI from Bischop et al. (2012).

Variables to record
  • prev_v
  • I_L
  • alpha_h
  • beta_h
  • h_inf
  • tau_h
  • h
  • alpha_m
  • beta_m
  • m_inf
  • m
  • I_Na
  • alpha_n1
  • beta_n1
  • n1_inf
  • tau_n1
  • n1
  • I_Kv1
  • alpha_n3
  • beta_n3
  • n3_inf
  • tau_n3
  • n3
  • I_Kv3
  • PV
  • PV_Mg
  • dPV_Ca_dt
  • PV_Ca
  • Ca
  • k_inf
  • tau_k
  • k
  • I_SK
  • a_inf
  • a
  • I_Ca
  • v
  • r
Source code in src/CompNeuroPy/neuron_models/final_models/H_and_H_like_nm.py
class HHneuronBischop(_HHneuron):\n    \"\"\"\n    PREDEFINED\n\n    Hodgkin Huxley neuron model for striatal FSI from\n    [Bischop et al. (2012)](https://doi.org/10.3389/fnmol.2012.00078).\n\n    Variables to record:\n        - prev_v\n        - I_L\n        - alpha_h\n        - beta_h\n        - h_inf\n        - tau_h\n        - h\n        - alpha_m\n        - beta_m\n        - m_inf\n        - m\n        - I_Na\n        - alpha_n1\n        - beta_n1\n        - n1_inf\n        - tau_n1\n        - n1\n        - I_Kv1\n        - alpha_n3\n        - beta_n3\n        - n3_inf\n        - tau_n3\n        - n3\n        - I_Kv3\n        - PV\n        - PV_Mg\n        - dPV_Ca_dt\n        - PV_Ca\n        - Ca\n        - k_inf\n        - tau_k\n        - k\n        - I_SK\n        - a_inf\n        - a\n        - I_Ca\n        - v\n        - r\n    \"\"\"\n\n    def __init__(self):\n        self.bischop = _BischopStrings()\n\n        super().__init__()\n\n    def _get_parameters(self):\n        return self.bischop.parameters_base\n\n    def _get_equations(self):\n        return self.bischop.equations_base + self.bischop.membrane_base\n\n    def _get_name(self):\n        return \"H_and_H_Bischop\"\n\n    def _get_description(self):\n        return (\n            \"Hodgkin Huxley neuron model for striatal FSI from Bischop et al. (2012).\"\n        )\n
"},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.H_and_H_like_nm.HHneuronBischopSyn","title":"HHneuronBischopSyn","text":"

Bases: _HHneuron

PREDEFINED

Hodgkin Huxley neuron model for striatal FSI from Bischop et al. (2012) with conductance-based synapses/currents for AMPA and GABA.

Variables to record
  • g_ampa
  • g_gaba
  • prev_v
  • I_L
  • alpha_h
  • beta_h
  • h_inf
  • tau_h
  • h
  • alpha_m
  • beta_m
  • m_inf
  • m
  • I_Na
  • alpha_n1
  • beta_n1
  • n1_inf
  • tau_n1
  • n1
  • I_Kv1
  • alpha_n3
  • beta_n3
  • n3_inf
  • tau_n3
  • n3
  • I_Kv3
  • PV
  • PV_Mg
  • dPV_Ca_dt
  • PV_Ca
  • Ca
  • k_inf
  • tau_k
  • k
  • I_SK
  • a_inf
  • a
  • I_Ca
  • v
  • r
Source code in src/CompNeuroPy/neuron_models/final_models/H_and_H_like_nm.py
class HHneuronBischopSyn(_HHneuron):\n    \"\"\"\n    PREDEFINED\n\n    Hodgkin Huxley neuron model for striatal FSI from\n    [Bischop et al. (2012)](https://doi.org/10.3389/fnmol.2012.00078) with\n    conductance-based synapses/currents for AMPA and GABA.\n\n    Variables to record:\n        - g_ampa\n        - g_gaba\n        - prev_v\n        - I_L\n        - alpha_h\n        - beta_h\n        - h_inf\n        - tau_h\n        - h\n        - alpha_m\n        - beta_m\n        - m_inf\n        - m\n        - I_Na\n        - alpha_n1\n        - beta_n1\n        - n1_inf\n        - tau_n1\n        - n1\n        - I_Kv1\n        - alpha_n3\n        - beta_n3\n        - n3_inf\n        - tau_n3\n        - n3\n        - I_Kv3\n        - PV\n        - PV_Mg\n        - dPV_Ca_dt\n        - PV_Ca\n        - Ca\n        - k_inf\n        - tau_k\n        - k\n        - I_SK\n        - a_inf\n        - a\n        - I_Ca\n        - v\n        - r\n    \"\"\"\n\n    def __init__(self):\n        self.bischop = _BischopStrings()\n\n        super().__init__()\n\n    def _get_parameters(self):\n        return self.bischop.parameters_conductance\n\n    def _get_equations(self):\n        return self.bischop.equations_conductance + self.bischop.membrane_conductance\n\n    def _get_name(self):\n        return \"H_and_H_Bischop_syn\"\n\n    def _get_description(self):\n        return \"\"\"\n                Hodgkin Huxley neuron model for striatal FSI from Bischop et al. (2012)\n                with conductance-based synapses/currents for AMPA and GABA.\n            \"\"\"\n
"},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.H_and_H_like_nm.HHneuronCorbit","title":"HHneuronCorbit","text":"

Bases: _HHneuron

PREDEFINED

Hodgkin Huxley neuron model for striatal FSI from Corbit et al. (2016).

Variables to record
  • prev_v
  • I_L
  • m_Na
  • h_Na
  • I_Na
  • n_Kv3_inf
  • tau_n_Kv3_inf
  • n_Kv3
  • I_Kv3
  • m_Kv1
  • h_Kv1
  • I_Kv1
  • v
  • r
Source code in src/CompNeuroPy/neuron_models/final_models/H_and_H_like_nm.py
class HHneuronCorbit(_HHneuron):\n    \"\"\"\n    PREDEFINED\n\n    Hodgkin Huxley neuron model for striatal FSI from\n    [Corbit et al. (2016)](https://doi.org/10.1523/JNEUROSCI.0339-16.2016).\n\n    Variables to record:\n        - prev_v\n        - I_L\n        - m_Na\n        - h_Na\n        - I_Na\n        - n_Kv3_inf\n        - tau_n_Kv3_inf\n        - n_Kv3\n        - I_Kv3\n        - m_Kv1\n        - h_Kv1\n        - I_Kv1\n        - v\n        - r\n    \"\"\"\n\n    def __init__(self):\n        self.corbit = _CorbitStrings()\n\n        super().__init__()\n\n    def _get_parameters(self):\n        return self.corbit.parameters_base\n\n    def _get_equations(self):\n        return self.corbit.equations_base + self.corbit.membrane_base\n\n    def _get_name(self):\n        return \"H_and_H_Corbit\"\n\n    def _get_description(self):\n        return \"Hodgkin Huxley neuron model for striatal FSI from Corbit et al. (2016).\"\n
"},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.H_and_H_like_nm.HHneuronCorbitSyn","title":"HHneuronCorbitSyn","text":"

Bases: _HHneuron

PREDEFINED

Hodgkin Huxley neuron model for striatal FSI from Corbit et al. (2016) with conductance-based synapses/currents for AMPA and GABA.

Variables to record
  • g_ampa
  • g_gaba
  • prev_v
  • I_L
  • m_Na
  • h_Na
  • I_Na
  • n_Kv3_inf
  • tau_n_Kv3_inf
  • n_Kv3
  • I_Kv3
  • m_Kv1
  • h_Kv1
  • I_Kv1
  • v
  • r
Source code in src/CompNeuroPy/neuron_models/final_models/H_and_H_like_nm.py
class HHneuronCorbitSyn(_HHneuron):\n    \"\"\"\n    PREDEFINED\n\n    Hodgkin Huxley neuron model for striatal FSI from\n    [Corbit et al. (2016)](https://doi.org/10.1523/JNEUROSCI.0339-16.2016) with\n    conductance-based synapses/currents for AMPA and GABA.\n\n    Variables to record:\n        - g_ampa\n        - g_gaba\n        - prev_v\n        - I_L\n        - m_Na\n        - h_Na\n        - I_Na\n        - n_Kv3_inf\n        - tau_n_Kv3_inf\n        - n_Kv3\n        - I_Kv3\n        - m_Kv1\n        - h_Kv1\n        - I_Kv1\n        - v\n        - r\n    \"\"\"\n\n    def __init__(self):\n        self.corbit = _CorbitStrings()\n\n        super().__init__()\n\n    def _get_parameters(self):\n        return self.corbit.parameters_conductance\n\n    def _get_equations(self):\n        return self.corbit.equations_conductance + self.corbit.membrane_conductance\n\n    def _get_name(self):\n        return \"H_and_H_Corbit_syn\"\n\n    def _get_description(self):\n        return \"\"\"\n                Hodgkin Huxley neuron model for striatal FSI from Corbit et al. (2016)\n                with conductance-based synapses/currents for AMPA and GABA.\n            \"\"\"\n
"},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.H_and_H_like_nm.HHneuronCorbitVoltageClamp","title":"HHneuronCorbitVoltageClamp","text":"

Bases: _HHneuron

PREDEFINED

Hodgkin Huxley neuron model for striatal FSI from Corbit et al. (2016) with voltage clamp. Membrane potential v is clamped and I_inf can be recorded.

Variables to record
  • prev_v
  • I_L
  • m_Na
  • h_Na
  • I_Na
  • n_Kv3_inf
  • tau_n_Kv3_inf
  • n_Kv3
  • I_Kv3
  • m_Kv1
  • h_Kv1
  • I_Kv1
  • v
  • I_inf
  • r
Source code in src/CompNeuroPy/neuron_models/final_models/H_and_H_like_nm.py
class HHneuronCorbitVoltageClamp(_HHneuron):\n    \"\"\"\n    PREDEFINED\n\n    Hodgkin Huxley neuron model for striatal FSI from\n    [Corbit et al. (2016)](https://doi.org/10.1523/JNEUROSCI.0339-16.2016) with\n    voltage clamp. Membrane potential v is clamped and I_inf can be recorded.\n\n    Variables to record:\n        - prev_v\n        - I_L\n        - m_Na\n        - h_Na\n        - I_Na\n        - n_Kv3_inf\n        - tau_n_Kv3_inf\n        - n_Kv3\n        - I_Kv3\n        - m_Kv1\n        - h_Kv1\n        - I_Kv1\n        - v\n        - I_inf\n        - r\n    \"\"\"\n\n    def __init__(self):\n        self.corbit = _CorbitStrings()\n\n        super().__init__()\n\n    def _get_parameters(self):\n        return self.corbit.parameters_base\n\n    def _get_equations(self):\n        return self.corbit.equations_base + self.corbit.membrane_voltage_clamp\n\n    def _get_name(self):\n        return \"H_and_H_Corbit_voltage_clamp\"\n\n    def _get_description(self):\n        return \"\"\"\n                Hodgkin Huxley neuron model for striatal FSI from Corbit et al. (2016)\n                with voltage clamp.\n            \"\"\"\n
"},{"location":"built_in/neuron_models/#izhikevich-2003-like-neurons","title":"Izhikevich (2003)-like Neurons","text":""},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.izhikevich_2003_like_nm.Izhikevich2003FixedNoisyAmpa","title":"Izhikevich2003FixedNoisyAmpa","text":"

Bases: Neuron

TEMPLATE

Izhikevich (2003)-like neuron model with additional conductance based synapses for AMPA and GABA currents with noise in AMPA conductance. Fixed means, the 3 factors of the quadratic equation cannot be changed.

Parameters:

Name Type Description Default a float

Time constant of the recovery variable u.

0 b float

Sensitivity of the recovery variable u to the membrane potential v.

0 c float

After-spike reset value of the membrane potential v.

0 d float

After-spike change of the recovery variable u.

0 tau_ampa float

Time constant of the AMPA conductance.

1 tau_gaba float

Time constant of the GABA conductance.

1 E_ampa float

Reversal potential of the AMPA conductance.

0 E_gaba float

Reversal potential of the GABA conductance.

0 I_app float

External applied current.

0 increase_noise float

Increase of the Poisson distributed (equivalent to a Poisson distributed spike train as input) noise in the AMPA conductance.

0 rates_noise float

Rate of the Poisson distributed noise in the AMPA conductance.

0 Variables to record
  • g_ampa
  • g_gaba
  • v
  • u
  • r
Source code in src/CompNeuroPy/neuron_models/final_models/izhikevich_2003_like_nm.py
class Izhikevich2003FixedNoisyAmpa(Neuron):\n    \"\"\"\n    TEMPLATE\n\n    [Izhikevich (2003)](https://doi.org/10.1109/TNN.2003.820440)-like neuron model with\n    additional conductance based synapses for AMPA and GABA currents with noise in AMPA\n    conductance. Fixed means, the 3 factors of the quadratic equation cannot be changed.\n\n    Parameters:\n        a (float, optional):\n            Time constant of the recovery variable u.\n        b (float, optional):\n            Sensitivity of the recovery variable u to the membrane potential v.\n        c (float, optional):\n            After-spike reset value of the membrane potential v.\n        d (float, optional):\n            After-spike change of the recovery variable u.\n        tau_ampa (float, optional):\n            Time constant of the AMPA conductance.\n        tau_gaba (float, optional):\n            Time constant of the GABA conductance.\n        E_ampa (float, optional):\n            Reversal potential of the AMPA conductance.\n        E_gaba (float, optional):\n            Reversal potential of the GABA conductance.\n        I_app (float, optional):\n            External applied current.\n        increase_noise (float, optional):\n            Increase of the Poisson distributed (equivalent to a Poisson distributed\n            spike train as input) noise in the AMPA conductance.\n        rates_noise (float, optional):\n            Rate of the Poisson distributed noise in the AMPA conductance.\n\n    Variables to record:\n        - g_ampa\n        - g_gaba\n        - v\n        - u\n        - r\n    \"\"\"\n\n    # For reporting\n    _instantiated = []\n\n    def __init__(\n        self,\n        a: float = 0,\n        b: float = 0,\n        c: float = 0,\n        d: float = 0,\n        tau_ampa: float = 1,\n        tau_gaba: float = 1,\n        E_ampa: float = 0,\n        E_gaba: float = 0,\n        I_app: float = 0,\n        increase_noise: float = 0,\n        rates_noise: float = 0,\n    ):\n  
      # Create the arguments\n        parameters = f\"\"\"\n            a              = {a} : population\n            b              = {b} : population\n            c              = {c} : population\n            d              = {d} : population\n            tau_ampa       = {tau_ampa} : population\n            tau_gaba       = {tau_gaba} : population\n            E_ampa         = {E_ampa} : population\n            E_gaba         = {E_gaba} : population\n            I_app          = {I_app}\n            increase_noise = {increase_noise} : population\n            rates_noise    = {rates_noise}\n        \"\"\"\n\n        super().__init__(\n            parameters=parameters,\n            equations=\"\"\"\n                dg_ampa/dt = ite(Uniform(0.0, 1.0) * 1000.0 / dt > rates_noise, -g_ampa/tau_ampa, -g_ampa/tau_ampa + increase_noise/dt)\n                dg_gaba/dt = -g_gaba / tau_gaba\n                dv/dt      = 0.04 * v * v + 5 * v + 140 - u + I_app - neg(g_ampa*(v - E_ampa)) - pos(g_gaba*(v - E_gaba))\n                du/dt      = a * (b * v - u)\n            \"\"\",\n            spike=\"\"\"\n                v >= 30\n            \"\"\",\n            reset=\"\"\"\n                v = c\n                u = u + d\n            \"\"\",\n            name=\"Izhikevich2003_fixed_noisy_AMPA\",\n            description=\"\"\"\n                Standard neuron model from Izhikevich (2003) with additional\n                conductance-based synapses for AMPA and GABA currents with noise in AMPA\n                conductance.\n            \"\"\",\n        )\n\n        # For reporting\n        self._instantiated.append(True)\n
"},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.izhikevich_2003_like_nm.Izhikevich2003NoisyAmpa","title":"Izhikevich2003NoisyAmpa","text":"

Bases: Neuron

TEMPLATE

Izhikevich (2003)-like neuron model with additional conductance based synapses for AMPA and GABA currents with noise in AMPA conductance.

Parameters:

Name Type Description Default a float

Time constant of the recovery variable u.

0 b float

Sensitivity of the recovery variable u to the membrane potential v.

0 c float

After-spike reset value of the membrane potential v.

0 d float

After-spike change of the recovery variable u.

0 n2 float

Factor of the quadratic equation of the membrane potential v.

0 n1 float

Factor of the quadratic equation of the membrane potential v.

0 n0 float

Factor of the quadratic equation of the membrane potential v.

0 tau_ampa float

Time constant of the AMPA conductance.

1 tau_gaba float

Time constant of the GABA conductance.

1 E_ampa float

Reversal potential of the AMPA conductance.

0 E_gaba float

Reversal potential of the GABA conductance.

0 I_app float

External applied current.

0 increase_noise float

Increase of the Poisson distributed (equivalent to a Poisson distributed spike train as input) noise in the AMPA conductance.

0 rates_noise float

Rate of the Poisson distributed noise in the AMPA conductance.

0 Variables to record
  • g_ampa
  • g_gaba
  • v
  • u
  • r
Source code in src/CompNeuroPy/neuron_models/final_models/izhikevich_2003_like_nm.py
class Izhikevich2003NoisyAmpa(Neuron):\n    \"\"\"\n    TEMPLATE\n\n    [Izhikevich (2003)](https://doi.org/10.1109/TNN.2003.820440)-like neuron model with\n    additional conductance based synapses for AMPA and GABA currents with noise in AMPA\n    conductance.\n\n    Parameters:\n        a (float, optional):\n            Time constant of the recovery variable u.\n        b (float, optional):\n            Sensitivity of the recovery variable u to the membrane potential v.\n        c (float, optional):\n            After-spike reset value of the membrane potential v.\n        d (float, optional):\n            After-spike change of the recovery variable u.\n        n2 (float, optional):\n            Factor of the quadratic equation of the membrane potential v.\n        n1 (float, optional):\n            Factor of the quadratic equation of the membrane potential v.\n        n0 (float, optional):\n            Factor of the quadratic equation of the membrane potential v.\n        tau_ampa (float, optional):\n            Time constant of the AMPA conductance.\n        tau_gaba (float, optional):\n            Time constant of the GABA conductance.\n        E_ampa (float, optional):\n            Reversal potential of the AMPA conductance.\n        E_gaba (float, optional):\n            Reversal potential of the GABA conductance.\n        I_app (float, optional):\n            External applied current.\n        increase_noise (float, optional):\n            Increase of the Poisson distributed (equivalent to a Poisson distributed\n            spike train as input) noise in the AMPA conductance.\n        rates_noise (float, optional):\n            Rate of the Poisson distributed noise in the AMPA conductance.\n\n    Variables to record:\n        - g_ampa\n        - g_gaba\n        - v\n        - u\n        - r\n    \"\"\"\n\n    # For reporting\n    _instantiated = []\n\n    def __init__(\n        self,\n        a: float = 0,\n        b: float = 0,\n        c: float = 0,\n    
    d: float = 0,\n        n2: float = 0,\n        n1: float = 0,\n        n0: float = 0,\n        tau_ampa: float = 1,\n        tau_gaba: float = 1,\n        E_ampa: float = 0,\n        E_gaba: float = 0,\n        I_app: float = 0,\n        increase_noise: float = 0,\n        rates_noise: float = 0,\n    ):\n        # Create the arguments\n        parameters = f\"\"\"\n            a              = {a} : population\n            b              = {b} : population\n            c              = {c} : population\n            d              = {d} : population\n            n2             = {n2} : population\n            n1             = {n1} : population\n            n0             = {n0} : population\n            tau_ampa       = {tau_ampa} : population\n            tau_gaba       = {tau_gaba} : population\n            E_ampa         = {E_ampa} : population\n            E_gaba         = {E_gaba} : population\n            I_app          = {I_app}\n            increase_noise = {increase_noise} : population\n            rates_noise    = {rates_noise}\n        \"\"\"\n\n        super().__init__(\n            parameters=parameters,\n            equations=\"\"\"\n                dg_ampa/dt = ite(Uniform(0.0, 1.0) * 1000.0 / dt > rates_noise, -g_ampa/tau_ampa, -g_ampa/tau_ampa + increase_noise/dt)\n                dg_gaba/dt = -g_gaba / tau_gaba\n                dv/dt      = n2 * v * v + n1 * v + n0 - u + I_app - neg(g_ampa*(v - E_ampa)) - pos(g_gaba*(v - E_gaba))\n                du/dt      = a * (b * v - u)\n            \"\"\",\n            spike=\"\"\"\n                v >= 30\n            \"\"\",\n            reset=\"\"\"\n                v = c\n                u = u + d\n            \"\"\",\n            name=\"Izhikevich2003_noisy_AMPA\",\n            description=\"\"\"\n                Neuron model from Izhikevich (2003). 
With additional conductance based\n                synapses for AMPA and GABA currents with noise in AMPA conductance.\n            \"\"\",\n        )\n\n        # For reporting\n        self._instantiated.append(True)\n
"},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.izhikevich_2003_like_nm.Izhikevich2003NoisyAmpaNonlin","title":"Izhikevich2003NoisyAmpaNonlin","text":"

Bases: Neuron

TEMPLATE

Izhikevich (2003)-like neuron model with additional conductance based synapses for AMPA and GABA currents with noise in AMPA conductance. With nonlinear function for external current.

Parameters:

Name Type Description Default a float

Time constant of the recovery variable u.

0 b float

Sensitivity of the recovery variable u to the membrane potential v.

0 c float

After-spike reset value of the membrane potential v.

0 d float

After-spike change of the recovery variable u.

0 n2 float

Factor of the quadratic equation of the membrane potential v.

0 n1 float

Factor of the quadratic equation of the membrane potential v.

0 n0 float

Factor of the quadratic equation of the membrane potential v.

0 tau_ampa float

Time constant of the AMPA conductance.

1 tau_gaba float

Time constant of the GABA conductance.

1 E_ampa float

Reversal potential of the AMPA conductance.

0 E_gaba float

Reversal potential of the GABA conductance.

0 I_app float

External applied current.

0 increase_noise float

Increase of the Poisson distributed (equivalent to a Poisson distributed spike train as input) noise in the AMPA conductance.

0 rates_noise float

Rate of the Poisson distributed noise in the AMPA conductance.

0 nonlin float

Exponent of the nonlinear function for the external current.

1 Variables to record
  • g_ampa
  • g_gaba
  • I
  • v
  • u
  • r
Source code in src/CompNeuroPy/neuron_models/final_models/izhikevich_2003_like_nm.py
class Izhikevich2003NoisyAmpaNonlin(Neuron):\n    \"\"\"\n    TEMPLATE\n\n    [Izhikevich (2003)](https://doi.org/10.1109/TNN.2003.820440)-like neuron model with\n    additional conductance based synapses for AMPA and GABA currents with noise in AMPA\n    conductance. With nonlinear function for external current.\n\n    Parameters:\n        a (float, optional):\n            Time constant of the recovery variable u.\n        b (float, optional):\n            Sensitivity of the recovery variable u to the membrane potential v.\n        c (float, optional):\n            After-spike reset value of the membrane potential v.\n        d (float, optional):\n            After-spike change of the recovery variable u.\n        n2 (float, optional):\n            Factor of the quadratic equation of the membrane potential v.\n        n1 (float, optional):\n            Factor of the quadratic equation of the membrane potential v.\n        n0 (float, optional):\n            Factor of the quadratic equation of the membrane potential v.\n        tau_ampa (float, optional):\n            Time constant of the AMPA conductance.\n        tau_gaba (float, optional):\n            Time constant of the GABA conductance.\n        E_ampa (float, optional):\n            Reversal potential of the AMPA conductance.\n        E_gaba (float, optional):\n            Reversal potential of the GABA conductance.\n        I_app (float, optional):\n            External applied current.\n        increase_noise (float, optional):\n            Increase of the Poisson distributed (equivalent to a Poisson distributed\n            spike train as input) noise in the AMPA conductance.\n        rates_noise (float, optional):\n            Rate of the Poisson distributed noise in the AMPA conductance.\n        nonlin (float, optional):\n            Exponent of the nonlinear function for the external current.\n\n    Variables to record:\n        - g_ampa\n        - g_gaba\n        - I\n        - v\n        - u\n       
 - r\n    \"\"\"\n\n    # For reporting\n    _instantiated = []\n\n    def __init__(\n        self,\n        a: float = 0,\n        b: float = 0,\n        c: float = 0,\n        d: float = 0,\n        n2: float = 0,\n        n1: float = 0,\n        n0: float = 0,\n        tau_ampa: float = 1,\n        tau_gaba: float = 1,\n        E_ampa: float = 0,\n        E_gaba: float = 0,\n        I_app: float = 0,\n        increase_noise: float = 0,\n        rates_noise: float = 0,\n        nonlin: float = 1,\n    ):\n        # Create the arguments\n        parameters = f\"\"\"\n            a              = {a} : population\n            b              = {b} : population\n            c              = {c} : population\n            d              = {d} : population\n            n2             = {n2} : population\n            n1             = {n1} : population\n            n0             = {n0} : population\n            tau_ampa       = {tau_ampa} : population\n            tau_gaba       = {tau_gaba} : population\n            E_ampa         = {E_ampa} : population\n            E_gaba         = {E_gaba} : population\n            I_app          = {I_app}\n            increase_noise = {increase_noise} : population\n            rates_noise    = {rates_noise}\n            nonlin         = {nonlin} : population\n        \"\"\"\n\n        super().__init__(\n            parameters=parameters,\n            equations=\"\"\"\n                dg_ampa/dt = ite(Uniform(0.0, 1.0) * 1000.0 / dt > rates_noise, -g_ampa/tau_ampa, -g_ampa/tau_ampa + increase_noise/dt)\n                dg_gaba/dt = -g_gaba / tau_gaba\n                I = I_app - neg(g_ampa*(v - E_ampa)) - pos(g_gaba*(v - E_gaba))\n                dv/dt      = n2 * v * v + n1 * v + n0 - u + f(I,nonlin)\n                du/dt      = a * (b * v - u)\n            \"\"\",\n            spike=\"\"\"\n                v >= 30\n            \"\"\",\n            reset=\"\"\"\n                v = c\n                u = u + d\n            
\"\"\",\n            functions=\"\"\"\n                f(x,y)=((abs(x))**(1/y))/((x+1e-20)/(abs(x)+ 1e-20))\n            \"\"\",\n            name=\"Izhikevich2003_noisy_AMPA_nonlin\",\n            description=\"\"\"\n                Neuron model from Izhikevich (2003). With additional conductance based\n                synapses for AMPA and GABA currents with noise in AMPA conductance.\n                With nonlinear function for external current.\n            \"\"\",\n        )\n\n        # For reporting\n        self._instantiated.append(True)\n
"},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.izhikevich_2003_like_nm.Izhikevich2003NoisyAmpaOscillating","title":"Izhikevich2003NoisyAmpaOscillating","text":"

Bases: Neuron

TEMPLATE

Izhikevich (2003)-like neuron model with additional conductance based synapses for AMPA and GABA currents with noise in AMPA conductance. With additional oscillation term.

Parameters:

Name Type Description Default a float

Time constant of the recovery variable u.

0 b float

Sensitivity of the recovery variable u to the membrane potential v.

0 c float

After-spike reset value of the membrane potential v.

0 d float

After-spike change of the recovery variable u.

0 n2 float

Factor of the quadratic equation of the membrane potential v.

0 n1 float

Factor of the quadratic equation of the membrane potential v.

0 n0 float

Factor of the quadratic equation of the membrane potential v.

0 tau_ampa float

Time constant of the AMPA conductance.

1 tau_gaba float

Time constant of the GABA conductance.

1 E_ampa float

Reversal potential of the AMPA conductance.

0 E_gaba float

Reversal potential of the GABA conductance.

0 I_app float

External applied current.

0 increase_noise float

Increase of the Poisson distributed (equivalent to a Poisson distributed spike train as input) noise in the AMPA conductance.

0 rates_noise float

Rate of the Poisson distributed noise in the AMPA conductance.

0 freq float

Frequency of the oscillation term.

0 amp float

Amplitude of the oscillation term.

6 Variables to record
  • osc
  • g_ampa
  • g_gaba
  • v
  • u
  • r
Source code in src/CompNeuroPy/neuron_models/final_models/izhikevich_2003_like_nm.py
class Izhikevich2003NoisyAmpaOscillating(Neuron):\n    \"\"\"\n    TEMPLATE\n\n    [Izhikevich (2003)](https://doi.org/10.1109/TNN.2003.820440)-like neuron model with\n    additional conductance based synapses for AMPA and GABA currents with noise in AMPA\n    conductance. With additional oscillation term.\n\n    Parameters:\n        a (float, optional):\n            Time constant of the recovery variable u.\n        b (float, optional):\n            Sensitivity of the recovery variable u to the membrane potential v.\n        c (float, optional):\n            After-spike reset value of the membrane potential v.\n        d (float, optional):\n            After-spike change of the recovery variable u.\n        n2 (float, optional):\n            Factor of the quadratic equation of the membrane potential v.\n        n1 (float, optional):\n            Factor of the quadratic equation of the membrane potential v.\n        n0 (float, optional):\n            Factor of the quadratic equation of the membrane potential v.\n        tau_ampa (float, optional):\n            Time constant of the AMPA conductance.\n        tau_gaba (float, optional):\n            Time constant of the GABA conductance.\n        E_ampa (float, optional):\n            Reversal potential of the AMPA conductance.\n        E_gaba (float, optional):\n            Reversal potential of the GABA conductance.\n        I_app (float, optional):\n            External applied current.\n        increase_noise (float, optional):\n            Increase of the Poisson distributed (equivalent to a Poisson distributed\n            spike train as input) noise in the AMPA conductance.\n        rates_noise (float, optional):\n            Rate of the Poisson distributed noise in the AMPA conductance.\n        freq (float, optional):\n            Frequency of the oscillation term.\n        amp (float, optional):\n            Amplitude of the oscillation term.\n\n    Variables to record:\n        - osc\n        - g_ampa\n    
    - g_gaba\n        - v\n        - u\n        - r\n    \"\"\"\n\n    # For reporting\n    _instantiated = []\n\n    def __init__(\n        self,\n        a: float = 0,\n        b: float = 0,\n        c: float = 0,\n        d: float = 0,\n        n2: float = 0,\n        n1: float = 0,\n        n0: float = 0,\n        tau_ampa: float = 1,\n        tau_gaba: float = 1,\n        E_ampa: float = 0,\n        E_gaba: float = 0,\n        I_app: float = 0,\n        increase_noise: float = 0,\n        rates_noise: float = 0,\n        freq: float = 0,\n        amp: float = 6,\n    ):\n        # Create the arguments\n        parameters = f\"\"\"\n            a              = {a} : population\n            b              = {b} : population\n            c              = {c} : population\n            d              = {d} : population\n            n2             = {n2} : population\n            n1             = {n1} : population\n            n0             = {n0} : population\n            tau_ampa       = {tau_ampa} : population\n            tau_gaba       = {tau_gaba} : population\n            E_ampa         = {E_ampa} : population\n            E_gaba         = {E_gaba} : population\n            I_app          = {I_app}\n            increase_noise = {increase_noise} : population\n            rates_noise    = {rates_noise}\n            freq           = {freq}\n            amp            = {amp}\n        \"\"\"\n\n        super().__init__(\n            parameters=parameters,\n            equations=\"\"\"\n                osc        = amp * sin(t * 2 * pi * (freq / 1000))\n                dg_ampa/dt = ite(Uniform(0.0, 1.0) * 1000.0 / dt > rates_noise, -g_ampa/tau_ampa, -g_ampa/tau_ampa + increase_noise/dt)\n                dg_gaba/dt = -g_gaba / tau_gaba\n                dv/dt      = n2 * v * v + n1 * v + n0 - u + I_app - neg(g_ampa*(v - E_ampa)) - pos(g_gaba*(v - E_gaba)) + osc\n                du/dt      = a * (b * v - u)\n            \"\"\",\n            spike=\"\"\"\n           
     v >= 30\n            \"\"\",\n            reset=\"\"\"\n                v = c\n                u = u + d\n            \"\"\",\n            name=\"Izhikevich2003_noisy_AMPA_oscillating\",\n            description=\"\"\"\n                Neuron model from Izhikevich (2003). With additional conductance based\n                synapses for AMPA and GABA currents with noise in AMPA conductance.\n                With additional oscillation term.\n            \"\"\",\n        )\n\n        # For reporting\n        self._instantiated.append(True)\n
"},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.izhikevich_2003_like_nm.Izhikevich2003NoisyBase","title":"Izhikevich2003NoisyBase","text":"

Bases: Neuron

TEMPLATE

Izhikevich (2003)-like neuron model with additional conductance based synapses for AMPA and GABA currents and a noisy baseline current.

Parameters:

Name Type Description Default a float

Time constant of the recovery variable u.

0 b float

Sensitivity of the recovery variable u to the membrane potential v.

0 c float

After-spike reset value of the membrane potential v.

0 d float

After-spike change of the recovery variable u.

0 n2 float

Factor of the quadratic equation of the membrane potential v.

0 n1 float

Factor of the quadratic equation of the membrane potential v.

0 n0 float

Factor of the quadratic equation of the membrane potential v.

0 tau_ampa float

Time constant of the AMPA conductance.

1 tau_gaba float

Time constant of the GABA conductance.

1 E_ampa float

Reversal potential of the AMPA conductance.

0 E_gaba float

Reversal potential of the GABA conductance.

0 I_app float

External applied current.

0 base_mean float

Mean of the baseline current.

0 base_noise float

Standard deviation of the baseline current.

0 rate_base_noise float

Rate of the Poisson distributed noise in the baseline current, i.e. how often the baseline current is changed randomly.

0 Variables to record
  • g_ampa
  • g_gaba
  • offset_base
  • I_base
  • I
  • v
  • u
  • r
Source code in src/CompNeuroPy/neuron_models/final_models/izhikevich_2003_like_nm.py
class Izhikevich2003NoisyBase(Neuron):\n    \"\"\"\n    TEMPLATE\n\n    [Izhikevich (2003)](https://doi.org/10.1109/TNN.2003.820440)-like neuron model with\n    additional conductance based synapses for AMPA and GABA currents and a noisy baseline\n    current.\n\n    Parameters:\n        a (float, optional):\n            Time constant of the recovery variable u.\n        b (float, optional):\n            Sensitivity of the recovery variable u to the membrane potential v.\n        c (float, optional):\n            After-spike reset value of the membrane potential v.\n        d (float, optional):\n            After-spike change of the recovery variable u.\n        n2 (float, optional):\n            Factor of the quadratic equation of the membrane potential v.\n        n1 (float, optional):\n            Factor of the quadratic equation of the membrane potential v.\n        n0 (float, optional):\n            Factor of the quadratic equation of the membrane potential v.\n        tau_ampa (float, optional):\n            Time constant of the AMPA conductance.\n        tau_gaba (float, optional):\n            Time constant of the GABA conductance.\n        E_ampa (float, optional):\n            Reversal potential of the AMPA conductance.\n        E_gaba (float, optional):\n            Reversal potential of the GABA conductance.\n        I_app (float, optional):\n            External applied current.\n        base_mean (float, optional):\n            Mean of the baseline current.\n        base_noise (float, optional):\n            Standard deviation of the baseline current.\n        rate_base_noise (float, optional):\n            Rate of the Poisson distributed noise in the baseline current, i.e. 
how\n            often the baseline current is changed randomly.\n\n    Variables to record:\n        - g_ampa\n        - g_gaba\n        - offset_base\n        - I_base\n        - I\n        - v\n        - u\n        - r\n    \"\"\"\n\n    # For reporting\n    _instantiated = []\n\n    def __init__(\n        self,\n        a: float = 0,\n        b: float = 0,\n        c: float = 0,\n        d: float = 0,\n        n2: float = 0,\n        n1: float = 0,\n        n0: float = 0,\n        tau_ampa: float = 1,\n        tau_gaba: float = 1,\n        E_ampa: float = 0,\n        E_gaba: float = 0,\n        I_app: float = 0,\n        base_mean: float = 0,\n        base_noise: float = 0,\n        rate_base_noise: float = 0,\n    ):\n        # Create the arguments\n        parameters = f\"\"\"\n            a               = {a} : population\n            b               = {b} : population\n            c               = {c} : population\n            d               = {d} : population\n            n2              = {n2} : population\n            n1              = {n1} : population\n            n0              = {n0} : population\n            tau_ampa        = {tau_ampa} : population\n            tau_gaba        = {tau_gaba} : population\n            E_ampa          = {E_ampa} : population\n            E_gaba          = {E_gaba} : population\n            I_app           = {I_app}\n            base_mean       = {base_mean}\n            base_noise      = {base_noise}\n            rate_base_noise = {rate_base_noise}\n        \"\"\"\n\n        super().__init__(\n            parameters=parameters,\n            equations=\"\"\"\n                dg_ampa/dt  = -g_ampa/tau_ampa\n                dg_gaba/dt  = -g_gaba / tau_gaba\n                offset_base = ite(Uniform(0.0, 1.0) * 1000.0 / dt > rate_base_noise, offset_base, Normal(0, 1) * base_noise)\n                I_base      = base_mean + offset_base\n                I           = I_app - neg(g_ampa*(v - E_ampa)) - pos(g_gaba*(v - 
E_gaba)) + I_base\n                dv/dt       = n2 * v * v + n1 * v + n0 - u + I\n                du/dt       = a * (b * v - u)\n            \"\"\",\n            spike=\"\"\"\n                v >= 30\n            \"\"\",\n            reset=\"\"\"\n                v = c\n                u = u + d\n            \"\"\",\n            name=\"Izhikevich2003_noisy_I\",\n            description=\"\"\"\n                Neuron model from Izhikevich (2003). With additional conductance based\n                synapses for AMPA and GABA currents and a noisy baseline current.\n            \"\"\",\n        )\n\n        # For reporting\n        self._instantiated.append(True)\n
"},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.izhikevich_2003_like_nm.Izhikevich2003NoisyBaseNonlin","title":"Izhikevich2003NoisyBaseNonlin","text":"

Bases: Neuron

TEMPLATE

Izhikevich (2003)-like neuron model with additional conductance based synapses for AMPA and GABA currents and a noisy baseline current. With nonlinear function for external current.

Parameters:

Name Type Description Default a float

Time constant of the recovery variable u.

0 b float

Sensitivity of the recovery variable u to the membrane potential v.

0 c float

After-spike reset value of the membrane potential v.

0 d float

After-spike change of the recovery variable u.

0 n2 float

Factor of the quadratic equation of the membrane potential v.

0 n1 float

Factor of the quadratic equation of the membrane potential v.

0 n0 float

Factor of the quadratic equation of the membrane potential v.

0 tau_ampa float

Time constant of the AMPA conductance.

1 tau_gaba float

Time constant of the GABA conductance.

1 E_ampa float

Reversal potential of the AMPA conductance.

0 E_gaba float

Reversal potential of the GABA conductance.

0 I_app float

External applied current.

0 base_mean float

Mean of the baseline current.

0 base_noise float

Standard deviation of the baseline current.

0 rate_base_noise float

Rate of the Poisson distributed noise in the baseline current, i.e. how often the baseline current is changed randomly.

0 nonlin float

Exponent of the nonlinear function for the external current.

1 Variables to record
  • g_ampa
  • g_gaba
  • offset_base
  • I_base
  • I
  • v
  • u
  • r
Source code in src/CompNeuroPy/neuron_models/final_models/izhikevich_2003_like_nm.py
class Izhikevich2003NoisyBaseNonlin(Neuron):\n    \"\"\"\n    TEMPLATE\n\n    [Izhikevich (2003)](https://doi.org/10.1109/TNN.2003.820440)-like neuron model with\n    additional conductance based synapses for AMPA and GABA currents and a noisy baseline\n    current. With nonlinear function for external current.\n\n    Parameters:\n        a (float, optional):\n            Time constant of the recovery variable u.\n        b (float, optional):\n            Sensitivity of the recovery variable u to the membrane potential v.\n        c (float, optional):\n            After-spike reset value of the membrane potential v.\n        d (float, optional):\n            After-spike change of the recovery variable u.\n        n2 (float, optional):\n            Factor of the quadratic equation of the membrane potential v.\n        n1 (float, optional):\n            Factor of the quadratic equation of the membrane potential v.\n        n0 (float, optional):\n            Factor of the quadratic equation of the membrane potential v.\n        tau_ampa (float, optional):\n            Time constant of the AMPA conductance.\n        tau_gaba (float, optional):\n            Time constant of the GABA conductance.\n        E_ampa (float, optional):\n            Reversal potential of the AMPA conductance.\n        E_gaba (float, optional):\n            Reversal potential of the GABA conductance.\n        I_app (float, optional):\n            External applied current.\n        base_mean (float, optional):\n            Mean of the baseline current.\n        base_noise (float, optional):\n            Standard deviation of the baseline current.\n        rate_base_noise (float, optional):\n            Rate of the Poisson distributed noise in the baseline current, i.e. 
how\n            often the baseline current is changed randomly.\n        nonlin (float, optional):\n            Exponent of the nonlinear function for the external current.\n\n    Variables to record:\n        - g_ampa\n        - g_gaba\n        - offset_base\n        - I_base\n        - I\n        - v\n        - u\n        - r\n    \"\"\"\n\n    # For reporting\n    _instantiated = []\n\n    def __init__(\n        self,\n        a: float = 0,\n        b: float = 0,\n        c: float = 0,\n        d: float = 0,\n        n2: float = 0,\n        n1: float = 0,\n        n0: float = 0,\n        tau_ampa: float = 1,\n        tau_gaba: float = 1,\n        E_ampa: float = 0,\n        E_gaba: float = 0,\n        I_app: float = 0,\n        base_mean: float = 0,\n        base_noise: float = 0,\n        rate_base_noise: float = 0,\n        nonlin: float = 1,\n    ):\n        # Create the arguments\n        parameters = f\"\"\"\n            a               = {a} : population\n            b               = {b} : population\n            c               = {c} : population\n            d               = {d} : population\n            n2              = {n2} : population\n            n1              = {n1} : population\n            n0              = {n0} : population\n            tau_ampa        = {tau_ampa} : population\n            tau_gaba        = {tau_gaba} : population\n            E_ampa          = {E_ampa} : population\n            E_gaba          = {E_gaba} : population\n            I_app           = {I_app}\n            base_mean       = {base_mean}\n            base_noise      = {base_noise}\n            rate_base_noise = {rate_base_noise}\n            nonlin          = {nonlin} : population\n        \"\"\"\n\n        super().__init__(\n            parameters=parameters,\n            equations=\"\"\"\n                dg_ampa/dt  = -g_ampa/tau_ampa\n                dg_gaba/dt  = -g_gaba / tau_gaba\n                offset_base = ite(Uniform(0.0, 1.0) * 1000.0 / dt > 
rate_base_noise, offset_base, Normal(0, 1) * base_noise)\n                I_base      = base_mean + offset_base\n                I           = I_app - neg(g_ampa*(v - E_ampa)) - pos(g_gaba*(v - E_gaba))\n                dv/dt       = n2 * v * v + n1 * v + n0 - u + f(I,nonlin) + I_base\n                du/dt       = a * (b * v - u)\n            \"\"\",\n            spike=\"\"\"\n                v >= 30\n            \"\"\",\n            reset=\"\"\"\n                v = c\n                u = u + d\n            \"\"\",\n            functions=\"\"\"\n                f(x,y)=((abs(x))**(1/y))/((x+1e-20)/(abs(x)+ 1e-20))\n            \"\"\",\n            name=\"Izhikevich2003_noisy_I_nonlin\",\n            description=\"\"\"\n                Neuron model from Izhikevich (2003). With additional conductance based\n                synapses for AMPA and GABA currents and a noisy baseline current.\n                With nonlinear function for external current.\n            \"\"\",\n        )\n\n        # For reporting\n        self._instantiated.append(True)\n
"},{"location":"built_in/neuron_models/#izhikevich-2007-like-neurons","title":"Izhikevich (2007)-like Neurons","text":""},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.izhikevich_2007_like_nm.Izhikevich2007","title":"Izhikevich2007","text":"

Bases: Neuron

TEMPLATE

Izhikevich (2007)-like neuron model.

Parameters:

Name Type Description Default C float

Membrane capacitance.

100.0 k float

Scaling factor for the quadratic term in the membrane potential.

0.7 v_r float

Resting membrane potential.

-60.0 v_t float

Instantaneous activation threshold potential.

-40.0 a float

Time scale of the recovery variable u.

0.03 b float

Sensitivity of the recovery variable u to the the membrane potential v.

-2.0 c float

After-spike reset value of the membrane potential.

-50.0 d float

After-spike change of the recovery variable u.

100.0 v_peak float

Spike cut-off value for the membrane potential.

35.0 I_app float

External applied input current.

0.0 Variables to record
  • I_v
  • v
  • u
  • r
Source code in src/CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py
class Izhikevich2007(Neuron):\n    \"\"\"\n    TEMPLATE\n\n    [Izhikevich (2007)](https://isbnsearch.org/isbn/9780262090438)-like neuron model.\n\n    Parameters:\n        C (float, optional):\n            Membrane capacitance.\n        k (float, optional):\n            Scaling factor for the quadratic term in the membrane potential.\n        v_r (float, optional):\n            Resting membrane potential.\n        v_t (float, optional):\n            Instantaneous activation threshold potential.\n        a (float, optional):\n            Time scale of the recovery variable u.\n        b (float, optional):\n            Sensitivity of the recovery variable u to the the membrane potential v.\n        c (float, optional):\n            After-spike reset value of the membrane potential.\n        d (float, optional):\n            After-spike change of the recovery variable u.\n        v_peak (float, optional):\n            Spike cut-off value for the membrane potential.\n        I_app (float, optional):\n            External applied input current.\n\n    Variables to record:\n        - I_v\n        - v\n        - u\n        - r\n    \"\"\"\n\n    # For reporting\n    _instantiated = []\n\n    def __init__(\n        self,\n        C: float = 100.0,\n        k: float = 0.7,\n        v_r: float = -60.0,\n        v_t: float = -40.0,\n        a: float = 0.03,\n        b: float = -2.0,\n        c: float = -50.0,\n        d: float = 100.0,\n        v_peak: float = 35.0,\n        I_app: float = 0.0,\n    ):\n        # Create the arguments\n        parameters = f\"\"\"\n            C      = {C} : population # pF\n            k      = {k} : population # pS * mV**-1\n            v_r    = {v_r} : population # mV\n            v_t    = {v_t} : population # mV\n            a      = {a} : population # ms**-1\n            b      = {b} : population # nS\n            c      = {c} : population # mV\n            d      = {d} : population # pA\n            v_peak = {v_peak} : population # mV\n 
           I_app  = {I_app} # pA\n        \"\"\"\n\n        super().__init__(\n            parameters=parameters,\n            equations=_get_equation_izhikevich_2007(),\n            spike=\"v >= v_peak\",\n            reset=\"\"\"\n                v = c\n                u = u + d\n            \"\"\",\n            name=\"Izhikevich2007\",\n            description=\"Neuron model equations from Izhikevich (2007).\",\n        )\n\n        # For reporting\n        self._instantiated.append(True)\n
"},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.izhikevich_2007_like_nm.Izhikevich2007RecCur","title":"Izhikevich2007RecCur","text":"

Bases: Neuron

TEMPLATE

Izhikevich (2007)-like neuron model with separate currents to record.

Parameters:

Name Type Description Default C float

Membrane capacitance.

100.0 k float

Scaling factor for the quadratic term in the membrane potential.

0.7 v_r float

Resting membrane potential.

-60.0 v_t float

Instantaneous activation threshold potential.

-40.0 a float

Time scale of the recovery variable u.

0.03 b float

Sensitivity of the recovery variable u to the the membrane potential v.

-2.0 c float

After-spike reset value of the membrane potential.

-50.0 d float

After-spike change of the recovery variable u.

100.0 v_peak float

Spike cut-off value for the membrane potential.

35.0 I_app float

External applied input current.

0.0 Variables to record
  • I_v
  • v
  • u
  • I_u
  • I_k
  • I_a
  • r
Source code in src/CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py
class Izhikevich2007RecCur(Neuron):\n    \"\"\"\n    TEMPLATE\n\n    [Izhikevich (2007)](https://isbnsearch.org/isbn/9780262090438)-like neuron model\n    with separate currents to record.\n\n    Parameters:\n        C (float, optional):\n            Membrane capacitance.\n        k (float, optional):\n            Scaling factor for the quadratic term in the membrane potential.\n        v_r (float, optional):\n            Resting membrane potential.\n        v_t (float, optional):\n            Instantaneous activation threshold potential.\n        a (float, optional):\n            Time scale of the recovery variable u.\n        b (float, optional):\n            Sensitivity of the recovery variable u to the the membrane potential v.\n        c (float, optional):\n            After-spike reset value of the membrane potential.\n        d (float, optional):\n            After-spike change of the recovery variable u.\n        v_peak (float, optional):\n            Spike cut-off value for the membrane potential.\n        I_app (float, optional):\n            External applied input current.\n\n    Variables to record:\n        - I_v\n        - v\n        - u\n        - I_u\n        - I_k\n        - I_a\n        - r\n    \"\"\"\n\n    # For reporting\n    _instantiated = []\n\n    def __init__(\n        self,\n        C: float = 100.0,\n        k: float = 0.7,\n        v_r: float = -60.0,\n        v_t: float = -40.0,\n        a: float = 0.03,\n        b: float = -2.0,\n        c: float = -50.0,\n        d: float = 100.0,\n        v_peak: float = 35.0,\n        I_app: float = 0.0,\n    ):\n        # Create the arguments\n        parameters = f\"\"\"\n            C      = {C} : population # pF\n            k      = {k} : population # pS * mV**-1\n            v_r    = {v_r} : population # mV\n            v_t    = {v_t} : population # mV\n            a      = {a} : population # ms**-1\n            b      = {b} : population # nS\n            c      = {c} : population # mV\n     
       d      = {d} : population # pA\n            v_peak = {v_peak} : population # mV\n            I_app  = {I_app} # pA\n        \"\"\"\n\n        affix = \"\"\"\n            I_u = -u\n            I_k = k*(v - v_r)*(v - v_t)\n            I_a = I_app\n        \"\"\"\n\n        super().__init__(\n            parameters=parameters,\n            equations=_get_equation_izhikevich_2007(affix=affix),\n            spike=\"v >= v_peak\",\n            reset=\"\"\"\n                v = c\n                u = u + d\n            \"\"\",\n            name=\"Izhikevich2007_record_currents\",\n            description=\"\"\"\n                Neuron model equations from Izhikevich (2007) with separate\n                currents to record.\n            \"\"\",\n        )\n\n        # For reporting\n        self._instantiated.append(True)\n
"},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.izhikevich_2007_like_nm.Izhikevich2007VoltageClamp","title":"Izhikevich2007VoltageClamp","text":"

Bases: Neuron

TEMPLATE

Izhikevich (2007)-like neuron model with voltage clamp to record I_inf.

Parameters:

Name Type Description Default C float

Membrane capacitance.

100.0 k float

Scaling factor for the quadratic term in the membrane potential.

0.7 v_r float

Resting membrane potential.

-60.0 v_t float

Instantaneous activation threshold potential.

-40.0 a float

Time scale of the recovery variable u.

0.03 b float

Sensitivity of the recovery variable u to the the membrane potential v.

-2.0 c float

After-spike reset value of the membrane potential.

-50.0 d float

After-spike change of the recovery variable u.

100.0 v_peak float

Spike cut-off value for the membrane potential.

35.0 I_app float

External applied input current.

0.0 Variables to record
  • I_v
  • v
  • u
  • I_inf
  • r
Source code in src/CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py
class Izhikevich2007VoltageClamp(Neuron):\n    \"\"\"\n    TEMPLATE\n\n    [Izhikevich (2007)](https://isbnsearch.org/isbn/9780262090438)-like neuron model\n    with voltage clamp to record I_inf.\n\n    Parameters:\n        C (float, optional):\n            Membrane capacitance.\n        k (float, optional):\n            Scaling factor for the quadratic term in the membrane potential.\n        v_r (float, optional):\n            Resting membrane potential.\n        v_t (float, optional):\n            Instantaneous activation threshold potential.\n        a (float, optional):\n            Time scale of the recovery variable u.\n        b (float, optional):\n            Sensitivity of the recovery variable u to the the membrane potential v.\n        c (float, optional):\n            After-spike reset value of the membrane potential.\n        d (float, optional):\n            After-spike change of the recovery variable u.\n        v_peak (float, optional):\n            Spike cut-off value for the membrane potential.\n        I_app (float, optional):\n            External applied input current.\n\n    Variables to record:\n        - I_v\n        - v\n        - u\n        - I_inf\n        - r\n    \"\"\"\n\n    # For reporting\n    _instantiated = []\n\n    def __init__(\n        self,\n        C: float = 100.0,\n        k: float = 0.7,\n        v_r: float = -60.0,\n        v_t: float = -40.0,\n        a: float = 0.03,\n        b: float = -2.0,\n        c: float = -50.0,\n        d: float = 100.0,\n        v_peak: float = 35.0,\n        I_app: float = 0.0,\n    ):\n        # Create the arguments\n        parameters = f\"\"\"\n            C      = {C} : population # pF\n            k      = {k} : population # pS * mV**-1\n            v_r    = {v_r} : population # mV\n            v_t    = {v_t} : population # mV\n            a      = {a} : population # ms**-1\n            b      = {b} : population # nS\n            c      = {c} : population # mV\n            d      = {d} 
: population # pA\n            v_peak = {v_peak} : population # mV\n            I_app  = {I_app} # pA\n        \"\"\"\n\n        dv = \"0\"\n        affix = f\"I_inf = {_dv_default}\"\n\n        super().__init__(\n            parameters=parameters,\n            equations=_get_equation_izhikevich_2007(dv=dv, affix=affix),\n            spike=\"v >= v_peak\",\n            reset=\"\"\"\n                v = c\n                u = u + d\n            \"\"\",\n            name=\"Izhikevich2007_voltage_clamp\",\n            description=\"\"\"\n                Neuron model equations from Izhikevich (2007) with voltage clamp\n                to record I_inf.\n            \"\"\",\n        )\n\n        # For reporting\n        self._instantiated.append(True)\n
"},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.izhikevich_2007_like_nm.Izhikevich2007Syn","title":"Izhikevich2007Syn","text":"

Bases: Neuron

TEMPLATE

Izhikevich (2007)-like neuron model with conductance-based synapses.

Parameters:

Name Type Description Default C float

Membrane capacitance.

100.0 k float

Scaling factor for the quadratic term in the membrane potential.

0.7 v_r float

Resting membrane potential.

-60.0 v_t float

Instantaneous activation threshold potential.

-40.0 a float

Time scale of the recovery variable u.

0.03 b float

Sensitivity of the recovery variable u to the the membrane potential v.

-2.0 c float

After-spike reset value of the membrane potential.

-50.0 d float

After-spike change of the recovery variable u.

100.0 v_peak float

Spike cut-off value for the membrane potential.

35.0 I_app float

External applied input current.

0.0 tau_ampa float

Time constant of the AMPA synapse.

10.0 tau_gaba float

Time constant of the GABA synapse.

10.0 E_ampa float

Reversal potential of the AMPA synapse.

0.0 E_gaba float

Reversal potential of the GABA synapse.

-90.0 Variables to record
  • g_ampa
  • g_gaba
  • I_v
  • v
  • u
  • r
Source code in src/CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py
class Izhikevich2007Syn(Neuron):\n    \"\"\"\n    TEMPLATE\n\n    [Izhikevich (2007)](https://isbnsearch.org/isbn/9780262090438)-like neuron model\n    with conductance-based synapses.\n\n    Parameters:\n        C (float, optional):\n            Membrane capacitance.\n        k (float, optional):\n            Scaling factor for the quadratic term in the membrane potential.\n        v_r (float, optional):\n            Resting membrane potential.\n        v_t (float, optional):\n            Instantaneous activation threshold potential.\n        a (float, optional):\n            Time scale of the recovery variable u.\n        b (float, optional):\n            Sensitivity of the recovery variable u to the the membrane potential v.\n        c (float, optional):\n            After-spike reset value of the membrane potential.\n        d (float, optional):\n            After-spike change of the recovery variable u.\n        v_peak (float, optional):\n            Spike cut-off value for the membrane potential.\n        I_app (float, optional):\n            External applied input current.\n        tau_ampa (float, optional):\n            Time constant of the AMPA synapse.\n        tau_gaba (float, optional):\n            Time constant of the GABA synapse.\n        E_ampa (float, optional):\n            Reversal potential of the AMPA synapse.\n        E_gaba (float, optional):\n            Reversal potential of the GABA synapse.\n\n    Variables to record:\n        - g_ampa\n        - g_gaba\n        - I_v\n        - v\n        - u\n        - r\n    \"\"\"\n\n    # For reporting\n    _instantiated = []\n\n    def __init__(\n        self,\n        C: float = 100.0,\n        k: float = 0.7,\n        v_r: float = -60.0,\n        v_t: float = -40.0,\n        a: float = 0.03,\n        b: float = -2.0,\n        c: float = -50.0,\n        d: float = 100.0,\n        v_peak: float = 35.0,\n        I_app: float = 0.0,\n        tau_ampa: float = 10.0,\n        tau_gaba: float = 10.0,\n 
       E_ampa: float = 0.0,\n        E_gaba: float = -90.0,\n    ):\n        # Create the arguments\n        parameters = f\"\"\"\n            C      = {C} : population # pF\n            k      = {k} : population # pS\n            v_r    = {v_r} : population\n            v_t    = {v_t} : population\n            a      = {a} : population\n            b      = {b} : population\n            c      = {c} : population\n            d      = {d} : population\n            v_peak = {v_peak} : population\n            I_app  = {I_app} # pA\n            tau_ampa = {tau_ampa} : population\n            tau_gaba = {tau_gaba} : population\n            E_ampa   = {E_ampa} : population\n            E_gaba   = {E_gaba} : population\n        \"\"\"\n\n        syn = _syn_default\n        i_v = f\"I_app {_I_syn}\"\n\n        super().__init__(\n            parameters=parameters,\n            equations=_get_equation_izhikevich_2007(syn=syn, i_v=i_v),\n            spike=\"v >= v_peak\",\n            reset=\"\"\"\n                v = c\n                u = u + d\n            \"\"\",\n            name=\"Izhikevich2007_syn\",\n            description=\"\"\"\n                Neuron model equations from Izhikevich (2007) with conductance-based\n                AMPA and GABA synapses/currents.\n            \"\"\",\n        )\n\n        # For reporting\n        self._instantiated.append(True)\n
"},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.izhikevich_2007_like_nm.Izhikevich2007NoisyAmpa","title":"Izhikevich2007NoisyAmpa","text":"

Bases: Neuron

TEMPLATE

Izhikevich (2007)-like neuron model with conductance-based AMPA and GABA synapses with noise in the AMPA conductance.

Parameters:

Name Type Description Default C float

Membrane capacitance.

100.0 k float

Scaling factor for the quadratic term in the membrane potential.

0.7 v_r float

Resting membrane potential.

-60.0 v_t float

Instantaneous activation threshold potential.

-40.0 a float

Time scale of the recovery variable u.

0.03 b float

Sensitivity of the recovery variable u to the the membrane potential v.

-2.0 c float

After-spike reset value of the membrane potential.

-50.0 d float

After-spike change of the recovery variable u.

100.0 v_peak float

Spike cut-off value for the membrane potential.

35.0 I_app float

External applied input current.

0.0 tau_ampa float

Time constant of the AMPA synapse.

10.0 tau_gaba float

Time constant of the GABA synapse.

10.0 E_ampa float

Reversal potential of the AMPA synapse.

0.0 E_gaba float

Reversal potential of the GABA synapse.

-90.0 increase_noise float

Increase of AMPA conductance due to noise (equal to a Poisson distributed spike train as input).

0.0 rates_noise float

Rate of the noise in the AMPA conductance.

0.0 Variables to record
  • g_ampa
  • g_gaba
  • I_v
  • v
  • u
  • r
Source code in src/CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py
class Izhikevich2007NoisyAmpa(Neuron):\n    \"\"\"\n    TEMPLATE\n\n    [Izhikevich (2007)](https://isbnsearch.org/isbn/9780262090438)-like neuron model\n    with conductance-based AMPA and GABA synapses with noise in the AMPA conductance.\n\n    Parameters:\n        C (float, optional):\n            Membrane capacitance.\n        k (float, optional):\n            Scaling factor for the quadratic term in the membrane potential.\n        v_r (float, optional):\n            Resting membrane potential.\n        v_t (float, optional):\n            Instantaneous activation threshold potential.\n        a (float, optional):\n            Time scale of the recovery variable u.\n        b (float, optional):\n            Sensitivity of the recovery variable u to the the membrane potential v.\n        c (float, optional):\n            After-spike reset value of the membrane potential.\n        d (float, optional):\n            After-spike change of the recovery variable u.\n        v_peak (float, optional):\n            Spike cut-off value for the membrane potential.\n        I_app (float, optional):\n            External applied input current.\n        tau_ampa (float, optional):\n            Time constant of the AMPA synapse.\n        tau_gaba (float, optional):\n            Time constant of the GABA synapse.\n        E_ampa (float, optional):\n            Reversal potential of the AMPA synapse.\n        E_gaba (float, optional):\n            Reversal potential of the GABA synapse.\n        increase_noise (float, optional):\n            Increase of AMPA conductance due to noise (equal to a Poisson distributed\n            spike train as input).\n        rates_noise (float, optional):\n            Rate of the noise in the AMPA conductance.\n\n    Variables to record:\n        - g_ampa\n        - g_gaba\n        - I_v\n        - v\n        - u\n        - r\n    \"\"\"\n\n    # For reporting\n    _instantiated = []\n\n    def __init__(\n        self,\n        C: float = 
100.0,\n        k: float = 0.7,\n        v_r: float = -60.0,\n        v_t: float = -40.0,\n        a: float = 0.03,\n        b: float = -2.0,\n        c: float = -50.0,\n        d: float = 100.0,\n        v_peak: float = 35.0,\n        I_app: float = 0.0,\n        tau_ampa: float = 10.0,\n        tau_gaba: float = 10.0,\n        E_ampa: float = 0.0,\n        E_gaba: float = -90.0,\n        increase_noise: float = 0.0,\n        rates_noise: float = 0.0,\n    ):\n        # Create the arguments\n        parameters = f\"\"\"\n            C              = {C} : population\n            k              = {k} : population\n            v_r            = {v_r} : population\n            v_t            = {v_t} : population\n            a              = {a} : population\n            b              = {b} : population\n            c              = {c} : population\n            d              = {d} : population\n            v_peak         = {v_peak} : population\n            tau_ampa       = {tau_ampa} : population\n            tau_gaba       = {tau_gaba} : population\n            E_ampa         = {E_ampa} : population\n            E_gaba         = {E_gaba} : population\n            I_app          = {I_app} # pA\n            increase_noise = {increase_noise} : population\n            rates_noise    = {rates_noise}\n        \"\"\"\n\n        syn = _syn_noisy\n        i_v = f\"I_app {_I_syn}\"\n\n        super().__init__(\n            parameters=parameters,\n            equations=_get_equation_izhikevich_2007(syn=syn, i_v=i_v),\n            spike=\"v >= v_peak\",\n            reset=\"\"\"\n                v = c\n                u = u + d\n            \"\"\",\n            name=\"Izhikevich2007_noisy_AMPA\",\n            description=\"\"\"\n                Standard neuron model from Izhikevich (2007) with additional\n                conductance based synapses for AMPA and GABA currents with noise\n                in AMPA conductance.\n            \"\"\",\n        )\n\n        # For 
reporting\n        self._instantiated.append(True)\n
"},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.izhikevich_2007_like_nm.Izhikevich2007NoisyBase","title":"Izhikevich2007NoisyBase","text":"

Bases: Neuron

TEMPLATE

Izhikevich (2007)-like neuron model with conductance-based AMPA and GABA synapses with noise in the baseline current.

Parameters:

Name Type Description Default C float

Membrane capacitance.

100.0 k float

Scaling factor for the quadratic term in the membrane potential.

0.7 v_r float

Resting membrane potential.

-60.0 v_t float

Instantaneous activation threshold potential.

-40.0 a float

Time scale of the recovery variable u.

0.03 b float

Sensitivity of the recovery variable u to the membrane potential v.

-2.0 c float

After-spike reset value of the membrane potential.

-50.0 d float

After-spike change of the recovery variable u.

100.0 v_peak float

Spike cut-off value for the membrane potential.

35.0 I_app float

External applied input current.

0.0 tau_ampa float

Time constant of the AMPA synapse.

10.0 tau_gaba float

Time constant of the GABA synapse.

10.0 E_ampa float

Reversal potential of the AMPA synapse.

0.0 E_gaba float

Reversal potential of the GABA synapse.

-90.0 base_mean float

Mean of the baseline current.

0.0 base_noise float

Standard deviation of the baseline current noise.

0.0 rate_base_noise float

Rate of the noise update (Poisson distributed) in the baseline current.

0.0 Variables to record
  • offset_base
  • I_base
  • g_ampa
  • g_gaba
  • I_v
  • v
  • u
  • r
Source code in src/CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py
class Izhikevich2007NoisyBase(Neuron):\n    \"\"\"\n    TEMPLATE\n\n    [Izhikevich (2007)](https://isbnsearch.org/isbn/9780262090438)-like neuron model\n    with conductance-based AMPA and GABA synapses with noise in the baseline current.\n\n    Parameters:\n        C (float, optional):\n            Membrane capacitance.\n        k (float, optional):\n            Scaling factor for the quadratic term in the membrane potential.\n        v_r (float, optional):\n            Resting membrane potential.\n        v_t (float, optional):\n            Instantaneous activation threshold potential.\n        a (float, optional):\n            Time scale of the recovery variable u.\n        b (float, optional):\n            Sensitivity of the recovery variable u to the the membrane potential v.\n        c (float, optional):\n            After-spike reset value of the membrane potential.\n        d (float, optional):\n            After-spike change of the recovery variable u.\n        v_peak (float, optional):\n            Spike cut-off value for the membrane potential.\n        I_app (float, optional):\n            External applied input current.\n        tau_ampa (float, optional):\n            Time constant of the AMPA synapse.\n        tau_gaba (float, optional):\n            Time constant of the GABA synapse.\n        E_ampa (float, optional):\n            Reversal potential of the AMPA synapse.\n        E_gaba (float, optional):\n            Reversal potential of the GABA synapse.\n        base_mean (float, optional):\n            Mean of the baseline current.\n        base_noise (float, optional):\n            Standard deviation of the baseline current noise.\n        rate_base_noise (float, optional):\n            Rate of the noise update (Poisson distributed) in the baseline current.\n\n    Variables to record:\n        - offset_base\n        - I_base\n        - g_ampa\n        - g_gaba\n        - I_v\n        - v\n        - u\n        - r\n    \"\"\"\n\n    # For 
reporting\n    _instantiated = []\n\n    def __init__(\n        self,\n        C: float = 100.0,\n        k: float = 0.7,\n        v_r: float = -60.0,\n        v_t: float = -40.0,\n        a: float = 0.03,\n        b: float = -2.0,\n        c: float = -50.0,\n        d: float = 100.0,\n        v_peak: float = 35.0,\n        I_app: float = 0.0,\n        tau_ampa: float = 10.0,\n        tau_gaba: float = 10.0,\n        E_ampa: float = 0.0,\n        E_gaba: float = -90.0,\n        base_mean: float = 0.0,\n        base_noise: float = 0.0,\n        rate_base_noise: float = 0.0,\n    ):\n        # Create the arguments\n        parameters = f\"\"\"\n            C              = {C} : population\n            k              = {k} : population\n            v_r            = {v_r} : population\n            v_t            = {v_t} : population\n            a              = {a} : population\n            b              = {b} : population\n            c              = {c} : population\n            d              = {d} : population\n            v_peak         = {v_peak} : population\n            tau_ampa       = {tau_ampa} : population\n            tau_gaba       = {tau_gaba} : population\n            E_ampa         = {E_ampa} : population\n            E_gaba         = {E_gaba} : population\n            I_app          = {I_app} # pA\n            base_mean      = {base_mean}\n            base_noise     = {base_noise}\n            rate_base_noise = {rate_base_noise}\n        \"\"\"\n\n        syn = _syn_default\n        i_v = f\"I_app {_I_syn} + I_base\"\n        prefix = _I_base_noise\n\n        super().__init__(\n            parameters=parameters,\n            equations=_get_equation_izhikevich_2007(syn=syn, i_v=i_v, prefix=prefix),\n            spike=\"v >= v_peak\",\n            reset=\"\"\"\n                v = c\n                u = u + d\n            \"\"\",\n            name=\"Izhikevich2007_noisy_base\",\n            description=\"\"\"\n                Standard neuron model 
from Izhikevich (2007) with additional\n                conductance based synapses for AMPA and GABA currents and noisy\n                baseline current.\n            \"\"\",\n        )\n\n        # For reporting\n        self._instantiated.append(True)\n
"},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.izhikevich_2007_like_nm.Izhikevich2007FsiNoisyAmpa","title":"Izhikevich2007FsiNoisyAmpa","text":"

Bases: Neuron

TEMPLATE

Izhikevich (2007)-like neuron model for fast-spiking neurons, with conductance-based AMPA and GABA synapses with noise in the AMPA conductance.

Parameters:

Name Type Description Default C float

Membrane capacitance.

20.0 k float

Scaling factor for the quadratic term in the membrane potential.

1.0 v_r float

Resting membrane potential.

-55.0 v_t float

Instantaneous activation threshold potential.

-40.0 v_b float

Instantaneous activation threshold potential for the recovery variable u.

-55.0 a float

Time scale of the recovery variable u.

0.1 b float

Sensitivity of the recovery variable u to the membrane potential v.

-2.0 c float

After-spike reset value of the membrane potential.

-50.0 d float

After-spike change of the recovery variable u.

2.0 v_peak float

Spike cut-off value for the membrane potential.

25.0 I_app float

External applied input current.

0.0 tau_ampa float

Time constant of the AMPA synapse.

2.0 tau_gaba float

Time constant of the GABA synapse.

5.0 E_ampa float

Reversal potential of the AMPA synapse.

0.0 E_gaba float

Reversal potential of the GABA synapse.

-80.0 increase_noise float

Increase of AMPA conductance due to noise (equal to a Poisson distributed spike train as input).

0.0 rates_noise float

Rate of the noise in the AMPA conductance.

0.0 Variables to record
  • g_ampa
  • g_gaba
  • I_v
  • v
  • u
  • r
Source code in src/CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py
class Izhikevich2007FsiNoisyAmpa(Neuron):\n    \"\"\"\n    TEMPLATE\n\n    [Izhikevich (2007)](https://isbnsearch.org/isbn/9780262090438)-like neuron model\n    for fast-spiking neurons, with conductance-based AMPA and GABA synapses with noise\n    in the AMPA conductance.\n\n    Parameters:\n        C (float, optional):\n            Membrane capacitance.\n        k (float, optional):\n            Scaling factor for the quadratic term in the membrane potential.\n        v_r (float, optional):\n            Resting membrane potential.\n        v_t (float, optional):\n            Instantaneous activation threshold potential.\n        v_b (float, optional):\n            Instantaneous activation threshold potential for the recovery variable u.\n        a (float, optional):\n            Time scale of the recovery variable u.\n        b (float, optional):\n            Sensitivity of the recovery variable u to the the membrane potential v.\n        c (float, optional):\n            After-spike reset value of the membrane potential.\n        d (float, optional):\n            After-spike change of the recovery variable u.\n        v_peak (float, optional):\n            Spike cut-off value for the membrane potential.\n        I_app (float, optional):\n            External applied input current.\n        tau_ampa (float, optional):\n            Time constant of the AMPA synapse.\n        tau_gaba (float, optional):\n            Time constant of the GABA synapse.\n        E_ampa (float, optional):\n            Reversal potential of the AMPA synapse.\n        E_gaba (float, optional):\n            Reversal potential of the GABA synapse.\n        increase_noise (float, optional):\n            Increase of AMPA conductance due to noise (equal to a Poisson distributed\n            spike train as input).\n        rates_noise (float, optional):\n            Rate of the noise in the AMPA conductance.\n\n    Variables to record:\n        - g_ampa\n        - g_gaba\n        - I_v\n       
 - v\n        - u\n        - r\n    \"\"\"\n\n    # For reporting\n    _instantiated = []\n\n    def __init__(\n        self,\n        C: float = 20.0,\n        k: float = 1.0,\n        v_r: float = -55.0,\n        v_t: float = -40.0,\n        v_b: float = -55.0,\n        a: float = 0.1,\n        b: float = -2.0,\n        c: float = -50.0,\n        d: float = 2.0,\n        v_peak: float = 25.0,\n        I_app: float = 0.0,\n        tau_ampa: float = 2.0,\n        tau_gaba: float = 5.0,\n        E_ampa: float = 0.0,\n        E_gaba: float = -80.0,\n        increase_noise: float = 0.0,\n        rates_noise: float = 0.0,\n    ):\n        # Create the arguments\n        parameters = f\"\"\"\n            C              = {C} : population\n            k              = {k} : population\n            v_r            = {v_r} : population\n            v_t            = {v_t} : population\n            v_b            = {v_b} : population\n            a              = {a} : population\n            b              = {b} : population\n            c              = {c} : population\n            d              = {d} : population\n            v_peak         = {v_peak} : population\n            tau_ampa       = {tau_ampa} : population\n            tau_gaba       = {tau_gaba} : population\n            E_ampa         = {E_ampa} : population\n            E_gaba         = {E_gaba} : population\n            I_app          = {I_app} # pA\n            increase_noise = {increase_noise} : population\n            rates_noise    = {rates_noise}\n        \"\"\"\n\n        syn = _syn_noisy\n        i_v = f\"I_app {_I_syn}\"\n        du = \"if v<v_b: -a * u else: a * (b * (v - v_b)**3 - u)\"\n\n        super().__init__(\n            parameters=parameters,\n            equations=_get_equation_izhikevich_2007(syn=syn, i_v=i_v, du=du),\n            spike=\"v >= v_peak\",\n            reset=\"\"\"\n                v = c\n                u = u + d\n            \"\"\",\n            
name=\"Izhikevich2007_FSI_noisy_AMPA\",\n            description=\"\"\"\n                Standard neuron model from Izhikevich (2007) with additional\n                conductance based synapses for AMPA and GABA currents with noise\n                in AMPA conductance.\n            \"\"\",\n        )\n\n        # For reporting\n        self._instantiated.append(True)\n
"},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.izhikevich_2007_like_nm.Izhikevich2007CorbitFsiNoisyAmpa","title":"Izhikevich2007CorbitFsiNoisyAmpa","text":"

Bases: Neuron

TEMPLATE

Izhikevich (2007)-like neuron model with conductance-based AMPA and GABA synapses with noise in the AMPA conductance. Additional slow currents were added to fit the striatal FSI neuron model from Corbit et al. (2016). The additional currents should allow the neuron to produce late spiking.

Parameters:

Name Type Description Default C float

Membrane capacitance.

20.0 k float

Scaling factor for the quadratic term in the membrane potential.

1.0 b_n float

Sensitivity of the slow current n to the difference between the slow current s and the recovery variable u.

0.1 a_s float

Time scale of the slow current s.

0.1 a_n float

Time scale of the slow current n.

0.1 v_r float

Resting membrane potential.

-55.0 v_t float

Instantaneous activation threshold potential.

-40.0 a float

Time scale of the recovery variable u.

0.1 b float

Sensitivity of the recovery variable u to the membrane potential v.

-2.0 c float

After-spike reset value of the membrane potential.

-50.0 d float

After-spike change of the recovery variable u.

2.0 v_peak float

Spike cut-off value for the membrane potential.

25.0 nonlin float

Nonlinearity of the input current. (1.0 = linear, 2.0 = square, etc.)

0.1 I_app float

External applied input current.

0.0 tau_ampa float

Time constant of the AMPA synapse.

2.0 tau_gaba float

Time constant of the GABA synapse.

5.0 E_ampa float

Reversal potential of the AMPA synapse.

0.0 E_gaba float

Reversal potential of the GABA synapse.

-80.0 increase_noise float

Increase of AMPA conductance due to noise (equal to a Poisson distributed spike train as input).

0.0 rates_noise float

Rate of the noise in the AMPA conductance.

0.0 Variables to record
  • g_ampa
  • g_gaba
  • I_v
  • v
  • u
  • s
  • n
  • r
Source code in src/CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py
class Izhikevich2007CorbitFsiNoisyAmpa(Neuron):\n    \"\"\"\n    TEMPLATE\n\n    [Izhikevich (2007)](https://isbnsearch.org/isbn/9780262090438)-like neuron model\n    with conductance-based AMPA and GABA synapses with noise in the AMPA conductance.\n    Additional slow currents were added to fit the striatal FSI neuron model from\n    [Corbit et al. (2016)](https://doi.org/10.1523/JNEUROSCI.0339-16.2016). The\n    additional currents should allow the neuron to produce late spiking.\n\n    Parameters:\n        C (float, optional):\n            Membrane capacitance.\n        k (float, optional):\n            Scaling factor for the quadratic term in the membrane potential.\n        b_n (float, optional):\n            Sensitivity of the slow current n to the difference between the slow current\n            s and the recovery variable u.\n        a_s (float, optional):\n            Time scale of the slow current s.\n        a_n (float, optional):\n            Time scale of the slow current n.\n        v_r (float, optional):\n            Resting membrane potential.\n        v_t (float, optional):\n            Instantaneous activation threshold potential.\n        a (float, optional):\n            Time scale of the recovery variable u.\n        b (float, optional):\n            Sensitivity of the recovery variable u to the the membrane potential v.\n        c (float, optional):\n            After-spike reset value of the membrane potential.\n        d (float, optional):\n            After-spike change of the recovery variable u.\n        v_peak (float, optional):\n            Spike cut-off value for the membrane potential.\n        nonlin (float, optional):\n            Nonlinearity of the input current. 
(1.0 = linear, 2.0 = square, etc.)\n        I_app (float, optional):\n            External applied input current.\n        tau_ampa (float, optional):\n            Time constant of the AMPA synapse.\n        tau_gaba (float, optional):\n            Time constant of the GABA synapse.\n        E_ampa (float, optional):\n            Reversal potential of the AMPA synapse.\n        E_gaba (float, optional):\n            Reversal potential of the GABA synapse.\n        increase_noise (float, optional):\n            Increase of AMPA conductance due to noise (equal to a Poisson distributed\n            spike train as input).\n        rates_noise (float, optional):\n            Rate of the noise in the AMPA conductance.\n\n    Variables to record:\n        - g_ampa\n        - g_gaba\n        - I_v\n        - v\n        - u\n        - s\n        - n\n        - r\n    \"\"\"\n\n    # For reporting\n    _instantiated = []\n\n    def __init__(\n        self,\n        C: float = 20.0,\n        k: float = 1.0,\n        b_n: float = 0.1,\n        a_s: float = 0.1,\n        a_n: float = 0.1,\n        v_r: float = -55.0,\n        v_t: float = -40.0,\n        a: float = 0.1,\n        b: float = -2.0,\n        c: float = -50.0,\n        d: float = 2.0,\n        v_peak: float = 25.0,\n        nonlin: float = 0.1,\n        I_app: float = 0.0,\n        tau_ampa: float = 2.0,\n        tau_gaba: float = 5.0,\n        E_ampa: float = 0.0,\n        E_gaba: float = -80.0,\n        increase_noise: float = 0.0,\n        rates_noise: float = 0.0,\n    ):\n        # Create the arguments\n        parameters = f\"\"\"\n            C              = {C} : population\n            k              = {k} : population\n            b_n            = {b_n} : population\n            a_s            = {a_s} : population\n            a_n            = {a_n} : population\n            v_r            = {v_r} : population\n            v_t            = {v_t} : population\n            a              = {a} : 
population\n            b              = {b} : population\n            c              = {c} : population\n            d              = {d} : population\n            v_peak         = {v_peak} : population\n            nonlin         = {nonlin} : population\n            tau_ampa       = {tau_ampa} : population\n            tau_gaba       = {tau_gaba} : population\n            E_ampa         = {E_ampa} : population\n            E_gaba         = {E_gaba} : population\n            I_app          = {I_app} # pA\n            increase_noise = {increase_noise} : population\n            rates_noise    = {rates_noise}\n        \"\"\"\n\n        syn = _syn_noisy\n        i_v = f\"root_func(I_app {_I_syn}, nonlin) - n\"\n        affix = \"\"\"\n            ds/dt     = a_s*(pos(u)**0.1 - s)\n            dn/dt     = a_n*(b_n*(pos(u)**0.1-s) - n)\n        \"\"\"\n\n        super().__init__(\n            parameters=parameters,\n            equations=_get_equation_izhikevich_2007(syn=syn, i_v=i_v, affix=affix),\n            functions=\"\"\"\n                root_func(x,y)=((abs(x))**(1/y))/((x+1e-20)/(abs(x)+ 1e-20))\n            \"\"\",\n            spike=\"v >= v_peak\",\n            reset=\"\"\"\n                v = c\n                u = u + d\n            \"\"\",\n            name=\"Izhikevich2007_Corbit_FSI_noisy_AMPA\",\n            description=\"\"\"\n                Standard neuron model from Izhikevich (2007) with additional\n                conductance based synapses for AMPA and GABA currents with noise\n                in AMPA conductance. Additional slow currents were added to fit\n                the striatal FSI neuron model from Corbit et al. (2016).\n            \"\"\",\n        )\n\n        # For reporting\n        self._instantiated.append(True)\n
"},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.izhikevich_2007_like_nm.Izhikevich2007CorbitFsiNoisyBase","title":"Izhikevich2007CorbitFsiNoisyBase","text":"

Bases: Neuron

TEMPLATE

Izhikevich (2007)-like neuron model with conductance-based AMPA and GABA synapses with noise in the baseline current. Additional slow currents were added to fit the striatal FSI neuron model from Corbit et al. (2016). The additional currents should allow the neuron to produce late spiking.

Parameters:

Name Type Description Default C float

Membrane capacitance.

20.0 k float

Scaling factor for the quadratic term in the membrane potential.

1.0 b_n float

Sensitivity of the slow current n to the difference between the slow current s and the recovery variable u.

0.1 a_s float

Time scale of the slow current s.

0.1 a_n float

Time scale of the slow current n.

0.1 v_r float

Resting membrane potential.

-55.0 v_t float

Instantaneous activation threshold potential.

-40.0 a float

Time scale of the recovery variable u.

0.1 b float

Sensitivity of the recovery variable u to the membrane potential v.

-2.0 c float

After-spike reset value of the membrane potential.

-50.0 d float

After-spike change of the recovery variable u.

2.0 v_peak float

Spike cut-off value for the membrane potential.

25.0 nonlin float

Nonlinearity of the input current. (1.0 = linear, 2.0 = square, etc.)

0.1 I_app float

External applied input current.

0.0 tau_ampa float

Time constant of the AMPA synapse.

2.0 tau_gaba float

Time constant of the GABA synapse.

5.0 E_ampa float

Reversal potential of the AMPA synapse.

0.0 E_gaba float

Reversal potential of the GABA synapse.

-80.0 base_mean float

Mean of the baseline current.

0.0 base_noise float

Standard deviation of the baseline current noise.

0.0 rate_base_noise float

Rate of the noise update (Poisson distributed) in the baseline current.

0.0 Variables to record
  • offset_base
  • I_base
  • g_ampa
  • g_gaba
  • I_v
  • v
  • u
  • s
  • n
  • r
Source code in src/CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py
class Izhikevich2007CorbitFsiNoisyBase(Neuron):\n    \"\"\"\n    TEMPLATE\n\n    [Izhikevich (2007)](https://isbnsearch.org/isbn/9780262090438)-like neuron model\n    with conductance-based AMPA and GABA synapses with noise in the baseline current.\n    Additional slow currents were added to fit the striatal FSI neuron model from\n    [Corbit et al. (2016)](https://doi.org/10.1523/JNEUROSCI.0339-16.2016). The\n    additional currents should allow the neuron to produce late spiking.\n\n    Parameters:\n        C (float, optional):\n            Membrane capacitance.\n        k (float, optional):\n            Scaling factor for the quadratic term in the membrane potential.\n        b_n (float, optional):\n            Sensitivity of the slow current n to the difference between the slow current\n            s and the recovery variable u.\n        a_s (float, optional):\n            Time scale of the slow current s.\n        a_n (float, optional):\n            Time scale of the slow current n.\n        v_r (float, optional):\n            Resting membrane potential.\n        v_t (float, optional):\n            Instantaneous activation threshold potential.\n        a (float, optional):\n            Time scale of the recovery variable u.\n        b (float, optional):\n            Sensitivity of the recovery variable u to the the membrane potential v.\n        c (float, optional):\n            After-spike reset value of the membrane potential.\n        d (float, optional):\n            After-spike change of the recovery variable u.\n        v_peak (float, optional):\n            Spike cut-off value for the membrane potential.\n        nonlin (float, optional):\n            Nonlinearity of the input current. 
(1.0 = linear, 2.0 = square, etc.)\n        I_app (float, optional):\n            External applied input current.\n        tau_ampa (float, optional):\n            Time constant of the AMPA synapse.\n        tau_gaba (float, optional):\n            Time constant of the GABA synapse.\n        E_ampa (float, optional):\n            Reversal potential of the AMPA synapse.\n        E_gaba (float, optional):\n            Reversal potential of the GABA synapse.\n        base_mean (float, optional):\n            Mean of the baseline current.\n        base_noise (float, optional):\n            Standard deviation of the baseline current noise.\n        rate_base_noise (float, optional):\n            Rate of the noise update (Poisson distributed) in the baseline current.\n\n    Variables to record:\n        - offset_base\n        - I_base\n        - g_ampa\n        - g_gaba\n        - I_v\n        - v\n        - u\n        - s\n        - n\n        - r\n    \"\"\"\n\n    # For reporting\n    _instantiated = []\n\n    def __init__(\n        self,\n        C: float = 20.0,\n        k: float = 1.0,\n        b_n: float = 0.1,\n        a_s: float = 0.1,\n        a_n: float = 0.1,\n        v_r: float = -55.0,\n        v_t: float = -40.0,\n        a: float = 0.1,\n        b: float = -2.0,\n        c: float = -50.0,\n        d: float = 2.0,\n        v_peak: float = 25.0,\n        nonlin: float = 0.1,\n        I_app: float = 0.0,\n        tau_ampa: float = 2.0,\n        tau_gaba: float = 5.0,\n        E_ampa: float = 0.0,\n        E_gaba: float = -80.0,\n        base_mean: float = 0.0,\n        base_noise: float = 0.0,\n        rate_base_noise: float = 0.0,\n    ):\n        # Create the arguments\n        parameters = f\"\"\"\n            C              = {C} : population\n            k              = {k} : population\n            b_n            = {b_n} : population\n            a_s            = {a_s} : population\n            a_n            = {a_n} : population\n            v_r      
      = {v_r} : population\n            v_t            = {v_t} : population\n            a              = {a} : population\n            b              = {b} : population\n            c              = {c} : population\n            d              = {d} : population\n            v_peak         = {v_peak} : population\n            nonlin         = {nonlin} : population\n            tau_ampa       = {tau_ampa} : population\n            tau_gaba       = {tau_gaba} : population\n            E_ampa         = {E_ampa} : population\n            E_gaba         = {E_gaba} : population\n            I_app          = {I_app} # pA\n            base_mean      = {base_mean}\n            base_noise     = {base_noise}\n            rate_base_noise = {rate_base_noise}\n        \"\"\"\n\n        syn = _syn_default\n        i_v = f\"root_func(I_app {_I_syn}, nonlin) - n + I_base\"\n        prefix = _I_base_noise\n        affix = \"\"\"\n            ds/dt     = a_s*(pos(u)**0.1 - s)\n            dn/dt     = a_n*(b_n*(pos(u)**0.1-s) - n)\n        \"\"\"\n\n        super().__init__(\n            parameters=parameters,\n            equations=_get_equation_izhikevich_2007(\n                syn=syn, i_v=i_v, prefix=prefix, affix=affix\n            ),\n            functions=\"\"\"\n                root_func(x,y)=((abs(x))**(1/y))/((x+1e-20)/(abs(x)+ 1e-20))\n            \"\"\",\n            spike=\"v >= v_peak\",\n            reset=\"\"\"\n                v = c\n                u = u + d\n            \"\"\",\n            name=\"Izhikevich2007_Corbit_FSI_noisy_base\",\n            description=\"\"\"\n                Standard neuron model from Izhikevich (2007) with additional\n                conductance based synapses for AMPA and GABA currents with noise\n                in the baseline current. Additional slow currents were added to fit\n                the striatal FSI neuron model from Corbit et al. 
(2016).\n            \"\"\",\n        )\n\n        # For reporting\n        self._instantiated.append(True)\n
"},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.izhikevich_2007_like_nm.Izhikevich2007NoisyAmpaOscillating","title":"Izhikevich2007NoisyAmpaOscillating","text":"

Bases: Neuron

TEMPLATE

Izhikevich (2007)-like neuron model with conductance-based AMPA and GABA synapses with noise in the AMPA conductance. An additional oscillating current was added to the model.

Parameters:

Name Type Description Default C float

Membrane capacitance.

20.0 k float

Scaling factor for the quadratic term in the membrane potential.

1.0 v_r float

Resting membrane potential.

-55.0 v_t float

Instantaneous activation threshold potential.

-40.0 a float

Time scale of the recovery variable u.

0.1 b float

Sensitivity of the recovery variable u to the membrane potential v.

-2.0 c float

After-spike reset value of the membrane potential.

-50.0 d float

After-spike change of the recovery variable u.

2.0 v_peak float

Spike cut-off value for the membrane potential.

25.0 I_app float

External applied input current.

0.0 tau_ampa float

Time constant of the AMPA synapse.

2.0 tau_gaba float

Time constant of the GABA synapse.

5.0 E_ampa float

Reversal potential of the AMPA synapse.

0.0 E_gaba float

Reversal potential of the GABA synapse.

-80.0 increase_noise float

Increase of AMPA conductance due to noise (equal to a Poisson distributed spike train as input).

0.0 rates_noise float

Rate of the noise in the AMPA conductance.

0.0 freq float

Frequency of the oscillating current.

0.0 amp float

Amplitude of the oscillating current.

300.0 Variables to record
  • osc
  • g_ampa
  • g_gaba
  • I_v
  • v
  • u
  • r
Source code in src/CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py
class Izhikevich2007NoisyAmpaOscillating(Neuron):\n    \"\"\"\n    TEMPLATE\n\n    [Izhikevich (2007)](https://isbnsearch.org/isbn/9780262090438)-like neuron model\n    with conductance-based AMPA and GABA synapses with noise in the AMPA conductance.\n    An additional oscillating current was added to the model.\n\n    Parameters:\n        C (float, optional):\n            Membrane capacitance.\n        k (float, optional):\n            Scaling factor for the quadratic term in the membrane potential.\n        v_r (float, optional):\n            Resting membrane potential.\n        v_t (float, optional):\n            Instantaneous activation threshold potential.\n        a (float, optional):\n            Time scale of the recovery variable u.\n        b (float, optional):\n            Sensitivity of the recovery variable u to the the membrane potential v.\n        c (float, optional):\n            After-spike reset value of the membrane potential.\n        d (float, optional):\n            After-spike change of the recovery variable u.\n        v_peak (float, optional):\n            Spike cut-off value for the membrane potential.\n        I_app (float, optional):\n            External applied input current.\n        tau_ampa (float, optional):\n            Time constant of the AMPA synapse.\n        tau_gaba (float, optional):\n            Time constant of the GABA synapse.\n        E_ampa (float, optional):\n            Reversal potential of the AMPA synapse.\n        E_gaba (float, optional):\n            Reversal potential of the GABA synapse.\n        increase_noise (float, optional):\n            Increase of AMPA conductance due to noise (equal to a Poisson distributed\n            spike train as input).\n        rates_noise (float, optional):\n            Rate of the noise in the AMPA conductance.\n        freq (float, optional):\n            Frequency of the oscillating current.\n        amp (float, optional):\n            Amplitude of the oscillating 
current.\n\n    Variables to record:\n        - osc\n        - g_ampa\n        - g_gaba\n        - I_v\n        - v\n        - u\n        - r\n    \"\"\"\n\n    # For reporting\n    _instantiated = []\n\n    def __init__(\n        self,\n        C: float = 20.0,\n        k: float = 1.0,\n        v_r: float = -55.0,\n        v_t: float = -40.0,\n        a: float = 0.1,\n        b: float = -2.0,\n        c: float = -50.0,\n        d: float = 2.0,\n        v_peak: float = 25.0,\n        I_app: float = 0.0,\n        tau_ampa: float = 2.0,\n        tau_gaba: float = 5.0,\n        E_ampa: float = 0.0,\n        E_gaba: float = -80.0,\n        increase_noise: float = 0.0,\n        rates_noise: float = 0.0,\n        freq: float = 0.0,\n        amp: float = 300.0,\n    ):\n        # Create the arguments\n        parameters = f\"\"\"\n            C              = {C} : population\n            k              = {k} : population\n            v_r            = {v_r} : population\n            v_t            = {v_t} : population\n            a              = {a} : population\n            b              = {b} : population\n            c              = {c} : population\n            d              = {d} : population\n            v_peak         = {v_peak} : population\n            tau_ampa       = {tau_ampa} : population\n            tau_gaba       = {tau_gaba} : population\n            E_ampa         = {E_ampa} : population\n            E_gaba         = {E_gaba} : population\n            I_app          = {I_app} # pA\n            increase_noise = {increase_noise} : population\n            rates_noise    = {rates_noise}\n            freq           = {freq}\n            amp            = {amp}\n        \"\"\"\n\n        syn = _syn_noisy\n        i_v = f\"I_app {_I_syn} + osc\"\n        prefix = \"osc = amp * sin(t * 2 * pi * (freq  /1000))\"\n\n        super().__init__(\n            parameters=parameters,\n            equations=_get_equation_izhikevich_2007(syn=syn, i_v=i_v, 
prefix=prefix),\n            spike=\"v >= v_peak\",\n            reset=\"\"\"\n                v = c\n                u = u + d\n            \"\"\",\n            name=\"Izhikevich2007_noisy_AMPA_oscillating\",\n            description=\"\"\"\n                Standard neuron model from Izhikevich (2007) with additional\n                conductance based synapses for AMPA and GABA currents with noise\n                in AMPA conductance. An additional oscillating current was added\n                to the model.\n            \"\"\",\n        )\n\n        # For reporting\n        self._instantiated.append(True)\n
"},{"location":"built_in/synapse_models/","title":"Synapse Models","text":""},{"location":"built_in/synapse_models/#CompNeuroPy.synapse_models.synapse_models.FactorSynapse","title":"FactorSynapse","text":"

Bases: Synapse

Synapse which scales the transmitted value by a specified factor. Factor is equivalent to the connection weight if weight==1.

Parameters:

Name Type Description Default max_trans float

Maximum value that can be transmitted. Default: None.

None mod_factor float

Factor by which the weight value is multiplied. Default: 0.

0 Source code in src/CompNeuroPy/synapse_models/synapse_models.py
class FactorSynapse(Synapse):\n    \"\"\"\n    Synapse which scales the transmitted value by a specified factor. Factor is\n    equivalent to the connection weight if weight==1.\n\n    Parameters:\n        max_trans (float, optional):\n            Maximum value that can be transmitted. Default: None.\n        mod_factor (float, optional):\n            Factor by which the weight value is multiplied. Default: 0.\n    \"\"\"\n\n    def __init__(self, max_trans: None | float = None, mod_factor: float = 0):\n        super().__init__(\n            parameters=f\"\"\"\n            {f\"max_trans  = {max_trans}\" if max_trans is not None else \"\"}\n            mod_factor = {mod_factor}\n        \"\"\",\n            equations=\"\",\n            pre_spike=f\"\"\"\n            g_target += w * mod_factor {\": max = max_trans\" if max_trans is not None else \"\"}\n        \"\"\",\n            name=\"factor_synapse\",\n            description=\"\"\"\n            Synapse which scales the transmitted value by a specified factor. Factor is\n            equivalent to the connection weight if weight==1.\n        \"\"\",\n        )\n
"},{"location":"examples/dbs/","title":"DBS Simulator","text":""},{"location":"examples/dbs/#simple-example","title":"Simple example","text":""},{"location":"examples/dbs/#introduction","title":"Introduction","text":"

This example demonstrates how to use the DBSstimulator class to implement DBS in a network. It is shown how to create a DBSstimulator, how to use it and how to update pointers. In this simple example only the depolarization of the stimulated population is demonstrated. All other possible DBS mechanisms are demonstrated in the other example dbs_stimulator.py.

"},{"location":"examples/dbs/#code","title":"Code","text":"
from ANNarchy import Population, Izhikevich, compile, simulate\nfrom CompNeuroPy import DBSstimulator\n\nfrom ANNarchy import setup\nfrom CompNeuroPy import CompNeuroMonitors, PlotRecordings\n\nsetup(dt=0.1)\n\n# create populations\npopulation1 = Population(10, neuron=Izhikevich, name=\"my_pop1\")\npopulation2 = Population(10, neuron=Izhikevich, name=\"my_pop2\")\n\n# create DBS stimulator\ndbs = DBSstimulator(\n    stimulated_population=population1,\n    population_proportion=0.5,\n    dbs_depolarization=30,\n    auto_implement=True,\n)\n\n# if you work with names of populations/projections everything will work, but if you\n# want to work with pointers you have to update them after calling the DBSstimulator\npopulation1, population2 = dbs.update_pointers(pointer_list=[population1, population2])\n\n# compile network\ncompile()\n\n# create monitors\nmonitors = CompNeuroMonitors({\"my_pop1\": \"v\", \"my_pop2\": \"v\"})\nmonitors.start()\n\n# run simulation\n# 1000 ms without dbs\nsimulate(1000)\n# 1000 ms with dbs\ndbs.on()\nsimulate(1000)\n# 1000 ms without dbs\ndbs.off()\nsimulate(1000)\n\n# plot recordings\nPlotRecordings(\n    figname=\"dbs_stimulator_simple.png\",\n    recordings=monitors.get_recordings(),\n    recording_times=monitors.get_recording_times(),\n    chunk=0,\n    shape=(2, 1),\n    plan={\n        \"position\": [1, 2],\n        \"compartment\": [\"my_pop1\", \"my_pop2\"],\n        \"variable\": [\"v\", \"v\"],\n        \"format\": [\"matrix\", \"matrix\"],\n    },\n)\n
"},{"location":"examples/dbs/#console-output","title":"Console Output","text":"
$ python dbs_stimulator_simple.py \nANNarchy 4.7 (4.7.3b) on linux (posix).\nCompiling ...  OK \nGenerate fig dbs_stimulator_simple.png... Done\n
"},{"location":"examples/dbs/#complex-example","title":"Complex Example","text":""},{"location":"examples/dbs/#introduction_1","title":"Introduction","text":"

In this example, the DBS stimulator is tested with a simple spiking and rate-coded model. The spiking model is based on the Izhikevich model with conductance-based synapses. The rate-coded model is based on neurons including membrane potential and a resulting firing rate. The DBS stimulator is tested with different stimulation parameters. The resulting activity of the populations is compared to the expected activity (not part of example, included for testing purposes only). The resulting activity of the populations is plotted. The figures are saved in the DBS_spiking_figure and DBS_rate_figure folders. The different DBS conditions are: - no stimulation - orthodromic stimulation of efferents - orthodromic stimulation of afferents - orthodromic stimulation of efferents and afferents - orthodromic stimulation of passing fibres - depolarization of the stimulated population - antidromic stimulation of efferents - antidromic stimulation of afferents - antidromic stimulation of efferents and afferents - antidromic stimulation of passing fibres - antidromic stimulation of passing fibres with lower strength - full dbs stimulation - full dbs stimulation without axon spikes (only effective for spiking model) - full dbs stimulation without axon_rate_amp (only effective for rate-coded model)

Warning

For rate-coded models, antidromic stimulation of projections is not available.

"},{"location":"examples/dbs/#code_1","title":"Code","text":"
from ANNarchy import (\n    Neuron,\n    Population,\n    setup,\n    simulate,\n    Projection,\n    get_population,\n    get_projection,\n    DefaultRateCodedSynapse,\n    DefaultSpikingSynapse,\n    dt,\n    Constant,\n)\nfrom CompNeuroPy import (\n    CompNeuroMonitors,\n    PlotRecordings,\n    CompNeuroModel,\n    cnp_clear,\n    DBSstimulator,\n)\nfrom CompNeuroPy.monitors import RecordingTimes\nimport numpy as np\n\n### setup ANNarchy\nsetup(dt=0.1, seed=12345)\n\n\n### create dbs test model\nclass dbs_test_model_class:\n    \"\"\"\n    Class to create dbs test model.\n\n    The used neuron models have the following constraints:\n        The neuron model has to contain the following parameters:\n        - base_mean: mean of the base current\n        - base_noise: standard deviation of the base current noise\n        Spiking neuron models have to contain conductance based synapses using the\n        following conductance variables:\n        - g_ampa: excitatory synapse\n        - g_gaba: inhibitory synapse\n        Rate neuron models have to contain the following input variables:\n        - sum(ampa): excitatory input\n        - sum(gaba): inhibitory input\n        For DBS rate-coded models have to contain a membrane potential variable mp\n        and spiking models have to be Izhihkevich models.\n\n    Model structure:\n    -------------------------\n            POP1       POP2\n            |          |\n            o          v\n    DBS--->POP3------oPOP4\n                .----.\n                |    |\n            POP5   '-->POP6\n\n    -o = inhibitory synapse\n    -> = excitatory synapse\n    .-> = passing fibre excitatory synapse\n\n    Attributes:\n        model (CompNeuroModel):\n            dbs test model\n    \"\"\"\n\n    def __init__(self, mode) -> None:\n        \"\"\"\n        Initialize dbs test model\n\n        Args:\n            mode (str):\n                Mode of the dbs test model, either \"spiking\" or \"rate-coded\"\n        \"\"\"\n     
   ### constants should still be available after DBSstimulator recreates the model\n        ### test this by creating this constant\n        Constant(\"my_important_const\", 0.0)\n\n        ### check if model to create is spiking or rate-coded\n        if mode == \"spiking\":\n            self.model = CompNeuroModel(\n                model_creation_function=self.create_model,\n                model_kwargs={\n                    \"neuron_model\": self.get_neuron_model_spiking(),\n                    \"base_current_list\": [40, 100, 200, 50, 40, 40],\n                    \"base_current_noise\": 40,\n                },\n                name=\"dbs_test_spiking\",\n                description=\"Simple spiking model to test dbs\",\n                do_compile=False,\n            )\n        elif mode == \"rate-coded\":\n            self.model = CompNeuroModel(\n                model_creation_function=self.create_model,\n                model_kwargs={\n                    \"neuron_model\": self.get_neuron_model_rate_coded(),\n                    \"base_current_list\": [0.35, 0.7, 1.1, 0.85, 0.35, 0.35],\n                    \"base_current_noise\": 0.01,\n                    \"weight_list\": [0.3, 0.4, 0.3, 0.1],\n                    \"prob_list\": [0.5, 0.7, 0.7, 0.5],\n                },\n                name=\"dbs_test_rate-coded\",\n                description=\"Simple rate-coded model to test dbs\",\n                do_compile=False,\n            )\n        else:\n            raise ValueError(\"Neuron model not recognized\")\n\n    def create_model(\n        self,\n        neuron_model: Neuron,\n        pop_size: int = 10,\n        base_current_list: list = [0, 0, 0, 0, 0, 0],\n        base_current_noise: float = 0.0,\n        prob_list: list = [0.5, 0.5, 0.5, 0.5],\n        weight_list: list = [1.0, 1.0, 1.0, 1.0],\n    ):\n        \"\"\"\n        Create dbs test model\n\n        Args:\n            neuron_model (Neuron):\n                Neuron model to use for the dbs 
test model\n            pop_size (int, optional):\n                Number of neurons in each population. Default: 10\n            base_current_list (list, optional):\n                List of base currents for the four populations.\n                Default: [0, 0, 0, 0, 0, 0]\n            base_current_noise (float, optional):\n                Standard deviation of the base current noise. Default: 0\n            prob_list (list, optional):\n                List of connection probabilities for the inhibitory and excitatory path.\n                Default: [0.5, 0.5, 0.5, 0.5]\n            weight_list (list, optional):\n                List of connection weights for the inhibitory and excitatory path.\n                Default: [0.1, 0.1, 0.1, 0.1]\n        \"\"\"\n        ### create populations\n        pop1 = Population(pop_size, neuron_model, name=f\"pop1_{neuron_model.name}\")\n        pop2 = Population(pop_size, neuron_model, name=f\"pop2_{neuron_model.name}\")\n        pop3 = Population(pop_size, neuron_model, name=f\"pop3_{neuron_model.name}\")\n        pop4 = Population(pop_size, neuron_model, name=f\"pop4_{neuron_model.name}\")\n        pop5 = Population(pop_size, neuron_model, name=f\"pop5_{neuron_model.name}\")\n        pop6 = Population(pop_size, neuron_model, name=f\"pop6_{neuron_model.name}\")\n\n        ### create projections of inhhibitory path\n        proj_1_3 = Projection(\n            pre=pop1,\n            post=pop3,\n            target=\"gaba\",\n            name=f\"proj_1_3_{neuron_model.name}\",\n            synapse=self.get_synapse(neuron_model.name),\n        )\n        proj_1_3.connect_fixed_probability(\n            probability=prob_list[0],\n            weights=weight_list[0],\n        )\n        proj_3_4 = Projection(\n            pre=pop3,\n            post=pop4,\n            target=\"gaba\",\n            name=f\"proj_3_4_{neuron_model.name}\",\n            synapse=self.get_synapse(neuron_model.name),\n        )\n        
proj_3_4.connect_fixed_probability(\n            probability=prob_list[1],\n            weights=weight_list[1],\n        )\n        ### create projections of excitatory path\n        proj_2_4 = Projection(\n            pre=pop2,\n            post=pop4,\n            target=\"ampa\",\n            name=f\"proj_2_4_{neuron_model.name}\",\n            synapse=self.get_synapse(neuron_model.name),\n        )\n        proj_2_4.connect_fixed_probability(\n            probability=prob_list[2],\n            weights=weight_list[2],\n        )\n        ### create projection of passing fibres\n        proj_5_6 = Projection(\n            pre=pop5,\n            post=pop6,\n            target=\"ampa\",\n            name=f\"proj_5_6_{neuron_model.name}\",\n            synapse=self.get_synapse(neuron_model.name),\n        )\n        proj_5_6.connect_fixed_probability(\n            probability=prob_list[3],\n            weights=weight_list[3],\n        )\n\n        ### set baseline activity parameters\n        pop1.base_mean = base_current_list[0]\n        pop2.base_mean = base_current_list[1]\n        pop3.base_mean = base_current_list[2]\n        pop4.base_mean = base_current_list[3]\n        pop5.base_mean = base_current_list[4]\n        pop6.base_mean = base_current_list[5]\n        pop1.base_noise = base_current_noise\n        pop2.base_noise = base_current_noise\n        pop3.base_noise = base_current_noise\n        pop4.base_noise = base_current_noise\n        pop5.base_noise = base_current_noise\n        pop6.base_noise = base_current_noise\n\n    def get_neuron_model_spiking(self):\n        \"\"\"\n        Get neuron model with spiking dynamics\n\n        Returns\n            neuron_model (Neuron):\n                Neuron model with spiking dynamics\n        \"\"\"\n        neuron_model = Neuron(\n            parameters=\"\"\"\n                C      = 100     : population # pF\n                k      = 0.7     : population # pS * mV**-1\n                v_r    = -60     : 
population # mV\n                v_t    = -40     : population # mV\n                a      = 0.03     : population # ms**-1\n                b      = -2     : population # nS\n                c      = -50     : population # mV\n                d      = 100     : population # pA\n                v_peak = 35     : population # mV\n                I_app  = 0     # pA\n                tau_ampa = 10  : population # ms\n                tau_gaba = 10  : population # ms\n                E_ampa   = 0   : population # mV\n                E_gaba   = -90 : population # mV\n                base_mean       = 0 # pA\n                base_noise      = 0 # pA\n                rate_base_noise = 100 # Hz\n            \"\"\",\n            equations=\"\"\"\n                ### noisy base input\n                offset_base = ite(Uniform(0.0, 1.0) * 1000.0 / dt > rate_base_noise, offset_base, Normal(0., 1.) * base_noise)\n                I_base      = base_mean + offset_base + my_important_const\n                ### input conductances\n                dg_ampa/dt = -g_ampa/tau_ampa\n                dg_gaba/dt = -g_gaba/tau_gaba\n                ### input currents\n                I = I_app - g_ampa*neg(v - E_ampa) - g_gaba*pos(v - E_gaba) + I_base\n                ### membrane potential and recovery variable\n                C * dv/dt  = k*(v - v_r)*(v - v_t) - u + I\n                du/dt      = a*(b*(v - v_r) - u)\n            \"\"\",\n            spike=\"v >= v_peak\",\n            reset=\"\"\"\n                v = c\n                u = u + d\n            \"\"\",\n            name=\"spiking\",\n            description=\"\"\"\n                Simple neuron model equations from Izhikevich (2007) using regular-spiking parameters\n                with conductance-based AMPA and GABA synapses/currents.\n            \"\"\",\n        )\n        return neuron_model\n\n    def get_neuron_model_rate_coded(self):\n        \"\"\"\n        Get neuron model with rate-coded dynamics\n\n        
Returns:\n            neuron_model (Neuron):\n                Neuron model with rate-coded dynamics\n        \"\"\"\n        neuron_model = Neuron(\n            parameters=\"\"\"\n                tau = 10.0 : population\n                sigma = 0.6 : population\n                I_0 = 0.2 : population\n                I_app = 0.\n                base_mean       = 0\n                base_noise      = 0\n                rate_base_noise = 100 # Hz\n                # = (sigma*I_0 + I_0)/(sigma - sigma*I_0) : population\n                c = (0.6*0.2 + 0.2)/(0.6 - 0.6*0.2) : population\n            \"\"\",\n            equations=\"\"\"\n                ### noisy base input\n                offset_base = ite(Uniform(0.0, 1.0) * 1000.0 / dt > rate_base_noise, offset_base, Normal(0., 1.) * base_noise)\n                I_base      = base_mean + offset_base + my_important_const\n                ### input currents\n                I = sum(ampa) - sum(gaba) + I_base + I_app\n                ### membrane potential\n                tau * dmp/dt = -mp + I\n                mp_r = mp: min=-0.99*sigma\n                ### activation function\n                r = activation(mp_r,sigma,c) : max=1., min=0.\n            \"\"\",\n            name=\"rate-coded\",\n            functions=\"\"\"\n                activation(x,sigma,c) = ((sigma*x + x)/(sigma + x)) * (1 + c) - c\n            \"\"\",\n            description=\"Rate-coded neuron with excitatory (ampa) and inhibitory (gaba) inputs plus baseline and noise.\",\n        )\n        return neuron_model\n\n    def get_synapse(self, mode):\n        \"\"\"\n        Create a synapse.\n\n        Args:\n            mode (str):\n                Mode of the dbs test model, either \"spiking\" or \"rate-coded\"\n\n        Returns:\n            synapse (DefaultRateCodedSynapse or DefaultSpikingSynapse):\n                Synapse object\n        \"\"\"\n        if mode == \"rate-coded\":\n            return DefaultRateCodedSynapse()\n        elif 
mode == \"spiking\":\n            return DefaultSpikingSynapse()\n        else:\n            raise ValueError(\"Neuron model not recognized\")\n\n\ndef do_simulation(\n    mon: CompNeuroMonitors,\n    dbs: DBSstimulator,\n    dbs_val_list: list[list],\n    dbs_key_list: list[str],\n):\n    \"\"\"\n    Do the simulation\n\n    Args:\n        mon (CompNeuroMonitors):\n            CompNeuroMonitors object\n        dbs (DBSstimulator):\n            DBS stimulator object\n        dbs_val_list (list[list]):\n            List of lists with DBS stimulation values used by the dbs.on() function\n        dbs_key_list (list[str]):\n            List of DBS stimulation keys used by the dbs.on() function\n\n    Returns:\n        recordings (list):\n            List of recordings from the monitors\n        recording_times (RecordingTimes):\n            Recording times object\n    \"\"\"\n    ### run initial ramp up simulation\n    simulate(2000.0)\n\n    ### start monitors\n    mon.start()\n\n    ### loop over trials\n    for trial in range(len(dbs_val_list)):\n        ### 1000 ms with DBS off\n        simulate(1000.0)\n        ### 500 ms with DBS on\n        dbs.on(\n            **{\n                dbs_key_list[i]: dbs_val_list[trial][i]\n                for i in range(len(dbs_key_list))\n            }\n        )\n        simulate(500.0)\n        ### 1000 ms with DBS off\n        dbs.off()\n        simulate(1000.0)\n        mon.reset(model=False)\n\n    ### get data from monitors\n    recordings = mon.get_recordings()\n    recording_times = mon.get_recording_times()\n\n    return recordings, recording_times\n\n\ndef check_dbs_effects_spiking(\n    dbs_val_list: list[list],\n    recordings: list,\n    model: CompNeuroModel,\n    recording_times: RecordingTimes,\n):\n    \"\"\"\n    Check if the dbs effects are as expecteds.\n\n    Args:\n        dbs_val_list (list[list]):\n            List of lists with DBS stimulation values used by the dbs.on() function\n        recordings 
(list):\n            List of recordings from the monitors\n        model (CompNeuroModel):\n            Model used for the simulation\n        recording_times (RecordingTimes):\n            Recording times object\n    \"\"\"\n    ### effects_on_activity_list contains the expected effects of dbs on the activity of the populations for each trial\n    ### 0 means no effect, 1 means increase, -1 means decrease\n    effects_on_activity = [\n        [0, 0, 0, 0, 0, 0],\n        [0, 0, 0, -1, 0, 0],\n        [0, 0, -1, 1, 0, 0],\n        [0, 0, -1, -1, 0, 0],\n        [0, 0, 0, 0, 0, 1],\n        [0, 0, -1, 1, 0, 0],\n        [0, 0, -1, 1, 0, 0],\n        [-1, 0, 0, 0, 0, 0],\n        [-1, 0, -1, 1, 0, 0],\n        [0, 0, 0, 0, -1, 0],\n        [0, 0, 0, 0, 0, 0],\n        [-1, 0, -1, -1, -1, 1],\n        [0, 0, -1, 1, 0, 0],\n        [-1, 0, -1, -1, -1, 1],\n    ]\n    ### check if the expected effects are present in the data\n    effect_list = []\n    high_effect_list = []\n    low_effect_list = []\n    for trial_idx, trial in enumerate(range(len(dbs_val_list))):\n        effect_list.append([])\n        for pop_name_idx, pop_name in enumerate(model.populations):\n            v_arr = recordings[trial][f\"{pop_name};v\"]\n            ### mean over neurons\n            v_arr = np.mean(v_arr, axis=1)\n            ### mean of first period\n            v_mean_1 = np.mean(\n                v_arr[\n                    recording_times.idx_lims(chunk=trial)[0]\n                    + int(round(500 / dt())) : recording_times.idx_lims(chunk=trial)[0]\n                    + int(round(1000 / dt()))\n                ]\n            )\n            v_std_1 = np.std(\n                v_arr[\n                    recording_times.idx_lims(chunk=trial)[0]\n                    + int(round(500 / dt())) : recording_times.idx_lims(chunk=trial)[0]\n                    + int(round(1000 / dt()))\n                ]\n            )\n            ### mean of second period\n            v_mean_2 = 
np.mean(\n                v_arr[\n                    recording_times.idx_lims(chunk=trial)[0]\n                    + int(round(1000 / dt())) : recording_times.idx_lims(chunk=trial)[0]\n                    + int(round(1500 / dt()))\n                ]\n            )\n            ### mean of third period\n            v_mean_3 = np.mean(\n                v_arr[\n                    recording_times.idx_lims(chunk=trial)[0]\n                    + int(round(1500 / dt())) : recording_times.idx_lims(chunk=trial)[0]\n                    + int(round(2000 / dt()))\n                ]\n            )\n            v_std_3 = np.std(\n                v_arr[\n                    recording_times.idx_lims(chunk=trial)[0]\n                    + int(round(1500 / dt())) : recording_times.idx_lims(chunk=trial)[0]\n                    + int(round(2000 / dt()))\n                ]\n            )\n            ### get meand depending on dbs\n            mean_on = v_mean_2\n            mean_off = (v_mean_1 + v_mean_3) / 2\n            std_off = (v_std_1 + v_std_3) / 2\n            ### calculate effect\n            effect = (mean_on - mean_off) / std_off\n            if effect > 1:\n                high_effect_list.append(abs(effect))\n                effect = 1\n            elif effect < -1:\n                high_effect_list.append(abs(effect))\n                effect = -1\n            else:\n                low_effect_list.append(abs(effect))\n                effect = 0\n\n            effect_list[trial_idx].append(effect)\n\n    assert (\n        np.array(effects_on_activity).astype(int) == np.array(effect_list).astype(int)\n    ).all(), \"Effects on activity not as expected for spiking model\"\n\n\ndef check_dbs_effects_rate_coded(\n    dbs_val_list: list[list],\n    recordings: list,\n    model: CompNeuroModel,\n    recording_times: RecordingTimes,\n):\n    \"\"\"\n    Check if the dbs effects are as expected.\n\n    Args:\n        dbs_val_list (list[list]):\n            List of lists with 
DBS stimulation values used by the dbs.on() function\n        recordings (list):\n            List of recordings from the monitors\n        model (CompNeuroModel):\n            Model used for the simulation\n        recording_times (RecordingTimes):\n            Recording times object\n    \"\"\"\n    ### effects_on_activity_list contains the expected effects of dbs on the activity of the populations for each trial\n    ### 0 means no effect, 1 means increase, -1 means decrease\n    effects_on_activity = [\n        [0, 0, 0, 0, 0, 0],\n        [0, 0, 0, -1, 0, 0],\n        [0, 0, -1, 1, 0, 0],\n        [0, 0, -1, -1, 0, 0],\n        [0, 0, 0, 0, 0, 1],\n        [0, 0, -1, 1, 0, 0],\n        [0, 0, 0, 0, 0, 0],\n        [0, 0, 0, 0, 0, 0],\n        [0, 0, 0, 0, 0, 0],\n        [0, 0, 0, 0, 0, 0],\n        [0, 0, 0, 0, 0, 0],\n        [0, 0, -1, -1, 0, 1],\n        [0, 0, -1, -1, 0, 1],\n        [0, 0, -1, 1, 0, 0],\n    ]\n    ### check if the expected effects are present in the data\n    effect_list = []\n    high_effect_list = []\n    low_effect_list = []\n    for trial_idx, trial in enumerate(range(len(dbs_val_list))):\n        effect_list.append([])\n        for pop_name_idx, pop_name in enumerate(model.populations):\n            v_arr = recordings[trial][f\"{pop_name};r\"]\n            ### mean over neurons\n            v_arr = np.mean(v_arr, axis=1)\n            ### mean of first period\n            v_mean_1 = np.mean(\n                v_arr[\n                    recording_times.idx_lims(chunk=trial)[0]\n                    + int(round(500 / dt())) : recording_times.idx_lims(chunk=trial)[0]\n                    + int(round(1000 / dt()))\n                ]\n            )\n            v_std_1 = np.std(\n                v_arr[\n                    recording_times.idx_lims(chunk=trial)[0]\n                    + int(round(500 / dt())) : recording_times.idx_lims(chunk=trial)[0]\n                    + int(round(1000 / dt()))\n                ]\n            )\n        
    ### mean of second period\n            v_mean_2 = np.mean(\n                v_arr[\n                    recording_times.idx_lims(chunk=trial)[0]\n                    + int(round(1000 / dt())) : recording_times.idx_lims(chunk=trial)[0]\n                    + int(round(1500 / dt()))\n                ]\n            )\n            ### mean of third period\n            v_mean_3 = np.mean(\n                v_arr[\n                    recording_times.idx_lims(chunk=trial)[0]\n                    + int(round(1500 / dt())) : recording_times.idx_lims(chunk=trial)[0]\n                    + int(round(2000 / dt()))\n                ]\n            )\n            v_std_3 = np.std(\n                v_arr[\n                    recording_times.idx_lims(chunk=trial)[0]\n                    + int(round(1500 / dt())) : recording_times.idx_lims(chunk=trial)[0]\n                    + int(round(2000 / dt()))\n                ]\n            )\n            ### get meand depending on dbs\n            mean_on = v_mean_2\n            mean_off = (v_mean_1 + v_mean_3) / 2\n            std_off = (v_std_1 + v_std_3) / 2\n            ### calculate effect\n            effect = (mean_on - mean_off) / std_off\n            if effect > 2.5:\n                high_effect_list.append(abs(effect))\n                effect = 1\n            elif effect < -2.5:\n                high_effect_list.append(abs(effect))\n                effect = -1\n            else:\n                low_effect_list.append(abs(effect))\n                effect = 0\n\n            effect_list[trial_idx].append(effect)\n    assert (\n        np.array(effects_on_activity).astype(int) == np.array(effect_list).astype(int)\n    ).all(), \"Effects on activity not as expected for rate-coded model\"\n\n\ndef plot_spiking(\n    dbs_val_list: list[list],\n    recordings: list,\n    recording_times: RecordingTimes,\n    model: CompNeuroModel,\n    plotting: bool,\n):\n    \"\"\"\n    Plot spiking data.\n\n    Args:\n        dbs_val_list 
(list[list]):\n            List of lists with DBS stimulation values used by the dbs.on() function\n        recordings (list):\n            List of recordings from the monitors\n        recording_times (RecordingTimes):\n            Recording times object\n        model (CompNeuroModel):\n            Model used for the simulation\n        plotting (bool):\n            If True, plots are created\n    \"\"\"\n    if not plotting:\n        return\n\n    ### plot data\n    for trial in range(len(dbs_val_list)):\n        PlotRecordings(\n            figname=f\"DBS_spiking_figure/membrane_trial_{trial}.png\",\n            recordings=recordings,\n            recording_times=recording_times,\n            chunk=trial,\n            shape=(3, 2),\n            plan={\n                \"position\": np.arange(len(model.populations), dtype=int) + 1,\n                \"compartment\": model.populations,\n                \"variable\": [\"v\"] * len(model.populations),\n                \"format\": [\"matrix\"] * len(model.populations),\n            },\n            time_lim=(\n                recording_times.time_lims(chunk=trial)[0] + 500,\n                recording_times.time_lims(chunk=trial)[1] - 500,\n            ),\n        )\n        PlotRecordings(\n            figname=f\"DBS_spiking_figure/axon_spikes_{trial}.png\",\n            recordings=recordings,\n            recording_times=recording_times,\n            chunk=trial,\n            shape=(3, 2),\n            plan={\n                \"position\": np.arange(len(model.populations), dtype=int) + 1,\n                \"compartment\": model.populations,\n                \"variable\": [\"axon_spike\"] * len(model.populations),\n                \"format\": [\"raster\"] * len(model.populations),\n            },\n            time_lim=(\n                recording_times.time_lims(chunk=trial)[0] + 1000,\n                recording_times.time_lims(chunk=trial)[0] + 1030,\n            ),\n        )\n\n\ndef plot_rate_coded(\n    
dbs_val_list: list[list],\n    recordings: list,\n    recording_times: RecordingTimes,\n    model: CompNeuroModel,\n    plotting: bool,\n):\n    \"\"\"\n    Plot rate-coded data.\n\n    Args:\n        dbs_val_list (list[list]):\n            List of lists with DBS stimulation values used by the dbs.on() function\n        recordings (list):\n            List of recordings from the monitors\n        recording_times (RecordingTimes):\n            Recording times object\n        model (CompNeuroModel):\n            Model used for the simulation\n        plotting (bool):\n            If True, plots are created\n    \"\"\"\n    if not plotting:\n        return\n\n    ### plot data\n    for trial in range(len(dbs_val_list)):\n        PlotRecordings(\n            figname=f\"DBS_rate_figure/activity_trial_{trial}.png\",\n            recordings=recordings,\n            recording_times=recording_times,\n            chunk=trial,\n            shape=(3, 2),\n            plan={\n                \"position\": np.arange(len(model.populations), dtype=int) + 1,\n                \"compartment\": model.populations,\n                \"variable\": [\"r\"] * len(model.populations),\n                \"format\": [\"matrix\"] * len(model.populations),\n            },\n            time_lim=(\n                recording_times.time_lims(chunk=trial)[0] + 500,\n                recording_times.time_lims(chunk=trial)[1] - 500,\n            ),\n        )\n\n\ndef main(plotting: bool = False):\n    \"\"\"\n    Main function\n\n    Args:\n        plotting (bool, optional):\n            If True, plots are created. Default: False\n    \"\"\"\n    ### define simulations\n    ### i.e. 
the parameters for the dbs stimulator on function\n    ### do simulate calls repeatedly dbs.on() and dbs.off() with different parameters\n    ### specified in dbs_val_list\n    dbs_key_list = [\n        \"population_proportion\",\n        \"dbs_depolarization\",\n        \"orthodromic\",\n        \"antidromic\",\n        \"efferents\",\n        \"afferents\",\n        \"passing_fibres\",\n        \"passing_fibres_strength\",\n        \"axon_spikes_per_pulse\",\n        \"axon_rate_amp\",\n    ]\n    dbs_val_list = [\n        # 0 - nothing\n        [None, 0, False, False, False, False, False, 0.2, 1, 1],\n        # 1 - orthodromic efferents\n        [None, 0, True, False, True, False, False, 0.2, 1, 1],\n        # 2 - orthodromic afferents\n        [None, 0, True, False, False, True, False, 0.2, 1, 1],\n        # 3 - orthodromic efferents and afferents\n        [None, 0, True, False, True, True, False, 0.2, 1, 1],\n        # 4 - orthodromic passing fibres\n        [None, 0, True, False, False, False, True, 0.2, 1, 1],\n        # 5 - depolarization\n        [None, 100, False, False, False, False, False, 0.2, 1, 1],\n        # 6 - antidromic efferents\n        [None, 0, False, True, True, False, False, 0.2, 1, 1],\n        # 7 - antidromic afferents\n        [None, 0, False, True, False, True, False, 0.2, 1, 1],\n        # 8 - antidromic efferents and afferents\n        [None, 0, False, True, True, True, False, 0.2, 1, 1],\n        # 9 - antidromic passing fibres\n        [None, 0, False, True, False, False, True, 0.2, 1, 1],\n        # 10 - antidromic passing fibres lower strength\n        [None, 0, False, True, False, False, True, 0.01, 1, 1],\n        # 11 - all\n        [None, 100, True, True, True, True, True, 0.2, 1, 1],\n        # 12 - all without axon spikes, should not affect rate-coded model\n        [None, 100, True, True, True, True, True, 0.2, 0, 1],\n        # 13 - all without axon_rate_amp, should not affect spiking model\n        [None, 100, True, 
True, True, True, True, 0.2, 1, 0],\n    ]\n\n    spiking_model = True\n    rate_coded_model = True\n\n    if spiking_model:\n        ### create the spiking network\n        model = dbs_test_model_class(\"spiking\").model\n        dbs = DBSstimulator(\n            stimulated_population=get_population(\"pop3_spiking\"),\n            passing_fibres_list=[get_projection(\"proj_5_6_spiking\")],\n            passing_fibres_strength=0.2,\n            auto_implement=True,\n            model=model,\n        )\n        model = dbs.model\n\n        ### compile model\n        model.compile(compile_folder_name=\"DBS_test_spiking\")\n\n        ### create monitors\n        mon_dict = {}\n        for pop_name in model.populations:\n            mon_dict[pop_name] = [\"v\", \"spike\", \"axon_spike\"]\n        mon = CompNeuroMonitors(mon_dict)\n\n        ### run simulation and get data from monitors\n        recordings, recording_times = do_simulation(\n            mon, dbs, dbs_val_list, dbs_key_list\n        )\n\n        ### plot data\n        plot_spiking(\n            dbs_val_list=dbs_val_list,\n            recordings=recordings,\n            recording_times=recording_times,\n            model=model,\n            plotting=plotting,\n        )\n\n        ### check dbs effects\n        check_dbs_effects_spiking(\n            dbs_val_list,\n            recordings,\n            model,\n            recording_times,\n        )\n\n    if rate_coded_model:\n        ### create the rate-coded network\n        cnp_clear()\n        model = dbs_test_model_class(\"rate-coded\").model\n        dbs = DBSstimulator(\n            stimulated_population=get_population(\"pop3_rate-coded\"),\n            passing_fibres_list=[get_projection(\"proj_5_6_rate-coded\")],\n            passing_fibres_strength=0.2,\n            model=model,\n            auto_implement=True,\n        )\n        model = dbs.model\n\n        ### compile model\n        
model.compile(compile_folder_name=\"DBS_test_rate_coded\")\n\n        ### create monitors\n        mon_dict = {}\n        for pop_name in model.populations:\n            mon_dict[pop_name] = [\"r\"]\n        mon = CompNeuroMonitors(mon_dict)\n\n        ### run simulation and get data from monitors\n        recordings, recording_times = do_simulation(\n            mon, dbs, dbs_val_list, dbs_key_list\n        )\n\n        ### plot data\n        plot_rate_coded(\n            dbs_val_list=dbs_val_list,\n            recordings=recordings,\n            recording_times=recording_times,\n            model=model,\n            plotting=plotting,\n        )\n\n        ### check dbs effects\n        check_dbs_effects_rate_coded(\n            dbs_val_list,\n            recordings,\n            model,\n            recording_times,\n        )\n    return 1\n\n\nif __name__ == \"__main__\":\n    main(plotting=True)\n
"},{"location":"examples/dbs/#console-output_1","title":"Console Output","text":"
$ python dbs_stimulator.py \nANNarchy 4.7 (4.7.3b) on linux (posix).\n\nWARNING during compile of model dbs_test_spiking_dbs: There are initialized models which are not created, thus not compiled! models:\ndbs_test_spiking\n\nCompiling ...  OK \nGenerate fig DBS_spiking_figure/membrane_trial_0.png... Done\n\nGenerate fig DBS_spiking_figure/axon_spikes_0.png... \n  WARNING PlotRecordings: pop1_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop2_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop3_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop4_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop5_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop6_spiking does not contain any spikes in the given time interval.\nDone\n\nGenerate fig DBS_spiking_figure/membrane_trial_1.png... Done\n\nGenerate fig DBS_spiking_figure/axon_spikes_1.png... \n  WARNING PlotRecordings: pop1_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop2_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop4_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop5_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop6_spiking does not contain any spikes in the given time interval.\nDone\n\nGenerate fig DBS_spiking_figure/membrane_trial_2.png... Done\n\nGenerate fig DBS_spiking_figure/axon_spikes_2.png... 
\n  WARNING PlotRecordings: pop2_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop3_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop4_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop5_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop6_spiking does not contain any spikes in the given time interval.\nDone\n\nGenerate fig DBS_spiking_figure/membrane_trial_3.png... Done\n\nGenerate fig DBS_spiking_figure/axon_spikes_3.png... \n  WARNING PlotRecordings: pop2_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop4_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop5_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop6_spiking does not contain any spikes in the given time interval.\nDone\n\nGenerate fig DBS_spiking_figure/membrane_trial_4.png... Done\n\nGenerate fig DBS_spiking_figure/axon_spikes_4.png... \n  WARNING PlotRecordings: pop1_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop2_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop3_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop4_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop6_spiking does not contain any spikes in the given time interval.\nDone\n\nGenerate fig DBS_spiking_figure/membrane_trial_5.png... Done\n\nGenerate fig DBS_spiking_figure/axon_spikes_5.png... 
\n  WARNING PlotRecordings: pop1_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop2_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop3_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop4_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop5_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop6_spiking does not contain any spikes in the given time interval.\nDone\n\nGenerate fig DBS_spiking_figure/membrane_trial_6.png... Done\n\nGenerate fig DBS_spiking_figure/axon_spikes_6.png... \n  WARNING PlotRecordings: pop1_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop2_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop4_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop5_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop6_spiking does not contain any spikes in the given time interval.\nDone\n\nGenerate fig DBS_spiking_figure/membrane_trial_7.png... Done\n\nGenerate fig DBS_spiking_figure/axon_spikes_7.png... \n  WARNING PlotRecordings: pop2_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop3_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop4_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop5_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop6_spiking does not contain any spikes in the given time interval.\nDone\n\nGenerate fig DBS_spiking_figure/membrane_trial_8.png... Done\n\nGenerate fig DBS_spiking_figure/axon_spikes_8.png... 
\n  WARNING PlotRecordings: pop2_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop4_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop5_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop6_spiking does not contain any spikes in the given time interval.\nDone\n\nGenerate fig DBS_spiking_figure/membrane_trial_9.png... Done\n\nGenerate fig DBS_spiking_figure/axon_spikes_9.png... \n  WARNING PlotRecordings: pop1_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop2_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop3_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop4_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop6_spiking does not contain any spikes in the given time interval.\nDone\n\nGenerate fig DBS_spiking_figure/membrane_trial_10.png... Done\n\nGenerate fig DBS_spiking_figure/axon_spikes_10.png... \n  WARNING PlotRecordings: pop1_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop2_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop3_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop4_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop6_spiking does not contain any spikes in the given time interval.\nDone\n\nGenerate fig DBS_spiking_figure/membrane_trial_11.png... Done\n\nGenerate fig DBS_spiking_figure/axon_spikes_11.png... 
\n  WARNING PlotRecordings: pop2_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop4_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop6_spiking does not contain any spikes in the given time interval.\nDone\n\nGenerate fig DBS_spiking_figure/membrane_trial_12.png... Done\n\nGenerate fig DBS_spiking_figure/axon_spikes_12.png... \n  WARNING PlotRecordings: pop1_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop2_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop3_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop4_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop5_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop6_spiking does not contain any spikes in the given time interval.\nDone\n\nGenerate fig DBS_spiking_figure/membrane_trial_13.png... Done\n\nGenerate fig DBS_spiking_figure/axon_spikes_13.png... \n  WARNING PlotRecordings: pop2_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop4_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop6_spiking does not contain any spikes in the given time interval.\nDone\n\n\nWARNING during compile of model dbs_test_rate-coded_dbs: There are initialized models which are not created, thus not compiled! models:\ndbs_test_spiking\ndbs_test_spiking_dbs\ndbs_test_rate-coded\n\nCompiling ...  OK \nGenerate fig DBS_rate_figure/activity_trial_0.png... Done\n\nGenerate fig DBS_rate_figure/activity_trial_1.png... Done\n\nGenerate fig DBS_rate_figure/activity_trial_2.png... Done\n\nGenerate fig DBS_rate_figure/activity_trial_3.png... Done\n\nGenerate fig DBS_rate_figure/activity_trial_4.png... 
Done\n\nGenerate fig DBS_rate_figure/activity_trial_5.png... Done\n\nGenerate fig DBS_rate_figure/activity_trial_6.png... Done\n\nGenerate fig DBS_rate_figure/activity_trial_7.png... Done\n\nGenerate fig DBS_rate_figure/activity_trial_8.png... Done\n\nGenerate fig DBS_rate_figure/activity_trial_9.png... Done\n\nGenerate fig DBS_rate_figure/activity_trial_10.png... Done\n\nGenerate fig DBS_rate_figure/activity_trial_11.png... Done\n\nGenerate fig DBS_rate_figure/activity_trial_12.png... Done\n\nGenerate fig DBS_rate_figure/activity_trial_13.png... Done\n
"},{"location":"examples/experiment/","title":"Define Experiments","text":""},{"location":"examples/experiment/#introduction","title":"Introduction","text":"

This example demonstrates how to use the CompNeuroExp class to combine simulations, model and recordings in an experiment. It is shown how to define an experiment, how to run it and how to get the results.

"},{"location":"examples/experiment/#code","title":"Code","text":"
from CompNeuroPy import (\n    CompNeuroExp,\n    CompNeuroSim,\n    CompNeuroMonitors,\n    CompNeuroModel,\n    current_step,\n    current_ramp,\n    PlotRecordings,\n)\nfrom CompNeuroPy.full_models import HHmodelBischop\nfrom ANNarchy import dt, setup, get_population\n\n\n### combine both simulations and recordings in an experiment\nclass MyExp(CompNeuroExp):\n    \"\"\"\n    Define an experiment by inheriting from CompNeuroExp.\n\n    CompNeuroExp provides the attributes:\n\n        monitors (CompNeuroMonitors):\n            a CompNeuroMonitors object to do recordings, define during init otherwise\n            None\n        data (dict):\n            a dictionary for storing any optional data\n\n    and the functions:\n        reset():\n            resets the model and monitors\n        results():\n            returns a results object\n    \"\"\"\n\n    def __init__(\n        self,\n        model: CompNeuroModel,\n        sim_step: CompNeuroSim,\n        sim_ramp: CompNeuroSim,\n        monitors: CompNeuroMonitors,\n    ):\n        \"\"\"\n        Initialize the experiment and additionally store the model and simulations.\n\n        Args:\n            model (CompNeuroModel):\n                a CompNeuroModel object\n            sim_step (CompNeuroSim):\n                a CompNeuroSim object for the step simulation\n            sim_ramp (CompNeuroSim):\n                a CompNeuroSim object for the ramp simulation\n            monitors (CompNeuroMonitors):\n                a CompNeuroMonitors object\n        \"\"\"\n        self.model = model\n        self.sim_step = sim_step\n        self.sim_ramp = sim_ramp\n        super().__init__(monitors)\n\n    def run(self, E_L: float = -68.0):\n        \"\"\"\n        Do the simulations and recordings.\n\n        To use the CompNeuroExp class, you need to define a run function which\n        does the simulations and recordings. 
The run function should return the\n        results object which can be obtained by calling self.results().\n\n        Args:\n            E_L (float, optional):\n                leak reversal potential of the population, which is set at the beginning\n                of the experiment run. Default: -68 mV\n\n        Returns:\n            results (CompNeuroExp._ResultsCl):\n                results object with attributes:\n                    recordings (list):\n                        list of recordings\n                    recording_times (recording_times_cl):\n                        recording times object\n                    mon_dict (dict):\n                        dict of recorded variables of the monitors\n                    data (dict):\n                        dict with optional data stored during the experiment\n        \"\"\"\n        ### call reset at the beginning of the experiment to ensure that the model\n        ### is in the same state at the beginning of each experiment run\n        self.reset()\n\n        ### also always start the monitors, they are stopped automatically at the end\n        self.monitors.start()\n\n        ### set the leak reversal potential of the population, be aware that this\n        ### will be undone by the reset function if you don't set the parameters\n        ### argument to False\n        get_population(self.model.populations[0]).E_L = E_L\n\n        ### SIMULATION START\n        sim_step.run()\n        ### if you want to reset the model, you should use the objects reset()\n        ### it's the same as the ANNarchy reset + it resets the CompNeuroMonitors\n        ### creating a new chunk, optionally not changing the parameters\n        self.reset(parameters=False)\n        sim_ramp.run()\n        ### SIMULATION END\n\n        ### optional: store anything you want in the data dict, for example information\n        ### about the simulations\n        self.data[\"sim\"] = [sim_step.simulation_info(), 
sim_ramp.simulation_info()]\n        self.data[\"population_name\"] = self.model.populations[0]\n        self.data[\"time_step\"] = dt()\n\n        ### return results using self.results()\n        return self.results()\n\n\nif __name__ == \"__main__\":\n    ### create and compile a model\n    setup(dt=0.01)\n    model = HHmodelBischop()\n\n    ### define recordings before experiment\n    monitors = CompNeuroMonitors({model.populations[0]: [\"v\"]})\n\n    ### define some simulations e.g. using CompNeuroSim\n    sim_step = CompNeuroSim(\n        simulation_function=current_step,\n        simulation_kwargs={\n            \"pop\": model.populations[0],\n            \"t1\": 500,\n            \"t2\": 500,\n            \"a1\": 0,\n            \"a2\": 50,\n        },\n    )\n    sim_ramp = CompNeuroSim(\n        simulation_function=current_ramp,\n        simulation_kwargs={\n            \"pop\": model.populations[0],\n            \"a0\": 0,\n            \"a1\": 100,\n            \"dur\": 1000,\n            \"n\": 50,\n        },\n    )\n\n    ### init and run the experiment\n    my_exp = MyExp(monitors=monitors, model=model, sim_step=sim_step, sim_ramp=sim_ramp)\n\n    ### one use case is to run an experiment multiple times e.g. 
with different\n    ### parameters\n    results_run1 = my_exp.run()\n    results_run2 = my_exp.run(E_L=-90.0)\n\n    ### plot of the membrane potential from the first and second chunk using results\n    ### experiment run 1\n    PlotRecordings(\n        figname=\"example_experiment_sim_step.png\",\n        recordings=results_run1.recordings,\n        recording_times=results_run1.recording_times,\n        chunk=0,\n        shape=(1, 1),\n        plan={\n            \"position\": [1],\n            \"compartment\": [results_run1.data[\"population_name\"]],\n            \"variable\": [\"v\"],\n            \"format\": [\"line\"],\n        },\n    )\n    PlotRecordings(\n        figname=\"example_experiment_sim_ramp.png\",\n        recordings=results_run1.recordings,\n        recording_times=results_run1.recording_times,\n        chunk=1,\n        shape=(1, 1),\n        plan={\n            \"position\": [1],\n            \"compartment\": [results_run1.data[\"population_name\"]],\n            \"variable\": [\"v\"],\n            \"format\": [\"line\"],\n        },\n    )\n    ### experiment run 2\n    PlotRecordings(\n        figname=\"example_experiment2_sim_step.png\",\n        recordings=results_run2.recordings,\n        recording_times=results_run2.recording_times,\n        chunk=0,\n        shape=(1, 1),\n        plan={\n            \"position\": [1],\n            \"compartment\": [results_run2.data[\"population_name\"]],\n            \"variable\": [\"v\"],\n            \"format\": [\"line\"],\n        },\n    )\n    PlotRecordings(\n        figname=\"example_experiment2_sim_ramp.png\",\n        recordings=results_run2.recordings,\n        recording_times=results_run2.recording_times,\n        chunk=1,\n        shape=(1, 1),\n        plan={\n            \"position\": [1],\n            \"compartment\": [results_run2.data[\"population_name\"]],\n            \"variable\": [\"v\"],\n            \"format\": [\"line\"],\n        },\n    )\n\n    ### print data and mon_dict 
from results\n    print(\"\\nrun1:\")\n    print(\"    data:\")\n    for key, value in results_run1.data.items():\n        print(f\"        {key}:\", value)\n    print(\"    mon_dict:\")\n    for key, value in results_run1.mon_dict.items():\n        print(f\"        {key}:\", value)\n    print(\"\\nrun2:\")\n    print(\"    data:\")\n    for key, value in results_run2.data.items():\n        print(f\"        {key}:\", value)\n    print(\"    mon_dict:\")\n    for key, value in results_run2.mon_dict.items():\n        print(f\"        {key}:\", value)\n
"},{"location":"examples/experiment/#console-output","title":"Console Output","text":"
$ python experiment.py \nANNarchy 4.7 (4.7.3b) on linux (posix).\nCompiling ...  OK \nGenerate fig example_experiment_sim_step.png... Done\n\nGenerate fig example_experiment_sim_ramp.png... Done\n\nGenerate fig example_experiment2_sim_step.png... Done\n\nGenerate fig example_experiment2_sim_ramp.png... Done\n\n\nrun1:\n    data:\n        sim: [<CompNeuroPy.generate_simulation.SimInfo object at 0x7f4798dfb700>, <CompNeuroPy.generate_simulation.SimInfo object at 0x7f4798dfad40>]\n        population_name: HH_Bischop\n        time_step: 0.01\n    mon_dict:\n        HH_Bischop: ['v']\n\nrun2:\n    data:\n        sim: [<CompNeuroPy.generate_simulation.SimInfo object at 0x7f4798dfb700>, <CompNeuroPy.generate_simulation.SimInfo object at 0x7f4798dfad40>]\n        population_name: HH_Bischop\n        time_step: 0.01\n    mon_dict:\n        HH_Bischop: ['v']\n
"},{"location":"examples/generate_models/","title":"Generate Models","text":""},{"location":"examples/generate_models/#introduction","title":"Introduction","text":"

This example demonstrates how to use the CompNeuroModel class to create and compile models. It is shown how to define a model creation function, how to initialize, create, compile a model and how to get information about the model.

The model \"my_model\" is imported in other examples, e.g. run_and_monitor_simulations.py.

"},{"location":"examples/generate_models/#code","title":"Code","text":"
from ANNarchy import Population\nfrom CompNeuroPy import CompNeuroModel\nfrom CompNeuroPy.neuron_models import PoissonNeuron\nfrom tabulate import tabulate\n\n\n### define model_creation_function\ndef two_poisson(params, a):\n    \"\"\"\n    Generates two Poisson neuron populations.\n\n    Args:\n        params (dict):\n            Dictionary containing some paramters for the model with following keys:\n                's1'/'s2' : sizes of pop1/pop2\n                'n1'/'n2' : names of pop1/pop2\n        a (int):\n            Unused parameter for demonstration purposes only.\n    \"\"\"\n    ### create two populations\n    Population(params[\"s1\"], neuron=PoissonNeuron, name=params[\"n1\"])\n    Population(params[\"s2\"], neuron=PoissonNeuron, name=params[\"n2\"])\n    ### print unused parameter\n    print(f\"created model, other parameters: {a}\")\n\n\n### Let's initialize a first model\n### define the parameters argument of the model creation function\nparams = {\"s1\": 3, \"s2\": 3, \"n1\": \"first_poisson\", \"n2\": \"second_poisson\"}\n\n### use CompNeuroModel to initialize the model, not create or compile it yet\nmy_model = CompNeuroModel(\n    model_creation_function=two_poisson,\n    model_kwargs={\n        \"params\": params,\n        \"a\": 1,\n    },\n    name=\"my_model\",\n    description=\"my simple Poisson neuron model\",\n    do_create=False,\n    do_compile=False,\n    compile_folder_name=\"annarchy_my_model\",\n)\n\n### this initialized the first model\n### we could now create and compile it, but we will do this inside main\n### it could also be imported in other scripts and then created/compiled there\n\n\ndef main():\n    ### initialize a second model\n    ### this time directly create it, but not compile it yet, models can only be created\n    ### if not compiled yet\n    params = {\"s1\": 1, \"s2\": 1, \"n1\": \"pop1\", \"n2\": \"pop2\"}\n    my_model2 = CompNeuroModel(\n        model_creation_function=two_poisson,\n        
model_kwargs={\"params\": params, \"a\": 2},\n        do_compile=False,\n    )\n\n    ### now create also first model, and compile everything (automatically since we did\n    ### not set do_compile=False)\n    my_model.create()\n\n    ### print some name, description, populations and projections of the models in\n    ### tabular form\n    models_data = [\n        [\n            my_model.name,\n            my_model.description,\n            my_model.populations,\n            my_model.projections,\n        ],\n        [\n            my_model2.name,\n            my_model2.description,\n            my_model2.populations,\n            my_model2.projections,\n        ],\n    ]\n    headers = [\"Model\", \"Description\", \"Populations\", \"Projections\"]\n    print(tabulate(models_data, headers, tablefmt=\"grid\"))\n\n    return 1\n\n\nif __name__ == \"__main__\":\n    main()\n
"},{"location":"examples/generate_models/#console-output","title":"Console Output","text":"
$ python create_model.py \nANNarchy 4.7 (4.7.3b) on linux (posix).\ncreated model, other parameters: 2\ncreated model, other parameters: 1\nCompiling ...  OK \n+----------+--------------------------------+-------------------------------------+---------------+\n| Model    | Description                    | Populations                         | Projections   |\n+==========+================================+=====================================+===============+\n| my_model | my simple Poisson neuron model | ['first_poisson', 'second_poisson'] | []            |\n+----------+--------------------------------+-------------------------------------+---------------+\n| model1   |                                | ['pop1', 'pop2']                    | []            |\n+----------+--------------------------------+-------------------------------------+---------------+\n
"},{"location":"examples/monitor_recordings/","title":"Monitor Recordings","text":""},{"location":"examples/monitor_recordings/#introduction","title":"Introduction","text":"

This example demonstrates how to use the CompNeuroMonitors class to record variables. It is shown how to start/pause monitors, how to split recordings into chunks and optionally reset the model and how to get recordings during and after simulation.

"},{"location":"examples/monitor_recordings/#code","title":"Code","text":"
from ANNarchy import Population, setup, simulate, compile\nfrom CompNeuroPy import (\n    CompNeuroMonitors,\n    PlotRecordings,\n)\nfrom CompNeuroPy.neuron_models import Izhikevich2007\n\n\ndef main():\n    ### setup ANNarchy timestep and create results folder\n    setup(dt=0.1)\n\n    ### first we create two populations, each consist of 1 neuron\n    Population(1, neuron=Izhikevich2007(I_app=0), name=\"my_pop1\")\n    Population(1, neuron=Izhikevich2007(I_app=52), name=\"my_pop2\")\n\n    ### compile\n    compile()\n\n    ### after compilation we can define the monitors using the monitor_dictionary\n    ### and the CompNeuroMonitors class\n    ### for my_pop1 we use a recording period of 2 ms\n    ### for my_pop2 we do not give a recording preiod, therefore record every timestep\n    monitor_dictionary = {\"my_pop1;2\": [\"v\", \"spike\"], \"my_pop2\": [\"v\"]}\n    mon = CompNeuroMonitors(monitor_dictionary)\n\n    ### In this part we demonstrate starting/pausing all monitors\n    ### simulate for 100 ms [0, 100]\n    simulate(100)\n\n    ### start all monitors and simulate for 100 ms [100, 200]\n    mon.start()\n    simulate(100)\n\n    ### pause all monitors and simulate for 100 ms [200, 300]\n    mon.pause()\n    simulate(100)\n\n    ### In this part we demonstrate starting single monitors\n    ### start only monitor for my_pop1 and simulate for 100 ms [300, 400]\n    mon.start(compartment_list=[\"my_pop1\"])\n    simulate(100)\n\n    ### start all monitors and simulate for 100 ms [400, 500]\n    mon.start()\n    simulate(100)\n\n    ### In this part we demonstrate pausing single monitors\n    ### pause monitor for my_pop1 and simulate for 100 ms [500, 600]\n    mon.pause(compartment_list=[\"my_pop1\"])\n    simulate(100)\n\n    ### start all monitors and simulate for 100 ms [600, 700]\n    mon.start()\n    simulate(100)\n\n    ### In this part we demonstrate chunking recordings by reset\n    ### reset WITHOUT model, creating new chunk --> first chunk [0, 
700]\n    ### also in this chunk do not record the first 100 ms\n    ### WITHOUT model --> time continues at 700 ms [700, 800]\n    mon.reset(model=False)\n    mon.pause()\n    simulate(100)\n\n    ### start all monitors and simulate for 700 ms [800, 1500]\n    mon.start()\n    simulate(700)\n\n    ### reset WITH model, creating new chunk --> second chunk [700, 1500]\n    ### in third chunk time is reset to 0 ms\n    ### also in this chunk do not record the first 100 ms [0, 100]\n    mon.reset(model=True)\n    mon.pause()\n    simulate(100)\n\n    ### start all monitors and simulate for 700 ms [100, 800]\n    mon.start()\n    simulate(700)\n\n    ### Next we demonstrate getting recordings DURING SIMULATION by using\n    ### get_recordings_and_clear\n    ### this also resets the monitors back to their initialized state, i.e. there are no\n    ### recordings and they are not started yet\n    ### recordings1 consists of 3 chunks, third chunk [0, 800]\n    recordings1, recording_times1 = mon.get_recordings_and_clear()\n\n    ### Now continue simulation, creating NEW RECORDINGS, monitors are not started yet\n    ### model was not reset, so time continues at 800 ms\n    ### simulate for 100 ms [800, 900]\n    simulate(100)\n\n    ### start all monitors and simulate for 100 ms [900, 1000]\n    mon.start()\n    simulate(100)\n\n    ### reset monitors and model, creating new chunk --> first chunk [800, 1000]\n    ### simulate for 100 ms [0, 100]\n    mon.reset(model=True)\n    simulate(100)\n\n    ### get recordings using get_recordings_and_clear\n    ### this time directly start recording again\n    ### recordings2 consists of 2 chunks, second chunk [0, 100]\n    recordings2, recording_times2 = mon.get_recordings_and_clear()\n\n    ### Now continue simulation, creating NEW RECORDINGS\n    ### directly start monitors and reset model so time is reset to 0 ms\n    ### simulate for 100 ms [0, 100]\n    mon.start()\n    mon.reset(model=True)\n    simulate(100)\n\n    ### get 
recordings the normal way (simultions are finished)\n    ### recordings3 consists of 1 chunk [0, 100]\n    recordings3 = mon.get_recordings()\n    recording_times3 = mon.get_recording_times()\n\n    ### print the idx and time lims of the recordings and the sizes of the recorded\n    ### arrays\n    print(\"#################### ALL RECORDINGS INFO ####################\")\n    recordings_list = [recordings1, recordings2, recordings3]\n    for all_times_idx, all_times in enumerate(\n        [recording_times1.all(), recording_times2.all(), recording_times3.all()]\n    ):\n        print(f\"recordings{all_times_idx+1}\")\n        for chunk in range(len(all_times)):\n            print(f\"\\tchunk: {chunk}\")\n            for pop_name in [\"my_pop1\", \"my_pop2\"]:\n                print(f\"\\t\\tpop_name: {pop_name}\")\n                print(\n                    f\"\\t\\trecording_array_size: {recordings_list[all_times_idx][chunk][f'{pop_name};v'].shape}\"\n                )\n                for time_point in [\"start\", \"stop\"]:\n                    print(f\"\\t\\t\\ttime_point: {time_point}\")\n                    for unit in [\"ms\", \"idx\"]:\n                        print(f\"\\t\\t\\t\\tunit: {unit}\")\n                        for period in range(\n                            len(all_times[chunk][pop_name][time_point][unit])\n                        ):\n                            print(\n                                f\"\\t\\t\\t\\t\\tperiod {period}: {all_times[chunk][pop_name][time_point][unit][period]}\"\n                            )\n    print(\"#############################################################\")\n\n    ### plot recordings 1 consisting of 3 chunks\n    for chunk in range(len(recordings1)):\n        ### using plot_recordings which plots the recordings of one chunk\n        PlotRecordings(\n            figname=f\"monitor_recordings_1_chunk{chunk}.png\",\n            recordings=recordings1,\n            recording_times=recording_times1,\n         
   shape=(2, 2),\n            plan={\n                \"position\": [1, 2, 3],\n                \"compartment\": [\"my_pop1\", \"my_pop2\", \"my_pop1\"],\n                \"variable\": [\"v\", \"v\", \"spike\"],\n                \"format\": [\"line\", \"line\", \"raster\"],\n            },\n            chunk=chunk,\n        )\n\n    ### plot recordings 2 consisting of 2 chunks\n    for chunk in range(len(recordings2)):\n        ### using plot_recordings which plots the recordings of one chunk\n        PlotRecordings(\n            figname=f\"monitor_recordings_2_chunk{chunk}.png\",\n            recordings=recordings2,\n            recording_times=recording_times2,\n            shape=(2, 2),\n            plan={\n                \"position\": [1, 2, 3],\n                \"compartment\": [\"my_pop1\", \"my_pop2\", \"my_pop1\"],\n                \"variable\": [\"v\", \"v\", \"spike\"],\n                \"format\": [\"line\", \"line\", \"raster\"],\n            },\n            chunk=chunk,\n        )\n\n    ### plot recordings 3 consisting of 1 chunk\n    for chunk in range(len(recordings3)):\n        ### using plot_recordings which plots the recordings of one chunk\n        PlotRecordings(\n            figname=f\"monitor_recordings_3_chunk{chunk}.png\",\n            recordings=recordings3,\n            recording_times=recording_times3,\n            shape=(2, 2),\n            plan={\n                \"position\": [1, 2, 3],\n                \"compartment\": [\"my_pop1\", \"my_pop2\", \"my_pop1\"],\n                \"variable\": [\"v\", \"v\", \"spike\"],\n                \"format\": [\"line\", \"line\", \"raster\"],\n            },\n            chunk=chunk,\n        )\n\n    return 1\n\n\nif __name__ == \"__main__\":\n    main()\n
"},{"location":"examples/monitor_recordings/#conosole-output","title":"Console Output","text":"
$ python monitor_recordings.py \nANNarchy 4.7 (4.7.3b) on linux (posix).\nCompiling ...  OK \n#################### ALL RECORDINGS INFO ####################\nrecordings1\n    chunk: 0\n        pop_name: my_pop1\n        recording_array_size: (200, 1)\n            time_point: start\n                unit: ms\n                    period 0: 100.0\n                    period 1: 300.0\n                    period 2: 600.0\n                unit: idx\n                    period 0: 0\n                    period 1: 50\n                    period 2: 150\n            time_point: stop\n                unit: ms\n                    period 0: 198.0\n                    period 1: 498.0\n                    period 2: 698.0\n                unit: idx\n                    period 0: 49\n                    period 1: 149\n                    period 2: 199\n        pop_name: my_pop2\n        recording_array_size: (4000, 1)\n            time_point: start\n                unit: ms\n                    period 0: 100.0\n                    period 1: 400.0\n                unit: idx\n                    period 0: 0\n                    period 1: 1000\n            time_point: stop\n                unit: ms\n                    period 0: 199.9\n                    period 1: 699.9\n                unit: idx\n                    period 0: 999\n                    period 1: 3999\n    chunk: 1\n        pop_name: my_pop1\n        recording_array_size: (350, 1)\n            time_point: start\n                unit: ms\n                    period 0: 800.0\n                unit: idx\n                    period 0: 0\n            time_point: stop\n                unit: ms\n                    period 0: 1498.0\n                unit: idx\n                    period 0: 349\n        pop_name: my_pop2\n        recording_array_size: (7000, 1)\n            time_point: start\n                unit: ms\n                    period 0: 800.0\n                unit: idx\n                    period 0: 0\n            
time_point: stop\n                unit: ms\n                    period 0: 1499.9\n                unit: idx\n                    period 0: 6999\n    chunk: 2\n        pop_name: my_pop1\n        recording_array_size: (350, 1)\n            time_point: start\n                unit: ms\n                    period 0: 100.0\n                unit: idx\n                    period 0: 0\n            time_point: stop\n                unit: ms\n                    period 0: 798.0\n                unit: idx\n                    period 0: 349\n        pop_name: my_pop2\n        recording_array_size: (7000, 1)\n            time_point: start\n                unit: ms\n                    period 0: 100.0\n                unit: idx\n                    period 0: 0\n            time_point: stop\n                unit: ms\n                    period 0: 799.9\n                unit: idx\n                    period 0: 6999\nrecordings2\n    chunk: 0\n        pop_name: my_pop1\n        recording_array_size: (50, 1)\n            time_point: start\n                unit: ms\n                    period 0: 900.0\n                unit: idx\n                    period 0: 0\n            time_point: stop\n                unit: ms\n                    period 0: 998.0\n                unit: idx\n                    period 0: 49\n        pop_name: my_pop2\n        recording_array_size: (1000, 1)\n            time_point: start\n                unit: ms\n                    period 0: 900.0\n                unit: idx\n                    period 0: 0\n            time_point: stop\n                unit: ms\n                    period 0: 999.9\n                unit: idx\n                    period 0: 999\n    chunk: 1\n        pop_name: my_pop1\n        recording_array_size: (50, 1)\n            time_point: start\n                unit: ms\n                    period 0: 0.0\n                unit: idx\n                    period 0: 0\n            time_point: stop\n                unit: ms\n                    
period 0: 98.0\n                unit: idx\n                    period 0: 49\n        pop_name: my_pop2\n        recording_array_size: (1000, 1)\n            time_point: start\n                unit: ms\n                    period 0: 0.0\n                unit: idx\n                    period 0: 0\n            time_point: stop\n                unit: ms\n                    period 0: 99.9\n                unit: idx\n                    period 0: 999\nrecordings3\n    chunk: 0\n        pop_name: my_pop1\n        recording_array_size: (50, 1)\n            time_point: start\n                unit: ms\n                    period 0: 0.0\n                unit: idx\n                    period 0: 0\n            time_point: stop\n                unit: ms\n                    period 0: 98.0\n                unit: idx\n                    period 0: 49\n        pop_name: my_pop2\n        recording_array_size: (1000, 1)\n            time_point: start\n                unit: ms\n                    period 0: 0.0\n                unit: idx\n                    period 0: 0\n            time_point: stop\n                unit: ms\n                    period 0: 99.9\n                unit: idx\n                    period 0: 999\n#############################################################\nGenerate fig monitor_recordings_1_chunk0.png... \n  WARNING PlotRecordings: my_pop1 does not contain any spikes in the given time interval.\nDone\n\nGenerate fig monitor_recordings_1_chunk1.png... \n  WARNING PlotRecordings: my_pop1 does not contain any spikes in the given time interval.\nDone\n\nGenerate fig monitor_recordings_1_chunk2.png... \n  WARNING PlotRecordings: my_pop1 does not contain any spikes in the given time interval.\nDone\n\nGenerate fig monitor_recordings_2_chunk0.png... \n  WARNING PlotRecordings: my_pop1 does not contain any spikes in the given time interval.\nDone\n\nGenerate fig monitor_recordings_2_chunk1.png... Done\n\nGenerate fig monitor_recordings_3_chunk0.png... Done\n
"},{"location":"examples/opt_neuron/","title":"Optimize a neuron model","text":""},{"location":"examples/opt_neuron/#optimize-neuron-model-from-data","title":"Optimize neuron model from data","text":""},{"location":"examples/opt_neuron/#introduction","title":"Introduction","text":"

This example demonstrates how to use the OptNeuron class to fit an ANNarchy neuron model to some experimental data.

"},{"location":"examples/opt_neuron/#code","title":"Code","text":"
from CompNeuroPy import CompNeuroExp, CompNeuroSim, current_step, rmse\nfrom CompNeuroPy.opt_neuron import OptNeuron\nimport numpy as np\nfrom ANNarchy import Neuron, dt\n\n\n### in this example we want to fit an ANNarchy neuron model to some data (which ca be\n### somehow obtained by simulating the neuron and recording variables) for this example,\n### we have the following simple neuron model\nmy_neuron = Neuron(\n    parameters=\"\"\"\n        I_app = 0\n        a = 0 : population\n        b = 0 : population\n    \"\"\",\n    equations=\"\"\"\n        r = a*I_app + b\n    \"\"\",\n)\n\n\n### Now we need some \"experimental data\" which will be provided to the OptNeuron class\n### with the argument results_soll.\ndef get_experimental_data():\n    \"\"\"\n    Return experimental data.\n\n    Assume we have two recordings of the rate r of a single neuron from two different\n    current step experiments. Both have length = 1000 ms and after 500 ms the current is\n    changed, thus also the rate.\n\n    Returns:\n        return_dict (dict):\n            Dictionary with keys \"results_soll\" and \"time_step\" and values the\n            experimental data and the time step in ms with which the date was obtained,\n            respectively.\n    \"\"\"\n    r_arr = np.empty((2, 1000))\n    ### first recording\n    r_arr[0, :500] = 2\n    r_arr[0, 500:] = 6\n    ### second recording\n    r_arr[1, :500] = 2\n    r_arr[1, 500:] = 10\n    ### time step in ms\n    time_step = 1\n\n    return_dict = {\"results_soll\": r_arr, \"time_step\": time_step}\n    return return_dict\n\n\n### We know how our experimental data was obtained. 
This is what we have to define as an\n### CompNeuroExp for the OptNeuron class.\nclass my_exp(CompNeuroExp):\n    \"\"\"\n    Define an experiment by inheriting from CompNeuroExp.\n\n    CompNeuroExp provides the attributes:\n\n        monitors (CompNeuroMonitors):\n            a CompNeuroMonitors object to do recordings, define during init otherwise\n            None\n        data (dict):\n            a dictionary for storing any optional data\n\n    and the functions:\n        reset():\n            resets the model and monitors\n        results():\n            returns a results object\n    \"\"\"\n\n    def run(self, population_name):\n        \"\"\"\n        Do the simulations and recordings.\n\n        To use the CompNeuroExp class, you need to define a run function which\n        does the simulations and recordings. The run function should return the\n        results object which can be obtained by calling self.results().\n\n        For using the CompNeuroExp for OptNeuron, the run function should have\n        one argument which is the name of the population which is automatically created\n        by OptNeuron, containing a single neuron of the model which should be optimized.\n\n        Args:\n            population_name (str):\n                name of the population which contains a single neuron, this will be\n                automatically provided by OptNeuron\n\n        Returns:\n            results (CompNeuroExp._ResultsCl):\n                results object with attributes:\n                    recordings (list):\n                        list of recordings\n                    recording_times (recording_times_cl):\n                        recording times object\n                    mon_dict (dict):\n                        dict of recorded variables of the monitors\n                    data (dict):\n                        dict with optional data stored during the experiment\n        \"\"\"\n        ### For OptNeuron you have to reset the model and 
monitors at the beginning of\n        ### the run function! Do not reset the parameters, otherwise the optimization\n        ### will not work!\n        self.reset(parameters=False)\n\n        ### you have to start monitors within the run function, otherwise nothing will\n        ### be recorded\n        self.monitors.start()\n\n        ### do simulations and recordings using the provided CompNeuroMonitors object\n        ### (recording the varables specified during the initialization of OptNeuron\n        ### class) and e.g. the CompNeuroSim class\n        sim_step = CompNeuroSim(\n            simulation_function=current_step,\n            simulation_kwargs={\n                \"pop\": population_name,\n                \"t1\": 500,\n                \"t2\": 500,\n                \"a1\": 0,\n                \"a2\": 5,\n            },\n            kwargs_warning=False,\n            name=\"test\",\n            monitor_object=self.monitors,\n        )\n\n        ### run the simulation, remember setting parameters=False in the reset function!\n        sim_step.run()\n        self.reset(parameters=False)\n        sim_step.run({\"a2\": 10})\n\n        ### optional: store anything you want in the data dict. For example infomration\n        ### about the simulations. This is not used for the optimization but can be\n        ### retrieved after the optimization is finished\n        self.data[\"sim\"] = sim_step.simulation_info()\n        self.data[\"population_name\"] = population_name\n        self.data[\"time_step\"] = dt()\n\n        ### return results, use the object's self.results()\n        return self.results()\n\n\n### Next, the OptNeuron class needs a function to calculate the loss.\ndef get_loss(results_ist: CompNeuroExp._ResultsCl, results_soll):\n    \"\"\"\n    Function which has to have the arguments results_ist and results_soll and should\n    calculate and return the loss. 
This structure is needed for the OptNeuron class.\n\n    Args:\n        results_ist (object):\n            the results object returned by the run function of experiment (see above)\n        results_soll (any):\n            the target data directly provided to OptNeuron during initialization\n\n    Returns:\n        loss (float or list of floats):\n            the loss\n    \"\"\"\n    ### get the recordings and other important things for calculating the loss from\n    ### results_ist, we do not use all available information here, but you could\n    rec_ist = results_ist.recordings\n    pop_ist = results_ist.data[\"population_name\"]\n    neuron = 0\n\n    ### get the data for calculating the loss from the results_soll\n    r_target_0 = results_soll[0]\n    r_target_1 = results_soll[1]\n\n    ### get the data for calculating the loss from the recordings of the\n    ### optimized neuron model\n    r_ist_0 = rec_ist[0][f\"{pop_ist};r\"][:, neuron]\n    r_ist_1 = rec_ist[1][f\"{pop_ist};r\"][:, neuron]\n\n    ### calculate the loss, e.g. 
the root mean squared error\n    rmse1 = rmse(r_target_0, r_ist_0)\n    rmse2 = rmse(r_target_1, r_ist_1)\n\n    ### return the loss, one can return a singel value or a list of values which will\n    ### be summed during the optimization\n    return [rmse1, rmse2]\n\n\n### now we need to define which variables should be optimized and between which bounds\nvariables_bounds = {\"a\": [-10, 10], \"b\": [-10, 10]}\n\n\ndef main():\n    ### get experimental data\n    experimental_data = get_experimental_data()\n\n    ### intitialize optimization\n    opt = OptNeuron(\n        experiment=my_exp,\n        get_loss_function=get_loss,\n        variables_bounds=variables_bounds,\n        neuron_model=my_neuron,\n        results_soll=experimental_data[\"results_soll\"],\n        time_step=experimental_data[\"time_step\"],\n        compile_folder_name=\"annarchy_opt_neuron_example_from_data\",\n        method=\"hyperopt\",\n        record=[\"r\"],\n    )\n\n    ### run the optimization, define how often the experiment should be repeated\n    fit = opt.run(max_evals=1000, results_file_name=\"best_from_data\")\n\n    ### print optimized parameters, we should get around a=0.8 and b=2\n    print(\"a\", fit[\"a\"])\n    print(\"b\", fit[\"b\"])\n    print(list(fit.keys()))\n\n\nif __name__ == \"__main__\":\n    main()\n
"},{"location":"examples/opt_neuron/#console-output","title":"Console Output","text":"
$ python run_opt_neuron_from_data.py \nANNarchy 4.7 (4.7.3b) on linux (posix).\nOptNeuron: Initialize OptNeuron... do not create anything with ANNarchy before!\nOptNeuron: WARNING: attributes ['I_app', 'r'] are not used/initialized.\nchecking neuron_models, experiment, get_loss...Done\n\n100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1000/1000 [00:45<00:00, 21.99trial/s, best loss: 0.31922683758789056]\na 0.7609542202637395\nb 2.171783070482363\n['a', 'b', 'loss', 'all_loss', 'std', 'results', 'results_soll']\n
"},{"location":"examples/opt_neuron/#optimize-neuron-model-from-other-neuron-model","title":"Optimize neuron model from other neuron model","text":""},{"location":"examples/opt_neuron/#introduction_1","title":"Introduction","text":"

This example demonstrates how to use the OptNeuron class to fit an ANNarchy neuron model to the dynamics of another ANNarchy neuron model in a specific experiment.

The experiment and variable_bounds used are imported from the other example run_opt_neuron_from_data.py.

"},{"location":"examples/opt_neuron/#code_1","title":"Code","text":"
from CompNeuroPy import CompNeuroExp, rmse\nfrom CompNeuroPy.opt_neuron import OptNeuron\nfrom ANNarchy import Neuron\n\n### import the experiment and variables_bounds\nfrom run_opt_neuron_from_data import my_exp, variables_bounds\nfrom run_opt_neuron_from_data import my_neuron as simple_neuron\n\n\n### for this example we want to fit a simple neuron model to replicate the dynamics of a\n### more complex neuron model, the simple model is imported from the other example\n### 'run_opt_neuron_from_data.py' and the complex model is defined here\ncomplex_neuron = Neuron(\n    parameters=\"\"\"\n        I_app = 0\n        f = 6.0542364610842572e-002 : population\n        e = 3.7144041714209490e+000 : population\n        d = -4.9446336126026436e-001 : population\n        c = 9.0909599124334911e-002 : population\n        b = -4.4497411506061648e-003 : population\n        a = -6.2239117460540167e-005 : population\n    \"\"\",\n    equations=\"\"\"\n        r = a*I_app**5 + b*I_app**4 + c*I_app**3 + d*I_app**2 + e*I_app**1 + f\n    \"\"\",\n)\n\n\n### Next, the OptNeuron class needs a function to calculate the loss.\ndef get_loss(\n    results_ist: CompNeuroExp._ResultsCl, results_soll: CompNeuroExp._ResultsCl\n):\n    \"\"\"\n    Function which has to have the arguments results_ist and results_soll and should\n    calculate and return the loss. 
This structure is needed for the OptNeuron class.\n\n    Args:\n        results_ist (object):\n            the results object returned by the run function of experiment (see above),\n            conducting the experiment with the optimized neuron model\n        results_soll (any):\n            the results object returned by the run function of experiment (see above),\n            conducting the experiment with the target neuron model\n\n    Returns:\n        loss (float or list of floats):\n            the loss\n    \"\"\"\n\n    ### get the recordings and other important things from the results_ist (results\n    ### generated during the optimization using the defrined CompNeuroExp from above)\n    rec_ist = results_ist.recordings\n    pop_ist = results_ist.data[\"population_name\"]\n    rec_soll = results_soll.recordings\n    pop_soll = results_soll.data[\"population_name\"]\n    neuron = 0\n\n    ### get the data for calculating the loss from the recordings of the\n    ### target neuron model\n    v_soll_0 = rec_soll[0][pop_soll + \";r\"][:, neuron]\n    v_soll_1 = rec_soll[1][pop_soll + \";r\"][:, neuron]\n\n    ### get the data for calculating the loss from the recordings of the\n    ### optimized neuron model\n    v_ist_0 = rec_ist[0][pop_ist + \";r\"][:, neuron]\n    v_ist_1 = rec_ist[1][pop_ist + \";r\"][:, neuron]\n\n    ### calculate the loss, e.g. 
the root mean squared error\n    rmse1 = rmse(v_soll_0, v_ist_0)\n    rmse2 = rmse(v_soll_1, v_ist_1)\n\n    ### return the loss, one can return a singel value or a list of values which will\n    ### be summed during the optimization\n    return [rmse1, rmse2]\n\n\ndef main():\n    ### define optimization\n    opt = OptNeuron(\n        experiment=my_exp,\n        get_loss_function=get_loss,\n        variables_bounds=variables_bounds,\n        neuron_model=simple_neuron,\n        target_neuron_model=complex_neuron,\n        time_step=1,\n        compile_folder_name=\"annarchy_opt_neuron_example_from_neuron\",\n        method=\"hyperopt\",\n        record=[\"r\"],\n    )\n\n    ### run the optimization, define how often the experiment should be repeated\n    fit = opt.run(max_evals=1000, results_file_name=\"best_from_neuron\")\n\n    ### print optimized parameters, we should get around a=2.8 and b=0.28\n    print(\"a\", fit[\"a\"])\n    print(\"b\", fit[\"b\"])\n    print(list(fit.keys()))\n\n\nif __name__ == \"__main__\":\n    main()\n
"},{"location":"examples/opt_neuron/#console-output_1","title":"Console Output","text":"
$ python run_opt_neuron_from_neuron.py \nANNarchy 4.7 (4.7.3b) on linux (posix).\nOptNeuron: Initialize OptNeuron... do not create anything with ANNarchy before!\nOptNeuron: WARNING: attributes ['I_app', 'r'] are not used/initialized.\nchecking neuron_models, experiment, get_loss...Done\n\n100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1000/1000 [00:47<00:00, 21.10trial/s, best loss: 0.5607444520201438]\na 2.8009641859311354\nb 0.22697565003968234\n['a', 'b', 'loss', 'all_loss', 'std', 'results', 'results_soll']\n
"},{"location":"examples/plot_recordings/","title":"Plot Recordings","text":""},{"location":"examples/plot_recordings/#introduction","title":"Introduction","text":"

This example demonstrates how to plot recordings (from CompNeuroMonitors) using the PlotRecordings class. The different plotting formats for spiking and non-spiking data (populations and projections) are demonstrated.

This example loads data generated with other example run_and_monitor_simulations.py.

"},{"location":"examples/plot_recordings/#code","title":"Code","text":"
from CompNeuroPy import load_variables, PlotRecordings\n\n\ndef main():\n    ### load data generated with other example \"run_and_monitor_simulations.py\"\n    loaded_dict = load_variables(\n        name_list=[\"recordings\", \"recording_times\", \"increase_rates_pop_info\"],\n        path=\"run_and_monitor_simulations/\",\n    )\n\n    ### define what should be plotted in which subplot, here 14 subplots are defined to\n    ### demonstrate the different plotting formats for spiking and non-spiking variables\n    plan_dict = {\n        \"position\": [1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14],\n        \"compartment\": [\n            \"first_poisson\",\n            \"first_poisson\",\n            \"first_poisson\",\n            \"first_poisson\",\n            \"first_poisson\",\n            \"second_poisson\",\n            \"second_poisson\",\n            \"second_poisson\",\n            \"second_poisson\",\n            \"ampa_proj\",\n            \"ampa_proj\",\n            \"ampa_proj\",\n            \"ampa_proj\",\n        ],\n        \"variable\": [\n            \"spike\",\n            \"spike\",\n            \"spike\",\n            \"spike\",\n            \"spike\",\n            \"p\",\n            \"p\",\n            \"p\",\n            \"p\",\n            \"w\",\n            \"w\",\n            \"w\",\n            \"w\",\n        ],\n        \"format\": [\n            \"raster\",\n            \"mean\",\n            \"hybrid\",\n            \"interspike\",\n            \"cv\",\n            \"line\",\n            \"line_mean\",\n            \"matrix\",\n            \"matrix_mean\",\n            \"line\",\n            \"line_mean\",\n            \"matrix\",\n            \"matrix_mean\",\n        ],\n    }\n\n    ### plot first chunk\n    PlotRecordings(\n        figname=\"run_and_monitor_simulations/my_two_poissons_chunk_0.png\",\n        recordings=loaded_dict[\"recordings\"],\n        recording_times=loaded_dict[\"recording_times\"],\n        shape=(3, 5),\n   
     plan=plan_dict,\n    )\n    ### plot second chunk\n    PlotRecordings(\n        figname=\"run_and_monitor_simulations/my_two_poissons_chunk_1.png\",\n        recordings=loaded_dict[\"recordings\"],\n        recording_times=loaded_dict[\"recording_times\"],\n        shape=(3, 5),\n        plan=plan_dict,\n        chunk=1,\n    )\n\n    return 1\n\n\nif __name__ == \"__main__\":\n    main()\n
"},{"location":"examples/plot_recordings/#console-output","title":"Console Output","text":"
$ python plot_recordings.py \nANNarchy 4.7 (4.7.3b) on linux (posix).\nGenerate fig run_and_monitor_simulations/my_two_poissons_chunk_0.png... Done\n\nGenerate fig run_and_monitor_simulations/my_two_poissons_chunk_1.png... Done\n
"},{"location":"examples/run_and_monitor_simulations/","title":"Generate Simulations","text":""},{"location":"examples/run_and_monitor_simulations/#introduction","title":"Introduction","text":"

This example demonstrates how to use the CompNeuroSim class to define simulations. It is shown how to define the simulation functions, requirements and how to use the simulation information object.

This example imports the \"my_model\" from other example create_model.py and saves recorded data used in other example plot_recordings.py.

"},{"location":"examples/run_and_monitor_simulations/#code","title":"Code","text":"
import numpy as np\nfrom CompNeuroPy import (\n    CompNeuroMonitors,\n    CompNeuroSim,\n    ReqPopHasAttr,\n    save_variables,\n    CompNeuroModel,\n)\nfrom ANNarchy import (\n    simulate,\n    get_population,\n    Population,\n    Neuron,\n    Projection,\n    Synapse,\n    Uniform,\n)\nfrom CompNeuroPy.examples.create_model import my_model\n\n\n### CompNeuroSim is a class to define simulations\n### It requires a simulation function, which we will define here:\ndef set_rates(pop_name: str, rates: float = 0.0, duration: float = 0.0):\n    \"\"\"\n    Sets the rates variable of a population given by pop_name and simulates duration ms.\n\n    Args:\n        pop_name (str):\n            name of the population\n        rates (float, optional):\n            rates variable of the population\n        duration (float, optional):\n            duration of the simulation in ms\n    \"\"\"\n    ### set rates and simulate\n    get_population(pop_name).rates = rates\n    simulate(duration)\n\n\n### Also create a second more complex simulation function\ndef increase_rates(\n    pop_name: str | list[str],\n    rate_step: float = 0.0,\n    time_step: float = 0.0,\n    nr_steps: int = 0,\n):\n    \"\"\"\n    Increase rates variable of population(s).\n\n    Args:\n        pop_name (str or list of str):\n            name of population(s)\n        rate_step (float, optional):\n            increase of rate with each step, initial step = current rates of pop\n        time_step (float, optional):\n            duration of each step in ms\n        nr_steps (int, optional):\n            number of steps\n    \"\"\"\n\n    ### convert single pop into list\n    pop_name_list = pop_name\n    if not (isinstance(pop_name_list, list)):\n        pop_name_list = [pop_name_list]\n\n    ### define initial value for rates for each pop (assume all neurons have same rates)\n    start_rate_arr = np.array(\n        [get_population(pop_name).rates[0] for pop_name in pop_name_list]\n    )\n\n    ### 
simulate all steps\n    for step in range(nr_steps):\n        ### calculate rates for each pop\n        rates_arr = step * rate_step + start_rate_arr\n        ### set rates variable of all populations\n        for pop_idx, pop_name in enumerate(pop_name_list):\n            set_rates(\n                pop_name, rates=rates_arr[pop_idx], duration=0\n            )  # use already defined simulation set_rates\n        ### then simulate step\n        set_rates(pop_name_list[0], rates=rates_arr[0], duration=time_step)\n\n    ### simulation_functions can return some information which may be helpful later\n    ### the simulation arguments do not need to be returned, since they are accessible\n    ### through the CompNeuroSim object anyway (see below)\n    return {\"duration\": time_step * nr_steps, \"d_rates\": rate_step * nr_steps}\n\n\n### see below why we need this function\ndef extend_model(my_model: CompNeuroModel):\n    \"\"\"\n    Create a simple projections and a projection with decaying weights.\n\n    Args:\n        my_model (CompNeuroModel):\n            model to which the projection should be added\n    \"\"\"\n\n    ### create a simple population for later use\n    Population(1, neuron=Neuron(equations=\"r=0\"), name=\"simple_pop\")\n\n    ### create a projection with decaying weights to demonstrate recording of projection\n    proj = Projection(\n        pre=my_model.populations[0],\n        post=my_model.populations[1],\n        target=\"ampa\",\n        synapse=Synapse(parameters=\"tau=500\", equations=\"dw/dt=-w/tau\"),\n        name=\"ampa_proj\",\n    )\n    proj.connect_all_to_all(weights=Uniform(1.0, 2.0))\n\n\ndef main():\n    ### create and compile the model from other example \"create_model.py\"\n    my_model.create(do_compile=False)\n\n    ### extend the model to demonstrate the functionality of CompNeuroSim requirements\n    ### (see below) and the recording of projections (recorded data will be used in\n    ### other example 
\"plot_recordings.py\")\n    extend_model(my_model)\n    my_model.compile()\n\n    ### Define Monitors, recording p and spike from both model populations with periods\n    ### of 10 ms and 15 ms and the weights of the ampa projection with period of 10 ms\n    monitor_dictionary = {\n        f\"{my_model.populations[0]};10\": [\"p\", \"spike\"],\n        f\"{my_model.populations[1]};15\": [\"p\", \"spike\"],\n        \"ampa_proj;10\": [\"w\"],\n    }\n    mon = CompNeuroMonitors(monitor_dictionary)\n\n    ### Now use CompNeuroSim to define a simulation. Use the previously defined\n    ### simulation functions and define their arguments as kwargs dictionary. Give the\n    ### simulation a name and description and you can also define requirements for the\n    ### simulation. Here, for example, we require that the populations contain the\n    ### attribute 'rates'. One can define multiple requirements in a list of\n    ### dictionaries. The arguments of the requirements can be inherited from the\n    ### simulation kwargs by using the syntax 'simulation_kwargs.<kwarg_name>'.\n    ### The monitor object is also given to the simulation, so that the simulation\n    ### runs can be automatically associated with the monitor recording chunks.\n    increase_rates_pop = CompNeuroSim(\n        simulation_function=increase_rates,\n        simulation_kwargs={\n            \"pop_name\": my_model.populations[0],\n            \"rate_step\": 10,\n            \"time_step\": 100,\n            \"nr_steps\": 15,\n        },\n        name=\"increase_rates_pop\",\n        description=\"increase rates variable of pop\",\n        requirements=[\n            {\"req\": ReqPopHasAttr, \"pop\": \"simulation_kwargs.pop_name\", \"attr\": \"rates\"}\n        ],\n        monitor_object=mon,\n    )\n\n    ### Now let's use this simulation\n    ### Simulate 500 ms without recordings and then run the simulation\n    simulate(500)\n    mon.start()\n    increase_rates_pop.run()\n\n    ### resetting 
monitors and model, creating new recording chunk\n    mon.reset()\n\n    ### again simulate 700 ms without recording\n    ### then run the simulation with different simulation kwargs (for all populations)\n    mon.pause()\n    simulate(700)\n    mon.start()\n    increase_rates_pop.run({\"pop_name\": my_model.populations})\n    simulate(500)\n\n    ### now again change the pop_name kwarg but use the simple_pop population without\n    ### the required attribute 'rates'\n    ### this will raise an error\n    try:\n        increase_rates_pop.run({\"pop_name\": \"simple_pop\"})\n    except Exception as e:\n        print(\"\\n###############################################\")\n        print(\n            \"Running simulation with population not containing attribute 'rates' causes the following error:\"\n        )\n        print(e)\n        print(\"###############################################\\n\")\n\n    ### get recordings and recording times from the CompNeuroMonitors object\n    recordings = mon.get_recordings()\n    recording_times = mon.get_recording_times()\n\n    ### get the simulation information object from the CompNeuroSim object\n    increase_rates_pop_info = increase_rates_pop.simulation_info()\n\n    ### save the recordings, recording times and simulation information\n    save_variables(\n        variable_list=[recordings, recording_times, increase_rates_pop_info],\n        name_list=[\"recordings\", \"recording_times\", \"increase_rates_pop_info\"],\n        path=\"run_and_monitor_simulations\",\n    )\n\n    ### print the information contained in the simulation information object\n    print(\"\\nA simulation object contains:\")\n    print(\"name\\n\", increase_rates_pop_info.name)\n    print(\"\\ndescription\\n\", increase_rates_pop_info.description)\n    print(\"\\nstart (for each run)\\n\", increase_rates_pop_info.start)\n    print(\"\\nend (for each run)\\n\", increase_rates_pop_info.end)\n    print(\"\\ninfo (for each run)\\n\", 
increase_rates_pop_info.info)\n    print(\"\\nkwargs (for each run)\\n\", increase_rates_pop_info.kwargs)\n    print(\"\\nmonitor chunk (for each run)\\n\", increase_rates_pop_info.monitor_chunk)\n\n    return 1\n\n\nif __name__ == \"__main__\":\n    main()\n
"},{"location":"examples/run_and_monitor_simulations/#console-output","title":"Console Output","text":"
$ python run_and_monitor_simulations.py \nANNarchy 4.7 (4.7.3b) on linux (posix).\ncreated model, other parameters: 1\nCompiling ...  OK \n\n###############################################\nRunning simulation with population not containing attribute 'rates' causes the following error:\nPopulation simple_pop does not contain attribute rates!\n\n###############################################\n\n\nA simulation object contains:\nname\n increase_rates_pop\n\ndescription\n increase rates variable of pop\n\nstart (for each run)\n [500.0, 700.0]\n\nend (for each run)\n [2000.0, 2200.0]\n\ninfo (for each run)\n [{'duration': 1500, 'd_rates': 150}, {'duration': 1500, 'd_rates': 150}]\n\nkwargs (for each run)\n [{'pop_name': 'first_poisson', 'rate_step': 10, 'time_step': 100, 'nr_steps': 15}, {'pop_name': ['first_poisson', 'second_poisson'], 'rate_step': 10, 'time_step': 100, 'nr_steps': 15}]\n\nmonitor chunk (for each run)\n [0, 1]\n
"},{"location":"main/dbs_stimulator/","title":"DBS Stimulator","text":""},{"location":"main/dbs_stimulator/#CompNeuroPy.dbs.DBSstimulator","title":"CompNeuroPy.dbs.DBSstimulator","text":"

Class for stimulating a population with DBS.

Warning

If you use auto_implement, pointers to the populations and projections of the model are not valid anymore (new populations and projections are created)! Use a CompNeuroPy model working with names of populations and projections anyway (recommended) or use the update_pointers method.

Examples:

from ANNarchy import Population, Izhikevich, compile, simulate, setup\nfrom CompNeuroPy import DBSstimulator\n\n# setup ANNarchy\nsetup(dt=0.1)\n\n# create populations\npopulation1 = Population(10, neuron=Izhikevich, name=\"my_pop1\")\npopulation2 = Population(10, neuron=Izhikevich, name=\"my_pop2\")\n>>>\n# create DBS stimulator\ndbs = DBSstimulator(\n    stimulated_population=population1,\n    population_proportion=0.5,\n    dbs_depolarization=30,\n    auto_implement=True,\n)\n\n# update pointers to correct populations\npopulation1, population2 = dbs.update_pointers(\n    pointer_list=[population1, population2]\n)\n\n# compile network\ncompile()\n\n# run simulation\n# 1000 ms without dbs\nsimulate(1000)\n# 1000 ms with dbs\ndbs.on()\nsimulate(1000)\n# 1000 ms without dbs\ndbs.off()\nsimulate(1000)\n
Source code in src/CompNeuroPy/dbs.py
class DBSstimulator:\n    \"\"\"\n    Class for stimulating a population with DBS.\n\n    !!! warning\n        If you use auto_implement, pointers to the populations and projections of\n        the model are not valid anymore (new populations and projections are\n        created)! Use a CompNeuroPy model working with names of populations and\n        projections anyway (recommended) or use the update_pointers method.\n\n    Examples:\n        ```python\n        from ANNarchy import Population, Izhikevich, compile, simulate, setup\n        from CompNeuroPy import DBSstimulator\n\n        # setup ANNarchy\n        setup(dt=0.1)\n\n        # create populations\n        population1 = Population(10, neuron=Izhikevich, name=\"my_pop1\")\n        population2 = Population(10, neuron=Izhikevich, name=\"my_pop2\")\n        >>>\n        # create DBS stimulator\n        dbs = DBSstimulator(\n            stimulated_population=population1,\n            population_proportion=0.5,\n            dbs_depolarization=30,\n            auto_implement=True,\n        )\n\n        # update pointers to correct populations\n        population1, population2 = dbs.update_pointers(\n            pointer_list=[population1, population2]\n        )\n\n        # compile network\n        compile()\n\n        # run simulation\n        # 1000 ms without dbs\n        simulate(1000)\n        # 1000 ms with dbs\n        dbs.on()\n        simulate(1000)\n        # 1000 ms without dbs\n        dbs.off()\n        simulate(1000)\n        ```\n    \"\"\"\n\n    @check_types()\n    def __init__(\n        self,\n        stimulated_population: Population,\n        population_proportion: float = 1.0,\n        excluded_populations_list: list[Population] = [],\n        dbs_depolarization: float = 0.0,\n        orthodromic: bool = False,\n        antidromic: bool = False,\n        efferents: bool = False,\n        afferents: bool = False,\n        passing_fibres: bool = False,\n        passing_fibres_list: 
list[Projection] = [],\n        passing_fibres_strength: float | list[float] = 1.0,\n        sum_branches: bool = True,\n        dbs_pulse_frequency_Hz: float = 130.0,\n        dbs_pulse_width_us: float = 300.0,\n        axon_spikes_per_pulse: float = 1.0,\n        axon_rate_amp: float | dict[Population | str, float] = 1.0,\n        seed: int | None = None,\n        auto_implement: bool = False,\n        model: generate_model | None = None,\n    ) -> None:\n        \"\"\"\n        Initialize DBS stimulator.\n\n        !!! warning\n            Do this before compiling the model!\n\n        Args:\n            stimulated_population (Population):\n                Population which is stimulated by DBS\n            population_proportion (float, optional):\n                Proportion of the stimulated population which is affected by DBS,\n                neurons are distributed randomly. Default: 1.0.\n            excluded_populations_list (list, optional):\n                List of populations which are excluded from DBS effects, they are not\n                affected and their axons do not generate axon spikes. Default: [].\n            dbs_depolarization (float, optional):\n                Depolarization effect of the DBS pulse to local soma. Default: 0.0.\n            orthodromic (bool, optional):\n                If True, DBS causes axonal spikes which are forwarded orthodromically.\n                Default: False.\n            antidromic (bool, optional):\n                If True, DBS causes axonal spikes which are forwarded antidromically,\n                only available in spiking networks. Default: False.\n            efferents (bool, optional):\n                If True, DBS affects the efferents of the stimulated population\n                (orthodromic and/or antidromic have to be True too). 
Default: False.\n            afferents (bool, optional):\n                If True, DBS affects the afferents of the stimulated population\n                (orthodromic and/or antidromic have to be True too). Default: False.\n            passing_fibres (bool, optional):\n                If True, DBS affects the passing fibres of the stimulated region defined\n                in passing_fibres_list (orthodromic and/or antidromic have to be True\n                too). Default: False.\n            passing_fibres_list (list of Projections, optional):\n                List of projections which pass the DBS stimulated region and therefore\n                are activated by DBS. Default: [], also set passing_fibres True!\n            passing_fibres_strength (float or list of float, optional):\n                Single value or list of float values between 0 and 1 defining how strong\n                the passing fibres are activated by DBS (0: not activated, 1: fully\n                activated like the projections in the DBS stimulated region).\n                Default: 1.\n            sum_branches (bool, optional):\n                If True, the antidromic_prob of a presynaptic population (defining how\n                many axon spikes affect the pop antidromically) of passing fibres is\n                the sum of the passing_fibres_strengths of the single axon branches.\n                Default: True.\n            dbs_pulse_frequency_Hz (float, optional):\n                Frequency of the DBS pulse. Default: 130 Hz.\n            dbs_pulse_width_us (float, optional):\n                Width of the DBS pulse. Default: 300 us.\n            axon_spikes_per_pulse (float, optional):\n                Number of average axon spikes per DBS pulse. Default: 1.\n            axon_rate_amp (float or dict of float, optional):\n                Similar to prob_axon_spike in spiking model. Which rate is forwarded on\n                axons caused by DBS. 
You can specify this for each population\n                individually by using a dictionary (keys = Population instances)\n                axon_rate_amp = {pop: 1.5} --> the efferent axons of pop forward a rate\n                of 1.5 during DBS (all other affected projections forward the default\n                value)\n                You can specify the default value by using the key \"default\", e.g.\n                {pop: 1.5, \"default\": 1.0} -> efferent axons of all populations except\n                pop forward a rate of 1.0 during DBS. Default: 1.0.\n            seed (int, optional):\n                Seed for the random distribution of affected neurons based on\n                population_proportion. Default: None.\n            auto_implement (bool, optional):\n                If True, automatically implement DBS mechanisms to the model. Only\n                supported for Izhikevich spiking models and rate-coded models.\n                Default: False.\n                TODO test what happens with mixed models\n            model (generate_model, optional):\n                CompNeuroPy model which is used to automatically implement DBS\n                mechanisms, should not be compiled!. 
Default: None, i.e., use all\n                populations and projections of the current magic model\n        \"\"\"\n\n        if auto_implement:\n            ### recreate model with DBS mechanisms\n            ### give all variables containing Populations and Projections\n            ### and also recreate them during recreating the model\n            ### variables are:\n            ### - stimulated_population\n            ### - excluded_populations_list\n            ### - passing_fibres_list\n            ### - axon_rate_amp\n            if not isinstance(model, type(None)):\n                ### CompNeuroPy model given\n                ### recreate model with DBS mechanisms\n                create_dbs_model_obj = _CreateDBSmodelcnp(\n                    model,\n                    stimulated_population,\n                    excluded_populations_list,\n                    passing_fibres_list,\n                    axon_rate_amp,\n                )\n                ### get the new CompNeuroPy model\n                model = create_dbs_model_obj.model\n            else:\n                ### no CompNeuroPy model given --> use all populations and projections of the current magic model\n                ### recreate model with DBS mechanisms\n                create_dbs_model_obj = _CreateDBSmodel(\n                    stimulated_population,\n                    excluded_populations_list,\n                    passing_fibres_list,\n                    axon_rate_amp,\n                )\n            ### get the new variables containing Populations and Projections\n            stimulated_population = create_dbs_model_obj.stimulated_population\n            excluded_populations_list = create_dbs_model_obj.excluded_populations_list\n            passing_fibres_list = create_dbs_model_obj.passing_fibres_list\n            axon_rate_amp = create_dbs_model_obj.axon_rate_amp\n\n        ### set parameters\n        self.stimulated_population = stimulated_population\n        
self.population_proportion = population_proportion\n        self.excluded_populations_list = excluded_populations_list\n        self.dbs_depolarization = dbs_depolarization\n        self.orthodromic = orthodromic\n        self.antidromic = antidromic\n        self.efferents = efferents\n        self.afferents = afferents\n        self.passing_fibres = passing_fibres\n        self.passing_fibres_list = passing_fibres_list\n        self.passing_fibres_strength = passing_fibres_strength\n        self.sum_branches = sum_branches\n        self.dbs_pulse_width_us = dbs_pulse_width_us\n        self.axon_spikes_per_pulse = axon_spikes_per_pulse\n        self.axon_rate_amp = axon_rate_amp\n        self.seed = seed\n        self.model = model\n\n        ### ANNarchy constants for DBS\n        self._set_constants(dbs_pulse_frequency_Hz)\n\n        ### randomly select affected neurons i.e. create dbs_on_array\n        self.dbs_on_array = self._create_dbs_on_array(population_proportion, seed)\n\n    def _create_dbs_on_array(self, population_proportion: float, seed: int):\n        \"\"\"\n        Create an array with the shape of the stimulated population with ones and zeros\n        randomly distributed with the specified population_proportion.\n\n        Args:\n            population_proportion (float):\n                Proportion of the stimulated population which is affected by DBS,\n                neurons are distributed randomly\n            seed (int):\n                Seed for the random distribution of affected neurons based on\n                population_proportion\n\n        Returns:\n            dbs_on_array (np.array):\n                Array with the shape of the stimulated population with ones and zeros\n                randomly distributed with the specified population_proportion\n        \"\"\"\n        ### create random number generator\n        rng = np.random.default_rng(seed)\n        ### create an array of zeros with the shape of the population, then 
flatten it\n        dbs_on_array = np.zeros(self.stimulated_population.geometry).flatten()\n        ### get the number of affected neurons based on the population_proportion\n        number_of_affected_neurons = population_proportion * dbs_on_array.size\n        ### randomly ceil or floor the number of affected neurons\n        number_of_affected_neurons = int(\n            rng.choice(\n                [\n                    np.ceil(number_of_affected_neurons),\n                    np.floor(number_of_affected_neurons),\n                ]\n            )\n        )\n        ### insert ones to the dbs_on_array\n        dbs_on_array[:number_of_affected_neurons] = 1\n        ### shuffle array\n        rng.shuffle(dbs_on_array)\n        ### reshape array to the shape of the population\n        dbs_on_array = dbs_on_array.reshape(self.stimulated_population.geometry)\n        ### return array\n        return dbs_on_array\n\n    def _set_constants(self, dbs_pulse_frequency_Hz: float):\n        \"\"\"\n        Set constants for DBS.\n\n        Args:\n            dbs_pulse_frequency_Hz (float):\n                Frequency of the DBS pulse in Hz\n        \"\"\"\n        # pulse frequency:\n        Constant(\"dbs_pulse_frequency_Hz\", dbs_pulse_frequency_Hz)\n        # pulse width:\n        # Neumant et al.. 2023: 60us but Meier et al. 2022: 300us... 
60us = 0.06ms is very small for ANNarchy simulations\n        Constant(\"dbs_pulse_width_us\", self.dbs_pulse_width_us)\n\n        ### add global function for DBS pulse\n        add_function(\n            \"pulse(time_ms) = ite(modulo(time_ms*1000, 1000000./dbs_pulse_frequency_Hz) < dbs_pulse_width_us, 1., 0.)\"\n        )\n\n    def _axon_spikes_per_pulse_to_prob(self, axon_spikes_per_pulse: float):\n        \"\"\"\n        Convert number of axon spikes per pulse to probability of axon spikes per\n        timestep during DBS pulse\n\n        Args:\n            axon_spikes_per_pulse (float):\n                Number of average axon spikes per DBS pulse\n\n        Returns:\n            prob_axon_spike_time_step (float):\n                Probability of axon spikes per timestep during DBS pulse\n        \"\"\"\n        return np.clip(\n            (axon_spikes_per_pulse * 1000 * dt() / self.dbs_pulse_width_us), 0, 1\n        )\n\n    def _set_depolarization(self, dbs_depolarization: float | None = None):\n        \"\"\"\n        Set depolarization of population.\n\n        Args:\n            dbs_depolarization (float, optional):\n                Depolarization effect of the DBS pulse to local soma. 
Default: None,\n                i.e., use value from initialization\n        \"\"\"\n        ### either use given depolarization or use default value\n        if isinstance(dbs_depolarization, type(None)):\n            dbs_depolarization = self.dbs_depolarization\n\n        ### set depolarization of population\n        for pop in populations():\n            if pop == self.stimulated_population:\n                pop.dbs_depolarization = dbs_depolarization\n            else:\n                pop.dbs_depolarization = 0\n\n    def _set_axon_spikes(\n        self,\n        orthodromic: bool | None = None,\n        antidromic: bool | None = None,\n        efferents: bool | None = None,\n        afferents: bool | None = None,\n        passing_fibres: bool | None = None,\n        passing_fibres_strength: float | list[float] | None = None,\n        sum_branches: bool | None = None,\n        axon_spikes_per_pulse: float | None = None,\n        axon_rate_amp: float | dict[Population | str, float] | None = None,\n    ):\n        \"\"\"\n        Set axon spikes forwarding orthodromic\n\n        Args:\n            orthodromic (bool, optional):\n                If True, DBS causes axonal spikes which are forwarded orthodromically,\n                Default: None, i.e., use value from initialization\n            antidromic (bool, optional):\n                If True, DBS causes axonal spikes which are forwarded antidromically,\n                only available in spiking networks. Default: None, i.e., use value from\n                initialization\n            efferents (bool, optional):\n                If True, DBS affects the efferents of the stimulated population\n                (orthodromic and/or antidromic have to be True too). 
Default: None,\n                i.e., use value from initialization\n            afferents (bool, optional):\n                If True, DBS affects the afferents of the stimulated population\n                (orthodromic and/or antidromic have to be True too). Default: None,\n                i.e., use value from initialization\n            passing_fibres (bool, optional):\n                If True, DBS affects the passing fibres of the stimulated region defined\n                in passing_fibres_list (orthodromic and/or antidromic have to be True\n                too). Default: None, i.e., use value from initialization\n            passing_fibres_strength (float | list[float], optional):\n                Single value or list of float values between 0 and 1 defining how strong\n                the passing fibres are activated by DBS (0: not activated, 1: fully\n                activated like the projections in the DBS stimulated region).\n                Default: None, i.e., use value from initialization\n            sum_branches (bool, optional):\n                If True, the antidromic_prob of a presynaptic population (defining how\n                many axon spikes affect the pop antidromically) of passing fibres is\n                the sum of the passing_fibres_strengths of the single axon branches.\n                Default: None, i.e., use value from initialization\n            axon_spikes_per_pulse (float, optional):\n                Number of average axon spikes per DBS pulse. Default: None, i.e., use\n                value from initialization\n            axon_rate_amp (float | dict[Population | str, float], optional):\n                Similar to prob_axon_spike in spiking model. Which rate is forwarded on\n                axons caused by DBS. 
You can specify this for each population\n                individually by using a dictionary (keys = Population instances)\n                axon_rate_amp = {pop: 1.5} --> the efferent axons of pop forward a rate\n                of 1.5 during DBS (all other affected projections forward the default\n                value)\n                You can specify the default value by using the key \"default\", e.g.\n                {pop: 1.5, \"default\": 1.0} -> efferent axons of all populations except\n                pop forward a rate of 1.0 during DBS. Default: None, i.e., use value\n                from initialization\n        \"\"\"\n\n        ### either use given orthodromic or use default value\n        if isinstance(orthodromic, type(None)):\n            orthodromic = self.orthodromic\n        ### either use given antidromic or use default value\n        if isinstance(antidromic, type(None)):\n            antidromic = self.antidromic\n        ### either use given efferents or use default value\n        if isinstance(efferents, type(None)):\n            efferents = self.efferents\n        ### either use given afferents or use default value\n        if isinstance(afferents, type(None)):\n            afferents = self.afferents\n        ### either use given afferents or use default value\n        if isinstance(passing_fibres, type(None)):\n            passing_fibres = self.passing_fibres\n        ### either use given passing_fibres_strength or use default value\n        if isinstance(passing_fibres_strength, type(None)):\n            passing_fibres_strength = self.passing_fibres_strength\n        ### either use given sum_branches or use default value\n        if isinstance(sum_branches, type(None)):\n            sum_branches = self.sum_branches\n        ### either use given axon_spikes_per_pulse or use default value\n        if isinstance(axon_spikes_per_pulse, type(None)):\n            axon_spikes_per_pulse = self.axon_spikes_per_pulse\n        ### either use given 
axon_rate_amp or use default value\n        if isinstance(axon_rate_amp, type(None)):\n            axon_rate_amp = self.axon_rate_amp\n\n        ### check if passing_fibres_strength is a list\n        if not isinstance(passing_fibres_strength, list):\n            passing_fibres_strength = [passing_fibres_strength] * len(\n                self.passing_fibres_list\n            )\n        ### check if axon_rate_amp is a dict or float\n        if isinstance(axon_rate_amp, dict):\n            ### check if default key is missing\n            if \"default\" not in axon_rate_amp.keys():\n                ### add the key \"default\" with the value 1.0 to the dict\n                axon_rate_amp[\"default\"] = 1.0\n        else:\n            ### create dict with default value\n            axon_rate_amp = {\"default\": axon_rate_amp}\n\n        ### deactivate DBS axon transmission\n        self._deactivate_axon_DBS()\n\n        ### activate orthodromic transmission for all projections\n        if orthodromic:\n            self._set_orthodromic(\n                efferents,\n                afferents,\n                passing_fibres,\n                passing_fibres_strength,\n                axon_spikes_per_pulse,\n                axon_rate_amp,\n            )\n\n        ### activate antidromic transmission for all populations\n        if antidromic:\n            self._set_antidromic(\n                efferents,\n                afferents,\n                passing_fibres,\n                passing_fibres_strength,\n                sum_branches,\n                axon_spikes_per_pulse,\n            )\n\n    def _deactivate_axon_DBS(self):\n        \"\"\"\n        Deactivate axon spikes forwarding for both orthodromic and antidromic.\n        \"\"\"\n        for pop in populations():\n            ### deactivate axon spike genearation for all populations\n            pop.prob_axon_spike = 0\n            pop.axon_rate_amp = 0\n            ### deactivate antidromic transmission for all 
populations\n            pop.antidromic = 0\n            pop.antidromic_prob = 0\n\n        ### deactivate orthodromic transmission for all projections\n        for proj in projections():\n            proj.axon_transmission = 0\n            proj.p_axon_spike_trans = 0\n\n    def _set_orthodromic(\n        self,\n        efferents: bool,\n        afferents: bool,\n        passing_fibres: bool,\n        passing_fibres_strength: list[float],\n        axon_spikes_per_pulse: float,\n        axon_rate_amp: dict[Population | str, float],\n    ):\n        \"\"\"\n        Set orthodromic axon spikes forwarding.\n\n        Args:\n            efferents (bool):\n                If True, DBS affects the efferents of the stimulated population\n                (orthodromic and/or antidromic have to be True too)\n            afferents (bool):\n                If True, DBS affects the afferents of the stimulated population\n                (orthodromic and/or antidromic have to be True too)\n            passing_fibres (bool):\n                If True, DBS affects the passing fibres of the stimulated population\n                (orthodromic and/or antidromic have to be True too and there have to\n                be passing fibres in the passing_fibres_list)\n            passing_fibres_strength (list[float]):\n                List of float values between 0 and 1 defining how strong the passing\n                fibres are activated by DBS (0: not activated, 1: fully activated\n                like projections in DBS stimulated region)\n            axon_spikes_per_pulse (float):\n                Number of average axon spikes per DBS pulse\n            axon_rate_amp (dict[Population | str, float]):\n                Similar to prob_axon_spike in spiking model. Which rate is forwarded\n                on axons caused by DBS. 
The dictionary has to contain the key\n                \"default\" with the default value for all projections and can contain\n                keys for each population with a different value for the axon_rate of\n                the efferent axons of this population.\n        \"\"\"\n        if efferents:\n            ### activate all efferent projections\n            projection_list = projections(pre=self.stimulated_population)\n            for proj in projection_list:\n                ### skip excluded populations\n                if proj.post in self.excluded_populations_list:\n                    continue\n                ### activate axon transmission\n                proj.axon_transmission = 1\n                proj.p_axon_spike_trans = 1\n                ### set prob_axon_spike for spiking model\n                proj.pre.prob_axon_spike = self._axon_spikes_per_pulse_to_prob(\n                    axon_spikes_per_pulse\n                )\n                ### set axon_rate_amp for rate-coded model\n                if proj.pre in axon_rate_amp.keys():\n                    ### axon_rate_amp is specified for this population\n                    proj.pre.axon_rate_amp = axon_rate_amp[proj.pre]\n                else:\n                    ### axon_rate_amp is not specified for this population, use default value\n                    proj.pre.axon_rate_amp = axon_rate_amp[\"default\"]\n\n        if afferents:\n            ### activate all afferent projections\n            projection_list = projections(post=self.stimulated_population)\n            for proj in projection_list:\n                ### skip excluded populations\n                if proj.pre in self.excluded_populations_list:\n                    continue\n                ### activate axon transmission\n                proj.axon_transmission = 1\n                proj.p_axon_spike_trans = 1\n                ### set prob_axon_spike for spiking model\n                proj.pre.prob_axon_spike = 
self._axon_spikes_per_pulse_to_prob(\n                    axon_spikes_per_pulse\n                )\n                ### set axon_rate_amp for rate-coded model\n                if proj.pre in axon_rate_amp.keys():\n                    ### axon_rate_amp is specified for this population\n                    proj.pre.axon_rate_amp = axon_rate_amp[proj.pre]\n                else:\n                    ### axon_rate_amp is not specified for this population, use default value\n                    proj.pre.axon_rate_amp = axon_rate_amp[\"default\"]\n\n        if passing_fibres:\n            ### activate all passing projections\n            for proj_idx, proj in enumerate(self.passing_fibres_list):\n                proj.axon_transmission = 1\n                proj.p_axon_spike_trans = passing_fibres_strength[proj_idx]\n                ### set prob_axon_spike for spiking model\n                proj.pre.prob_axon_spike = self._axon_spikes_per_pulse_to_prob(\n                    axon_spikes_per_pulse\n                )\n                ### set axon_rate_amp for rate-coded model\n                if proj.pre in axon_rate_amp.keys():\n                    ### axon_rate_amp is specified for this population\n                    proj.pre.axon_rate_amp = axon_rate_amp[proj.pre]\n                else:\n                    ### axon_rate_amp is not specified for this population, use default value\n                    proj.pre.axon_rate_amp = axon_rate_amp[\"default\"]\n\n    def _set_antidromic(\n        self,\n        efferents: bool,\n        afferents: bool,\n        passing_fibres: bool,\n        passing_fibres_strength: list[float],\n        sum_branches: bool,\n        axon_spikes_per_pulse: float,\n    ):\n        \"\"\"\n        Set antidromic axon spikes forwarding.\n\n        Args:\n            efferents (bool):\n                If True, DBS affects the efferents of the stimulated population\n                (orthodromic and/or antidromic have to be True too)\n            
afferents (bool):\n                If True, DBS affects the afferents of the stimulated population\n                (orthodromic and/or antidromic have to be True too)\n            passing_fibres (bool):\n                If True, DBS affects the passing fibres of the stimulated population\n                (orthodromic and/or antidromic have to be True too and there have to\n                be passing fibres in the passing_fibres_list)\n            passing_fibres_strength (list[float]):\n                List of float values between 0 and 1 defining how strong the passing\n                fibres are activated by DBS (0: not activated, 1: fully activated\n                like projections in DBS stimulated region)\n            sum_branches (bool):\n                If True, the antidromic_prob of a presynaptic population (defining how\n                many axon spikes affect the pop antidromically) of passing fibres is\n                the sum of the passing_fibres_strengths of the single axon branches.\n            axon_spikes_per_pulse (float):\n                Number of average axon spikes per DBS pulse\n        \"\"\"\n\n        if efferents:\n            ### activate all efferent projections, i.e. antodromic activation of stimulated population\n            pop = self.stimulated_population\n            pop.antidromic = 1\n            pop.antidromic_prob = 1\n            pop.prob_axon_spike = self._axon_spikes_per_pulse_to_prob(\n                axon_spikes_per_pulse\n            )\n        if afferents:\n            ### activate all afferent projections, i.e. 
all presynaptic populations of stimulated population\n            ### get presynaptic projections\n            projection_list = projections(post=self.stimulated_population)\n            ### get presynaptic populations from projections\n            presyn_pop_list = []\n            presyn_pop_name_list = []\n            for proj in projection_list:\n                ### skip excluded populations\n                if proj.pre in self.excluded_populations_list:\n                    continue\n                ### check if presynaptic population is already in list\n                if proj.pre.name not in presyn_pop_name_list:\n                    presyn_pop_name_list.append(proj.pre.name)\n                    presyn_pop_list.append(proj.pre)\n            ### set antidromic for all presynaptic populations\n            for pop in presyn_pop_list:\n                pop.antidromic = 1\n                pop.antidromic_prob = np.mean(self.stimulated_population.dbs_on)\n                pop.prob_axon_spike = self._axon_spikes_per_pulse_to_prob(\n                    axon_spikes_per_pulse\n                )\n        if passing_fibres:\n            ### get presynaptic populations from passing fibres projections\n            presyn_pop_list = []\n            presyn_pop_name_list = []\n            for proj in self.passing_fibres_list:\n                ### check if presynaptic population is already in list\n                if proj.pre.name not in presyn_pop_name_list:\n                    presyn_pop_name_list.append(proj.pre.name)\n                    presyn_pop_list.append(proj.pre)\n            ### get antidomic_prob for each presynatic population with the passing_fibres_strength\n            antidromic_prob_list = [0] * len(presyn_pop_list)\n            for pop_idx, pop in enumerate(presyn_pop_list):\n                ### get all passing fibres and their strength of a presynaptic pop\n                passing_fibres_strength_of_pop_list = []\n                for proj_idx, proj in 
enumerate(self.passing_fibres_list):\n                    if proj.pre.name == pop.name:\n                        passing_fibres_strength_of_pop_list.append(\n                            passing_fibres_strength[proj_idx]\n                        )\n                ### check if the probs of the single axon branches should be summed up\n                ### if for example a presynaptic pop contributes to two passing fibres, the axons of the presynaptic pop split up into two branches\n                ### thus, if these two branches are both stimulated, they both forward APs antidromic\n                ### thus, sum up the antidromic_prob of the single branches to obtain the antidromic spikes which affect the presynaptic pop\n                ### if sum_branches is False, then this would represent that the stimulation at the axon is before it splits up into multiple branches and there should not be different passing_fibres_strengths for the same presynaptic pop\n                if sum_branches:\n                    antidromic_prob_list[pop_idx] = sum(\n                        passing_fibres_strength_of_pop_list\n                    )\n                else:\n                    if len(set(passing_fibres_strength_of_pop_list)) != 1:\n                        ### list contains different values\n                        raise ValueError(\n                            \"Different passing fibres strengths for the same presynaptic population detected. 
This is not possible if sum_branches is False.\"\n                        )\n                    ### all values are the same, thus take the first one\n                    antidromic_prob_list[pop_idx] = passing_fibres_strength_of_pop_list[\n                        0\n                    ]\n\n                ### TODO\n                ### if summing axon branches leads to a prob > 1, then\n                ### the prob should be set to 1\n                ### the axon spike generation in this pop should be increased\n                ### and all axon spike transmissions from this pop should be decreased by the same factor\n                ### this is not implemented yet... maybe in the future\n                if antidromic_prob_list[pop_idx] > 1:\n                    raise ValueError(\n                        \"Summing the passing fibres strengths of a presynaptic population leads to a antidromic spike probability > 1. This is not possible yet.\"\n                    )\n\n            ### set antidromic for all presynaptic populations\n            for pop_idx, pop in enumerate(presyn_pop_list):\n                pop.antidromic = 1\n                pop.antidromic_prob = antidromic_prob_list[pop_idx]\n                pop.prob_axon_spike = self._axon_spikes_per_pulse_to_prob(\n                    axon_spikes_per_pulse\n                )\n\n    @check_types()\n    def on(\n        self,\n        population_proportion: float | None = None,\n        dbs_depolarization: float | None = None,\n        orthodromic: bool | None = None,\n        antidromic: bool | None = None,\n        efferents: bool | None = None,\n        afferents: bool | None = None,\n        passing_fibres: bool | None = None,\n        passing_fibres_strength: float | list[float] | None = None,\n        sum_branches: bool | None = None,\n        axon_spikes_per_pulse: float | None = None,\n        axon_rate_amp: float | dict[Population | str, float] | None = None,\n        seed: int | None = None,\n    ):\n     
   \"\"\"\n        Activate DBS.\n\n        Args:\n            population_proportion (float, optional):\n                Proportion of the stimulated population which is affected by DBS,\n                neurons are distributed randomly. Default: None, i.e., use value from\n                initialization\n            dbs_depolarization (float, optional):\n                Depolarization effect of the DBS pulse to local soma. Default: None,\n                i.e., use value from initialization\n            orthodromic (bool, optional):\n                If True, DBS causes axonal spikes which are forwarded orthodromically.\n                Default: None, i.e., use value from initialization\n            antidromic (bool, optional):\n                If True, DBS causes axonal spikes which are forwarded antidromically,\n                only available in spiking networks. Default: None, i.e., use value from\n                initialization\n            efferents (bool, optional):\n                If True, DBS affects the efferents of the stimulated population\n                (orthodromic and/or antidromic have to be True too). Default: None,\n                i.e., use value from initialization\n            afferents (bool, optional):\n                If True, DBS affects the afferents of the stimulated population\n                (orthodromic and/or antidromic have to be True too). Default: None,\n                i.e., use value from initialization\n            passing_fibres (bool, optional):\n                If True, DBS affects the passing fibres of the stimulated region defined\n                in passing_fibres_list (orthodromic and/or antidromic have to be True\n                too). 
Default: None, i.e., use value from initialization\n            passing_fibres_strength (float | list[float], optional):\n                Single value or list of float values between 0 and 1 defining how strong\n                the passing fibres are activated by DBS (0: not activated, 1: fully\n                activated like the projections in the DBS stimulated region).\n                Default: None, i.e., use value from initialization\n            sum_branches (bool, optional):\n                If True, the antidromic_prob of a presynaptic population (defining how\n                many axon spikes affect the pop antidromically) of passing fibres is\n                the sum of the passing_fibres_strengths of the single axon branches.\n                Default: None, i.e., use value from initialization\n            axon_spikes_per_pulse (float, optional):\n                Number of average axon spikes per DBS pulse. Default: None, i.e., use\n                value from initialization\n            axon_rate_amp (float | dict[Population | str, float], optional):\n                Similar to prob_axon_spike in spiking model. Which rate is forwarded on\n                axons caused by DBS. You can specify this for each population\n                individually by using a dictionary (keys = Population instances)\n                axon_rate_amp = {pop: 1.5} --> the efferent axons of pop forward a rate\n                of 1.5 during DBS (all other affected projections forward the default\n                value). You can specify the default value by using the key \"default\",\n                e.g. {pop: 1.5, \"default\": 1.0} -> efferent axons of all populations\n                except pop forward a rate of 1.0 during DBS. Default: None, i.e., use\n                value from initialization\n            seed (int, optional):\n                Seed for the random number generator. 
Default: None, i.e., use value\n                from initialization\n        \"\"\"\n\n        ### set DBS on for all populations\n        ### also sets the proportion of affected neurons, call this before set_depolarization and set_axon_spikes!\n        self._set_dbs_on(population_proportion, seed)\n\n        ### set depolarization of population\n        self._set_depolarization(dbs_depolarization)\n\n        ### set axon spikes forwarding\n        self._set_axon_spikes(\n            orthodromic,\n            antidromic,\n            efferents,\n            afferents,\n            passing_fibres,\n            passing_fibres_strength,\n            sum_branches,\n            axon_spikes_per_pulse,\n            axon_rate_amp,\n        )\n\n    def _set_dbs_on(self, population_proportion: float | None, seed: int | None):\n        \"\"\"\n        Set DBS on for all populations, for the stimulated population only the specified\n        proportion is affected by DBS.\n\n        Args:\n            population_proportion (float, optional):\n                Proportion of the stimulated population which is affected by DBS,\n                neurons are distributed randomly. Default: None, i.e., use value from\n                initialization\n            seed (int, optional):\n                Seed for the random number generator. 
Default: None, i.e., use value\n                from initialization\n        \"\"\"\n        ### set parameters for the creation of the DBS on array\n        ### either use given population_proportion or use default value\n        if isinstance(population_proportion, type(None)):\n            population_proportion = self.population_proportion\n        ### either use given seed or use default value\n        if isinstance(seed, type(None)):\n            seed = self.seed\n\n        ### if seed and population_propotion are the same as in the initialization, use the same dbs_on_array\n        if seed == self.seed and population_proportion == self.population_proportion:\n            ### use the same dbs_on_array as in the initialization\n            dbs_on_array = self.dbs_on_array\n        else:\n            ### create new dbs_on_array\n            dbs_on_array = self._create_dbs_on_array(population_proportion, seed)\n\n        ### set DBS on for all populations\n        for pop in populations():\n            ### of the stimulated population only the specified proportion is affected by DBS\n            if pop == self.stimulated_population:\n                pop.dbs_on = dbs_on_array\n            else:\n                pop.dbs_on = 1\n\n    def off(self):\n        \"\"\"\n        Deactivate DBS.\n        \"\"\"\n        ### set DBS off for all populations\n        for pop in populations():\n            pop.dbs_on = 0\n            pop.prob_axon_spike = 0\n            pop.axon_rate_amp = 0\n\n        ### deactivate DBS axon transmission\n        self._deactivate_axon_DBS()\n\n    def update_pointers(self, pointer_list):\n        \"\"\"\n        Update pointers to populations and projections after recreating the model.\n\n        Args:\n            pointer_list (list):\n                List of pointers to populations and projections\n\n        Returns:\n            pointer_list_new (list):\n                List of pointers to populations and projections of the new model\n    
    \"\"\"\n        ### update pointers\n        pointer_list_new: list[Population | Projection] = []\n        for pointer in pointer_list:\n            compartment_name = pointer.name\n            if isinstance(pointer, Population):\n                pointer_list_new.append(get_population(compartment_name))\n            elif isinstance(pointer, Projection):\n                pointer_list_new.append(get_projection(compartment_name))\n            else:\n                raise TypeError(\n                    f\"Pointer {pointer} is neither a Population nor a Projection\"\n                )\n        return pointer_list_new\n
"},{"location":"main/dbs_stimulator/#CompNeuroPy.dbs.DBSstimulator.__init__","title":"__init__(stimulated_population, population_proportion=1.0, excluded_populations_list=[], dbs_depolarization=0.0, orthodromic=False, antidromic=False, efferents=False, afferents=False, passing_fibres=False, passing_fibres_list=[], passing_fibres_strength=1.0, sum_branches=True, dbs_pulse_frequency_Hz=130.0, dbs_pulse_width_us=300.0, axon_spikes_per_pulse=1.0, axon_rate_amp=1.0, seed=None, auto_implement=False, model=None)","text":"

Initialize DBS stimulator.

Warning

Do this before compiling the model!

Parameters:

Name Type Description Default stimulated_population Population

Population which is stimulated by DBS

required population_proportion float

Proportion of the stimulated population which is affected by DBS, neurons are distributed randomly. Default: 1.0.

1.0 excluded_populations_list list

List of populations which are excluded from DBS effects, they are not affected and their axons do not generate axon spikes. Default: [].

[] dbs_depolarization float

Depolarization effect of the DBS pulse to local soma. Default: 0.0.

0.0 orthodromic bool

If True, DBS causes axonal spikes which are forwarded orthodromically. Default: False.

False antidromic bool

If True, DBS causes axonal spikes which are forwarded antidromically, only available in spiking networks. Default: False.

False efferents bool

If True, DBS affects the efferents of the stimulated population (orthodromic and/or antidromic have to be True too). Default: False.

False afferents bool

If True, DBS affects the afferents of the stimulated population (orthodromic and/or antidromic have to be True too). Default: False.

False passing_fibres bool

If True, DBS affects the passing fibres of the stimulated region defined in passing_fibres_list (orthodromic and/or antidromic have to be True too). Default: False.

False passing_fibres_list list of Projections

List of projections which pass the DBS stimulated region and therefore are activated by DBS. Default: [], also set passing_fibres True!

[] passing_fibres_strength float or list of float

Single value or list of float values between 0 and 1 defining how strong the passing fibres are activated by DBS (0: not activated, 1: fully activated like the projections in the DBS stimulated region). Default: 1.

1.0 sum_branches bool

If True, the antidromic_prob of a presynaptic population (defining how many axon spikes affect the pop antidromically) of passing fibres is the sum of the passing_fibres_strengths of the single axon branches. Default: True.

True dbs_pulse_frequency_Hz float

Frequency of the DBS pulse. Default: 130 Hz.

130.0 dbs_pulse_width_us float

Width of the DBS pulse. Default: 300 us.

300.0 axon_spikes_per_pulse float

Number of average axon spikes per DBS pulse. Default: 1.

1.0 axon_rate_amp float or dict of float

Similar to prob_axon_spike in spiking model. Which rate is forwarded on axons caused by DBS. You can specify this for each population individually by using a dictionary (keys = Population instances) axon_rate_amp = {pop: 1.5} --> the efferent axons of pop forward a rate of 1.5 during DBS (all other affected projections forward the default value). You can specify the default value by using the key \"default\", e.g. {pop: 1.5, \"default\": 1.0} -> efferent axons of all populations except pop forward a rate of 1.0 during DBS. Default: 1.0.

1.0 seed int

Seed for the random distribution of affected neurons based on population_proportion. Default: None.

None auto_implement bool

If True, automatically implement DBS mechanisms to the model. Only supported for Izhikevich spiking models and rate-coded models. Default: False. TODO test what happens with mixed models

False model generate_model

CompNeuroPy model which is used to automatically implement DBS mechanisms, should not be compiled! Default: None, i.e., use all populations and projections of the current magic model

None Source code in src/CompNeuroPy/dbs.py
@check_types()\ndef __init__(\n    self,\n    stimulated_population: Population,\n    population_proportion: float = 1.0,\n    excluded_populations_list: list[Population] = [],\n    dbs_depolarization: float = 0.0,\n    orthodromic: bool = False,\n    antidromic: bool = False,\n    efferents: bool = False,\n    afferents: bool = False,\n    passing_fibres: bool = False,\n    passing_fibres_list: list[Projection] = [],\n    passing_fibres_strength: float | list[float] = 1.0,\n    sum_branches: bool = True,\n    dbs_pulse_frequency_Hz: float = 130.0,\n    dbs_pulse_width_us: float = 300.0,\n    axon_spikes_per_pulse: float = 1.0,\n    axon_rate_amp: float | dict[Population | str, float] = 1.0,\n    seed: int | None = None,\n    auto_implement: bool = False,\n    model: generate_model | None = None,\n) -> None:\n    \"\"\"\n    Initialize DBS stimulator.\n\n    !!! warning\n        Do this before compiling the model!\n\n    Args:\n        stimulated_population (Population):\n            Population which is stimulated by DBS\n        population_proportion (float, optional):\n            Proportion of the stimulated population which is affected by DBS,\n            neurons are distributed randomly. Default: 1.0.\n        excluded_populations_list (list, optional):\n            List of populations which are excluded from DBS effects, they are not\n            affected and their axons do not generate axon spikes. Default: [].\n        dbs_depolarization (float, optional):\n            Depolarization effect of the DBS pulse to local soma. Default: 0.0.\n        orthodromic (bool, optional):\n            If True, DBS causes axonal spikes which are forwarded orthodromically.\n            Default: False.\n        antidromic (bool, optional):\n            If True, DBS causes axonal spikes which are forwarded antidromically,\n            only available in spiking networks. 
Default: False.\n        efferents (bool, optional):\n            If True, DBS affects the efferents of the stimulated population\n            (orthodromic and/or antidromic have to be True too). Default: False.\n        afferents (bool, optional):\n            If True, DBS affects the afferents of the stimulated population\n            (orthodromic and/or antidromic have to be True too). Default: False.\n        passing_fibres (bool, optional):\n            If True, DBS affects the passing fibres of the stimulated region defined\n            in passing_fibres_list (orthodromic and/or antidromic have to be True\n            too). Default: False.\n        passing_fibres_list (list of Projections, optional):\n            List of projections which pass the DBS stimulated region and therefore\n            are activated by DBS. Default: [], also set passing_fibres True!\n        passing_fibres_strength (float or list of float, optional):\n            Single value or list of float values between 0 and 1 defining how strong\n            the passing fibres are activated by DBS (0: not activated, 1: fully\n            activated like the projections in the DBS stimulated region).\n            Default: 1.\n        sum_branches (bool, optional):\n            If True, the antidromic_prob of a presynaptic population (defining how\n            many axon spikes affect the pop antidromically) of passing fibres is\n            the sum of the passing_fibres_strengths of the single axon branches.\n            Default: True.\n        dbs_pulse_frequency_Hz (float, optional):\n            Frequency of the DBS pulse. Default: 130 Hz.\n        dbs_pulse_width_us (float, optional):\n            Width of the DBS pulse. Default: 300 us.\n        axon_spikes_per_pulse (float, optional):\n            Number of average axon spikes per DBS pulse. Default: 1.\n        axon_rate_amp (float or dict of float, optional):\n            Similar to prob_axon_spike in spiking model. 
Which rate is forwarded on\n            axons caused by DBS. You can specify this for each population\n            individually by using a dictionary (keys = Population instances)\n            axon_rate_amp = {pop: 1.5} --> the efferent axons of pop forward a rate\n            of 1.5 during DBS (all other affected projections forward the default\n            value)\n            You can specify the default value by using the key \"default\", e.g.\n            {pop: 1.5, \"default\": 1.0} -> efferent axons of all populations except\n            pop forward a rate of 1.0 during DBS. Default: 1.0.\n        seed (int, optional):\n            Seed for the random distribution of affected neurons based on\n            population_proportion. Default: None.\n        auto_implement (bool, optional):\n            If True, automatically implement DBS mechanisms to the model. Only\n            supported for Izhikevich spiking models and rate-coded models.\n            Default: False.\n            TODO test what happens with mixed models\n        model (generate_model, optional):\n            CompNeuroPy model which is used to automatically implement DBS\n            mechanisms, should not be compiled!. 
Default: None, i.e., use all\n            populations and projections of the current magic model\n    \"\"\"\n\n    if auto_implement:\n        ### recreate model with DBS mechanisms\n        ### give all variables containing Populations and Projections\n        ### and also recreate them during recreating the model\n        ### variables are:\n        ### - stimulated_population\n        ### - excluded_populations_list\n        ### - passing_fibres_list\n        ### - axon_rate_amp\n        if not isinstance(model, type(None)):\n            ### CompNeuroPy model given\n            ### recreate model with DBS mechanisms\n            create_dbs_model_obj = _CreateDBSmodelcnp(\n                model,\n                stimulated_population,\n                excluded_populations_list,\n                passing_fibres_list,\n                axon_rate_amp,\n            )\n            ### get the new CompNeuroPy model\n            model = create_dbs_model_obj.model\n        else:\n            ### no CompNeuroPy model given --> use all populations and projections of the current magic model\n            ### recreate model with DBS mechanisms\n            create_dbs_model_obj = _CreateDBSmodel(\n                stimulated_population,\n                excluded_populations_list,\n                passing_fibres_list,\n                axon_rate_amp,\n            )\n        ### get the new variables containing Populations and Projections\n        stimulated_population = create_dbs_model_obj.stimulated_population\n        excluded_populations_list = create_dbs_model_obj.excluded_populations_list\n        passing_fibres_list = create_dbs_model_obj.passing_fibres_list\n        axon_rate_amp = create_dbs_model_obj.axon_rate_amp\n\n    ### set parameters\n    self.stimulated_population = stimulated_population\n    self.population_proportion = population_proportion\n    self.excluded_populations_list = excluded_populations_list\n    self.dbs_depolarization = dbs_depolarization\n    
self.orthodromic = orthodromic\n    self.antidromic = antidromic\n    self.efferents = efferents\n    self.afferents = afferents\n    self.passing_fibres = passing_fibres\n    self.passing_fibres_list = passing_fibres_list\n    self.passing_fibres_strength = passing_fibres_strength\n    self.sum_branches = sum_branches\n    self.dbs_pulse_width_us = dbs_pulse_width_us\n    self.axon_spikes_per_pulse = axon_spikes_per_pulse\n    self.axon_rate_amp = axon_rate_amp\n    self.seed = seed\n    self.model = model\n\n    ### ANNarchy constants for DBS\n    self._set_constants(dbs_pulse_frequency_Hz)\n\n    ### randomly select affected neurons i.e. create dbs_on_array\n    self.dbs_on_array = self._create_dbs_on_array(population_proportion, seed)\n
"},{"location":"main/dbs_stimulator/#CompNeuroPy.dbs.DBSstimulator.on","title":"on(population_proportion=None, dbs_depolarization=None, orthodromic=None, antidromic=None, efferents=None, afferents=None, passing_fibres=None, passing_fibres_strength=None, sum_branches=None, axon_spikes_per_pulse=None, axon_rate_amp=None, seed=None)","text":"

Activate DBS.

Parameters:

Name Type Description Default population_proportion float

Proportion of the stimulated population which is affected by DBS, neurons are distributed randomly. Default: None, i.e., use value from initialization

None dbs_depolarization float

Depolarization effect of the DBS pulse to local soma. Default: None, i.e., use value from initialization

None orthodromic bool

If True, DBS causes axonal spikes which are forwarded orthodromically. Default: None, i.e., use value from initialization

None antidromic bool

If True, DBS causes axonal spikes which are forwarded antidromically, only available in spiking networks. Default: None, i.e., use value from initialization

None efferents bool

If True, DBS affects the efferents of the stimulated population (orthodromic and/or antidromic have to be True too). Default: None, i.e., use value from initialization

None afferents bool

If True, DBS affects the afferents of the stimulated population (orthodromic and/or antidromic have to be True too). Default: None, i.e., use value from initialization

None passing_fibres bool

If True, DBS affects the passing fibres of the stimulated region defined in passing_fibres_list (orthodromic and/or antidromic have to be True too). Default: None, i.e., use value from initialization

None passing_fibres_strength float | list[float]

Single value or list of float values between 0 and 1 defining how strong the passing fibres are activated by DBS (0: not activated, 1: fully activated like the projections in the DBS stimulated region). Default: None, i.e., use value from initialization

None sum_branches bool

If True, the antidromic_prob of a presynaptic population (defining how many axon spikes affect the pop antidromically) of passing fibres is the sum of the passing_fibres_strengths of the single axon branches. Default: None, i.e., use value from initialization

None axon_spikes_per_pulse float

Number of average axon spikes per DBS pulse. Default: None, i.e., use value from initialization

None axon_rate_amp float | dict[Population | str, float]

Similar to prob_axon_spike in spiking model. Which rate is forwarded on axons caused by DBS. You can specify this for each population individually by using a dictionary (keys = Population instances) axon_rate_amp = {pop: 1.5} --> the efferent axons of pop forward a rate of 1.5 during DBS (all other affected projections forward the default value). You can specify the default value by using the key \"default\", e.g. {pop: 1.5, \"default\": 1.0} -> efferent axons of all populations except pop forward a rate of 1.0 during DBS. Default: None, i.e., use value from initialization

None seed int

Seed for the random number generator. Default: None, i.e., use value from initialization

None Source code in src/CompNeuroPy/dbs.py
@check_types()\ndef on(\n    self,\n    population_proportion: float | None = None,\n    dbs_depolarization: float | None = None,\n    orthodromic: bool | None = None,\n    antidromic: bool | None = None,\n    efferents: bool | None = None,\n    afferents: bool | None = None,\n    passing_fibres: bool | None = None,\n    passing_fibres_strength: float | list[float] | None = None,\n    sum_branches: bool | None = None,\n    axon_spikes_per_pulse: float | None = None,\n    axon_rate_amp: float | dict[Population | str, float] | None = None,\n    seed: int | None = None,\n):\n    \"\"\"\n    Activate DBS.\n\n    Args:\n        population_proportion (float, optional):\n            Proportion of the stimulated population which is affected by DBS,\n            neurons are distributed randomly. Default: None, i.e., use value from\n            initialization\n        dbs_depolarization (float, optional):\n            Depolarization effect of the DBS pulse to local soma. Default: None,\n            i.e., use value from initialization\n        orthodromic (bool, optional):\n            If True, DBS causes axonal spikes which are forwarded orthodromically.\n            Default: None, i.e., use value from initialization\n        antidromic (bool, optional):\n            If True, DBS causes axonal spikes which are forwarded antidromically,\n            only available in spiking networks. Default: None, i.e., use value from\n            initialization\n        efferents (bool, optional):\n            If True, DBS affects the efferents of the stimulated population\n            (orthodromic and/or antidromic have to be True too). Default: None,\n            i.e., use value from initialization\n        afferents (bool, optional):\n            If True, DBS affects the afferents of the stimulated population\n            (orthodromic and/or antidromic have to be True too). 
Default: None,\n            i.e., use value from initialization\n        passing_fibres (bool, optional):\n            If True, DBS affects the passing fibres of the stimulated region defined\n            in passing_fibres_list (orthodromic and/or antidromic have to be True\n            too). Default: None, i.e., use value from initialization\n        passing_fibres_strength (float | list[float], optional):\n            Single value or list of float values between 0 and 1 defining how strong\n            the passing fibres are activated by DBS (0: not activated, 1: fully\n            activated like the projections in the DBS stimulated region).\n            Default: None, i.e., use value from initialization\n        sum_branches (bool, optional):\n            If True, the antidromic_prob of a presynaptic population (defining how\n            many axon spikes affect the pop antidromically) of passing fibres is\n            the sum of the passing_fibres_strengths of the single axon branches.\n            Default: None, i.e., use value from initialization\n        axon_spikes_per_pulse (float, optional):\n            Number of average axon spikes per DBS pulse. Default: None, i.e., use\n            value from initialization\n        axon_rate_amp (float | dict[Population | str, float], optional):\n            Similar to prob_axon_spike in spiking model. Which rate is forwarded on\n            axons caused by DBS. You can specify this for each population\n            individually by using a dictionary (keys = Population instances)\n            axon_rate_amp = {pop: 1.5} --> the efferent axons of pop forward a rate\n            of 1.5 during DBS (all other affected projections forward the default\n            value). You can specify the default value by using the key \"default\",\n            e.g. {pop: 1.5, \"default\": 1.0} -> efferent axons of all populations\n            except pop forward a rate of 1.0 during DBS. 
Default: None, i.e., use\n            value from initialization\n        seed (int, optional):\n            Seed for the random number generator. Default: None, i.e., use value\n            from initialization\n    \"\"\"\n\n    ### set DBS on for all populations\n    ### also sets the proportion of affected neurons, call this before set_depolarization and set_axon_spikes!\n    self._set_dbs_on(population_proportion, seed)\n\n    ### set depolarization of population\n    self._set_depolarization(dbs_depolarization)\n\n    ### set axon spikes forwarding\n    self._set_axon_spikes(\n        orthodromic,\n        antidromic,\n        efferents,\n        afferents,\n        passing_fibres,\n        passing_fibres_strength,\n        sum_branches,\n        axon_spikes_per_pulse,\n        axon_rate_amp,\n    )\n
"},{"location":"main/dbs_stimulator/#CompNeuroPy.dbs.DBSstimulator.off","title":"off()","text":"

Deactivate DBS.

Source code in src/CompNeuroPy/dbs.py
def off(self):\n    \"\"\"\n    Deactivate DBS.\n    \"\"\"\n    ### set DBS off for all populations\n    for pop in populations():\n        pop.dbs_on = 0\n        pop.prob_axon_spike = 0\n        pop.axon_rate_amp = 0\n\n    ### deactivate DBS axon transmission\n    self._deactivate_axon_DBS()\n
"},{"location":"main/dbs_stimulator/#CompNeuroPy.dbs.DBSstimulator.update_pointers","title":"update_pointers(pointer_list)","text":"

Update pointers to populations and projections after recreating the model.

Parameters:

Name Type Description Default pointer_list list

List of pointers to populations and projections

required

Returns:

Name Type Description pointer_list_new list

List of pointers to populations and projections of the new model

Source code in src/CompNeuroPy/dbs.py
def update_pointers(self, pointer_list):\n    \"\"\"\n    Update pointers to populations and projections after recreating the model.\n\n    Args:\n        pointer_list (list):\n            List of pointers to populations and projections\n\n    Returns:\n        pointer_list_new (list):\n            List of pointers to populations and projections of the new model\n    \"\"\"\n    ### update pointers\n    pointer_list_new: list[Population | Projection] = []\n    for pointer in pointer_list:\n        compartment_name = pointer.name\n        if isinstance(pointer, Population):\n            pointer_list_new.append(get_population(compartment_name))\n        elif isinstance(pointer, Projection):\n            pointer_list_new.append(get_projection(compartment_name))\n        else:\n            raise TypeError(\n                f\"Pointer {pointer} is neither a Population nor a Projection\"\n            )\n    return pointer_list_new\n
"},{"location":"main/define_experiment/","title":"Define Experiments","text":""},{"location":"main/define_experiment/#CompNeuroPy.experiment.CompNeuroExp","title":"CompNeuroPy.experiment.CompNeuroExp","text":"

Experiment combining simulations and recordings.

Use this class as a parent class for your experiment. You have to additionally implement a run function which runs the simulations and controls the recordings. The run function should return the results of the experiment by calling the results function of the CompNeuroExp class.

Attributes:

Name Type Description monitors CompNeuroMonitors

CompNeuroMonitors object for recordings

data dict

dict for storing optional data

Examples:

from CompNeuroPy import CompNeuroExp\nfrom ANNarchy import simulate\n\nclass MyExperiment(CompNeuroExp):\n    def run(self):\n        # run simulations and control recordings\n        self.monitors.start()\n        simulate(1000)\n        self.reset()\n        simulate(1000)\n        # store optional data\n        self.data[\"duration\"] = 2000\n        # return results\n        return self.results()\n
Source code in src/CompNeuroPy/experiment.py
class CompNeuroExp:\n    \"\"\"\n    Experiment combining simulations and recordings.\n\n    Use this class as a parent class for your experiment. You have to additionally\n    implement a run function which runs the simulations and controlls the recordings.\n    The run function should return the results of the experiment by calling the results\n    function of the CompNeuroExp class.\n\n    Attributes:\n        monitors (CompNeuroMonitors):\n            CompNeuroMonitors object for recordings\n        data (dict):\n            dict for storing optional data\n\n    Examples:\n        ```python\n        from CompNeuroPy import CompNeuroExp\n        from ANNarchy import simulate\n\n        class MyExperiment(CompNeuroExp):\n            def run(self):\n                # run simulations and control recordings\n                self.monitors.start()\n                simulate(1000)\n                self.reset()\n                simulate(1000)\n                # store optional data\n                self.data[\"duration\"] = 2000\n                # return results\n                return self.results()\n        ```\n    \"\"\"\n\n    def __init__(\n        self,\n        monitors: CompNeuroMonitors | None = None,\n    ):\n        \"\"\"\n        Initialize the experiment.\n\n        Args:\n            monitors (CompNeuroMonitors):\n                CompNeuroMonitors object for recordings\n        \"\"\"\n        self.recordings = {}  # save dict for monitor recordings\n        self.monitors = monitors\n        self.data = {}  # dict for optional data\n\n    def reset(\n        self,\n        populations=True,\n        projections=False,\n        synapses=False,\n        model=True,\n        parameters=True,\n    ):\n        \"\"\"\n        Reset the ANNarchy model and monitors and the CompNeuroMonitors used for the\n        experiment.\n\n        !!! 
warning\n            If you want the network to have the same state at the beginning of each\n            experiment run, you should call this function at the beginning of the run\n            function of the CompNeuroExp class! If you only want to have the same time\n            for the network at the beginning of each experiment run, set populations,\n            projections, and synapses to False.\n\n        Args:\n            populations (bool, optional):\n                reset populations. Defaults to True.\n            projections (bool, optional):\n                reset projections. Defaults to False.\n            synapses (bool, optional):\n                reset synapses. Defaults to False.\n            model (bool, optional):\n                If False, do ignore the arguments populations, projections, and\n                synapses (the network state doesn't change) and only reset the\n                CompNeuroMonitors Default: True.\n            parameters (bool, optional):\n                If False, do not reset the parameters of the model. Default: True.\n        \"\"\"\n        reset_kwargs = {}\n        reset_kwargs[\"populations\"] = populations\n        reset_kwargs[\"projections\"] = projections\n        reset_kwargs[\"synapses\"] = synapses\n        reset_kwargs[\"monitors\"] = True\n\n        ### reset CompNeuroMonitors and ANNarchy model\n        if self.monitors is not None:\n            self.monitors.reset(model=model, parameters=parameters, **reset_kwargs)\n        elif model is True:\n            if parameters is False:\n                ### if parameters=False, get parameters before reset and set them after\n                ### reset\n                parameters = mf._get_all_parameters()\n            reset(**reset_kwargs)\n            if parameters is False:\n                ### if parameters=False, set parameters after reset\n                mf._set_all_parameters(parameters)\n\n    def results(self):\n        \"\"\"\n        !!! 
warning\n            Call this function at the end of the run function of the CompNeuroExp class!\n\n        !!! warning\n            Calling this function resets the CompNeuroMonitors. For example, if you\n            simulate two recording chunks in the run function and you run the experiment\n            twice, you will get two recording chunks for each experiment run (not two\n            for the first and four for the second run). But ANNarchy is not resetted\n            automatically! So the network time and state (activity etc.) at the\n            beginning of the second run is the same as at the end of the first run. To\n            prevent this use the reset function of the CompNeuroExp class.\n\n        Returns:\n            results_obj (CompNeuroExp._ResultsCl):\n                Object with attributes:\n                    recordings (list):\n                        list of recordings\n                    recording_times (recording_times_cl):\n                        recording times object\n                    mon_dict (dict):\n                        dict of recorded variables of the monitors\n                    data (dict):\n                        dict with optional data stored during the experiment\n        \"\"\"\n        obj = self._ResultsCl()\n        if self.monitors is not None:\n            (\n                obj.recordings,\n                obj.recording_times,\n            ) = self.monitors.get_recordings_and_clear()\n            obj.mon_dict = self.monitors.mon_dict\n        else:\n            obj.recordings = []\n            obj.recording_times = None\n            obj.mon_dict = {}\n        obj.data = self.data\n\n        return obj\n\n    class _ResultsCl:\n        \"\"\"\n        Class for storing the results of the experiment.\n\n        Attributes:\n            recordings (list):\n                list of recordings\n            recording_times (recording_times_cl):\n                recording times object\n            mon_dict 
(dict):\n                dict of recorded variables of the monitors\n            data (dict):\n                dict with optional data stored during the experiment\n        \"\"\"\n\n        def __init__(self) -> None:\n            self.recordings: list\n            self.recording_times: RecordingTimes\n            self.mon_dict: dict\n            self.data: dict\n\n    def run(self) -> _ResultsCl:\n        \"\"\"\n        !!! warning\n            This function has to be implemented by the user!\n        \"\"\"\n        raise NotImplementedError(\n            \"\"\"\n                You have to implement a run function which runs the simulations and\n                controlls the recordings. The run function should return the results of\n                the experiment by calling the results function of the CompNeuroExp class.\n            \"\"\"\n        )\n
"},{"location":"main/define_experiment/#CompNeuroPy.experiment.CompNeuroExp.__init__","title":"__init__(monitors=None)","text":"

Initialize the experiment.

Parameters:

Name Type Description Default monitors CompNeuroMonitors

CompNeuroMonitors object for recordings

None Source code in src/CompNeuroPy/experiment.py
def __init__(\n    self,\n    monitors: CompNeuroMonitors | None = None,\n):\n    \"\"\"\n    Initialize the experiment.\n\n    Args:\n        monitors (CompNeuroMonitors):\n            CompNeuroMonitors object for recordings\n    \"\"\"\n    self.recordings = {}  # save dict for monitor recordings\n    self.monitors = monitors\n    self.data = {}  # dict for optional data\n
"},{"location":"main/define_experiment/#CompNeuroPy.experiment.CompNeuroExp.reset","title":"reset(populations=True, projections=False, synapses=False, model=True, parameters=True)","text":"

Reset the ANNarchy model and monitors and the CompNeuroMonitors used for the experiment.

Warning

If you want the network to have the same state at the beginning of each experiment run, you should call this function at the beginning of the run function of the CompNeuroExp class! If you only want to have the same time for the network at the beginning of each experiment run, set populations, projections, and synapses to False.

Parameters:

Name Type Description Default populations bool

reset populations. Defaults to True.

True projections bool

reset projections. Defaults to False.

False synapses bool

reset synapses. Defaults to False.

False model bool

If False, ignore the arguments populations, projections, and synapses (the network state doesn't change) and only reset the CompNeuroMonitors. Default: True.

True parameters bool

If False, do not reset the parameters of the model. Default: True.

True Source code in src/CompNeuroPy/experiment.py
def reset(\n    self,\n    populations=True,\n    projections=False,\n    synapses=False,\n    model=True,\n    parameters=True,\n):\n    \"\"\"\n    Reset the ANNarchy model and monitors and the CompNeuroMonitors used for the\n    experiment.\n\n    !!! warning\n        If you want the network to have the same state at the beginning of each\n        experiment run, you should call this function at the beginning of the run\n        function of the CompNeuroExp class! If you only want to have the same time\n        for the network at the beginning of each experiment run, set populations,\n        projections, and synapses to False.\n\n    Args:\n        populations (bool, optional):\n            reset populations. Defaults to True.\n        projections (bool, optional):\n            reset projections. Defaults to False.\n        synapses (bool, optional):\n            reset synapses. Defaults to False.\n        model (bool, optional):\n            If False, do ignore the arguments populations, projections, and\n            synapses (the network state doesn't change) and only reset the\n            CompNeuroMonitors Default: True.\n        parameters (bool, optional):\n            If False, do not reset the parameters of the model. 
Default: True.\n    \"\"\"\n    reset_kwargs = {}\n    reset_kwargs[\"populations\"] = populations\n    reset_kwargs[\"projections\"] = projections\n    reset_kwargs[\"synapses\"] = synapses\n    reset_kwargs[\"monitors\"] = True\n\n    ### reset CompNeuroMonitors and ANNarchy model\n    if self.monitors is not None:\n        self.monitors.reset(model=model, parameters=parameters, **reset_kwargs)\n    elif model is True:\n        if parameters is False:\n            ### if parameters=False, get parameters before reset and set them after\n            ### reset\n            parameters = mf._get_all_parameters()\n        reset(**reset_kwargs)\n        if parameters is False:\n            ### if parameters=False, set parameters after reset\n            mf._set_all_parameters(parameters)\n
"},{"location":"main/define_experiment/#CompNeuroPy.experiment.CompNeuroExp.results","title":"results()","text":"

Warning

Call this function at the end of the run function of the CompNeuroExp class!

Warning

Calling this function resets the CompNeuroMonitors. For example, if you simulate two recording chunks in the run function and you run the experiment twice, you will get two recording chunks for each experiment run (not two for the first and four for the second run). But ANNarchy is not reset automatically! So the network time and state (activity etc.) at the beginning of the second run is the same as at the end of the first run. To prevent this, use the reset function of the CompNeuroExp class.

Returns:

Name Type Description results_obj _ResultsCl

Object with attributes: recordings (list): list of recordings recording_times (recording_times_cl): recording times object mon_dict (dict): dict of recorded variables of the monitors data (dict): dict with optional data stored during the experiment

Source code in src/CompNeuroPy/experiment.py
def results(self):\n    \"\"\"\n    !!! warning\n        Call this function at the end of the run function of the CompNeuroExp class!\n\n    !!! warning\n        Calling this function resets the CompNeuroMonitors. For example, if you\n        simulate two recording chunks in the run function and you run the experiment\n        twice, you will get two recording chunks for each experiment run (not two\n        for the first and four for the second run). But ANNarchy is not resetted\n        automatically! So the network time and state (activity etc.) at the\n        beginning of the second run is the same as at the end of the first run. To\n        prevent this use the reset function of the CompNeuroExp class.\n\n    Returns:\n        results_obj (CompNeuroExp._ResultsCl):\n            Object with attributes:\n                recordings (list):\n                    list of recordings\n                recording_times (recording_times_cl):\n                    recording times object\n                mon_dict (dict):\n                    dict of recorded variables of the monitors\n                data (dict):\n                    dict with optional data stored during the experiment\n    \"\"\"\n    obj = self._ResultsCl()\n    if self.monitors is not None:\n        (\n            obj.recordings,\n            obj.recording_times,\n        ) = self.monitors.get_recordings_and_clear()\n        obj.mon_dict = self.monitors.mon_dict\n    else:\n        obj.recordings = []\n        obj.recording_times = None\n        obj.mon_dict = {}\n    obj.data = self.data\n\n    return obj\n
"},{"location":"main/define_experiment/#CompNeuroPy.experiment.CompNeuroExp.run","title":"run()","text":"

Warning

This function has to be implemented by the user!

Source code in src/CompNeuroPy/experiment.py
def run(self) -> _ResultsCl:\n    \"\"\"\n    !!! warning\n        This function has to be implemented by the user!\n    \"\"\"\n    raise NotImplementedError(\n        \"\"\"\n            You have to implement a run function which runs the simulations and\n            controlls the recordings. The run function should return the results of\n            the experiment by calling the results function of the CompNeuroExp class.\n        \"\"\"\n    )\n
"},{"location":"main/generate_models/","title":"Generate Models","text":""},{"location":"main/generate_models/#introduction","title":"Introduction","text":"

One can create a CompNeuroPy-model using the CompNeuroModel class. The CompNeuroModel class takes as one argument the model_creation_function. In this function, a classical ANNarchy model is created (populations, projections). The CompNeuroModel class only adds a framework to the model. It is necessary for a CompNeuroPy-model to define unique names for all populations and projections. Models are created in three steps:

  1. model initialization: the initialization of the CompNeuroModel object, initializes the framework of the model without creating the ANNarchy objects (populations, projections)
  2. model creation: create the ANNarchy objects (populations, projections), i.e., run the model_creation_function
  3. model compilation: compile all created models
"},{"location":"main/generate_models/#example","title":"Example","text":"
from CompNeuroPy import CompNeuroModel\nmy_model = CompNeuroModel(model_creation_function=create_model,  ### the most important part, this function creates the model (populations, projections)\n                          model_kwargs={'a':1, 'b':2},           ### define the two arguments a and b of function create_model\n                          name='my_model',                       ### you can give the model a name\n                          description='my simple example model', ### you can give the model a description\n                          do_create=True,                        ### create the model directly\n                          do_compile=True,                       ### let the model (and all models created before) compile directly\n                          compile_folder_name='my_model')        ### name of the saved compilation folder\n

The following function could be the corresponding model_creation_function:

from ANNarchy import Population, Izhikevich\ndef create_model(a, b):\n    pop = Population(geometry=a, neuron=Izhikevich, name='Izh_pop_a') ### first population, size a\n    pop.b = 0                                                         ### some parameter adjustment\n    Population(geometry=b, neuron=Izhikevich, name='Izh_pop_b')       ### second population, size b\n

Here, two populations are created (both use the built-in Izhikevich neuron model of ANNarchy). The function does not require a return value. It is important that all populations and projections have unique names.

A more detailed example is available in the Examples.

"},{"location":"main/generate_models/#CompNeuroPy.generate_model.CompNeuroModel","title":"CompNeuroPy.generate_model.CompNeuroModel","text":"

Class for creating and compiling a model.

Attributes:

Name Type Description name str

name of the model

description str

description of the model

model_creation_function function

function which creates the model

compile_folder_name str

name of the folder in which the model is compiled

model_kwargs dict

keyword arguments for model_creation_function

populations list

list of names of all populations of the model

projections list

list of names of all projections of the model

created bool

True if the model is created

compiled bool

True if the model is compiled

attribute_df pandas dataframe

dataframe containing all attributes of the model compartments

Source code in src/CompNeuroPy/generate_model.py
class CompNeuroModel:\n    \"\"\"\n    Class for creating and compiling a model.\n\n    Attributes:\n        name (str):\n            name of the model\n        description (str):\n            description of the model\n        model_creation_function (function):\n            function which creates the model\n        compile_folder_name (str):\n            name of the folder in which the model is compiled\n        model_kwargs (dict):\n            keyword arguments for model_creation_function\n        populations (list):\n            list of names of all populations of the model\n        projections (list):\n            list of names of all projections of the model\n        created (bool):\n            True if the model is created\n        compiled (bool):\n            True if the model is compiled\n        attribute_df (pandas dataframe):\n            dataframe containing all attributes of the model compartments\n    \"\"\"\n\n    _initialized_models = {}\n    _compiled_models = {}\n    _compiled_models_updated = False\n\n    @check_types()\n    def __init__(\n        self,\n        model_creation_function: Callable,\n        model_kwargs: dict | None = None,\n        name: str = \"model\",\n        description: str = \"\",\n        do_create: bool = True,\n        do_compile: bool = True,\n        compile_folder_name: str = \"annarchy\",\n    ):\n        \"\"\"\n        Initializes the CompNeuroModel class.\n\n        Args:\n            model_creation_function (function):\n                Function which creates the model.\n            model_kwargs (dict):\n                Keyword arguments for model_creation_function. Default: None.\n            name (str):\n                Name of the model. Default: \"model\".\n            description (str):\n                Description of the model. Default: \"\".\n            do_create (bool):\n                If True the model is created directly. 
Default: True.\n            do_compile (bool):\n                If True the model is compiled directly. Default: True.\n            compile_folder_name (str):\n                Name of the folder in which the model is compiled. Default: \"annarchy\".\n        \"\"\"\n        self.name = name\n        if name == \"model\":\n            self.name = name + str(self._nr_models())\n        self.description = description\n        self.model_creation_function = model_creation_function\n        self.compile_folder_name = compile_folder_name\n        self.model_kwargs = model_kwargs\n        self.populations = []\n        self.projections = []\n        self.created = False\n        self.compiled = False\n        self._attribute_df = None\n        self._attribute_df_compiled = False\n        if do_create:\n            self.create(do_compile=do_compile, compile_folder_name=compile_folder_name)\n\n    @property\n    def compiled(self):\n        \"\"\"\n        True if the model is compiled.\n        \"\"\"\n        ### check if ANNarchy was compiled and _compiled_models is not updated yet\n        if mf.annarchy_compiled() and not self._compiled_models_updated:\n            self._update_compiled_models()\n        return self._compiled_models[self.name]\n\n    @compiled.setter\n    def compiled(self, value):\n        \"\"\"\n        Setter for compiled property.\n        \"\"\"\n        self._compiled_models[self.name] = value\n\n    @property\n    def created(self):\n        \"\"\"\n        True if the model is created.\n        \"\"\"\n        return self._initialized_models[self.name]\n\n    @created.setter\n    def created(self, value):\n        \"\"\"\n        Setter for created property.\n        \"\"\"\n        self._initialized_models[self.name] = value\n\n    @property\n    def attribute_df(self):\n        \"\"\"\n        Dataframe containing all attributes of the model compartments.\n        \"\"\"\n        ### check if ANNarchy was compiled and _attribute_df is not 
updated yet\n        if mf.annarchy_compiled() and not self._attribute_df_compiled:\n            self._update_attribute_df_weights()\n        return self._attribute_df\n\n    def _update_compiled_models(self):\n        \"\"\"\n        Updates _compiled_models to True for all models.\n        \"\"\"\n        ### update _compiled_models\n        for key in self._compiled_models.keys():\n            self._compiled_models[key] = True\n        self._compiled_models_updated = True\n\n    def _update_attribute_df_weights(self):\n        \"\"\"\n        Updates _attribute_df for the weights of all projections.\n        \"\"\"\n        for proj_name in self.projections:\n            values = get_projection(proj_name).w\n            self._update_attribute_df(\n                compartment=proj_name, parameter_name=\"w\", parameter_value=values\n            )\n        self._attribute_df_compiled = True\n\n    def compile(self, compile_folder_name=None):\n        \"\"\"\n        Compiles a created model.\n\n        Args:\n            compile_folder_name (str, optional):\n                Name of the folder in which the model is compiled. Default: value from\n                initialization.\n        \"\"\"\n        ### check if this model is created\n        if self.created:\n            if compile_folder_name == None:\n                compile_folder_name = self.compile_folder_name\n\n            ### check if other models were initialized but not created --> warn that they are not compiled\n            not_created_model_list = self._check_if_models_created()\n            if len(not_created_model_list) > 0:\n                print(\n                    \"\\nWARNING during compile of model \"\n                    + self.name\n                    + \": There are initialized models which are not created, thus not compiled! 
models:\\n\"\n                    + \"\\n\".join(not_created_model_list)\n                    + \"\\n\"\n                )\n            mf.compile_in_folder(compile_folder_name)\n            self.compiled = True\n\n            ### update attribute_df to compiled state, since weights are only available\n            ### after compilation\n            self._update_attribute_df_weights()\n        else:\n            print(\"\\n\")\n            assert False, (\n                \"ERROR during compile of model \"\n                + self.name\n                + \": Only compile the model after it has been created!\"\n            )\n\n    def create(self, do_compile=True, compile_folder_name=None):\n        \"\"\"\n        Creates a model and optionally compiles it directly.\n\n        Args:\n            do_compile (bool, optional):\n                If True the model is compiled directly. Default: True.\n            compile_folder_name (str, optional):\n                Name of the folder in which the model is compiled. 
Default: value from\n                initialization.\n        \"\"\"\n        if self.created:\n            print(\"model\", self.name, \"already created!\")\n        else:\n            initial_existing_model = mf.get_full_model()\n            ### create model populations and projections\n            if self.model_kwargs != None:\n                self.model_creation_function(**self.model_kwargs)\n            else:\n                self.model_creation_function()\n            self.created = True\n\n            ### check which populations and projections have been added\n            post_existing_model = mf.get_full_model()\n            ### save only added not all projections/populations\n            for initial_pop in initial_existing_model[\"populations\"]:\n                post_existing_model[\"populations\"].remove(initial_pop)\n            for initial_proj in initial_existing_model[\"projections\"]:\n                post_existing_model[\"projections\"].remove(initial_proj)\n            self.populations = post_existing_model[\"populations\"]\n            self.projections = post_existing_model[\"projections\"]\n\n            ### check if names of populations and projections are unique\n            self._check_double_compartments()\n\n            ### create parameter dictionary\n            self._attribute_df = self._get_attribute_df()\n\n            if do_compile:\n                self.compile(compile_folder_name)\n\n    def _check_if_models_created(self):\n        \"\"\"\n        Checks which CompNeuroPy models are created\n\n        Returns:\n            not_created_model_list (list):\n                list of names of all initialized CompNeuroPy models which are not\n                created yet\n        \"\"\"\n        not_created_model_list = []\n        for key in self._initialized_models.keys():\n            if self._initialized_models[key] == False:\n                not_created_model_list.append(key)\n\n        return not_created_model_list\n\n    def 
_nr_models(self):\n        \"\"\"\n        Returns:\n            nr_models (int):\n                The current number of initialized (not considering \"created\")\n                CompNeuroPy models\n        \"\"\"\n        return len(list(self._initialized_models.keys()))\n\n    def set_param(self, compartment, parameter_name, parameter_value):\n        \"\"\"\n        Sets the specified parameter of the specified compartment.\n\n        Args:\n            compartment (str):\n                name of model compartment\n            parameter_name (str):\n                name of parameter of the compartment\n            parameter_value (number or array-like with shape of compartment geometry):\n                the value or values of the parameter\n\n        Raises:\n            AssertionError: if model is not created\n            AssertionError: if compartment is neither a population nor a projection of\n                the model\n        \"\"\"\n        ### catch if model is not created\n        assert (\n            self.created == True\n        ), f\"ERROR set_param: model {self.name} has to be created before setting parameters!\"\n\n        ### check if compartment is in populations or projections\n        comp_in_pop = compartment in self.populations\n        comp_in_proj = compartment in self.projections\n\n        if comp_in_pop:\n            comp_obj = get_population(compartment)\n        elif comp_in_proj:\n            comp_obj = get_projection(compartment)\n        else:\n            assert (\n                comp_in_pop or comp_in_proj\n            ), f\"ERROR set_param: setting parameter {parameter_name} of compartment {compartment}. 
The compartment is neither a population nor a projection of the model {self.name}!\"\n\n        ### set the parameter value\n        setattr(comp_obj, parameter_name, parameter_value)\n\n        ### update the model attribute_df\n        self._update_attribute_df(compartment, parameter_name, parameter_value)\n\n    def _update_attribute_df(self, compartment, parameter_name, parameter_value):\n        \"\"\"\n        updates the attribute df for a specific paramter\n\n        Args:\n            compartment (str):\n                name of model compartment\n            parameter_name (str):\n                name of parameter of the compartment\n            parameter_value (number or array-like with shape of compartment geometry):\n                the value or values of the parameter\n        \"\"\"\n        paramter_mask = (\n            (self._attribute_df[\"compartment_name\"] == compartment).astype(int)\n            * (self._attribute_df[\"attribute_name\"] == parameter_name).astype(int)\n        ).astype(bool)\n        parameter_idx = np.arange(paramter_mask.size).astype(int)[paramter_mask][0]\n        min_val = af.get_minimum(parameter_value)\n        max_val = af.get_maximum(parameter_value)\n        if min_val != max_val:\n            self._attribute_df.at[parameter_idx, \"value\"] = f\"[{min_val}, {max_val}]\"\n        else:\n            self._attribute_df.at[parameter_idx, \"value\"] = str(min_val)\n        self._attribute_df.at[parameter_idx, \"definition\"] = \"modified\"\n\n    def _check_double_compartments(self):\n        \"\"\"\n        Goes over all compartments of the model and checks if compartment is only a\n        population or a projection and not both.\n\n        Raises:\n            AssertionError: if model is not created\n            AssertionError: if compartment is both a population and a projection\n        \"\"\"\n        ### cach if model is not created, only if created populations and projections are available\n        assert (\n        
    self.created == True\n        ), f\"ERROR model {self.name}: model has to be created before checking for double compartments!\"\n        ### only have to go over populations and check if they are also projections (go over projections not neccessary)\n        pop_in_projections_list = []\n        pop_in_projections = False\n        for pop_name in self.populations:\n            if pop_name in self.projections:\n                pop_in_projections_list.append(pop_name)\n                pop_in_projections = True\n\n        assert (\n            pop_in_projections == False\n        ), f\"ERROR model {self.name}: One or multiple compartments are both population and projection ({pop_in_projections_list}). Rename them!\"\n\n    def _get_attribute_df(self):\n        \"\"\"\n        Creates a dataframe containing the attributes of all model compartments.\n\n        Returns:\n            attribute_df (pandas dataframe):\n                dataframe containing all attributes of the model compartments\n\n        Raises:\n            AssertionError: if model is not created\n        \"\"\"\n        ### cach if model is not created, only if created populations and projections are available\n        assert (\n            self.created == True\n        ), f\"ERROR model {self.name}: model has to be created before creating paramteer dictionary!\"\n\n        ### create empty paramteter dict\n        attribute_dict = {\n            \"compartment_type\": [],\n            \"compartment_name\": [],\n            \"attribute_name\": [],\n            \"value\": [],\n            \"definition\": [],\n        }\n\n        ### fill paramter dict with population attributes\n        for pop in self.populations:\n            for attribute in vars(get_population(pop))[\"attributes\"]:\n                ### store min and max of attribute\n                ### create numpy array with getattr to use numpy min max function\n                values = np.array(\n                    
[getattr(get_population(pop), attribute)]\n                    + [getattr(get_population(pop), attribute)]\n                )\n                attribute_dict[\"compartment_type\"].append(\"population\")\n                attribute_dict[\"compartment_name\"].append(pop)\n                attribute_dict[\"attribute_name\"].append(attribute)\n                if values.min() != values.max():\n                    attribute_dict[\"value\"].append(f\"[{values.min()}, {values.max()}]\")\n                else:\n                    attribute_dict[\"value\"].append(str(values.min()))\n                attribute_dict[\"definition\"].append(\"init\")\n\n        ### fill paramter dict with projection attributes\n        for proj in self.projections:\n            for attribute in vars(get_projection(proj))[\"attributes\"]:\n                ### store min and max of attribute\n                ### create numpy array with getattr to use numpy min max function\n                values = np.array(\n                    [getattr(get_projection(proj), attribute)]\n                    + [getattr(get_projection(proj), attribute)]\n                )\n                attribute_dict[\"compartment_type\"].append(\"projection\")\n                attribute_dict[\"compartment_name\"].append(proj)\n                attribute_dict[\"attribute_name\"].append(attribute)\n                if values.min() != values.max():\n                    attribute_dict[\"value\"].append(f\"[{values.min()}, {values.max()}]\")\n                else:\n                    attribute_dict[\"value\"].append(values.min())\n                attribute_dict[\"definition\"].append(\"init\")\n\n        ### return dataframe\n        return pd.DataFrame(attribute_dict)\n
"},{"location":"main/generate_models/#CompNeuroPy.generate_model.CompNeuroModel.compiled","title":"compiled property writable","text":"

True if the model is compiled.

"},{"location":"main/generate_models/#CompNeuroPy.generate_model.CompNeuroModel.created","title":"created property writable","text":"

True if the model is created.

"},{"location":"main/generate_models/#CompNeuroPy.generate_model.CompNeuroModel.attribute_df","title":"attribute_df property","text":"

Dataframe containing all attributes of the model compartments.

"},{"location":"main/generate_models/#CompNeuroPy.generate_model.CompNeuroModel.__init__","title":"__init__(model_creation_function, model_kwargs=None, name='model', description='', do_create=True, do_compile=True, compile_folder_name='annarchy')","text":"

Initializes the CompNeuroModel class.

Parameters:

Name Type Description Default model_creation_function function

Function which creates the model.

required model_kwargs dict

Keyword arguments for model_creation_function. Default: None.

None name str

Name of the model. Default: \"model\".

'model' description str

Description of the model. Default: \"\".

'' do_create bool

If True the model is created directly. Default: True.

True do_compile bool

If True the model is compiled directly. Default: True.

True compile_folder_name str

Name of the folder in which the model is compiled. Default: \"annarchy\".

'annarchy' Source code in src/CompNeuroPy/generate_model.py
@check_types()\ndef __init__(\n    self,\n    model_creation_function: Callable,\n    model_kwargs: dict | None = None,\n    name: str = \"model\",\n    description: str = \"\",\n    do_create: bool = True,\n    do_compile: bool = True,\n    compile_folder_name: str = \"annarchy\",\n):\n    \"\"\"\n    Initializes the CompNeuroModel class.\n\n    Args:\n        model_creation_function (function):\n            Function which creates the model.\n        model_kwargs (dict):\n            Keyword arguments for model_creation_function. Default: None.\n        name (str):\n            Name of the model. Default: \"model\".\n        description (str):\n            Description of the model. Default: \"\".\n        do_create (bool):\n            If True the model is created directly. Default: True.\n        do_compile (bool):\n            If True the model is compiled directly. Default: True.\n        compile_folder_name (str):\n            Name of the folder in which the model is compiled. Default: \"annarchy\".\n    \"\"\"\n    self.name = name\n    if name == \"model\":\n        self.name = name + str(self._nr_models())\n    self.description = description\n    self.model_creation_function = model_creation_function\n    self.compile_folder_name = compile_folder_name\n    self.model_kwargs = model_kwargs\n    self.populations = []\n    self.projections = []\n    self.created = False\n    self.compiled = False\n    self._attribute_df = None\n    self._attribute_df_compiled = False\n    if do_create:\n        self.create(do_compile=do_compile, compile_folder_name=compile_folder_name)\n
"},{"location":"main/generate_models/#CompNeuroPy.generate_model.CompNeuroModel.compile","title":"compile(compile_folder_name=None)","text":"

Compiles a created model.

Parameters:

Name Type Description Default compile_folder_name str

Name of the folder in which the model is compiled. Default: value from initialization.

None Source code in src/CompNeuroPy/generate_model.py
def compile(self, compile_folder_name=None):\n    \"\"\"\n    Compiles a created model.\n\n    Args:\n        compile_folder_name (str, optional):\n            Name of the folder in which the model is compiled. Default: value from\n            initialization.\n    \"\"\"\n    ### check if this model is created\n    if self.created:\n        if compile_folder_name == None:\n            compile_folder_name = self.compile_folder_name\n\n        ### check if other models were initialized but not created --> warn that they are not compiled\n        not_created_model_list = self._check_if_models_created()\n        if len(not_created_model_list) > 0:\n            print(\n                \"\\nWARNING during compile of model \"\n                + self.name\n                + \": There are initialized models which are not created, thus not compiled! models:\\n\"\n                + \"\\n\".join(not_created_model_list)\n                + \"\\n\"\n            )\n        mf.compile_in_folder(compile_folder_name)\n        self.compiled = True\n\n        ### update attribute_df to compiled state, since weights are only available\n        ### after compilation\n        self._update_attribute_df_weights()\n    else:\n        print(\"\\n\")\n        assert False, (\n            \"ERROR during compile of model \"\n            + self.name\n            + \": Only compile the model after it has been created!\"\n        )\n
"},{"location":"main/generate_models/#CompNeuroPy.generate_model.CompNeuroModel.create","title":"create(do_compile=True, compile_folder_name=None)","text":"

Creates a model and optionally compiles it directly.

Parameters:

Name Type Description Default do_compile bool

If True the model is compiled directly. Default: True.

True compile_folder_name str

Name of the folder in which the model is compiled. Default: value from initialization.

None Source code in src/CompNeuroPy/generate_model.py
def create(self, do_compile=True, compile_folder_name=None):\n    \"\"\"\n    Creates a model and optionally compiles it directly.\n\n    Args:\n        do_compile (bool, optional):\n            If True the model is compiled directly. Default: True.\n        compile_folder_name (str, optional):\n            Name of the folder in which the model is compiled. Default: value from\n            initialization.\n    \"\"\"\n    if self.created:\n        print(\"model\", self.name, \"already created!\")\n    else:\n        initial_existing_model = mf.get_full_model()\n        ### create model populations and projections\n        if self.model_kwargs != None:\n            self.model_creation_function(**self.model_kwargs)\n        else:\n            self.model_creation_function()\n        self.created = True\n\n        ### check which populations and projections have been added\n        post_existing_model = mf.get_full_model()\n        ### save only added not all projections/populations\n        for initial_pop in initial_existing_model[\"populations\"]:\n            post_existing_model[\"populations\"].remove(initial_pop)\n        for initial_proj in initial_existing_model[\"projections\"]:\n            post_existing_model[\"projections\"].remove(initial_proj)\n        self.populations = post_existing_model[\"populations\"]\n        self.projections = post_existing_model[\"projections\"]\n\n        ### check if names of populations and projections are unique\n        self._check_double_compartments()\n\n        ### create parameter dictionary\n        self._attribute_df = self._get_attribute_df()\n\n        if do_compile:\n            self.compile(compile_folder_name)\n
"},{"location":"main/generate_models/#CompNeuroPy.generate_model.CompNeuroModel.set_param","title":"set_param(compartment, parameter_name, parameter_value)","text":"

Sets the specified parameter of the specified compartment.

Parameters:

Name Type Description Default compartment str

name of model compartment

required parameter_name str

name of parameter of the compartment

required parameter_value number or array-like with shape of compartment geometry

the value or values of the parameter

required

Raises:

Type Description AssertionError

if model is not created

AssertionError

if compartment is neither a population nor a projection of the model

Source code in src/CompNeuroPy/generate_model.py
def set_param(self, compartment, parameter_name, parameter_value):\n    \"\"\"\n    Sets the specified parameter of the specified compartment.\n\n    Args:\n        compartment (str):\n            name of model compartment\n        parameter_name (str):\n            name of parameter of the compartment\n        parameter_value (number or array-like with shape of compartment geometry):\n            the value or values of the parameter\n\n    Raises:\n        AssertionError: if model is not created\n        AssertionError: if compartment is neither a population nor a projection of\n            the model\n    \"\"\"\n    ### catch if model is not created\n    assert (\n        self.created == True\n    ), f\"ERROR set_param: model {self.name} has to be created before setting parameters!\"\n\n    ### check if compartment is in populations or projections\n    comp_in_pop = compartment in self.populations\n    comp_in_proj = compartment in self.projections\n\n    if comp_in_pop:\n        comp_obj = get_population(compartment)\n    elif comp_in_proj:\n        comp_obj = get_projection(compartment)\n    else:\n        assert (\n            comp_in_pop or comp_in_proj\n        ), f\"ERROR set_param: setting parameter {parameter_name} of compartment {compartment}. The compartment is neither a population nor a projection of the model {self.name}!\"\n\n    ### set the parameter value\n    setattr(comp_obj, parameter_name, parameter_value)\n\n    ### update the model attribute_df\n    self._update_attribute_df(compartment, parameter_name, parameter_value)\n
"},{"location":"main/generate_simulations/","title":"Generate Simulations","text":""},{"location":"main/generate_simulations/#introduction","title":"Introduction","text":"

A CompNeuroPy-simulation can be created using the CompNeuroSim class. Similar to the CompNeuroModel class, a function must be defined that contains the actual simulation (the simulation_function) and the CompNeuroSim object adds a clear framework. A CompNeuroSim is first initialized and can then be run multiple times.

"},{"location":"main/generate_simulations/#example","title":"Example:","text":"
from CompNeuroPy import CompNeuroSim\nmy_simulation = CompNeuroSim(simulation_function=some_simulation,           ### the most important part, this function defines the simulation\n                            simulation_kwargs={'pop':pop1, 'duration':100}, ### define the two arguments pop and duration of simulation_function\n                            name='my_simulation',                           ### you can give the simulation a name\n                            description='my simple example simulation',     ### you can give the simulation a description\n                            requirements=[req],                             ### a list of requirements for the simulation (here only a single requirement)\n                            kwargs_warning=True,                            ### should a warning be printed if simulation kwargs change in future runs\n                            monitor_object = mon)                           ### the Monitors object which is used to record variables                   \n

A possible simulation_function could be:

def some_simulation(pop, duration=1):\n    get_population(pop).a = 5  ### adjust paramter a of pop\n    get_population(pop).b = 5  ### adjust paramter b of pop\n    simulate(duration)         ### simulate the duration in ms\n\n    ### return some info\n    ### will later be accessible for each run\n    return {'paramter a': a, 'paramter b': b, 'a_x_duration': a*duration} \n

And a corresponding requirement could be:

from CompNeuroPy import ReqPopHasAttr\nreq = {'req':ReqPopHasAttr, 'pop':pop1, 'attr':['a', 'b']}\n
Here, one checks if the population pop1 contains the attributes a and b. The ReqPopHasAttr is a built-in requirements-class of CompNeuroPy (see below).

A more detailed example is available in the Examples.

"},{"location":"main/generate_simulations/#simulation-information","title":"Simulation information","text":"

The function simulation_info() returns a SimInfo object which contains useful information about the simulation runs (see below). The SimInfo object also provides useful analysis functions associated with specific simulation functions. Currently it provides get_current_arr(), which returns arrays containing the input current for each time step of the built-in simulation functions current_step(), current_stim(), and current_ramp().

"},{"location":"main/generate_simulations/#simulation-functions","title":"Simulation functions","text":"

Just define a classic ANNarchy simulation in a function. Within the functions, the ANNarchy functions get_population() and get_projection() can be used to access the populations and projections using the population and projection names provided by a CompNeuroModel. The return value of the simulation function can later be retrieved from the SimInfo object (the info attribute) in a list containing the return value for each run of the simulation.

"},{"location":"main/generate_simulations/#example_1","title":"Example:","text":"
from ANNarchy import simulate, get_population\n\ndef current_step(pop, t1=500, t2=500, a1=0, a2=100):\n    \"\"\"\n        stimulates a given population in two periods with two input currents\n\n        pop: population name of population, which should be stimulated with input current\n             neuron model of population has to contain \"I_app\" as input current in pA\n        t1/t2: times in ms before/after current step\n        a1/a2: current amplitudes before/after current step in pA\n    \"\"\"\n\n    ### save prev input current\n    I_prev = get_population(pop).I_app\n\n    ### first/pre current step simulation\n    get_population(pop).I_app = a1\n    simulate(t1)\n\n    ### second/post current step simulation\n    get_population(pop).I_app = a2\n    simulate(t2)\n\n    ### reset input current to previous value\n    get_population(pop).I_app = I_prev\n\n    ### return some additional information which could be usefull\n    return {'duration':t1+t2}\n
"},{"location":"main/generate_simulations/#requirements","title":"Requirements","text":"

In order to perform simulations with models, the models must almost always fulfill certain requirements. For example, if the input current of a population is to be set, this population (or the neuron model) must of course have the corresponding variable. Such preconditions can be tested in advance with the simulation_requirements classes. They only need to contain a function run() to test the requirements (if the requirements are not met, an error is raised). In CompNeuroPy predefined simulation_requirements classes are available (CompNeuroPy.simulation_requirements; currently only ReqPopHasAttr). In the CompNeuroSim class, the requirements are passed as arguments in a list (see above). Each requirement (list entry) must be defined as a dictionary with keys req (the requirement class) and the arguments of the requirement class (e.g., pop and attr for the ReqPopHasAttr).

Here two requirements are defined (both ReqPopHasAttr). All populations of my_model should contain the attribute (variable or parameter) 'I' and all populations of my_other_model should contain the attribute 'v':

req1 = {'req':ReqPopHasAttr, 'pop':my_model.populations, 'attr':'I'}\nreq2 = {'req':ReqPopHasAttr, 'pop':my_other_model.populations, 'attr':'v'}\nmy_two_model_simulation = CompNeuroSim(..., requirements=[req1, req2])\n

As described above, new simulation_kwargs can be passed to the run() function of a CompNeuroSim object. Thus, one could initially pass a particular model as simulation_kwargs and for a later run pass a different model. If the requirements are defined as shown above, it is not tested again whether the new model (e.g. my_third_model) also fulfills the requirements (because the requirements were defined for my_model and my_other_model). To work around this, an argument for a simulation_requirements class can also be linked to a simulation_kwargs entry. Thus, if new simulation_kwargs are used, also the simulation_requirements arguments adapt. This can be done using a string with the syntax \"simulation_kwargs.<kwarg_name>.<optional_attribute_of_kwarg>\", as shown in this example:

req1 = {'req':ReqPopHasAttr, 'pop':\"simulation_kwargs.model1.populations\", 'attr':'I'}\nreq2 = {'req':ReqPopHasAttr, 'pop':\"simulation_kwargs.model2.populations\", 'attr':'v'}\nmy_two_model_simulation = CompNeuroSim(simulation_kwargs={'model1':my_model, 'model2':my_other_model, 'parameter':5},\n                                        ...,\n                                        requirements=[req1, req2])\n...\nmy_two_model_simulation.run({'model1':my_third_model})\n

Due to the string \"simulation_kwargs.model1.populations\" the pop argument of req1 is now linked to model1 (defined in the simulation_kwargs). Thus, in the run where a different model (my_third_model) is used for model1, req1 is automatically tested for the new model1.

"},{"location":"main/generate_simulations/#CompNeuroPy.generate_simulation.CompNeuroSim","title":"CompNeuroPy.generate_simulation.CompNeuroSim","text":"

Class for generating a CompNeuroPy simulation.

Source code in src/CompNeuroPy/generate_simulation.py
class CompNeuroSim:\n    \"\"\"\n    Class for generating a CompNeuroPy simulation.\n    \"\"\"\n\n    _initialized_simulations = []\n\n    def __init__(\n        self,\n        simulation_function: Callable,\n        simulation_kwargs: dict | None = None,\n        name: str = \"simulation\",\n        description: str = \"\",\n        requirements: list | None = None,\n        kwargs_warning: bool = False,\n        monitor_object: CompNeuroMonitors | None = None,\n    ):\n        \"\"\"\n        Args:\n            simulation_function (function):\n                Function which runs the simulation.\n            simulation_kwargs (dict, optional):\n                Dictionary of arguments for the simulation_function. Default: None.\n            name (str, optional):\n                Name of the simulation. Default: \"simulation\".\n            description (str, optional):\n                Description of the simulation. Default: \"\".\n            requirements (list, optional):\n                List of requirements for the simulation. It's a list of dictionaries\n                which contain the requirement class itself (key: \"req\") and the\n                corresponding arguments (keys are the names of the arguments). The\n                arguments can be inherited from the simulation kwargs by using the\n                syntax 'simulation_kwargs.<kwarg_name>'. Default: None.\n            kwargs_warning (bool, optional):\n                If True, a warning is printed if the simulation_kwargs are changed\n                during the simulation. Default: False.\n            monitor_object (CompNeuroMonitors object, optional):\n                CompNeuroMonitors object to automatically track the recording chunk for each\n                simulation run. 
Default: None.\n        \"\"\"\n        # set simulation function\n        self.name = name\n        if name == \"simulation\":\n            self.name = name + str(self._nr_simulations())\n        self._initialized_simulations.append(self.name)\n        self.description = description\n        self.simulation_function = simulation_function\n        self.simulation_kwargs = simulation_kwargs\n        if requirements is None:\n            self.requirements = []\n        else:\n            self.requirements = requirements\n        self.start = []\n        self.end = []\n        self.info = []\n        self.kwargs = []\n        if kwargs_warning:\n            self._warned = False\n        else:\n            self._warned = True\n        self.monitor_object = monitor_object\n        if monitor_object is not None:\n            self.monitor_chunk = []\n        else:\n            self.monitor_chunk = None\n\n        ### test initial requirements\n        self._test_req(simulation_kwargs=simulation_kwargs)\n\n    def run(self, simulation_kwargs: dict | None = None):\n        \"\"\"\n        Runs the simulation function. With each run extend start, end list containing\n        start and end time of the corresponding run and the info list containing the\n        return value of the simulation function.\n\n        Args:\n            simulation_kwargs (dict, optional):\n                Temporary simulation kwargs which override the initialized simulation\n                kwargs. 
Default: None, i.e., use values from initialization.\n        \"\"\"\n\n        ### define the current simulation kwargs\n        if simulation_kwargs is not None:\n            if self.simulation_kwargs is not None:\n                ### not replace initialized kwargs completely but only the kwargs which are given\n                tmp_kwargs = self.simulation_kwargs.copy()\n                for key, val in simulation_kwargs.items():\n                    tmp_kwargs[key] = val\n            else:\n                ### there are no initial kwargs --> only use the kwargs which are given\n                tmp_kwargs = simulation_kwargs\n            if not (self._warned) and len(self.requirements) > 0:\n                print(\n                    \"\\nWARNING! run\",\n                    self.name,\n                    \"changed simulation kwargs, initial requirements may no longer be fulfilled!\\n\",\n                )\n                self._warned = True\n        else:\n            tmp_kwargs = self.simulation_kwargs\n\n        ### before each run, test requirements\n        self._test_req(simulation_kwargs=tmp_kwargs)\n\n        ### and append current simulation kwargs to the kwargs variable\n        self.kwargs.append(tmp_kwargs)\n\n        ### and append the current chunk of the monitors object to the chunk variable\n        if self.monitor_object is not None:\n            self.monitor_chunk.append(self.monitor_object.current_chunk())\n\n        ### run the simulation, store start and end simulation time\n        self.start.append(get_time())\n        if tmp_kwargs is not None:\n            self.info.append(self.simulation_function(**tmp_kwargs))\n        else:\n            self.info.append(self.simulation_function())\n        self.end.append(get_time())\n\n    def _nr_simulations(self):\n        \"\"\"\n        Returns the current number of initialized CompNeuroPy simulations.\n        \"\"\"\n        return len(self._initialized_simulations)\n\n    def _test_req(self, 
simulation_kwargs=None):\n        \"\"\"\n        Tests the initialized requirements with the current simulation_kwargs.\n        \"\"\"\n\n        if simulation_kwargs is None:  # --> use the initial simulation_kwargs\n            simulation_kwargs = self.simulation_kwargs\n\n        for req in self.requirements:\n            ### check if requirement_kwargs are given besides the requirement itself\n            if len(list(req.keys())) > 1:\n                ### remove the requirement itself from the kwargs\n                req_kwargs = ef.remove_key(req, \"req\")\n                ### check if req_kwargs reference to simulation_kwargs, if yes, use the\n                ### current simulation kwargs instead of the intial ones\n                for key, val in req_kwargs.items():\n                    if isinstance(val, str):\n                        val_split = val.split(\".\")\n                        ### check if val is a reference to simulation_kwargs\n                        if val_split[0] == \"simulation_kwargs\":\n                            if len(val_split) == 1:\n                                ### val is only simulation_kwargs\n                                req_kwargs = simulation_kwargs\n                            elif len(val_split) == 2:\n                                ### val is simulation_kwargs.something\n                                req_kwargs[key] = simulation_kwargs[val_split[1]]\n                            else:\n                                ### val is simulation_kwargs.something.something... e.g. 
key='pops' and val= 'simulation_kwargs.model.populations'\n                                req_kwargs[key] = eval(\n                                    'simulation_kwargs[\"'\n                                    + val_split[1]\n                                    + '\"].'\n                                    + \".\".join(val_split[2:])\n                                )\n                ### run the requirement using the current req_kwargs\n                req[\"req\"](**req_kwargs).run()\n\n            else:\n                ### a requirement is given without kwargs --> just run it\n                req[\"req\"]().run()\n\n    def get_current_arr(self, dt, flat=False):\n        \"\"\"\n        Method exclusively for current_step simulation functions. Gets the current array\n        (input current value for each time step) of all runs.\n\n        !!! warning\n            This method will be removed soon. Use the get_current_arr method of the\n            SimInfo class instead.\n\n        Args:\n            dt (float):\n                Time step size of the simulation.\n            flat (bool, optional):\n                If True, returns a flattened array. Assumes that all runs are run\n                consecutively without brakes. Default: False, i.e., returns a list of\n                arrays.\n\n        Returns:\n            current_arr (list of arrays):\n                List of arrays containing the current values for each time step of each\n                run. 
If flat=True, returns a flattened array.\n        \"\"\"\n        assert (\n            self.simulation_function.__name__ == \"current_step\"\n        ), 'ERROR get_current_arr: Simulation has to be \"current_step\"!'\n        ### TODO: remove because deprecated\n        print(\n            \"WARNING get_current_arr function will only be available in SimInfo soon.\"\n        )\n        current_arr = []\n        for run in range(len(self.kwargs)):\n            t1 = self.kwargs[run][\"t1\"]\n            t2 = self.kwargs[run][\"t2\"]\n            a1 = self.kwargs[run][\"a1\"]\n            a2 = self.kwargs[run][\"a2\"]\n\n            if t1 > 0 and t2 > 0:\n                current_arr.append(\n                    np.concatenate(\n                        [\n                            np.ones(int(round(t1 / dt))) * a1,\n                            np.ones(int(round(t2 / dt))) * a2,\n                        ]\n                    )\n                )\n            elif t2 > 0:\n                current_arr.append(np.ones(int(round(t2 / dt))) * a2)\n            else:\n                current_arr.append(np.ones(int(round(t1 / dt))) * a1)\n\n        if flat:\n            return np.concatenate(current_arr)\n        else:\n            return current_arr\n\n    def simulation_info(self):\n        \"\"\"\n        Returns a SimInfo object containing the simulation information.\n\n        Returns:\n            simulation_info_obj (SimInfo):\n                Simulation information object.\n        \"\"\"\n\n        simulation_info_obj = SimInfo(\n            self.name,\n            self.description,\n            self.simulation_function.__name__,\n            self.start,\n            self.end,\n            self.info,\n            self.kwargs,\n            self.monitor_chunk,\n        )\n\n        return simulation_info_obj\n
"},{"location":"main/generate_simulations/#CompNeuroPy.generate_simulation.CompNeuroSim.__init__","title":"__init__(simulation_function, simulation_kwargs=None, name='simulation', description='', requirements=None, kwargs_warning=False, monitor_object=None)","text":"

Parameters:

Name Type Description Default simulation_function function

Function which runs the simulation.

required simulation_kwargs dict

Dictionary of arguments for the simulation_function. Default: None.

None name str

Name of the simulation. Default: \"simulation\".

'simulation' description str

Description of the simulation. Default: \"\".

'' requirements list

List of requirements for the simulation. It's a list of dictionaries which contain the requirement class itself (key: \"req\") and the corresponding arguments (keys are the names of the arguments). The arguments can be inherited from the simulation kwargs by using the syntax 'simulation_kwargs.<kwarg_name>'. Default: None. None kwargs_warning bool

If True, a warning is printed if the simulation_kwargs are changed during the simulation. Default: False.

False monitor_object CompNeuroMonitors object

CompNeuroMonitors object to automatically track the recording chunk for each simulation run. Default: None.

None Source code in src/CompNeuroPy/generate_simulation.py
def __init__(\n    self,\n    simulation_function: Callable,\n    simulation_kwargs: dict | None = None,\n    name: str = \"simulation\",\n    description: str = \"\",\n    requirements: list | None = None,\n    kwargs_warning: bool = False,\n    monitor_object: CompNeuroMonitors | None = None,\n):\n    \"\"\"\n    Args:\n        simulation_function (function):\n            Function which runs the simulation.\n        simulation_kwargs (dict, optional):\n            Dictionary of arguments for the simulation_function. Default: None.\n        name (str, optional):\n            Name of the simulation. Default: \"simulation\".\n        description (str, optional):\n            Description of the simulation. Default: \"\".\n        requirements (list, optional):\n            List of requirements for the simulation. It's a list of dictionaries\n            which contain the requirement class itself (key: \"req\") and the\n            corresponding arguments (keys are the names of the arguments). The\n            arguments can be inherited from the simulation kwargs by using the\n            syntax 'simulation_kwargs.<kwarg_name>'. Default: None.\n        kwargs_warning (bool, optional):\n            If True, a warning is printed if the simulation_kwargs are changed\n            during the simulation. Default: False.\n        monitor_object (CompNeuroMonitors object, optional):\n            CompNeuroMonitors object to automatically track the recording chunk for each\n            simulation run. 
Default: None.\n    \"\"\"\n    # set simulation function\n    self.name = name\n    if name == \"simulation\":\n        self.name = name + str(self._nr_simulations())\n    self._initialized_simulations.append(self.name)\n    self.description = description\n    self.simulation_function = simulation_function\n    self.simulation_kwargs = simulation_kwargs\n    if requirements is None:\n        self.requirements = []\n    else:\n        self.requirements = requirements\n    self.start = []\n    self.end = []\n    self.info = []\n    self.kwargs = []\n    if kwargs_warning:\n        self._warned = False\n    else:\n        self._warned = True\n    self.monitor_object = monitor_object\n    if monitor_object is not None:\n        self.monitor_chunk = []\n    else:\n        self.monitor_chunk = None\n\n    ### test initial requirements\n    self._test_req(simulation_kwargs=simulation_kwargs)\n
"},{"location":"main/generate_simulations/#CompNeuroPy.generate_simulation.CompNeuroSim.run","title":"run(simulation_kwargs=None)","text":"

Runs the simulation function. With each run extend start, end list containing start and end time of the corresponding run and the info list containing the return value of the simulation function.

Parameters:

Name Type Description Default simulation_kwargs dict

Temporary simulation kwargs which override the initialized simulation kwargs. Default: None, i.e., use values from initialization.

None Source code in src/CompNeuroPy/generate_simulation.py
def run(self, simulation_kwargs: dict | None = None):\n    \"\"\"\n    Runs the simulation function. With each run extend start, end list containing\n    start and end time of the corresponding run and the info list containing the\n    return value of the simulation function.\n\n    Args:\n        simulation_kwargs (dict, optional):\n            Temporary simulation kwargs which override the initialized simulation\n            kwargs. Default: None, i.e., use values from initialization.\n    \"\"\"\n\n    ### define the current simulation kwargs\n    if simulation_kwargs is not None:\n        if self.simulation_kwargs is not None:\n            ### not replace initialized kwargs completely but only the kwargs which are given\n            tmp_kwargs = self.simulation_kwargs.copy()\n            for key, val in simulation_kwargs.items():\n                tmp_kwargs[key] = val\n        else:\n            ### there are no initial kwargs --> only use the kwargs which are given\n            tmp_kwargs = simulation_kwargs\n        if not (self._warned) and len(self.requirements) > 0:\n            print(\n                \"\\nWARNING! 
run\",\n                self.name,\n                \"changed simulation kwargs, initial requirements may no longer be fulfilled!\\n\",\n            )\n            self._warned = True\n    else:\n        tmp_kwargs = self.simulation_kwargs\n\n    ### before each run, test requirements\n    self._test_req(simulation_kwargs=tmp_kwargs)\n\n    ### and append current simulation kwargs to the kwargs variable\n    self.kwargs.append(tmp_kwargs)\n\n    ### and append the current chunk of the monitors object to the chunk variable\n    if self.monitor_object is not None:\n        self.monitor_chunk.append(self.monitor_object.current_chunk())\n\n    ### run the simulation, store start and end simulation time\n    self.start.append(get_time())\n    if tmp_kwargs is not None:\n        self.info.append(self.simulation_function(**tmp_kwargs))\n    else:\n        self.info.append(self.simulation_function())\n    self.end.append(get_time())\n
"},{"location":"main/generate_simulations/#CompNeuroPy.generate_simulation.CompNeuroSim.get_current_arr","title":"get_current_arr(dt, flat=False)","text":"

Method exclusively for current_step simulation functions. Gets the current array (input current value for each time step) of all runs.

Warning

This method will be removed soon. Use the get_current_arr method of the SimInfo class instead.

Parameters:

Name Type Description Default dt float

Time step size of the simulation.

required flat bool

If True, returns a flattened array. Assumes that all runs are run consecutively without breaks. Default: False, i.e., returns a list of arrays.

False

Returns:

Name Type Description current_arr list of arrays

List of arrays containing the current values for each time step of each run. If flat=True, returns a flattened array.

Source code in src/CompNeuroPy/generate_simulation.py
def get_current_arr(self, dt, flat=False):\n    \"\"\"\n    Method exclusively for current_step simulation functions. Gets the current array\n    (input current value for each time step) of all runs.\n\n    !!! warning\n        This method will be removed soon. Use the get_current_arr method of the\n        SimInfo class instead.\n\n    Args:\n        dt (float):\n            Time step size of the simulation.\n        flat (bool, optional):\n            If True, returns a flattened array. Assumes that all runs are run\n            consecutively without brakes. Default: False, i.e., returns a list of\n            arrays.\n\n    Returns:\n        current_arr (list of arrays):\n            List of arrays containing the current values for each time step of each\n            run. If flat=True, returns a flattened array.\n    \"\"\"\n    assert (\n        self.simulation_function.__name__ == \"current_step\"\n    ), 'ERROR get_current_arr: Simulation has to be \"current_step\"!'\n    ### TODO: remove because deprecated\n    print(\n        \"WARNING get_current_arr function will only be available in SimInfo soon.\"\n    )\n    current_arr = []\n    for run in range(len(self.kwargs)):\n        t1 = self.kwargs[run][\"t1\"]\n        t2 = self.kwargs[run][\"t2\"]\n        a1 = self.kwargs[run][\"a1\"]\n        a2 = self.kwargs[run][\"a2\"]\n\n        if t1 > 0 and t2 > 0:\n            current_arr.append(\n                np.concatenate(\n                    [\n                        np.ones(int(round(t1 / dt))) * a1,\n                        np.ones(int(round(t2 / dt))) * a2,\n                    ]\n                )\n            )\n        elif t2 > 0:\n            current_arr.append(np.ones(int(round(t2 / dt))) * a2)\n        else:\n            current_arr.append(np.ones(int(round(t1 / dt))) * a1)\n\n    if flat:\n        return np.concatenate(current_arr)\n    else:\n        return current_arr\n
"},{"location":"main/generate_simulations/#CompNeuroPy.generate_simulation.CompNeuroSim.simulation_info","title":"simulation_info()","text":"

Returns a SimInfo object containing the simulation information.

Returns:

Name Type Description simulation_info_obj SimInfo

Simulation information object.

Source code in src/CompNeuroPy/generate_simulation.py
def simulation_info(self):\n    \"\"\"\n    Returns a SimInfo object containing the simulation information.\n\n    Returns:\n        simulation_info_obj (SimInfo):\n            Simulation information object.\n    \"\"\"\n\n    simulation_info_obj = SimInfo(\n        self.name,\n        self.description,\n        self.simulation_function.__name__,\n        self.start,\n        self.end,\n        self.info,\n        self.kwargs,\n        self.monitor_chunk,\n    )\n\n    return simulation_info_obj\n
"},{"location":"main/generate_simulations/#CompNeuroPy.generate_simulation.SimInfo","title":"CompNeuroPy.generate_simulation.SimInfo","text":"

Class for storing the simulation information.

Attributes:

Name Type Description name str

Name of the simulation.

description str

Description of the simulation.

simulation_function str

Name of the simulation function.

start list

List of start times of the simulation runs.

end list

List of end times of the simulation runs.

info list

List of return values of the simulation function of each simulation run.

kwargs list

List of simulation kwargs of the simulation function of each simulation run.

monitor_chunk list

List of recording chunks of the used CompNeuroMonitors object of each simulation run.

Source code in src/CompNeuroPy/generate_simulation.py
class SimInfo:\n    \"\"\"\n    Class for storing the simulation information.\n\n    Attributes:\n        name (str):\n            Name of the simulation.\n        description (str):\n            Description of the simulation.\n        simulation_function (str):\n            Name of the simulation function.\n        start (list):\n            List of start times of the simulation runs.\n        end (list):\n            List of end times of the simulation runs.\n        info (list):\n            List of return values of the simulation function of each simulation run.\n        kwargs (list):\n            List of simulation kwargs of the simulation function of each simulation run.\n        monitor_chunk (list):\n            List of recording chunks of the used CompNeuroMonitors object of each simulation run.\n    \"\"\"\n\n    def __init__(\n        self,\n        name,\n        description,\n        simulation_function,\n        start,\n        end,\n        info,\n        kwargs,\n        monitor_chunk,\n    ):\n        \"\"\"\n        Initialization of the simulation information object.\n\n        Args:\n            name (str):\n                Name of the simulation.\n            description (str):\n                Description of the simulation.\n            simulation_function (str):\n                Name of the simulation function.\n            start (list):\n                List of start times of the simulation runs.\n            end (list):\n                List of end times of the simulation runs.\n            info (list):\n                List of return values of the simulation function of each simulation run.\n            kwargs (list):\n                List of simulation kwargs of the simulation function of each simulation\n                run.\n            monitor_chunk (list):\n                List of recording chunks of the used CompNeuroMonitors object of each simulation\n                run.\n        \"\"\"\n        self.name = name\n        
self.description = description\n        self.simulation_function = simulation_function\n        self.start = start\n        self.end = end\n        self.info = info\n        self.kwargs = kwargs\n        self.monitor_chunk = monitor_chunk\n\n    def get_current_arr(self, dt, flat=False):\n        \"\"\"\n        Method exclusively for the following simulation functions (built-in\n        CompNeuroPy):\n            - current_step\n            - current_stim\n            - current_ramp\n        Gets the current array (input current value for each time step) of all runs.\n\n        Args:\n            dt (float):\n                Time step size of the simulation.\n            flat (bool, optional):\n                If True, returns a flattened array. Assumes that all runs are run\n                consecutively without brakes. Default: False, i.e., returns a list of\n                arrays.\n\n        Returns:\n            current_arr (list of arrays):\n                List of arrays containing the current values for each time step of each\n                run. 
If flat=True, returns a flattened array.\n        \"\"\"\n        assert (\n            self.simulation_function == \"current_step\"\n            or self.simulation_function == \"current_stim\"\n            or self.simulation_function == \"current_ramp\"\n        ), 'ERROR get_current_arr: Simulation has to be \"current_step\", \"current_stim\" or \"current_ramp\"!'\n\n        if self.simulation_function == \"current_step\":\n            current_arr = []\n            for run in range(len(self.kwargs)):\n                t1 = self.kwargs[run][\"t1\"]\n                t2 = self.kwargs[run][\"t2\"]\n                a1 = self.kwargs[run][\"a1\"]\n                a2 = self.kwargs[run][\"a2\"]\n\n                if t1 > 0 and t2 > 0:\n                    current_arr.append(\n                        np.concatenate(\n                            [\n                                np.ones(int(round(t1 / dt))) * a1,\n                                np.ones(int(round(t2 / dt))) * a2,\n                            ]\n                        )\n                    )\n                elif t2 > 0:\n                    current_arr.append(np.ones(int(round(t2 / dt))) * a2)\n                else:\n                    current_arr.append(np.ones(int(round(t1 / dt))) * a1)\n\n            if flat:\n                return np.concatenate(current_arr)\n            else:\n                return current_arr\n\n        elif self.simulation_function == \"current_stim\":\n            current_arr = []\n            for run in range(len(self.kwargs)):\n                t = self.kwargs[run][\"t\"]\n                a = self.kwargs[run][\"a\"]\n\n                if t > 0:\n                    current_arr.append(np.ones(int(round(t / dt))) * a)\n\n            if flat:\n                return np.concatenate(current_arr)\n            else:\n                return current_arr\n\n        elif self.simulation_function == \"current_ramp\":\n            current_arr = []\n            for run in 
range(len(self.kwargs)):\n                amp = self.kwargs[run][\"a0\"]\n                current_arr_ramp = []\n                for stim_idx in range(self.kwargs[run][\"n\"]):\n                    t = self.info[run][\"dur_stim\"]\n                    a = amp\n                    current_arr_ramp.append(np.ones(int(round(t / dt))) * a)\n                    amp = amp + self.info[run][\"da\"]\n                current_arr.append(list(np.concatenate(current_arr_ramp)))\n\n            if flat:\n                return np.concatenate(current_arr)\n            else:\n                return current_arr\n
"},{"location":"main/generate_simulations/#CompNeuroPy.generate_simulation.SimInfo.__init__","title":"__init__(name, description, simulation_function, start, end, info, kwargs, monitor_chunk)","text":"

Initialization of the simulation information object.

Parameters:

Name Type Description Default name str

Name of the simulation.

required description str

Description of the simulation.

required simulation_function str

Name of the simulation function.

required start list

List of start times of the simulation runs.

required end list

List of end times of the simulation runs.

required info list

List of return values of the simulation function of each simulation run.

required kwargs list

List of simulation kwargs of the simulation function of each simulation run.

required monitor_chunk list

List of recording chunks of the used CompNeuroMonitors object of each simulation run.

required Source code in src/CompNeuroPy/generate_simulation.py
def __init__(\n    self,\n    name,\n    description,\n    simulation_function,\n    start,\n    end,\n    info,\n    kwargs,\n    monitor_chunk,\n):\n    \"\"\"\n    Initialization of the simulation information object.\n\n    Args:\n        name (str):\n            Name of the simulation.\n        description (str):\n            Description of the simulation.\n        simulation_function (str):\n            Name of the simulation function.\n        start (list):\n            List of start times of the simulation runs.\n        end (list):\n            List of end times of the simulation runs.\n        info (list):\n            List of return values of the simulation function of each simulation run.\n        kwargs (list):\n            List of simulation kwargs of the simulation function of each simulation\n            run.\n        monitor_chunk (list):\n            List of recording chunks of the used CompNeuroMonitors object of each simulation\n            run.\n    \"\"\"\n    self.name = name\n    self.description = description\n    self.simulation_function = simulation_function\n    self.start = start\n    self.end = end\n    self.info = info\n    self.kwargs = kwargs\n    self.monitor_chunk = monitor_chunk\n
"},{"location":"main/generate_simulations/#CompNeuroPy.generate_simulation.SimInfo.get_current_arr","title":"get_current_arr(dt, flat=False)","text":"

Method exclusively for the following simulation functions (built-in CompNeuroPy): - current_step - current_stim - current_ramp Gets the current array (input current value for each time step) of all runs.

Parameters:

Name Type Description Default dt float

Time step size of the simulation.

required flat bool

If True, returns a flattened array. Assumes that all runs are run consecutively without breaks. Default: False, i.e., returns a list of arrays.

False

Returns:

Name Type Description current_arr list of arrays

List of arrays containing the current values for each time step of each run. If flat=True, returns a flattened array.

Source code in src/CompNeuroPy/generate_simulation.py
def get_current_arr(self, dt, flat=False):\n    \"\"\"\n    Method exclusively for the following simulation functions (built-in\n    CompNeuroPy):\n        - current_step\n        - current_stim\n        - current_ramp\n    Gets the current array (input current value for each time step) of all runs.\n\n    Args:\n        dt (float):\n            Time step size of the simulation.\n        flat (bool, optional):\n            If True, returns a flattened array. Assumes that all runs are run\n            consecutively without brakes. Default: False, i.e., returns a list of\n            arrays.\n\n    Returns:\n        current_arr (list of arrays):\n            List of arrays containing the current values for each time step of each\n            run. If flat=True, returns a flattened array.\n    \"\"\"\n    assert (\n        self.simulation_function == \"current_step\"\n        or self.simulation_function == \"current_stim\"\n        or self.simulation_function == \"current_ramp\"\n    ), 'ERROR get_current_arr: Simulation has to be \"current_step\", \"current_stim\" or \"current_ramp\"!'\n\n    if self.simulation_function == \"current_step\":\n        current_arr = []\n        for run in range(len(self.kwargs)):\n            t1 = self.kwargs[run][\"t1\"]\n            t2 = self.kwargs[run][\"t2\"]\n            a1 = self.kwargs[run][\"a1\"]\n            a2 = self.kwargs[run][\"a2\"]\n\n            if t1 > 0 and t2 > 0:\n                current_arr.append(\n                    np.concatenate(\n                        [\n                            np.ones(int(round(t1 / dt))) * a1,\n                            np.ones(int(round(t2 / dt))) * a2,\n                        ]\n                    )\n                )\n            elif t2 > 0:\n                current_arr.append(np.ones(int(round(t2 / dt))) * a2)\n            else:\n                current_arr.append(np.ones(int(round(t1 / dt))) * a1)\n\n        if flat:\n            return np.concatenate(current_arr)\n        
else:\n            return current_arr\n\n    elif self.simulation_function == \"current_stim\":\n        current_arr = []\n        for run in range(len(self.kwargs)):\n            t = self.kwargs[run][\"t\"]\n            a = self.kwargs[run][\"a\"]\n\n            if t > 0:\n                current_arr.append(np.ones(int(round(t / dt))) * a)\n\n        if flat:\n            return np.concatenate(current_arr)\n        else:\n            return current_arr\n\n    elif self.simulation_function == \"current_ramp\":\n        current_arr = []\n        for run in range(len(self.kwargs)):\n            amp = self.kwargs[run][\"a0\"]\n            current_arr_ramp = []\n            for stim_idx in range(self.kwargs[run][\"n\"]):\n                t = self.info[run][\"dur_stim\"]\n                a = amp\n                current_arr_ramp.append(np.ones(int(round(t / dt))) * a)\n                amp = amp + self.info[run][\"da\"]\n            current_arr.append(list(np.concatenate(current_arr_ramp)))\n\n        if flat:\n            return np.concatenate(current_arr)\n        else:\n            return current_arr\n
"},{"location":"main/model_configurator/","title":"Model Configurator","text":"

Working on it.

"},{"location":"main/monitors_recordings/","title":"Monitors / Recordings","text":""},{"location":"main/monitors_recordings/#create-monitors","title":"Create Monitors","text":"

CompNeuroPy provides a CompNeuroMonitors class that can be used to easily create and control multiple ANNarchy monitors at once. To create a CompNeuroMonitors object, all that is needed is a monitor_dictionary that defines which variables should be recorded for each model component. All populations and projections have to have unique names to work with CompNeuroMonitors. The keys of the monitor_dictionary are the names of the model components (in the example below \"my_pop1\" and \"my_pop2\"). The key can also include a recording period (the time between two recordings, given after a \";\"), e.g. record the variables of my_pop1 only every 10 ms would look like this: 'pop;my_pop1;10':['v', 'spike']. The default period is the time step of the simulation for populations and 1000 times the timestep for projections. The values of the monitor_dictionary are lists of all the variables that should be recorded from the corresponding components. The names of components (populations, projections) could be provided by a CompNeuroModel.

"},{"location":"main/monitors_recordings/#example","title":"Example:","text":"

Here the variables v and spike should be recorded of the population with the name \"my_pop1\" and the variable v should be recorded from the population with the name \"my_pop2\":

from CompNeuroPy import CompNeuroMonitors\nmonitor_dictionary = {'my_pop1':['v', 'spike'], 'my_pop2':['v']}\nmon = CompNeuroMonitors(monitor_dictionary)\n

A full example is available in the Examples.

"},{"location":"main/monitors_recordings/#chunks-and-periods","title":"Chunks and periods","text":"

In CompNeuroPy, recordings are divided into so-called chunks and periods. Chunks are simulation sections that are separated by monitor resets (optionally also reset the model). A chunk can consist of several periods. A period represents the time span between the start and pause of a monitor recording. To divide a simulation into chunks and periods, a CompNeuroMonitors object provides the three functions start(), pause() and reset().

At the beginning of a simulation, the monitors do not start automatically which is why the start() function must be called at least once. The start() function can also be used to resume paused recordings. With the function pause() recordings are paused. The function reset() starts a new chunk for the recordings (the end of a chunk is also always the end of a period, i.e. the last period of the corresponding chunk). After calling reset() the monitors remain in their current mode (active or paused). By default reset() also resets the model to the compile status (time = 0) by calling the ANNarchy reset() function and has the same arguments. If the argument model is set to False, the ANNarchy reset() function is not called and only a new chunk is created.

"},{"location":"main/monitors_recordings/#example_1","title":"Example:","text":"
### first chunk, one period\nsimulate(100) # 100 ms not recorded\nmon.start()   # start all monitors\nsimulate(100) # 100 ms recorded\n\n### second chunk, two periods\nmon.reset()   # model reset, beginning of new chunk\nsimulate(100) # 100 ms recorded (monitors were active before reset --> still active)\nmon.pause()   # pause all monitors\nsimulate(100) # 100 ms not recorded\nmon.start()   # start all monitors\nsimulate(100) # 100 ms recorded\n
"},{"location":"main/monitors_recordings/#get-recordings","title":"Get recordings","text":"

The recordings can be obtained from the CompNeuroMonitors object using the get_recordings() function. This returns a list of dictionaries (one for each chunk). The dictionaries contain the recorded data defined with the monitor_dictionary at the CompNeuroMonitors initialization. In the recordings dictionaries the keys have the following structure: \"<component_name>;variable\"; the corresponding dictionary values are the recordings of the respective variable. The dictionaries always contain the time step of the simulation (key = \"dt\"), the periods (time between recorded values) for each component (key = \"<component_name>;period\") and the attributes of each component (key = \"<component_name>;parameter_dict\").

"},{"location":"main/monitors_recordings/#example_2","title":"Example:","text":"
recordings = mon.get_recordings()\ny1 = recordings[0]['my_pop1;v'] ### variable v of my_pop1 from 1st chunk\ny2 = recordings[1]['my_pop1;v'] ### variable v of my_pop1 from 2nd chunk\n
"},{"location":"main/monitors_recordings/#get-recording-times","title":"Get recording times","text":"

In addition to the recordings themselves, recording times can also be obtained from the CompNeuroMonitors object, which is very useful for later analyses. With the function get_recording_times() of the CompNeuroMonitors object a RecordingTimes object can be obtained. From the RecordingTimes object one can get time limits (in ms) and corresponding indices for the recordings.

"},{"location":"main/monitors_recordings/#example_3","title":"Example:","text":"
recording_times = mon.get_recording_times()\nstart_time = recording_times.time_lims(chunk=1, period=1)[0] ### 200 ms\nstart_idx  = recording_times.idx_lims(chunk=1, period=1)[0]  ### 1000, if dt == 0.1\nend_time   = recording_times.time_lims(chunk=1, period=1)[1] ### 300 ms\nend_idx    = recording_times.idx_lims(chunk=1, period=1)[1]  ### 2000\n

You can combine the recordings of both chunks of the example simulation shown above into a single time array and a single value array using the RecordingTimes object's combine_chunks function:

time_arr, value_arr = recording_times.combine_chunks(recordings, 'my_pop1;v', 'consecutive')\n

"},{"location":"main/monitors_recordings/#plot-recordings","title":"Plot recordings","text":"

To get a quick overview of the recordings, CompNeuroPy provides the PlotRecordings class.

"},{"location":"main/monitors_recordings/#CompNeuroPy.monitors.CompNeuroMonitors","title":"CompNeuroPy.monitors.CompNeuroMonitors","text":"

Class to bring together ANNarchy monitors into one object.

Source code in src/CompNeuroPy/monitors.py
class CompNeuroMonitors:\n    \"\"\"\n    Class to bring together ANNarchy monitors into one object.\n    \"\"\"\n\n    def __init__(self, mon_dict={}):\n        \"\"\"\n        Initialize CompNeuroMonitors object by creating ANNarchy monitors.\n\n        Args:\n            mon_dict (dict):\n                dict with key=\"compartment_name;period\" where period is optional and\n                val=list with variables to record.\n        \"\"\"\n        self.mon = self._add_monitors(mon_dict)\n        self.mon_dict = mon_dict\n        self._init_internals(init_call=True)\n\n    def _init_internals(self, init_call=False):\n        \"\"\"\n        Initialize the following internal variables:\n            - timings (dict):\n                dict with key=\"pop_name\" for populations and \"proj_name\" for projections\n                for each recorded population and projection and\n                val={\"currently_paused\": True, \"start\": [], \"stop\": []}\n            - recordings (list):\n                list with recordings of all chunks. Set to empty list.\n            - recording_times (list):\n                list with recording times of all chunks. Set to empty list.\n            - already_got_recordings (bool):\n                True if recordings were already requested, False otherwise. Set to\n                False.\n            - already_got_recording_times (bool):\n                True if recording_times were already requested, False otherwise. Set to\n                False.\n            - get_recordings_reset_call (bool):\n                True if get_recordings() and get_recording_times() are called within\n                reset(), False otherwise. Set to False.\n\n        Args:\n            init_call (bool, optional):\n                True if called from __init__(), False otherwise. 
Default: False.\n        \"\"\"\n        if init_call is False:\n            #### pause all ANNarchy monitors because currently paused will be set to False\n            self.pause()\n\n        ### initialize timings\n        timings = {}\n        for key, val in self.mon_dict.items():\n            _, compartment, _ = self._unpack_mon_dict_keys(key)\n            timings[compartment] = {\"currently_paused\": True, \"start\": [], \"stop\": []}\n        self.timings = timings\n\n        ### initialize recordings and recording_times etc.\n        self.recordings = []\n        self.recording_times = []\n        self.already_got_recordings = False\n        self.already_got_recording_times = False\n        self.get_recordings_reset_call = False\n\n    @check_types()\n    def start(self, compartment_list: list | None = None):\n        \"\"\"\n        Start or resume recording of all recorded compartments in compartment_list.\n\n        Args:\n            compartment_list (list, optional):\n                List with compartment names to start or resume recording. Default: None,\n                i.e., all compartments of initialized mon_dict are started or resumed.\n        \"\"\"\n        if compartment_list == None:\n            mon_dict_key_list = list(self.mon_dict.keys())\n            compartment_list = [\n                self._unpack_mon_dict_keys(key)[1] for key in mon_dict_key_list\n            ]\n\n        self.timings = self._start_monitors(compartment_list, self.mon, self.timings)\n\n    @check_types()\n    def pause(self, compartment_list: list | None = None):\n        \"\"\"\n        Pause recording of all recorded compartments in compartment_list.\n\n        Args:\n            compartment_list (list, optional):\n                List with compartment names to pause recording. 
Default: None,\n                i.e., all compartments of initialized mon_dict are paused.\n        \"\"\"\n        if compartment_list == None:\n            mon_dict_key_list = list(self.mon_dict.keys())\n            compartment_list = [\n                self._unpack_mon_dict_keys(key)[1] for key in mon_dict_key_list\n            ]\n\n        self.timings = self._pause_monitors(compartment_list, self.mon, self.timings)\n\n    def reset(\n        self,\n        populations=True,\n        projections=False,\n        synapses=False,\n        monitors=True,\n        model=True,\n        parameters=True,\n        net_id=0,\n    ):\n        \"\"\"\n        Create a new recording chunk by getting recordings and recording times of the\n        current chunk and optionally resetting the model. Recordings are automatically\n        resumed in the new chunk if they are not paused.\n\n        Args:\n            populations (bool, optional):\n                If True, reset populations. Default: True.\n            projections (bool, optional):\n                If True, reset projections. Default: False.\n            synapses (bool, optional):\n                If True, reset synapses. Default: False.\n            monitors (bool, optional):\n                If True, reset ANNarchy monitors. Default: True.\n            model (bool, optional):\n                If True, reset model. Default: True.\n            parameters (bool, optional):\n                If True, reset the parameters of popilations and projections. Default:\n                True.\n            net_id (int, optional):\n                Id of the network to reset. 
Default: 0.\n        \"\"\"\n        ### TODO rename this function to new_chunk() or something like that and let\n        ### recordings and recording times be returned\n        self.get_recordings_reset_call = True\n        self.get_recordings()\n        self.get_recording_times()\n        self.get_recordings_reset_call = False\n        self.already_got_recordings = (\n            False  # after reset one can still update recordings\n        )\n        self.already_got_recording_times = (\n            False  # after reset one can still update recording_times\n        )\n\n        ### reset timings, after reset, add a zero to start if the monitor is still\n        ### running (this is not resetted by reset())\n        ### if the model was not resetted --> do add current time instead of zero\n        for key in self.timings.keys():\n            self.timings[key][\"start\"] = []\n            self.timings[key][\"stop\"] = []\n            if self.timings[key][\"currently_paused\"] == False:\n                if model:\n                    self.timings[key][\"start\"].append(0)\n                else:\n                    self.timings[key][\"start\"].append(\n                        np.round(get_time(), af.get_number_of_decimals(dt()))\n                    )\n\n        ### reset model\n        if model:\n            if parameters is False:\n                ### if parameters=False, get parameters before reset and set them after\n                ### reset\n                parameters_dict = mf._get_all_parameters()\n            reset(populations, projections, synapses, monitors, net_id=net_id)\n            if parameters is False:\n                ### if parameters=False, set parameters after reset\n                mf._set_all_parameters(parameters_dict)\n\n    def current_chunk(self):\n        \"\"\"\n        Get the index of the current chunk.\n\n        Returns:\n            current_chunk_idx (int):\n                Index of the current chunk. 
If no recordings are currently active,\n                returns None.\n        \"\"\"\n        ### if recordings are currently active --> return chunk in which these recordings will be saved\n        ### check if there are currently active recordings\n        active_recordings = False\n        for key, val in self.mon_dict.items():\n            _, compartment, _ = self._unpack_mon_dict_keys(key)\n            if not (self.timings[compartment][\"currently_paused\"]):\n                ### tere are currently active recordings\n                active_recordings = True\n\n        if active_recordings:\n            current_chunk_idx = len(self.recordings)\n            return current_chunk_idx\n        else:\n            ### if currently no recordings are active return None\n            return None\n\n    def get_recordings(self) -> list[dict]:\n        \"\"\"\n        Get recordings of all recorded compartments.\n\n        Returns:\n            recordings (list):\n                List with recordings of all chunks.\n        \"\"\"\n        ### only if recordings in current chunk and get_recodings was not already called add current chunk to recordings\n        if (\n            self._any_recordings_in_current_chunk()\n            and self.already_got_recordings is False\n        ):\n            ### update recordings\n            self.recordings.append(self._get_monitors(self.mon_dict, self.mon))\n            ### upade already_got_recordings --> it will not update recordings again\n            self.already_got_recordings = True\n\n            if not (self.get_recordings_reset_call):\n                if len(self.recordings) == 0:\n                    print(\n                        \"WARNING get_recordings: no recordings available, empty list returned. 
Maybe forgot start()?\"\n                    )\n            return self.recordings\n        else:\n            if not (self.get_recordings_reset_call):\n                if len(self.recordings) == 0:\n                    print(\n                        \"WARNING get_recordings: no recordings available, empty list returned. Maybe forgot start()?\"\n                    )\n            return self.recordings\n\n    def get_recording_times(self):\n        \"\"\"\n        Get recording times of all recorded compartments.\n\n        Returns:\n            recording_times (recording_times_cl):\n                Object with recording times of all chunks.\n        \"\"\"\n\n        temp_timings = self._get_temp_timings()\n\n        ### only append temp_timings of current chunk if there are recordings in current chunk at all and if get_recordings was not already called (double call would add the same chunk again)\n        if (\n            self._any_recordings_in_current_chunk()\n            and self.already_got_recording_times is False\n        ):\n            self.recording_times.append(temp_timings)\n\n        ### upade already_got_recording_times --> it will not update recording_times again\n        self.already_got_recording_times = True\n\n        ### generate a object from recording_times and return this instead of the dict\n        recording_times_ob = RecordingTimes(self.recording_times)\n\n        if not (self.get_recordings_reset_call):\n            if len(self.recording_times) == 0:\n                print(\n                    \"WARNING get_recording_times: no recordings available, empty list returned. 
Maybe forgot start()?\"\n                )\n        return recording_times_ob\n\n    def get_recordings_and_clear(self):\n        \"\"\"\n        The default get_recordings method should be called at the end of the simulation.\n        The get_recordings_and_clear method allows to get several times recordings with\n        the same monitor object and to simulate between the calls. Sets the internal\n        variables back to their initial state. Usefull if you repeat a simulation +\n        recording several times and you do not want to always create new chunks.\n\n        !!! warning\n            If you want to continue recording after calling this method, you have to\n            call start() again.\n\n        Returns:\n            recordings (list):\n                List with recordings of all chunks.\n            recording_times (recording_times_cl):\n                Object with recording times of all chunks.\n        \"\"\"\n        ret0 = self.get_recordings()\n        ret1 = self.get_recording_times()\n        self._init_internals()\n        ret = (ret0, ret1)\n        return ret\n\n    def _correct_start_stop(self, start_time_arr, stop_time_arr, period):\n        \"\"\"\n        Corrects the start and stop times of recordings to the actual start and stop\n        times of recorded values.\n\n        Args:\n            start_time_arr (np.array):\n                Array with start times of recordings, obtained with get_time() function\n                of ANNarchy.\n            stop_time_arr (np.array):\n                Array with stop times of recordings, obtained with get_time() function\n                of ANNarchy.\n            period (float):\n                Time difference between recording values specified by the user.\n\n        Returns:\n            actual_start_time (np.array):\n                Array with actual start times of recorded values.\n            actual_stop_time (np.array):\n                Array with actual stop times of recorded 
values.\n            nr_rec_vals (np.array):\n                Array with number of recorded values between start and stop.\n        \"\"\"\n        # actual_period = int(period / dt()) * dt()\n        actual_start_time = np.ceil(start_time_arr / period) * period\n\n        actual_stop_time = np.ceil(stop_time_arr / period - 1) * period\n\n        nr_rec_vals = 1 + (actual_stop_time - actual_start_time) / period\n\n        return (actual_start_time, actual_stop_time, nr_rec_vals)\n\n    def _get_temp_timings(self):\n        \"\"\"\n        Generates a timings dictionary with time lims and idx lims for each compartment.\n        Calculates the idx lims of the recordings based on the time lims.\n\n        Returns:\n            temp_timings (dict):\n                Dict with time lims and idx lims for each compartment.\n        \"\"\"\n        temp_timings = {}\n        for key in self.mon_dict.keys():\n            _, compartment, period = self._unpack_mon_dict_keys(key)\n            if len(self.timings[compartment][\"start\"]) > len(\n                self.timings[compartment][\"stop\"]\n            ):\n                ### was started/resumed but never stoped after --> use current time for stop time\n                self.timings[compartment][\"stop\"].append(get_time())\n            ### calculate the idx of the recorded arrays which correspond to the timings and remove 'currently_paused'\n            ### get for each start-stop pair the corrected start stop timings (when teh values were actually recorded, depends on period and timestep)\n            ### and also get the number of recorded values for start-stop pair\n            start_time_arr = np.array(self.timings[compartment][\"start\"])\n            stop_time_arr = np.array(self.timings[compartment][\"stop\"])\n            (\n                start_time_arr,\n                stop_time_arr,\n                nr_rec_vals_arr,\n            ) = self._correct_start_stop(start_time_arr, stop_time_arr, period)\n\n           
 ### with the number of recorded values -> get start and end idx for each start-stop pair\n            start_idx = [\n                np.sum(nr_rec_vals_arr[0:i]).astype(int)\n                for i in range(nr_rec_vals_arr.size)\n            ]\n            stop_idx = [\n                np.sum(nr_rec_vals_arr[0 : i + 1]).astype(int) - 1\n                for i in range(nr_rec_vals_arr.size)\n            ]\n\n            ### return start-stop pair info in timings format\n            temp_timings[compartment] = {\n                \"start\": {\n                    \"ms\": np.round(\n                        start_time_arr, af.get_number_of_decimals(dt())\n                    ).tolist(),\n                    \"idx\": start_idx,\n                },\n                \"stop\": {\n                    \"ms\": np.round(\n                        stop_time_arr, af.get_number_of_decimals(dt())\n                    ).tolist(),\n                    \"idx\": stop_idx,\n                },\n            }\n        return temp_timings\n\n    def _any_recordings_in_current_chunk(self):\n        \"\"\"\n        Check if there are any recordings in the current chunk.\n\n        Returns:\n            any_recordings (bool):\n                True if there are any recordings in the current chunk, False otherwise.\n        \"\"\"\n        temp_timings = self._get_temp_timings()\n\n        ### generate a temp object of temp timings to check if there were recordings at all\n        recording_times_ob_temp = RecordingTimes([temp_timings])\n        return recording_times_ob_temp._any_recordings(chunk=0)\n\n    def _add_monitors(self, mon_dict: dict):\n        \"\"\"\n        Generate monitors defined by mon_dict.\n\n        Args:\n            mon_dict (dict):\n                dict with key=\"compartment_name;period\" where period is optional and\n                val=list with variables to record.\n\n        Returns:\n            mon (dict):\n                dict with key=\"pop_name\" for populations 
and key=\"proj_name\" for\n                projections and val=ANNarchy monitor object.\n        \"\"\"\n        mon = {}\n        for key, val in mon_dict.items():\n            compartmentType, compartment, period = self._unpack_mon_dict_keys(\n                key, warning=True\n            )\n            ### check if compartment is pop\n            if compartmentType == \"pop\":\n                mon[compartment] = Monitor(\n                    get_population(compartment), val, start=False, period=period\n                )\n            ### check if compartment is proj\n            if compartmentType == \"proj\":\n                mon[compartment] = Monitor(\n                    get_projection(compartment), val, start=False, period=period\n                )\n        return mon\n\n    def _start_monitors(self, compartment_list, mon, timings=None):\n        \"\"\"\n        Starts or resumes monitores defined by compartment_list.\n\n        Args:\n            compartment_list (list):\n                List with compartment names to start or resume recording.\n            mon (dict):\n                Dict with key=\"pop_name\" for populations and key=\"proj_name\" for\n                projections and val=ANNarchy monitor object.\n            timings (dict, optional):\n                timings variable of the CompNeuroMonitors object. 
Default: None.\n\n        Returns:\n            timings (dict):\n                timings variable of the CompNeuroMonitors object.\n        \"\"\"\n        ### for each compartment generate started variable (because compartments can ocure multiple times if multiple variables of them are recorded --> do not start same monitor multiple times)\n        started = {}\n        for compartment_name in compartment_list:\n            started[compartment_name] = False\n\n        if timings == None:\n            ### information about pauses not available, just start\n            for compartment_name in compartment_list:\n                if started[compartment_name] == False:\n                    mon[compartment_name].start()\n                    print(\"start\", compartment_name)\n                    started[compartment_name] = True\n            return None\n        else:\n            ### information about pauses available, start if not paused, resume if paused\n            for compartment_name in compartment_list:\n                if started[compartment_name] == False:\n                    if timings[compartment_name][\"currently_paused\"]:\n                        if len(timings[compartment_name][\"start\"]) > 0:\n                            ### resume\n                            mon[compartment_name].resume()\n                        else:\n                            ### initial start\n                            mon[compartment_name].start()\n                    started[compartment_name] = True\n                    ### update currently_paused\n                    timings[compartment_name][\"currently_paused\"] = False\n                    ### never make start longer than stop+1!... 
this can be caused if start is called multiple times without pause in between\n                    if len(timings[compartment_name][\"start\"]) <= len(\n                        timings[compartment_name][\"stop\"]\n                    ):\n                        timings[compartment_name][\"start\"].append(get_time())\n            return timings\n\n    def _pause_monitors(self, compartment_list, mon, timings=None):\n        \"\"\"\n        Pause monitores defined by compartment_list.\n\n        Args:\n            compartment_list (list):\n                List with compartment names to pause recording.\n            mon (dict):\n                Dict with key=\"pop_name\" for populations and key=\"proj_name\" for\n                projections and val=ANNarchy monitor object.\n            timings (dict, optional):\n                timings variable of the CompNeuroMonitors object. Default: None.\n\n        Returns:\n            timings (dict):\n                timings variable of the CompNeuroMonitors object.\n        \"\"\"\n        ### for each compartment generate paused variable (because compartments can ocure multiple times if multiple variables of them are recorded --> do not pause same monitor multiple times)\n        paused = {}\n        for compartment_name in compartment_list:\n            paused[compartment_name] = False\n\n        for compartment_name in compartment_list:\n            if paused[compartment_name] == False:\n                mon[compartment_name].pause()\n                paused[compartment_name] = True\n\n        if timings != None:\n            ### information about pauses is available, update it\n            for key, val in paused.items():\n                timings[key][\"currently_paused\"] = True\n                ### never make pause longer than start, this can be caused if pause is called multiple times without start in between\n                if len(timings[key][\"stop\"]) < len(timings[key][\"start\"]):\n                    
timings[key][\"stop\"].append(get_time())\n                ### if pause is directly called after start --> start == stop --> remove these entries, this is no actual period\n                if (\n                    len(timings[key][\"stop\"]) == len(timings[key][\"start\"])\n                    and timings[key][\"stop\"][-1] == timings[key][\"start\"][-1]\n                ):\n                    timings[key][\"stop\"] = timings[key][\"stop\"][:-1]\n                    timings[key][\"start\"] = timings[key][\"start\"][:-1]\n            return timings\n        else:\n            return None\n\n    def _get_monitors(self, mon_dict, mon):\n        \"\"\"\n        Get recorded values from ANNarchy monitors defined by mon_dict.\n\n        Args:\n            mon_dict (dict):\n                dict with key=\"compartment_name;period\" where period is optional and\n                val=list with variables to record.\n            mon (dict):\n                Dict with key=\"pop_name\" for populations and key=\"proj_name\" for\n                projections and val=ANNarchy monitor object.\n\n        Returns:\n            recordings (dict):\n                Dict with key=\"compartment_name;variable\" and val=list with recorded\n                values.\n        \"\"\"\n        recordings = {}\n        for key, val in mon_dict.items():\n            compartment_type, compartment, period = self._unpack_mon_dict_keys(key)\n            recordings[f\"{compartment};period\"] = period\n            if compartment_type == \"pop\":\n                pop = get_population(compartment)\n                parameter_dict = {\n                    param_name: getattr(pop, param_name)\n                    for param_name in pop.parameters\n                }\n                recordings[f\"{compartment};parameter_dict\"] = parameter_dict\n            if compartment_type == \"proj\":\n                proj = get_projection(compartment)\n                parameter_dict = {\n                    param_name: 
getattr(proj, param_name)\n                    for param_name in proj.parameters\n                }\n                recordings[f\"{compartment};parameters\"] = parameter_dict\n            for val_val in val:\n                temp = mon[compartment].get(val_val)\n                recordings[f\"{compartment};{val_val}\"] = temp\n        recordings[\"dt\"] = dt()\n        return recordings\n\n    def _unpack_mon_dict_keys(self, s: str, warning: bool = False):\n        \"\"\"\n        Unpacks a string of the form \"compartment_name;period\" or\n        \"compartment_name\" into its components. If period is not provided\n        it is set to dt() for populations and dt()*1000 for projections.\n\n        Args:\n            s (str):\n                String to be unpacked\n            warning (bool, optional):\n                If True, print warning if period is not provided for projections.\n\n        Returns:\n            compartment_type (str):\n                Compartment type\n            compartment_name (str):\n                Compartment name\n            period (float):\n                Period of the compartment\n        \"\"\"\n        ### split string\n        splitted_s = s.split(\";\")\n\n        ### get name\n        compartment_name = splitted_s[0]\n\n        ### get type\n        pop_list = [pop.name for pop in populations()]\n        proj_list = [proj.name for proj in projections()]\n        if compartment_name in pop_list and compartment_name in proj_list:\n            ### raise error because name is in both lists\n            print(\n                \"ERROR CompNeuroMonitors._unpack_mon_dict_keys(): compartment_name is both populaiton and projection\"\n            )\n            quit()\n        elif compartment_name in pop_list:\n            compartment_type = \"pop\"\n        elif compartment_name in proj_list:\n            compartment_type = \"proj\"\n\n        ### get period\n        if len(splitted_s) == 2:\n            period = 
float(splitted_s[1])\n        else:\n            period = {\"pop\": dt(), \"proj\": dt() * 1000}[compartment_type]\n            ### print warning for compartment_type proj\n            if compartment_type == \"proj\" and warning:\n                print(\n                    f\"WARNING CompNeuroMonitors: no period provided for projection {compartment_name}, period set to {period} ms\"\n                )\n        period = round(period / dt()) * dt()\n\n        return compartment_type, compartment_name, period\n
"},{"location":"main/monitors_recordings/#CompNeuroPy.monitors.CompNeuroMonitors.__init__","title":"__init__(mon_dict={})","text":"

Initialize CompNeuroMonitors object by creating ANNarchy monitors.

Parameters:

Name Type Description Default mon_dict dict

dict with key=\"compartment_name;period\" where period is optional and val=list with variables to record.

{} Source code in src/CompNeuroPy/monitors.py
def __init__(self, mon_dict={}):\n    \"\"\"\n    Initialize CompNeuroMonitors object by creating ANNarchy monitors.\n\n    Args:\n        mon_dict (dict):\n            dict with key=\"compartment_name;period\" where period is optional and\n            val=list with variables to record.\n    \"\"\"\n    self.mon = self._add_monitors(mon_dict)\n    self.mon_dict = mon_dict\n    self._init_internals(init_call=True)\n
"},{"location":"main/monitors_recordings/#CompNeuroPy.monitors.CompNeuroMonitors.start","title":"start(compartment_list=None)","text":"

Start or resume recording of all recorded compartments in compartment_list.

Parameters:

Name Type Description Default compartment_list list

List with compartment names to start or resume recording. Default: None, i.e., all compartments of initialized mon_dict are started or resumed.

None Source code in src/CompNeuroPy/monitors.py
@check_types()\ndef start(self, compartment_list: list | None = None):\n    \"\"\"\n    Start or resume recording of all recorded compartments in compartment_list.\n\n    Args:\n        compartment_list (list, optional):\n            List with compartment names to start or resume recording. Default: None,\n            i.e., all compartments of initialized mon_dict are started or resumed.\n    \"\"\"\n    if compartment_list == None:\n        mon_dict_key_list = list(self.mon_dict.keys())\n        compartment_list = [\n            self._unpack_mon_dict_keys(key)[1] for key in mon_dict_key_list\n        ]\n\n    self.timings = self._start_monitors(compartment_list, self.mon, self.timings)\n
"},{"location":"main/monitors_recordings/#CompNeuroPy.monitors.CompNeuroMonitors.pause","title":"pause(compartment_list=None)","text":"

Pause recording of all recorded compartments in compartment_list.

Parameters:

Name Type Description Default compartment_list list

List with compartment names to pause recording. Default: None, i.e., all compartments of initialized mon_dict are paused.

None Source code in src/CompNeuroPy/monitors.py
@check_types()\ndef pause(self, compartment_list: list | None = None):\n    \"\"\"\n    Pause recording of all recorded compartments in compartment_list.\n\n    Args:\n        compartment_list (list, optional):\n            List with compartment names to pause recording. Default: None,\n            i.e., all compartments of initialized mon_dict are paused.\n    \"\"\"\n    if compartment_list == None:\n        mon_dict_key_list = list(self.mon_dict.keys())\n        compartment_list = [\n            self._unpack_mon_dict_keys(key)[1] for key in mon_dict_key_list\n        ]\n\n    self.timings = self._pause_monitors(compartment_list, self.mon, self.timings)\n
"},{"location":"main/monitors_recordings/#CompNeuroPy.monitors.CompNeuroMonitors.reset","title":"reset(populations=True, projections=False, synapses=False, monitors=True, model=True, parameters=True, net_id=0)","text":"

Create a new recording chunk by getting recordings and recording times of the current chunk and optionally resetting the model. Recordings are automatically resumed in the new chunk if they are not paused.

Parameters:

Name Type Description Default populations bool

If True, reset populations. Default: True.

True projections bool

If True, reset projections. Default: False.

False synapses bool

If True, reset synapses. Default: False.

False monitors bool

If True, reset ANNarchy monitors. Default: True.

True model bool

If True, reset model. Default: True.

True parameters bool

If True, reset the parameters of populations and projections. Default: True.

True net_id int

Id of the network to reset. Default: 0.

0 Source code in src/CompNeuroPy/monitors.py
def reset(\n    self,\n    populations=True,\n    projections=False,\n    synapses=False,\n    monitors=True,\n    model=True,\n    parameters=True,\n    net_id=0,\n):\n    \"\"\"\n    Create a new recording chunk by getting recordings and recording times of the\n    current chunk and optionally resetting the model. Recordings are automatically\n    resumed in the new chunk if they are not paused.\n\n    Args:\n        populations (bool, optional):\n            If True, reset populations. Default: True.\n        projections (bool, optional):\n            If True, reset projections. Default: False.\n        synapses (bool, optional):\n            If True, reset synapses. Default: False.\n        monitors (bool, optional):\n            If True, reset ANNarchy monitors. Default: True.\n        model (bool, optional):\n            If True, reset model. Default: True.\n        parameters (bool, optional):\n            If True, reset the parameters of popilations and projections. Default:\n            True.\n        net_id (int, optional):\n            Id of the network to reset. 
Default: 0.\n    \"\"\"\n    ### TODO rename this function to new_chunk() or something like that and let\n    ### recordings and recording times be returned\n    self.get_recordings_reset_call = True\n    self.get_recordings()\n    self.get_recording_times()\n    self.get_recordings_reset_call = False\n    self.already_got_recordings = (\n        False  # after reset one can still update recordings\n    )\n    self.already_got_recording_times = (\n        False  # after reset one can still update recording_times\n    )\n\n    ### reset timings, after reset, add a zero to start if the monitor is still\n    ### running (this is not resetted by reset())\n    ### if the model was not resetted --> do add current time instead of zero\n    for key in self.timings.keys():\n        self.timings[key][\"start\"] = []\n        self.timings[key][\"stop\"] = []\n        if self.timings[key][\"currently_paused\"] == False:\n            if model:\n                self.timings[key][\"start\"].append(0)\n            else:\n                self.timings[key][\"start\"].append(\n                    np.round(get_time(), af.get_number_of_decimals(dt()))\n                )\n\n    ### reset model\n    if model:\n        if parameters is False:\n            ### if parameters=False, get parameters before reset and set them after\n            ### reset\n            parameters_dict = mf._get_all_parameters()\n        reset(populations, projections, synapses, monitors, net_id=net_id)\n        if parameters is False:\n            ### if parameters=False, set parameters after reset\n            mf._set_all_parameters(parameters_dict)\n
"},{"location":"main/monitors_recordings/#CompNeuroPy.monitors.CompNeuroMonitors.current_chunk","title":"current_chunk()","text":"

Get the index of the current chunk.

Returns:

Name Type Description current_chunk_idx int

Index of the current chunk. If no recordings are currently active, returns None.

Source code in src/CompNeuroPy/monitors.py
def current_chunk(self):\n    \"\"\"\n    Get the index of the current chunk.\n\n    Returns:\n        current_chunk_idx (int):\n            Index of the current chunk. If no recordings are currently active,\n            returns None.\n    \"\"\"\n    ### if recordings are currently active --> return chunk in which these recordings will be saved\n    ### check if there are currently active recordings\n    active_recordings = False\n    for key, val in self.mon_dict.items():\n        _, compartment, _ = self._unpack_mon_dict_keys(key)\n        if not (self.timings[compartment][\"currently_paused\"]):\n            ### tere are currently active recordings\n            active_recordings = True\n\n    if active_recordings:\n        current_chunk_idx = len(self.recordings)\n        return current_chunk_idx\n    else:\n        ### if currently no recordings are active return None\n        return None\n
"},{"location":"main/monitors_recordings/#CompNeuroPy.monitors.CompNeuroMonitors.get_recordings","title":"get_recordings()","text":"

Get recordings of all recorded compartments.

Returns:

Name Type Description recordings list

List with recordings of all chunks.

Source code in src/CompNeuroPy/monitors.py
def get_recordings(self) -> list[dict]:\n    \"\"\"\n    Get recordings of all recorded compartments.\n\n    Returns:\n        recordings (list):\n            List with recordings of all chunks.\n    \"\"\"\n    ### only if recordings in current chunk and get_recodings was not already called add current chunk to recordings\n    if (\n        self._any_recordings_in_current_chunk()\n        and self.already_got_recordings is False\n    ):\n        ### update recordings\n        self.recordings.append(self._get_monitors(self.mon_dict, self.mon))\n        ### upade already_got_recordings --> it will not update recordings again\n        self.already_got_recordings = True\n\n        if not (self.get_recordings_reset_call):\n            if len(self.recordings) == 0:\n                print(\n                    \"WARNING get_recordings: no recordings available, empty list returned. Maybe forgot start()?\"\n                )\n        return self.recordings\n    else:\n        if not (self.get_recordings_reset_call):\n            if len(self.recordings) == 0:\n                print(\n                    \"WARNING get_recordings: no recordings available, empty list returned. Maybe forgot start()?\"\n                )\n        return self.recordings\n
"},{"location":"main/monitors_recordings/#CompNeuroPy.monitors.CompNeuroMonitors.get_recording_times","title":"get_recording_times()","text":"

Get recording times of all recorded compartments.

Returns:

Name Type Description recording_times recording_times_cl

Object with recording times of all chunks.

Source code in src/CompNeuroPy/monitors.py
def get_recording_times(self):\n    \"\"\"\n    Get recording times of all recorded compartments.\n\n    Returns:\n        recording_times (recording_times_cl):\n            Object with recording times of all chunks.\n    \"\"\"\n\n    temp_timings = self._get_temp_timings()\n\n    ### only append temp_timings of current chunk if there are recordings in current chunk at all and if get_recordings was not already called (double call would add the same chunk again)\n    if (\n        self._any_recordings_in_current_chunk()\n        and self.already_got_recording_times is False\n    ):\n        self.recording_times.append(temp_timings)\n\n    ### upade already_got_recording_times --> it will not update recording_times again\n    self.already_got_recording_times = True\n\n    ### generate a object from recording_times and return this instead of the dict\n    recording_times_ob = RecordingTimes(self.recording_times)\n\n    if not (self.get_recordings_reset_call):\n        if len(self.recording_times) == 0:\n            print(\n                \"WARNING get_recording_times: no recordings available, empty list returned. Maybe forgot start()?\"\n            )\n    return recording_times_ob\n
"},{"location":"main/monitors_recordings/#CompNeuroPy.monitors.CompNeuroMonitors.get_recordings_and_clear","title":"get_recordings_and_clear()","text":"

The default get_recordings method should be called at the end of the simulation. The get_recordings_and_clear method allows getting recordings several times with the same monitor object and simulating between the calls. Sets the internal variables back to their initial state. Useful if you repeat a simulation + recording several times and you do not want to always create new chunks.

Warning

If you want to continue recording after calling this method, you have to call start() again.

Returns:

Name Type Description recordings list

List with recordings of all chunks.

recording_times recording_times_cl

Object with recording times of all chunks.

Source code in src/CompNeuroPy/monitors.py
def get_recordings_and_clear(self):\n    \"\"\"\n    The default get_recordings method should be called at the end of the simulation.\n    The get_recordings_and_clear method allows to get several times recordings with\n    the same monitor object and to simulate between the calls. Sets the internal\n    variables back to their initial state. Usefull if you repeat a simulation +\n    recording several times and you do not want to always create new chunks.\n\n    !!! warning\n        If you want to continue recording after calling this method, you have to\n        call start() again.\n\n    Returns:\n        recordings (list):\n            List with recordings of all chunks.\n        recording_times (recording_times_cl):\n            Object with recording times of all chunks.\n    \"\"\"\n    ret0 = self.get_recordings()\n    ret1 = self.get_recording_times()\n    self._init_internals()\n    ret = (ret0, ret1)\n    return ret\n
"},{"location":"main/monitors_recordings/#CompNeuroPy.monitors.RecordingTimes","title":"CompNeuroPy.monitors.RecordingTimes","text":"Source code in src/CompNeuroPy/monitors.py
class RecordingTimes:\n    def __init__(self, recording_times_list):\n        \"\"\"\n        Initialize RecordingTimes object.\n\n        Args:\n            recording_times_list (list):\n                List with recording times of all chunks.\n        \"\"\"\n        self.recording_times_list = recording_times_list\n\n    def time_lims(\n        self,\n        chunk: int | None = None,\n        compartment: str | None = None,\n        period: int | None = None,\n    ):\n        \"\"\"\n        Get the time limits recordings of of a specified chunk/model compartment in ms.\n\n        chunk (int, optional):\n            Index of the chunk. Default: None, i.e., first chunk.\n        compartment (str, optional):\n            Name of the compartment. Default: None, i.e., first model compartment from\n            monitor.\n        period (int, optional):\n            Index of the period. Default: None, i.e., all periods.\n\n        Returns:\n            lims (tuple):\n                Tuple with start and stop time of the specified chunk/model compartment.\n        \"\"\"\n        assert (\n            len(self.recording_times_list) > 0\n        ), \"ERROR time_lims(): No recordings/recording_times available.\"\n        return self._lims(\"ms\", chunk, compartment, period)\n\n    def idx_lims(\n        self,\n        chunk: int | None = None,\n        compartment: str | None = None,\n        period: int | None = None,\n    ):\n        \"\"\"\n        Get the index limits of recordings of a specified chunk/model compartment.\n\n        chunk (int, optional):\n            Index of the chunk. Default: None, i.e., first chunk.\n        compartment (str, optional):\n            Name of the compartment. Default: None, i.e., first model compartment from\n            monitor.\n        period (int, optional):\n            Index of the period. 
Default: None, i.e., all periods.\n\n        Returns:\n            lims (tuple):\n                Tuple with start and stop index of the specified chunk/model\n                compartment.\n        \"\"\"\n        assert (\n            len(self.recording_times_list) > 0\n        ), \"ERROR idx_lims(): No recordings/recording_times available.\"\n        return self._lims(\"idx\", chunk, compartment, period)\n\n    def all(self):\n        \"\"\"\n        Get the recording times of all chunks, compartments, periods in ms and index.\n\n        Returns:\n            recording_times_list (list):\n                List with recording times of all chunks.\n        \"\"\"\n        return self.recording_times_list\n\n    def nr_periods(self, chunk=None, compartment=None):\n        \"\"\"\n        Get the number of recording periods (start-pause) of a specified chunk/model\n        compartment.\n\n        Args:\n            chunk (int, optional):\n                Index of the chunk. Default: None, i.e., first chunk.\n            compartment (str, optional):\n                Name of the compartment. 
Default: None, i.e., first model compartment\n                from monitor.\n\n        Returns:\n            nr_periods (int):\n                Number of recording periods (start-pause) of a specified chunk/model\n                compartment.\n        \"\"\"\n        chunk = self._check_chunk(chunk)\n        compartment = self.__check_compartment__(compartment, chunk)\n        return self._get_nr_periods(chunk, compartment)\n\n    def combine_chunks(\n        self, recordings: list, recording_data_str: str, mode=\"sequential\"\n    ):\n        \"\"\"\n        Combines the data of all chunks of recordings, only possible if no pauses in\n        between.\n\n        Args:\n            recordings (list):\n                List with recordings of all chunks.\n            recording_data_str (str):\n                String specifying the compartment name and the variable to combine.\n                Format: \"compartment_name;variable_name\"\n            mode (str, optional):\n                How should the time array be generated. Can be \"sequential\" or\n                \"consecutive\". 
Default: \"sequential\".\n                - \"sequential\": each chunk starts at zero e.g.: [0,100] + [0,250] -->\n                    [0, 1, ..., 100, 0, 1, ..., 250]\n                - \"consecutive\": each chunk starts at the last stop time of the previous\n                    chunk e.g.: [0,100] + [0,250] --> [0, 1, ..., 100, 101, 102, ..., 350]\n\n        Returns:\n            time_arr (np.array):\n                Array with time values in ms.\n            data_arr (np.array):\n                Array with the recorded variable.\n        \"\"\"\n        assert (\n            len(self.recording_times_list) > 0\n        ), \"ERROR combine_chunks(): No recordings/recording_times available.\"\n\n        compartment = recording_data_str.split(\";\")[0]\n        period_time = recordings[0][f\"{compartment};period\"]\n        time_step = recordings[0][\"dt\"]\n        nr_chunks = self._get_nr_chunks()\n        data_list = []\n        time_list = []\n        pre_chunk_start_time = 0\n\n        for chunk in range(nr_chunks):\n            ### append data list with data of all periods of this chunk\n            data_list.append(recordings[chunk][recording_data_str])\n\n            ### nr of periods in this chunk\n            nr_periods = self._get_nr_periods(chunk, compartment)\n\n            ### start time of chunk depends on mode\n            if mode == \"sequential\":\n                chunk_start_time = 0\n            elif mode == \"consecutive\":\n                if chunk == 0:\n                    chunk_start_time = 0\n                else:\n                    last_stop_time = self.recording_times_list[chunk - 1][compartment][\n                        \"stop\"\n                    ][\"ms\"][-1]\n                    chunk_start_time = (\n                        pre_chunk_start_time + last_stop_time + period_time\n                    )\n                    pre_chunk_start_time = chunk_start_time\n            else:\n                print(\"ERROR 
recording_times.combine_data, Wrong mode.\")\n                quit()\n\n            ### append the time list with all times of the periods\n            for period in range(nr_periods):\n                start_time = (\n                    self.time_lims(chunk=chunk, compartment=compartment, period=period)[\n                        0\n                    ]\n                    + chunk_start_time\n                )\n                end_time = (\n                    self.time_lims(chunk=chunk, compartment=compartment, period=period)[\n                        1\n                    ]\n                    + chunk_start_time\n                )\n                start_time = round(start_time, af.get_number_of_decimals(time_step))\n                end_time = round(end_time, af.get_number_of_decimals(time_step))\n                times = np.arange(start_time, end_time + period_time, period_time)\n                time_list.append(times)\n\n        ### flatten the two lists\n        data_arr = np.concatenate(data_list, 0)\n        time_arr = np.concatenate(time_list, 0)\n\n        ### check if there are gaps in the time array\n        ### fill them with the corersponding times and\n        ### the data array with nan values\n        time_arr, data_arr = af.time_data_add_nan(\n            time_arr,\n            data_arr,\n            fill_time_step=period_time,\n        )\n\n        return time_arr, data_arr\n\n    def _lims(self, string, chunk=None, compartment=None, period=None):\n        \"\"\"\n        Get the limits of recordings of a specified chunk/model compartment.\n\n        Args:\n            string (str):\n                String specifying the type of limits to return. Can be \"ms\" for time\n                limits in ms or \"idx\" for index limits.\n            chunk (int, optional):\n                Index of the chunk. Default: None, i.e., first chunk.\n            compartment (str, optional):\n                Name of the compartment. 
Default: None, i.e., first model compartment\n                from monitor.\n            period (int, optional):\n                Index of the period. Default: None, i.e., all periods.\n\n        Returns:\n            lims (tuple):\n                Tuple with start and stop time/index of the specified chunk/model\n                compartment.\n        \"\"\"\n\n        chunk = self._check_chunk(chunk)\n        compartment = self.__check_compartment__(compartment, chunk)\n        period_0, period_1 = self._check_period(period, chunk, compartment)\n        lims = (\n            self.recording_times_list[chunk][compartment][\"start\"][string][period_0],\n            self.recording_times_list[chunk][compartment][\"stop\"][string][period_1],\n        )\n        return lims\n\n    def __check_compartment__(self, compartment, chunk):\n        if compartment == None:\n            ### by default just use the first compartment\n            compartment = list(self.recording_times_list[chunk].keys())[0]\n        elif compartment in list(self.recording_times_list[chunk].keys()):\n            compartment = compartment\n        else:\n            print(\n                'ERROR recording_times, given compartment \"'\n                + str(compartment)\n                + '\" not available'\n            )\n            quit()\n\n        return compartment\n\n    def _check_period(self, period, chunk, compartment):\n        \"\"\"\n        Check if period is given.\n\n        Args:\n            period (int, optional):\n                Index of the period. Default: None, i.e., all periods.\n            chunk (int):\n                Index of the chunk.\n            compartment (str):\n                Name of the compartment.\n\n        Returns:\n            period_0 (int):\n                Index of the first period.\n            period_1 (int):\n                Index of the last period. 
If perios is given, period_0 == period_1.\n        \"\"\"\n        if period == None:\n            ### by default use all periods\n            period_0 = 0\n            period_1 = (\n                len(self.recording_times_list[chunk][compartment][\"start\"][\"idx\"]) - 1\n            )\n        elif period < len(\n            self.recording_times_list[chunk][compartment][\"start\"][\"idx\"]\n        ):\n            period_0 = period\n            period_1 = period\n        else:\n            print(\"ERROR recording_times, given period not available\")\n            quit()\n\n        return period_0, period_1\n\n    def _check_chunk(self, chunk):\n        \"\"\"\n        Check if chunk is given.\n\n        Args:\n            chunk (int, optional):\n                Index of the chunk. Default: None, i.e., first chunk.\n\n        Returns:\n            chunk (int):\n                Index of the chunk.\n        \"\"\"\n        if chunk is None:\n            ### by default use first chunk\n            chunk = 0\n        elif chunk < self._get_nr_chunks():\n            chunk = chunk\n        else:\n            print(\"ERROR recording_times, given chunk not available\")\n            quit()\n\n        return chunk\n\n    def _get_nr_chunks(self):\n        \"\"\"\n        Get the number of chunks of the recordings.\n\n        Returns:\n            nr_chunks (int):\n                Number of chunks.\n        \"\"\"\n        return len(self.recording_times_list)\n\n    def _get_nr_periods(self, chunk, compartment):\n        \"\"\"\n        Get the number of recording periods (start-pause) of a specified chunk/model\n        compartment.\n\n        Args:\n            chunk (int):\n                Index of the chunk.\n            compartment (str):\n                Name of the compartment.\n\n        Returns:\n            nr_periods (int):\n                Number of recording periods (start-pause) of a specified chunk/model\n                compartment.\n        \"\"\"\n        
return len(self.recording_times_list[chunk][compartment][\"start\"][\"idx\"])\n\n    def _any_recordings(self, chunk):\n        \"\"\"\n        Check all periods and compartments if there are any recordings.\n\n        Args:\n            chunk (int):\n                Index of the chunk.\n\n        Returns:\n            found_recordings (bool):\n                True if there are any recordings in the chunk, False otherwise.\n        \"\"\"\n        compartment_list = list(self.recording_times_list[chunk].keys())\n        found_recordings = False\n        for compartment in compartment_list:\n            nr_periods_of_compartment = len(\n                self.recording_times_list[chunk][compartment][\"start\"][\"idx\"]\n            )\n\n            for period_idx in range(nr_periods_of_compartment):\n                idx_lims = self.idx_lims(\n                    chunk=chunk, compartment=compartment, period=period_idx\n                )\n                if np.diff(idx_lims)[0] > 0:\n                    found_recordings = True\n\n        return found_recordings\n
"},{"location":"main/monitors_recordings/#CompNeuroPy.monitors.RecordingTimes.__init__","title":"__init__(recording_times_list)","text":"

Initialize RecordingTimes object.

Parameters:

Name Type Description Default recording_times_list list

List with recording times of all chunks.

required Source code in src/CompNeuroPy/monitors.py
def __init__(self, recording_times_list):\n    \"\"\"\n    Initialize RecordingTimes object.\n\n    Args:\n        recording_times_list (list):\n            List with recording times of all chunks.\n    \"\"\"\n    self.recording_times_list = recording_times_list\n
"},{"location":"main/monitors_recordings/#CompNeuroPy.monitors.RecordingTimes.time_lims","title":"time_lims(chunk=None, compartment=None, period=None)","text":"

Get the time limits of recordings of a specified chunk/model compartment in ms.

chunk (int, optional): Index of the chunk. Default: None, i.e., first chunk. compartment (str, optional): Name of the compartment. Default: None, i.e., first model compartment from monitor. period (int, optional): Index of the period. Default: None, i.e., all periods.

Returns:

Name Type Description lims tuple

Tuple with start and stop time of the specified chunk/model compartment.

Source code in src/CompNeuroPy/monitors.py
def time_lims(\n    self,\n    chunk: int | None = None,\n    compartment: str | None = None,\n    period: int | None = None,\n):\n    \"\"\"\n    Get the time limits recordings of of a specified chunk/model compartment in ms.\n\n    chunk (int, optional):\n        Index of the chunk. Default: None, i.e., first chunk.\n    compartment (str, optional):\n        Name of the compartment. Default: None, i.e., first model compartment from\n        monitor.\n    period (int, optional):\n        Index of the period. Default: None, i.e., all periods.\n\n    Returns:\n        lims (tuple):\n            Tuple with start and stop time of the specified chunk/model compartment.\n    \"\"\"\n    assert (\n        len(self.recording_times_list) > 0\n    ), \"ERROR time_lims(): No recordings/recording_times available.\"\n    return self._lims(\"ms\", chunk, compartment, period)\n
"},{"location":"main/monitors_recordings/#CompNeuroPy.monitors.RecordingTimes.idx_lims","title":"idx_lims(chunk=None, compartment=None, period=None)","text":"

Get the index limits of recordings of a specified chunk/model compartment.

chunk (int, optional): Index of the chunk. Default: None, i.e., first chunk. compartment (str, optional): Name of the compartment. Default: None, i.e., first model compartment from monitor. period (int, optional): Index of the period. Default: None, i.e., all periods.

Returns:

Name Type Description lims tuple

Tuple with start and stop index of the specified chunk/model compartment.

Source code in src/CompNeuroPy/monitors.py
def idx_lims(\n    self,\n    chunk: int | None = None,\n    compartment: str | None = None,\n    period: int | None = None,\n):\n    \"\"\"\n    Get the index limits of recordings of a specified chunk/model compartment.\n\n    chunk (int, optional):\n        Index of the chunk. Default: None, i.e., first chunk.\n    compartment (str, optional):\n        Name of the compartment. Default: None, i.e., first model compartment from\n        monitor.\n    period (int, optional):\n        Index of the period. Default: None, i.e., all periods.\n\n    Returns:\n        lims (tuple):\n            Tuple with start and stop index of the specified chunk/model\n            compartment.\n    \"\"\"\n    assert (\n        len(self.recording_times_list) > 0\n    ), \"ERROR idx_lims(): No recordings/recording_times available.\"\n    return self._lims(\"idx\", chunk, compartment, period)\n
"},{"location":"main/monitors_recordings/#CompNeuroPy.monitors.RecordingTimes.all","title":"all()","text":"

Get the recording times of all chunks, compartments, periods in ms and index.

Returns:

Name Type Description recording_times_list list

List with recording times of all chunks.

Source code in src/CompNeuroPy/monitors.py
def all(self):\n    \"\"\"\n    Get the recording times of all chunks, compartments, periods in ms and index.\n\n    Returns:\n        recording_times_list (list):\n            List with recording times of all chunks.\n    \"\"\"\n    return self.recording_times_list\n
"},{"location":"main/monitors_recordings/#CompNeuroPy.monitors.RecordingTimes.nr_periods","title":"nr_periods(chunk=None, compartment=None)","text":"

Get the number of recording periods (start-pause) of a specified chunk/model compartment.

Parameters:

Name Type Description Default chunk int

Index of the chunk. Default: None, i.e., first chunk.

None compartment str

Name of the compartment. Default: None, i.e., first model compartment from monitor.

None

Returns:

Name Type Description nr_periods int

Number of recording periods (start-pause) of a specified chunk/model compartment.

Source code in src/CompNeuroPy/monitors.py
def nr_periods(self, chunk=None, compartment=None):\n    \"\"\"\n    Get the number of recording periods (start-pause) of a specified chunk/model\n    compartment.\n\n    Args:\n        chunk (int, optional):\n            Index of the chunk. Default: None, i.e., first chunk.\n        compartment (str, optional):\n            Name of the compartment. Default: None, i.e., first model compartment\n            from monitor.\n\n    Returns:\n        nr_periods (int):\n            Number of recording periods (start-pause) of a specified chunk/model\n            compartment.\n    \"\"\"\n    chunk = self._check_chunk(chunk)\n    compartment = self.__check_compartment__(compartment, chunk)\n    return self._get_nr_periods(chunk, compartment)\n
"},{"location":"main/monitors_recordings/#CompNeuroPy.monitors.RecordingTimes.combine_chunks","title":"combine_chunks(recordings, recording_data_str, mode='sequential')","text":"

Combines the data of all chunks of recordings, only possible if no pauses in between.

Parameters:

Name Type Description Default recordings list

List with recordings of all chunks.

required recording_data_str str

String specifying the compartment name and the variable to combine. Format: \"compartment_name;variable_name\"

required mode str

How should the time array be generated. Can be \"sequential\" or \"consecutive\". Default: \"sequential\". - \"sequential\": each chunk starts at zero e.g.: [0,100] + [0,250] --> [0, 1, ..., 100, 0, 1, ..., 250] - \"consecutive\": each chunk starts at the last stop time of the previous chunk e.g.: [0,100] + [0,250] --> [0, 1, ..., 100, 101, 102, ..., 350]

'sequential'

Returns:

Name Type Description time_arr array

Array with time values in ms.

data_arr array

Array with the recorded variable.

Source code in src/CompNeuroPy/monitors.py
def combine_chunks(\n    self, recordings: list, recording_data_str: str, mode=\"sequential\"\n):\n    \"\"\"\n    Combines the data of all chunks of recordings, only possible if no pauses in\n    between.\n\n    Args:\n        recordings (list):\n            List with recordings of all chunks.\n        recording_data_str (str):\n            String specifying the compartment name and the variable to combine.\n            Format: \"compartment_name;variable_name\"\n        mode (str, optional):\n            How should the time array be generated. Can be \"sequential\" or\n            \"consecutive\". Default: \"sequential\".\n            - \"sequential\": each chunk starts at zero e.g.: [0,100] + [0,250] -->\n                [0, 1, ..., 100, 0, 1, ..., 250]\n            - \"consecutive\": each chunk starts at the last stop time of the previous\n                chunk e.g.: [0,100] + [0,250] --> [0, 1, ..., 100, 101, 102, ..., 350]\n\n    Returns:\n        time_arr (np.array):\n            Array with time values in ms.\n        data_arr (np.array):\n            Array with the recorded variable.\n    \"\"\"\n    assert (\n        len(self.recording_times_list) > 0\n    ), \"ERROR combine_chunks(): No recordings/recording_times available.\"\n\n    compartment = recording_data_str.split(\";\")[0]\n    period_time = recordings[0][f\"{compartment};period\"]\n    time_step = recordings[0][\"dt\"]\n    nr_chunks = self._get_nr_chunks()\n    data_list = []\n    time_list = []\n    pre_chunk_start_time = 0\n\n    for chunk in range(nr_chunks):\n        ### append data list with data of all periods of this chunk\n        data_list.append(recordings[chunk][recording_data_str])\n\n        ### nr of periods in this chunk\n        nr_periods = self._get_nr_periods(chunk, compartment)\n\n        ### start time of chunk depends on mode\n        if mode == \"sequential\":\n            chunk_start_time = 0\n        elif mode == \"consecutive\":\n            if chunk == 0:\n             
   chunk_start_time = 0\n            else:\n                last_stop_time = self.recording_times_list[chunk - 1][compartment][\n                    \"stop\"\n                ][\"ms\"][-1]\n                chunk_start_time = (\n                    pre_chunk_start_time + last_stop_time + period_time\n                )\n                pre_chunk_start_time = chunk_start_time\n        else:\n            print(\"ERROR recording_times.combine_data, Wrong mode.\")\n            quit()\n\n        ### append the time list with all times of the periods\n        for period in range(nr_periods):\n            start_time = (\n                self.time_lims(chunk=chunk, compartment=compartment, period=period)[\n                    0\n                ]\n                + chunk_start_time\n            )\n            end_time = (\n                self.time_lims(chunk=chunk, compartment=compartment, period=period)[\n                    1\n                ]\n                + chunk_start_time\n            )\n            start_time = round(start_time, af.get_number_of_decimals(time_step))\n            end_time = round(end_time, af.get_number_of_decimals(time_step))\n            times = np.arange(start_time, end_time + period_time, period_time)\n            time_list.append(times)\n\n    ### flatten the two lists\n    data_arr = np.concatenate(data_list, 0)\n    time_arr = np.concatenate(time_list, 0)\n\n    ### check if there are gaps in the time array\n    ### fill them with the corersponding times and\n    ### the data array with nan values\n    time_arr, data_arr = af.time_data_add_nan(\n        time_arr,\n        data_arr,\n        fill_time_step=period_time,\n    )\n\n    return time_arr, data_arr\n
"},{"location":"main/optimize_neuron/","title":"Optimize a neuron model","text":""},{"location":"main/optimize_neuron/#introduction","title":"Introduction","text":"

CompNeuroPy provides the OptNeuron class which can be used to define your optimization of an ANNarchy neuron model (tuning the parameters). You can either optimize your neuron model to some data or try to reproduce the dynamics of a different neuron model (for example to reduce a more complex model). In both cases, you have to define the experiment which generates the data of interest with your neuron model.

Warning

OptNeuron has to be imported from \"CompNeuroPy.opt_neuron\" and you have to install torch, sbi and hyperopt (e.g. pip install torch sbi hyperopt)

Used optimization methods:

  • hyperopt

    Bergstra, J., Yamins, D., Cox, D. D. (2013) Making a Science of Model Search: Hyperparameter Optimization in Hundreds of Dimensions for Vision Architectures. TProc. of the 30th International Conference on Machine Learning (ICML 2013), June 2013, pp. I-115 to I-23.

  • sbi

    Tejero-Cantero et al., (2020). sbi: A toolkit for simulation-based inference. Journal of Open Source Software, 5(52), 2505, https://doi.org/10.21105/joss.02505

"},{"location":"main/optimize_neuron/#example","title":"Example:","text":"
opt = OptNeuron(\n    experiment=my_exp,\n    get_loss_function=get_loss,\n    variables_bounds=variables_bounds,\n    results_soll=experimental_data[\"results_soll\"],\n    time_step=experimental_data[\"time_step\"],\n    compile_folder_name=\"annarchy_opt_neuron_example\",\n    neuron_model=my_neuron,\n    method=\"hyperopt\",\n    record=[\"r\"],\n)\n

A full example is available in the Examples.

"},{"location":"main/optimize_neuron/#run-the-optimization","title":"Run the optimization","text":"

To run the optimization simply call the run() function of the OptNeuron object.

"},{"location":"main/optimize_neuron/#define-the-experiment","title":"Define the experiment","text":"

You have to define a CompNeuroExp object containing a run() function. In the run() function simulations and recordings are performed.

Warning

While defining the CompNeuroExp run() function for the optimization with OptNeuron you must observe the following rules:

  • the run() function has to take a single argument (besides self) which contains the name of the population consiting of a single neuron of the optimized neuron model (you can use this to access the population)
  • call self.reset(parameters=False) at the beginning of the run function, thus the neuron will be in its compile state (except the paramters) at the beginning of each simulation run
  • always set parameters=False while calling the self.reset() function (otherwise the parameter optimization will not work)
  • besides the optimized parameters and the loss, the results of the experiment (using the optimized parameters) will be available after the optimization, you can store any additional data in the self.data attribute
"},{"location":"main/optimize_neuron/#example_1","title":"Example:","text":"
class my_exp(CompNeuroExp):\n    \"\"\"\n    Define an experiment by inheriting from CompNeuroExp.\n\n    CompNeuroExp provides the attributes:\n\n        monitors (CompNeuroMonitors):\n            a CompNeuroMonitors object to do recordings, define during init otherwise\n            None\n        data (dict):\n            a dictionary for storing any optional data\n\n    and the functions:\n        reset():\n            resets the model and monitors\n        results():\n            returns a results object\n    \"\"\"\n\n    def run(self, population_name):\n        \"\"\"\n        Do the simulations and recordings.\n\n        To use the CompNeuroExp class, you need to define a run function which\n        does the simulations and recordings. The run function should return the\n        results object which can be obtained by calling self.results().\n\n        For using the CompNeuroExp for OptNeuron, the run function should have\n        one argument which is the name of the population which is automatically created\n        by OptNeuron, containing a single neuron of the model which should be optimized.\n\n        Args:\n            population_name (str):\n                name of the population which contains a single neuron, this will be\n                automatically provided by opt_neuron\n\n        Returns:\n            results (CompNeuroExp._ResultsCl):\n                results object with attributes:\n                    recordings (list):\n                        list of recordings\n                    recording_times (recording_times_cl):\n                        recording times object\n                    mon_dict (dict):\n                        dict of recorded variables of the monitors\n                    data (dict):\n                        dict with optional data stored during the experiment\n        \"\"\"\n        ### For OptNeuron you have to reset the model and monitors at the beginning of\n        ### the run function! 
Do not reset the parameters, otherwise the optimization\n        ### will not work!\n        self.reset(parameters=False)\n\n        ### you have to start monitors within the run function, otherwise nothing will\n        ### be recorded\n        self.monitors.start()\n\n        ### run the simulation, remember setting parameters=False in the reset function!\n        ...\n        simulate(100)\n        self.reset(parameters=False)\n        ...\n\n        ### optional: store anything you want in the data dict. For example infomration\n        ### about the simulations. This is not used for the optimization but can be\n        ### retrieved after the optimization is finished\n        self.data[\"sim\"] = sim_step.simulation_info()\n        self.data[\"population_name\"] = population_name\n        self.data[\"time_step\"] = dt()\n\n        ### return results, use the object's self.results()\n        return self.results()\n
"},{"location":"main/optimize_neuron/#the-get_loss_function","title":"The get_loss_function","text":"

The get_loss_function must have two arguments. When this function is called during optimization, the first argument is always the results object returned by the experiment, i.e. the results of the neuron you want to optimize. The second argument depends on whether you have specified results_soll, i.e. data to be reproduced by the neuron_model, or whether you have specified a target_neuron_model whose results are to be reproduced by the neuron_model. Thus, the second argument is either results_soll provided to the OptNeuron class during initialization or another results object (returned by the CompNeuroExp run function), generated with the target_neuron_model.

"},{"location":"main/optimize_neuron/#example_2","title":"Example:","text":"

In this example we assume, that results_soll was provided during initialization of the OptNeuron class (no target_neuron_model used).

def get_loss(results_ist: CompNeuroExp._ResultsCl, results_soll):\n    \"\"\"\n    Function which has to have the arguments results_ist and results_soll and should\n    calculates and return the loss. This structure is needed for the OptNeuron class.\n\n    Args:\n        results_ist (object):\n            the results object returned by the run function of experiment (see above)\n        results_soll (any):\n            the target data directly provided to OptNeuron during initialization\n\n    Returns:\n        loss (float or list of floats):\n            the loss\n    \"\"\"\n    ### get the recordings and other important things for calculating the loss from\n    ### results_ist, we do not use all available information here, but you could\n    rec_ist = results_ist.recordings\n    pop_ist = results_ist.data[\"population_name\"]\n    neuron = 0\n\n    ### get the data for calculating the loss from the results_soll\n    r_target_0 = results_soll[0]\n    r_target_1 = results_soll[1]\n\n    ### get the data for calculating the loss from the recordings\n    r_ist_0 = rec_ist[0][f\"{pop_ist};r\"][:, neuron]\n    r_ist_1 = rec_ist[1][f\"{pop_ist};r\"][:, neuron]\n\n    ### calculate the loss, e.g. the root mean squared error\n    rmse1 = rmse(r_target_0, r_ist_0)\n    rmse2 = rmse(r_target_1, r_ist_1)\n\n    ### return the loss, one can return a singel value or a list of values which will\n    ### be summed during the optimization\n    return [rmse1, rmse2]\n

"},{"location":"main/optimize_neuron/#CompNeuroPy.opt_neuron.OptNeuron","title":"CompNeuroPy.opt_neuron.OptNeuron","text":"

This class is used to optimize neuron models with ANNarchy.

Source code in src/CompNeuroPy/opt_neuron.py
class OptNeuron:\n    \"\"\"\n    This class is used to optimize neuron models with ANNarchy.\n    \"\"\"\n\n    opt_created = []\n\n    @check_types()\n    def __init__(\n        self,\n        experiment: Type[CompNeuroExp],\n        get_loss_function: Callable[[Any, Any], float | list[float]],\n        variables_bounds: dict[str, float | list[float]],\n        neuron_model: Neuron,\n        results_soll: Any | None = None,\n        target_neuron_model: Neuron | None = None,\n        time_step: float = 1.0,\n        compile_folder_name: str = \"annarchy_OptNeuron\",\n        num_rep_loss: int = 1,\n        method: str = \"hyperopt\",\n        prior=None,\n        fv_space: list = None,\n        record: list[str] = [],\n    ):\n        \"\"\"\n        This prepares the optimization. To run the optimization call the run function.\n\n        Args:\n            experiment (CompNeuroExp class):\n                CompNeuroExp class containing a 'run' function which defines the\n                simulations and recordings\n\n            get_loss_function (function):\n                function which takes results_ist and results_soll as arguments and\n                calculates/returns the loss\n\n            variables_bounds (dict):\n                Dictionary with parameter names (keys) and their bounds (values). If\n                single values are given as values, the parameter is constant, i.e., not\n                optimized. If a list is given as value, the parameter is optimized and\n                the list contains the lower and upper bound of the parameter (order is\n                not important).\n\n            neuron_model (ANNarchy Neuron):\n                The neuron model whose parameters should be optimized.\n\n            results_soll (Any, optional):\n                Some variable which contains the target data and can be used by the\n                get_loss_function (second argument of get_loss_function)\n                !!! 
warning\n                    Either provide results_soll or a target_neuron_model not both!\n                Default: None.\n\n            target_neuron_model (ANNarchy Neuron, optional):\n                The neuron model which produces the target data by running the\n                experiment.\n                !!! warning\n                    Either provide results_soll or a target_neuron_model not both!\n                Default: None.\n\n            time_step (float, optional):\n                The time step for the simulation in ms. Default: 1.\n\n            compile_folder_name (string, optional):\n                The name of the annarchy compilation folder within annarchy_folders/.\n                Default: 'annarchy_OptNeuron'.\n\n            num_rep_loss (int, optional):\n                Only interesting for noisy simulations/models. How often should the\n                simulaiton be run to calculate the loss (the defined number of losses\n                is obtained and averaged). Default: 1.\n\n            method (str, optional):\n                Either 'sbi' or 'hyperopt'. If 'sbi' is used, the optimization is\n                performed with sbi. If 'hyperopt' is used, the optimization is\n                performed with hyperopt. Default: 'hyperopt'.\n\n            prior (distribution, optional):\n                The prior distribution used by sbi. Default: None, i.e., uniform\n                distributions between the variable bounds are assumed.\n\n            fv_space (list, optional):\n                The search space for hyperopt. Default: None, i.e., uniform\n                distributions between the variable bounds are assumed.\n\n            record (list, optional):\n                List of strings which define what variables of the tuned neuron should\n                be recorded. Default: [].\n        \"\"\"\n\n        if len(self.opt_created) > 0:\n            print(\n                \"OptNeuron: Error: Already another OptNeuron created. 
Only create one per python session!\"\n            )\n            quit()\n        else:\n            print(\n                \"OptNeuron: Initialize OptNeuron... do not create anything with ANNarchy before!\"\n            )\n\n            ### set object variables\n            self.opt_created.append(1)\n            self.record = record\n            self.results_soll = results_soll\n            self.variables_bounds = variables_bounds\n            self.fitting_variables_name_list = self._get_fitting_variables_name_list()\n            self.method = method\n            if method == \"hyperopt\":\n                if fv_space is None:\n                    self.fv_space = self._get_hyperopt_space()\n                else:\n                    self.fv_space = fv_space\n            self.const_params = self._get_const_params()\n            self.num_rep_loss = num_rep_loss\n            self.neuron_model = neuron_model\n            if method == \"sbi\":\n                self.prior = self._get_prior(prior)\n            self.target_neuron = target_neuron_model\n            self.compile_folder_name = compile_folder_name\n            self.__get_loss__ = get_loss_function\n\n            ### check target_neuron/results_soll\n            self._check_target()\n            ### check neuron models\n            self._check_neuron_models()\n\n            ### setup ANNarchy\n            setup(dt=time_step)\n\n            ### create and compile model\n            ### if neuron models and target neuron model --> create both models then\n            ### test, then clear and create only model for neuron model\n            model, target_model, monitors = self._generate_models()\n\n            self.pop = model.populations[0]\n            if target_model is not None:\n                self.pop_target = target_model.populations[0]\n            else:\n                self.pop_target = None\n            ### create experiment with current monitors\n            self.experiment = 
experiment(monitors=monitors)\n\n            ### check variables of model\n            self._test_variables()\n\n            ### check neuron models, experiment, get_loss\n            ### if results_soll is None -_> generate results_soll\n            self._check_get_loss_function()\n\n            ### after checking neuron models, experiment, get_loss\n            ### if two models exist --> clear ANNarchy and create/compile again only\n            ### standard model, thus recreate also monitors and experiment\n            clear()\n            model, _, monitors = self._generate_models()\n            self.monitors = monitors\n            self.experiment = experiment(monitors=monitors)\n\n    def _generate_models(self):\n        \"\"\"\n        Generates the tuned model and the target_model (only if results_soll is None).\n\n        Returns:\n            model (CompNeuroModel):\n                The model which is used for the optimization.\n\n            target_model (CompNeuroModel):\n                The model which is used to generate the target data. If results_soll is\n                provided, target_model is None.\n\n            monitors (CompNeuroMonitors):\n                The monitors which are used to record the data. 
If no variables are\n                recorded, monitors is None.\n        \"\"\"\n        with ef.suppress_stdout():\n            model = None\n            target_model = None\n            monitors = None\n            if self.results_soll is None:\n                ### create two models\n                model = CompNeuroModel(\n                    model_creation_function=self._raw_neuron,\n                    model_kwargs={\"neuron\": self.neuron_model, \"name\": \"model_neuron\"},\n                    name=\"standard_model\",\n                    do_create=True,\n                    do_compile=False,\n                    compile_folder_name=self.compile_folder_name,\n                )\n\n                target_model = CompNeuroModel(\n                    model_creation_function=self._raw_neuron,\n                    model_kwargs={\n                        \"neuron\": self.target_neuron,\n                        \"name\": \"target_model_neuron\",\n                    },\n                    name=\"target_model\",\n                    do_create=True,\n                    do_compile=True,\n                    compile_folder_name=self.compile_folder_name,\n                )\n\n                ### create monitors\n                if len(self.record) > 0:\n                    monitors = CompNeuroMonitors(\n                        {\n                            pop_name: self.record\n                            for pop_name in [\n                                model.populations[0],\n                                target_model.populations[0],\n                            ]\n                        }\n                    )\n\n            else:\n                ### create one model\n                model = CompNeuroModel(\n                    model_creation_function=self._raw_neuron,\n                    model_kwargs={\"neuron\": self.neuron_model, \"name\": \"model_neuron\"},\n                    name=\"single_model\",\n                    do_create=True,\n                
    do_compile=True,\n                    compile_folder_name=self.compile_folder_name,\n                )\n                ### create monitors\n                if len(self.record) > 0:\n                    monitors = CompNeuroMonitors({model.populations[0]: self.record})\n\n        return model, target_model, monitors\n\n    def _check_neuron_models(self):\n        \"\"\"\n        Checks if the neuron models are ANNarchy neuron models.\n        \"\"\"\n        if not (isinstance(self.neuron_model, type(Neuron()))) or (\n            self.target_neuron is not None\n            and not (isinstance(self.target_neuron, type(Neuron())))\n        ):\n            print(\n                \"OptNeuron: Error: neuron_model and/or target_neuron_model have to be ANNarchy neuron models\"\n            )\n            quit()\n\n    def _check_target(self):\n        \"\"\"\n        Check if either results_soll or target_neuron are provided and not both.\n        \"\"\"\n        if self.target_neuron is None and self.results_soll is None:\n            print(\n                \"OptNeuron: Error: Either provide results_soll or target_neuron_model\"\n            )\n            quit()\n        elif self.target_neuron is not None and self.results_soll is not None:\n            print(\n                \"OptNeuron: Error: Either provide results_soll or target_neuron_model, not both\"\n            )\n            quit()\n\n    def _get_prior(self, prior):\n        \"\"\"\n        Get the prior distribution used by sbi. If no prior is given, uniform\n        distributions between the variable bounds are assumed. If a prior is given,\n        this prior is used.\n\n        Args:\n            prior (distribution, optional):\n                The prior distribution used by sbi. 
Default: None, i.e., uniform\n                distributions between the variable bounds are assumed.\n\n        Returns:\n            prior (distribution):\n                The prior distribution used by sbi.\n        \"\"\"\n        if prior is None:\n            prior_min = []\n            prior_max = []\n            for _, param_bounds in self.variables_bounds.items():\n                if isinstance(param_bounds, list):\n                    prior_min.append(param_bounds[0])\n                    prior_max.append(param_bounds[1])\n\n            return utils.BoxUniform(\n                low=torch.as_tensor(prior_min), high=torch.as_tensor(prior_max)\n            )\n        else:\n            return prior\n\n    def _get_fitting_variables_name_list(self):\n        \"\"\"\n        Returns a list with the names of the fitting variables.\n\n        Returns:\n            fitting_variables_name_list (list):\n                list with names of fitting variables\n        \"\"\"\n        name_list = []\n        for param_name, param_bounds in self.variables_bounds.items():\n            if isinstance(param_bounds, list):\n                name_list.append(param_name)\n        return name_list\n\n    def _get_hyperopt_space(self):\n        \"\"\"\n        Generates the hyperopt variable space from the fitting variable bounds. 
The\n        variable space is a uniform distribution between the bounds.\n\n        Returns:\n            fitting_variables_space (list):\n                list with hyperopt variables\n        \"\"\"\n        fitting_variables_space = []\n        for param_name, param_bounds in self.variables_bounds.items():\n            if isinstance(param_bounds, list):\n                fitting_variables_space.append(\n                    hp.uniform(param_name, min(param_bounds), max(param_bounds))\n                )\n        return fitting_variables_space\n\n    def _get_const_params(self):\n        \"\"\"\n        Returns:\n            const_params (dict):\n                Dictionary with constant variables. The keys are the parameter names\n                and the values are the parameter values.\n        \"\"\"\n        const_params = {}\n        for param_name, param_bounds in self.variables_bounds.items():\n            if not (isinstance(param_bounds, list)):\n                const_params[param_name] = param_bounds\n        return const_params\n\n    def _check_get_loss_function(self):\n        \"\"\"\n        Checks if the get_loss_function is compatible to the experiment and the neuron\n        model(s). To test, the experiment is run once with the tuned neuron model\n        (generating results_ist) and once with the target neuron model (if provided,\n        generating results_soll). 
Then, the get_loss_function is called with the\n        results_ist and results_soll.\n        \"\"\"\n        print(\"checking neuron_models, experiment, get_loss...\", end=\"\")\n\n        fitparams = []\n        for bounds in self.variables_bounds.values():\n            if isinstance(bounds, list):\n                fitparams.append(bounds[0])\n\n        if self.results_soll is not None:\n            ### only generate results_ist with standard neuron model\n            results_ist = self._run_simulator_with_results(fitparams)[\"results\"]\n        else:\n            ### run simulator with both populations (standard neuron model and target\n            ### neuron model) and generatate results_ist and results_soll\n            results_ist = self._run_simulator_with_results(fitparams)[\"results\"]\n            self.results_soll = self._run_simulator_with_results(\n                fitparams, pop=self.pop_target\n            )[\"results\"]\n\n        try:\n            self.__get_loss__(results_ist, self.results_soll)\n        except:\n            print(\n                \"\\nThe get_loss_function, experiment and neuron model(s) are not compatible:\\n\"\n            )\n            traceback.print_exc()\n            quit()\n        print(\"Done\\n\")\n\n    def _raw_neuron(self, neuron, name):\n        \"\"\"\n        Generates a population with one neuron of the given neuron model.\n\n        Args:\n            neuron (ANNarchy Neuron):\n                The neuron model.\n\n            name (str):\n                The name of the population.\n        \"\"\"\n        Population(1, neuron=neuron, name=name)\n\n    def _test_variables(self):\n        \"\"\"\n        Check if the tuned neuron model contains all parameters which are defined in\n        variables_bounds or even more.\n        \"\"\"\n        ### collect all names\n        all_vars_names = np.concatenate(\n            [\n                np.array(list(self.const_params.keys())),\n                
np.array(self.fitting_variables_name_list),\n            ]\n        ).tolist()\n        ### check if pop has these parameters\n        pop_parameter_names = get_population(self.pop).attributes.copy()\n        for name in pop_parameter_names.copy():\n            if name in all_vars_names:\n                all_vars_names.remove(name)\n                pop_parameter_names.remove(name)\n        if len(pop_parameter_names) > 0:\n            print(\n                \"OptNeuron: WARNING: attributes\",\n                pop_parameter_names,\n                \"are not used/initialized.\",\n            )\n        if len(all_vars_names) > 0:\n            print(\n                \"OptNeuron: WARNING: The neuron_model does not contain parameters\",\n                all_vars_names,\n                \"!\",\n            )\n\n    def _run_simulator(self, fitparams):\n        \"\"\"\n        Runs the function simulator with the multiprocessing manager (if function is\n        called multiple times this saves memory, otherwise same as calling simulator\n        directly).\n\n        Args:\n            fitparams (list):\n                list with values for fitting parameters\n\n        Returns:\n            return_dict (dict):\n                dictionary needed for optimization with hyperopt, containing the loss,\n                the loss variance (in case of noisy models with multiple runs per loss\n                calculation), and the status (STATUS_OK for hyperopt).\n        \"\"\"\n\n        ### initialize manager and generate m_list = dictionary to save data\n        manager = multiprocessing.Manager()\n        m_list = manager.dict()\n\n        ### in case of noisy models, here optionally run multiple simulations, to mean the loss\n        lossAr = np.zeros(self.num_rep_loss)\n\n        return_results = False\n        for nr_run in range(self.num_rep_loss):\n            ### initialize for each run a new rng (--> not always have same noise in case of noisy models/simulations)\n   
         rng = np.random.default_rng()\n            ### run simulator with multiprocessign manager\n            proc = Process(\n                target=self._simulator, args=(fitparams, rng, m_list, return_results)\n            )\n            proc.start()\n            proc.join()\n            ### get simulation results/loss\n            lossAr[nr_run] = m_list[0]\n\n        ### calculate mean and std of loss\n        if self.num_rep_loss > 1:\n            loss = np.mean(lossAr)\n            std = np.std(lossAr)\n        else:\n            loss = lossAr[0]\n            std = None\n\n        ### return loss and other things for optimization\n        if self.num_rep_loss > 1:\n            return {\"status\": STATUS_OK, \"loss\": loss, \"loss_variance\": std}\n        else:\n            return {\"status\": STATUS_OK, \"loss\": loss}\n\n    def _sbi_simulation_wrapper(self, fitparams):\n        \"\"\"\n        This function is called by sbi. It calls the simulator function and\n        returns the loss and adjusts the format of the input parameters.\n\n        Args:\n            fitparams (tensor):\n                either a batch of parameters (tensor with two dimensions) or a single\n                parameter set\n\n        Returns:\n            loss (tensor):\n                loss as tensor for sbi inference\n        \"\"\"\n        fitparams = np.asarray(fitparams)\n        if len(fitparams.shape) == 2:\n            ### batch parameters!\n            data = []\n            for idx in range(fitparams.shape[0]):\n                data.append(self._run_simulator(fitparams[idx])[\"loss\"])\n        else:\n            ### single parameter set!\n            data = [self._run_simulator(fitparams)[\"loss\"]]\n\n        return torch.as_tensor(data)\n\n    def _run_simulator_with_results(self, fitparams, pop=None):\n        \"\"\"\n        Runs the function simulator with the multiprocessing manager (if function is\n        called multiple times this saves memory, otherwise 
same as calling simulator\n        directly) and also returns the results.\n\n        Args:\n            fitparams (list):\n                list with values for fitting parameters\n\n            pop (str, optional):\n                ANNarchy population name. Default: None, i.e., the tuned population\n                is used.\n\n        Returns:\n            return_dict (dict):\n                dictionary needed for optimization with hyperopt, containing the loss,\n                the loss variance (in case of noisy models with multiple runs per loss\n                calculation), and the status (STATUS_OK for hyperopt) and the results\n                generated by the experiment.\n        \"\"\"\n        ### check if pop is given\n        if pop is None:\n            pop = self.pop\n        ### initialize manager and generate m_list = dictionary to save data\n        manager = multiprocessing.Manager()\n        m_list = manager.dict()\n\n        ### in case of noisy models, here optionally run multiple simulations, to mean the loss\n        lossAr = np.zeros(self.num_rep_loss)\n        all_loss_list = []\n        return_results = True\n        for nr_run in range(self.num_rep_loss):\n            ### initialize for each run a new rng (--> not always have same noise in case of noisy models/simulations)\n            rng = np.random.default_rng()\n            ### run simulator with multiprocessign manager\n            proc = Process(\n                target=self._simulator,\n                args=(fitparams, rng, m_list, return_results, pop),\n            )\n            proc.start()\n            proc.join()\n            ### get simulation results/loss\n            lossAr[nr_run] = m_list[0]\n            results_ist = m_list[1]\n            all_loss_list.append(m_list[2])\n\n        all_loss_arr = np.array(all_loss_list)\n        ### calculate mean and std of loss\n        if self.num_rep_loss > 1:\n            loss = np.mean(lossAr)\n            std = np.std(lossAr)\n   
         all_loss = np.mean(all_loss_arr, 0)\n        else:\n            loss = lossAr[0]\n            std = None\n            all_loss = all_loss_arr[0]\n\n        ### return loss and other things for optimization and results\n        if self.num_rep_loss > 1:\n            return {\n                \"status\": STATUS_OK,\n                \"loss\": loss,\n                \"loss_variance\": std,\n                \"std\": std,\n                \"all_loss\": all_loss,\n                \"results\": results_ist,\n            }\n        else:\n            return {\n                \"status\": STATUS_OK,\n                \"loss\": loss,\n                \"std\": std,\n                \"all_loss\": all_loss,\n                \"results\": results_ist,\n            }\n\n    def _simulator(\n        self, fitparams, rng, m_list=[0, 0, 0], return_results=False, pop=None\n    ):\n        \"\"\"\n        Runs the experiment with the given parameters and 'returns' the loss and\n        optionally the results and all individual losses of the get_loss_function. The\n        'returned' values are saved in m_list.\n\n        Args:\n            fitparams (list):\n                list with values for fitting parameters\n\n            rng (numpy random generator):\n                random generator for the simulation\n\n            m_list (list, optional):\n                list with the loss, the results, and the all_loss. Default: [0, 0, 0].\n\n            return_results (bool, optional):\n                If True, the results are returned. Default: False.\n\n            pop (str, optional):\n                ANNarchy population name. 
Default: None, i.e., the tuned population\n                is used.\n        \"\"\"\n        ### TODO use rng here and add it to CompNeuroExp\n        ### check if pop is given\n        if pop is None:\n            pop = self.pop\n\n        ### set parameters which should not be optimized and parameters which should be\n        ### optimized before the experiment, they should not be resetted by the\n        ### experiment!\n        self._set_fitting_parameters(fitparams, pop=pop)\n\n        ### conduct loaded experiment\n        results = self.experiment.run(pop)\n\n        if self.results_soll is not None:\n            ### compute loss\n            all_loss = self.__get_loss__(results, self.results_soll)\n            if isinstance(all_loss, list) or isinstance(all_loss, type(np.zeros(1))):\n                loss = sum(all_loss)\n            else:\n                loss = all_loss\n        else:\n            all_loss = 999\n            loss = 999\n        ### \"return\" loss and other optional things\n        m_list[0] = loss\n        if return_results:\n            m_list[1] = results\n            m_list[2] = all_loss\n\n    def _set_fitting_parameters(\n        self,\n        fitparams,\n        pop=None,\n    ):\n        \"\"\"\n        Sets all given parameters for the population pop.\n\n        Args:\n            pop (str, optional):\n                ANNarchy population name. 
Default: None, i.e., the tuned population\n                is used.\n        \"\"\"\n        if pop is None:\n            pop = self.pop\n\n        ### get all variables dict (combine fitting variables and const variables)\n        all_variables_dict = self.const_params.copy()\n\n        for fitting_variable_idx, fitting_variable_name in enumerate(\n            self.fitting_variables_name_list\n        ):\n            all_variables_dict[fitting_variable_name] = fitparams[fitting_variable_idx]\n\n        ### evaluate variables defined by a str\n        for key, val in all_variables_dict.items():\n            if isinstance(val, str):\n                all_variables_dict[key] = ef.evaluate_expression_with_dict(\n                    val, all_variables_dict\n                )\n\n        ### only set parameters of the fitted neuron model (in case target neuron model is given)\n        if pop == self.pop:\n            ### set parameters\n            for param_name, param_val in all_variables_dict.items():\n                pop_parameter_names = get_population(pop).attributes\n                ### only if param_name in parameter attributes\n                if param_name in pop_parameter_names:\n                    setattr(\n                        get_population(pop),\n                        param_name,\n                        param_val,\n                    )\n\n    def _test_fit(self, fitparams_dict):\n        \"\"\"\n        Runs the experiment with the optimized parameters obtained with hyperopt and\n        returns the loss, the results and all individual losses of the\n        get_loss_function.\n\n        Args:\n            fitparams_dict (dict):\n                dictionary with parameter names (keys) and their values (values)\n\n        Returns:\n            fit (dict):\n                dictionary containing the loss, the loss variance (in case of noisy\n                models with multiple runs per loss calculation), and the status\n                (STATUS_OK for 
hyperopt) and the results generated by the experiment.\n        \"\"\"\n        return self._run_simulator_with_results(\n            [fitparams_dict[name] for name in self.fitting_variables_name_list]\n        )\n\n    def _run_with_sbi(self, max_evals, sbi_plot_file):\n        \"\"\"\n        Runs the optimization with sbi.\n\n        Args:\n            max_evals (int):\n                number of runs the optimization method performs\n\n            sbi_plot_file (str):\n                If you use \"sbi\": the name of the figure which will be saved and shows\n                the posterior.\n\n        Returns:\n            best (dict):\n                dictionary containing the optimized parameters and the posterior.\n        \"\"\"\n        ### get prior bounds\n        prior_min = []\n        prior_max = []\n        for _, param_bounds in self.variables_bounds.items():\n            if isinstance(param_bounds, list):\n                prior_min.append(param_bounds[0])\n                prior_max.append(param_bounds[1])\n\n        ### run sbi\n        simulator, prior = prepare_for_sbi(\n            self._sbi_simulation_wrapper,\n            self.prior,\n            {\n                \"lower_bound\": torch.as_tensor(prior_min),\n                \"upper_bound\": torch.as_tensor(prior_max),\n            },\n        )\n        inference = SNPE(prior, density_estimator=\"mdn\")\n        theta, x = simulate_for_sbi(\n            simulator=simulator,\n            proposal=prior,\n            num_simulations=max_evals,\n            num_workers=1,\n        )\n        density_estimator = inference.append_simulations(theta, x).train()\n        posterior = inference.build_posterior(density_estimator)\n        x_o = torch.as_tensor([0])  # data which should be obtained: loss==0\n        posterior = posterior.set_default_x(x_o)\n\n        ### get best params\n        posterior_samples = posterior.sample(\n            (10000,)\n        )  # posterior = distribution P(params|data) 
--> set data and then sample possible parameters\n        best_params = posterior_samples[\n            torch.argmax(posterior.log_prob(posterior_samples))\n        ].numpy()  # sampled parameters with highest prob in posterior\n\n        ### create best dict with best parameters\n        best = {}\n        for param_idx, param_name in enumerate(self.fitting_variables_name_list):\n            best[param_name] = best_params[param_idx]\n\n        ### also return posterior\n        best[\"posterior\"] = posterior\n\n        ### plot posterior\n        plot_limits = [\n            [prior_min[idx], prior_max[idx]] for idx in range(len(prior_max))\n        ]\n        analysis.pairplot(\n            posterior_samples,\n            limits=plot_limits,\n            ticks=plot_limits,\n            fig_size=(5, 5),\n            labels=self.fitting_variables_name_list,\n        )\n\n        ### save plot\n        sf.create_dir(\"/\".join(sbi_plot_file.split(\"/\")[:-1]))\n        plt.savefig(sbi_plot_file)\n\n        return best\n\n    @check_types()\n    def run(\n        self,\n        max_evals: int,\n        results_file_name: str = \"best\",\n        sbi_plot_file: str = \"posterior.svg\",\n    ):\n        \"\"\"\n        Runs the optimization.\n\n        Args:\n            max_evals (int):\n                number of runs the optimization method performs\n\n            results_file_name (str, optional):\n                name of the file which is saved. The file contains the optimized and\n                target results, the obtained parameters, the loss, and the SD of the\n                loss (in case of noisy models with multiple runs per loss calculation)\n                Default: \"best\".\n\n            sbi_plot_file (str, optional):\n                If you use \"sbi\": the name of the figure which will be saved and shows\n                the posterior. 
Default: \"posterior.svg\".\n\n        Returns:\n            best (dict):\n                dictionary containing the optimized parameters (as keys) and:\n\n                - \"loss\": the loss\n                - \"all_loss\": the individual losses of the get_loss_function\n                - \"std\": the SD of the loss (in case of noisy models with multiple\n                    runs per loss calculation)\n                - \"results\": the results generated by the experiment\n                - \"results_soll\": the target results\n        \"\"\"\n        if self.method == \"hyperopt\":\n            ### run optimization with hyperopt and return best dict\n            best = fmin(\n                fn=self._run_simulator,\n                space=self.fv_space,\n                algo=tpe.suggest,\n                max_evals=max_evals,\n            )\n        elif self.method == \"sbi\":\n            ### run optimization with sbi and return best dict\n            best = self._run_with_sbi(max_evals, sbi_plot_file)\n        else:\n            print(\"ERROR run; method should be 'hyperopt' or 'sbi'\")\n            quit()\n        fit = self._test_fit(best)\n        best[\"loss\"] = fit[\"loss\"]\n        if self.method == \"sbi\":\n            print(\"\\tbest loss:\", best[\"loss\"])\n        best[\"all_loss\"] = fit[\"all_loss\"]\n        best[\"std\"] = fit[\"std\"]\n        best[\"results\"] = fit[\"results\"]\n        best[\"results_soll\"] = self.results_soll\n        self.results = best\n\n        ### SAVE OPTIMIZED PARAMS AND LOSS\n        sf.save_variables([best], [results_file_name], \"parameter_fit\")\n\n        return best\n
"},{"location":"main/optimize_neuron/#CompNeuroPy.opt_neuron.OptNeuron.__init__","title":"__init__(experiment, get_loss_function, variables_bounds, neuron_model, results_soll=None, target_neuron_model=None, time_step=1.0, compile_folder_name='annarchy_OptNeuron', num_rep_loss=1, method='hyperopt', prior=None, fv_space=None, record=[])","text":"

This prepares the optimization. To run the optimization call the run function.

Parameters:

Name Type Description Default experiment CompNeuroExp class

CompNeuroExp class containing a 'run' function which defines the simulations and recordings

required get_loss_function function

function which takes results_ist and results_soll as arguments and calculates/returns the loss

required variables_bounds dict

Dictionary with parameter names (keys) and their bounds (values). If single values are given as values, the parameter is constant, i.e., not optimized. If a list is given as value, the parameter is optimized and the list contains the lower and upper bound of the parameter (order is not important).

required neuron_model ANNarchy Neuron

The neuron model whose parameters should be optimized.

required results_soll Any

Some variable which contains the target data and can be used by the get_loss_function (second argument of get_loss_function)

Warning

Either provide results_soll or a target_neuron_model, not both!

Default: None.

None target_neuron_model ANNarchy Neuron

The neuron model which produces the target data by running the experiment.

Warning

Either provide results_soll or a target_neuron_model, not both!

Default: None.

None time_step float

The time step for the simulation in ms. Default: 1.

1.0 compile_folder_name string

The name of the annarchy compilation folder within annarchy_folders/. Default: 'annarchy_OptNeuron'.

'annarchy_OptNeuron' num_rep_loss int

Only interesting for noisy simulations/models. How often should the simulation be run to calculate the loss (the defined number of losses is obtained and averaged). Default: 1.

1 method str

Either 'sbi' or 'hyperopt'. If 'sbi' is used, the optimization is performed with sbi. If 'hyperopt' is used, the optimization is performed with hyperopt. Default: 'hyperopt'.

'hyperopt' prior distribution

The prior distribution used by sbi. Default: None, i.e., uniform distributions between the variable bounds are assumed.

None fv_space list

The search space for hyperopt. Default: None, i.e., uniform distributions between the variable bounds are assumed.

None record list

List of strings which define what variables of the tuned neuron should be recorded. Default: [].

[] Source code in src/CompNeuroPy/opt_neuron.py
@check_types()\ndef __init__(\n    self,\n    experiment: Type[CompNeuroExp],\n    get_loss_function: Callable[[Any, Any], float | list[float]],\n    variables_bounds: dict[str, float | list[float]],\n    neuron_model: Neuron,\n    results_soll: Any | None = None,\n    target_neuron_model: Neuron | None = None,\n    time_step: float = 1.0,\n    compile_folder_name: str = \"annarchy_OptNeuron\",\n    num_rep_loss: int = 1,\n    method: str = \"hyperopt\",\n    prior=None,\n    fv_space: list = None,\n    record: list[str] = [],\n):\n    \"\"\"\n    This prepares the optimization. To run the optimization call the run function.\n\n    Args:\n        experiment (CompNeuroExp class):\n            CompNeuroExp class containing a 'run' function which defines the\n            simulations and recordings\n\n        get_loss_function (function):\n            function which takes results_ist and results_soll as arguments and\n            calculates/returns the loss\n\n        variables_bounds (dict):\n            Dictionary with parameter names (keys) and their bounds (values). If\n            single values are given as values, the parameter is constant, i.e., not\n            optimized. If a list is given as value, the parameter is optimized and\n            the list contains the lower and upper bound of the parameter (order is\n            not important).\n\n        neuron_model (ANNarchy Neuron):\n            The neuron model whose parameters should be optimized.\n\n        results_soll (Any, optional):\n            Some variable which contains the target data and can be used by the\n            get_loss_function (second argument of get_loss_function)\n            !!! warning\n                Either provide results_soll or a target_neuron_model not both!\n            Default: None.\n\n        target_neuron_model (ANNarchy Neuron, optional):\n            The neuron model which produces the target data by running the\n            experiment.\n            !!! 
warning\n                Either provide results_soll or a target_neuron_model not both!\n            Default: None.\n\n        time_step (float, optional):\n            The time step for the simulation in ms. Default: 1.\n\n        compile_folder_name (string, optional):\n            The name of the annarchy compilation folder within annarchy_folders/.\n            Default: 'annarchy_OptNeuron'.\n\n        num_rep_loss (int, optional):\n            Only interesting for noisy simulations/models. How often should the\n            simulaiton be run to calculate the loss (the defined number of losses\n            is obtained and averaged). Default: 1.\n\n        method (str, optional):\n            Either 'sbi' or 'hyperopt'. If 'sbi' is used, the optimization is\n            performed with sbi. If 'hyperopt' is used, the optimization is\n            performed with hyperopt. Default: 'hyperopt'.\n\n        prior (distribution, optional):\n            The prior distribution used by sbi. Default: None, i.e., uniform\n            distributions between the variable bounds are assumed.\n\n        fv_space (list, optional):\n            The search space for hyperopt. Default: None, i.e., uniform\n            distributions between the variable bounds are assumed.\n\n        record (list, optional):\n            List of strings which define what variables of the tuned neuron should\n            be recorded. Default: [].\n    \"\"\"\n\n    if len(self.opt_created) > 0:\n        print(\n            \"OptNeuron: Error: Already another OptNeuron created. Only create one per python session!\"\n        )\n        quit()\n    else:\n        print(\n            \"OptNeuron: Initialize OptNeuron... 
do not create anything with ANNarchy before!\"\n        )\n\n        ### set object variables\n        self.opt_created.append(1)\n        self.record = record\n        self.results_soll = results_soll\n        self.variables_bounds = variables_bounds\n        self.fitting_variables_name_list = self._get_fitting_variables_name_list()\n        self.method = method\n        if method == \"hyperopt\":\n            if fv_space is None:\n                self.fv_space = self._get_hyperopt_space()\n            else:\n                self.fv_space = fv_space\n        self.const_params = self._get_const_params()\n        self.num_rep_loss = num_rep_loss\n        self.neuron_model = neuron_model\n        if method == \"sbi\":\n            self.prior = self._get_prior(prior)\n        self.target_neuron = target_neuron_model\n        self.compile_folder_name = compile_folder_name\n        self.__get_loss__ = get_loss_function\n\n        ### check target_neuron/results_soll\n        self._check_target()\n        ### check neuron models\n        self._check_neuron_models()\n\n        ### setup ANNarchy\n        setup(dt=time_step)\n\n        ### create and compile model\n        ### if neuron models and target neuron model --> create both models then\n        ### test, then clear and create only model for neuron model\n        model, target_model, monitors = self._generate_models()\n\n        self.pop = model.populations[0]\n        if target_model is not None:\n            self.pop_target = target_model.populations[0]\n        else:\n            self.pop_target = None\n        ### create experiment with current monitors\n        self.experiment = experiment(monitors=monitors)\n\n        ### check variables of model\n        self._test_variables()\n\n        ### check neuron models, experiment, get_loss\n        ### if results_soll is None -_> generate results_soll\n        self._check_get_loss_function()\n\n        ### after checking neuron models, experiment, get_loss\n        
### if two models exist --> clear ANNarchy and create/compile again only\n        ### standard model, thus recreate also monitors and experiment\n        clear()\n        model, _, monitors = self._generate_models()\n        self.monitors = monitors\n        self.experiment = experiment(monitors=monitors)\n
"},{"location":"main/optimize_neuron/#CompNeuroPy.opt_neuron.OptNeuron.run","title":"run(max_evals, results_file_name='best', sbi_plot_file='posterior.svg')","text":"

Runs the optimization.

Parameters:

Name Type Description Default max_evals int

number of runs the optimization method performs

required results_file_name str

name of the file which is saved. The file contains the optimized and target results, the obtained parameters, the loss, and the SD of the loss (in case of noisy models with multiple runs per loss calculation) Default: \"best\".

'best' sbi_plot_file str

If you use \"sbi\": the name of the figure which will be saved and shows the posterior. Default: \"posterior.svg\".

'posterior.svg'

Returns:

Name Type Description best dict

dictionary containing the optimized parameters (as keys) and:

  • \"loss\": the loss
  • \"all_loss\": the individual losses of the get_loss_function
  • \"std\": the SD of the loss (in case of noisy models with multiple runs per loss calculation)
  • \"results\": the results generated by the experiment
  • \"results_soll\": the target results
Source code in src/CompNeuroPy/opt_neuron.py
@check_types()\ndef run(\n    self,\n    max_evals: int,\n    results_file_name: str = \"best\",\n    sbi_plot_file: str = \"posterior.svg\",\n):\n    \"\"\"\n    Runs the optimization.\n\n    Args:\n        max_evals (int):\n            number of runs the optimization method performs\n\n        results_file_name (str, optional):\n            name of the file which is saved. The file contains the optimized and\n            target results, the obtained parameters, the loss, and the SD of the\n            loss (in case of noisy models with multiple runs per loss calculation)\n            Default: \"best\".\n\n        sbi_plot_file (str, optional):\n            If you use \"sbi\": the name of the figure which will be saved and shows\n            the posterior. Default: \"posterior.svg\".\n\n    Returns:\n        best (dict):\n            dictionary containing the optimized parameters (as keys) and:\n\n            - \"loss\": the loss\n            - \"all_loss\": the individual losses of the get_loss_function\n            - \"std\": the SD of the loss (in case of noisy models with multiple\n                runs per loss calculation)\n            - \"results\": the results generated by the experiment\n            - \"results_soll\": the target results\n    \"\"\"\n    if self.method == \"hyperopt\":\n        ### run optimization with hyperopt and return best dict\n        best = fmin(\n            fn=self._run_simulator,\n            space=self.fv_space,\n            algo=tpe.suggest,\n            max_evals=max_evals,\n        )\n    elif self.method == \"sbi\":\n        ### run optimization with sbi and return best dict\n        best = self._run_with_sbi(max_evals, sbi_plot_file)\n    else:\n        print(\"ERROR run; method should be 'hyperopt' or 'sbi'\")\n        quit()\n    fit = self._test_fit(best)\n    best[\"loss\"] = fit[\"loss\"]\n    if self.method == \"sbi\":\n        print(\"\\tbest loss:\", best[\"loss\"])\n    best[\"all_loss\"] = fit[\"all_loss\"]\n    
best[\"std\"] = fit[\"std\"]\n    best[\"results\"] = fit[\"results\"]\n    best[\"results_soll\"] = self.results_soll\n    self.results = best\n\n    ### SAVE OPTIMIZED PARAMS AND LOSS\n    sf.save_variables([best], [results_file_name], \"parameter_fit\")\n\n    return best\n
"}]} \ No newline at end of file +{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Documentation for CompNeuroPy","text":"

CompNeuroPy is an assisting Python package for working with ANNarchy (GitHub, documentation, DOI). It is intended to help structure simulations with computational neuroscience models in a modular way and to make them more easily replicable. People who want to start working with ANNarchy are strongly recommended to first learn exclusively the functionality of ANNarchy. CompNeuroPy uses very few features of ANNarchy at this time. But also adds various special features.

  • v1.0.0:
"},{"location":"installation/","title":"Installation","text":"

From PyPI using pip:

pip install CompNeuroPy\n

With downloaded source code; using pip in the top-level directory of the downloaded source code:

pip install .\n

or in development mode:

pip install -e .\n

You must install ANNarchy separately, best after CompNeuroPy.

git clone https://github.com/ANNarchy/ANNarchy\ncd ANNarchy\ngit checkout develop\npip install .\ncd ..\nrm -rf ANNarchy\n

Optional install torch, sbi, and hyperopt to be able to use OptNeuron

pip install torch sbi hyperopt\n

"},{"location":"license/","title":"License","text":""},{"location":"license/#mit-license","title":"MIT License","text":"

Copyright (c) 2022 Oliver Maith

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

"},{"location":"additional/analysis_functions/","title":"Analysis Functions","text":""},{"location":"additional/analysis_functions/#CompNeuroPy.analysis_functions.PlotRecordings","title":"PlotRecordings","text":"

Plot recordings from CompNeuroMonitors.

TODO: Check if there are memory issues with large recordings or many subplots.

Source code in CompNeuroPy/analysis_functions.py
class PlotRecordings:\n    \"\"\"\n    Plot recordings from CompNeuroMonitors.\n\n    TODO: CHeck if there are memory issues with large recordings or many subplots.\n    \"\"\"\n\n    @check_types()\n    def __init__(\n        self,\n        figname: str,\n        recordings: list[dict],\n        recording_times: RecordingTimes,\n        shape: tuple[int, int],\n        plan: dict,\n        chunk: int = 0,\n        time_lim: None | tuple[float, float] = None,\n        dpi: int = 300,\n    ) -> None:\n        \"\"\"\n        Create and save the plot.\n\n        Args:\n            figname (str):\n                The name of the figure to be saved.\n            recordings (list):\n                A recordings list obtained from CompNeuroMonitors.\n            recording_times (RecordingTimes):\n                The RecordingTimes object containing the recording times obtained from\n                CompNeuroMonitors.\n            shape (tuple):\n                The shape of the figure. (number of rows, number of columns)\n            plan (dict):\n                Defines which recordings are plotted in which subplot and how. The plan\n                has to contain the following keys: \"position\", \"compartment\",\n                \"variable\", \"format\". The values of the keys have to be lists of the\n                same length. The values of the key \"position\" have to be integers\n                between 1 and the number of subplots (defined by shape). The values of\n                the key \"compartment\" have to be the names of the model compartments as\n                strings. The values of the key \"variable\" have to be strings containing\n                the names of the recorded variables or equations using the recorded\n                variables. The values of the key \"format\" have to be strings defining\n                how the recordings are plotted. 
The following formats are available for\n                spike recordings: \"raster\", \"mean\", \"hybrid\", \"interspike\". The\n                following formats are available for other recordings: \"line\",\n                \"line_mean\", \"matrix\", \"matrix_mean\".\n            chunk (int, optional):\n                The chunk of the recordings to be plotted. Default: 0.\n            time_lim (tuple, optional):\n                Defines the x-axis for all subplots. The tuple contains two\n                numbers: start and end time in ms. The times have to be\n                within the chunk. Default: None, i.e., the whole chunk is plotted.\n            dpi (int, optional):\n                The dpi of the saved figure. Default: 300.\n        \"\"\"\n        ### print start message\n        print(f\"Generate fig {figname}\", end=\"... \", flush=True)\n\n        ### set attributes\n        self.figname = figname\n        self.recordings = recordings\n        self.recording_times = recording_times\n        self.shape = shape\n        self.plan = plan\n        self.chunk = chunk\n        self.time_lim = time_lim\n        self.dpi = dpi\n\n        ### get available compartments (from recordings) and recorded variables for each\n        ### compartment\n        (\n            self._compartment_list,\n            self._compartment_recordings_dict,\n        ) = self._get_compartment_recordings()\n\n        ### check plan keys and values\n        self._check_plan()\n\n        ### get start and end time for plotting and timestep\n        self._start_time, self._end_time, self._time_step = self._get_start_end_time()\n\n        ### get compbined time array for recordings of each compartment\n        self._time_arr_list = self._get_time_arr_list()\n\n        ### get data from recordings for each subplot\n        self._raw_data_list = self._get_raw_data_list()\n\n        ### create plot\n        self._plot()\n\n        ### print end message\n        print(\"Done\\n\")\n\n   
 def _get_compartment_recordings(self):\n        \"\"\"\n        Get available compartment names from recordings.\n        Get recorded variables (names) for each compartment.\n\n        Returns:\n            compartment_list (list):\n                List of compartment names.\n            compartment_recordings_dict (dict):\n                Dictionary with compartment names as keys and list of recorded variables\n                as values.\n        \"\"\"\n        ### check if chunk is valid\n        if self.chunk >= len(self.recordings) or self.chunk < 0:\n            print(\n                f\"\\nERROR PlotRecordings: chunk {self.chunk} is not valid.\\n\"\n                f\"Number of chunks: {len(self.recordings)}\\n\"\n            )\n            quit()\n\n        ### get compartment names and recorded variables for each compartment\n        compartment_list = []\n        compartment_recordings_dict = {}\n        for recordings_key in self.recordings[self.chunk].keys():\n            if \";\" not in recordings_key:\n                continue\n\n            ### get compartment\n            compartment, recorded_variable = recordings_key.split(\";\")\n            if compartment not in compartment_list:\n                compartment_list.append(compartment)\n                compartment_recordings_dict[compartment] = []\n\n            ### get recordings for compartment\n            if recorded_variable != \"period\" and recorded_variable != \"parameter_dict\":\n                compartment_recordings_dict[compartment].append(recorded_variable)\n\n        return compartment_list, compartment_recordings_dict\n\n    def _check_plan(self):\n        \"\"\"\n        Check if plan is valid.\n        \"\"\"\n\n        ### check if plan keys are valid\n        valid_keys = [\"position\", \"compartment\", \"variable\", \"format\"]\n        for key in self.plan.keys():\n            if key not in valid_keys:\n                print(\n                    f\"\\nERROR PlotRecordings: 
plan key {key} is not valid.\\n\"\n                    f\"Valid keys are {valid_keys}.\\n\"\n                )\n                quit()\n\n        ### check if plan values are valid (have same length)\n        for key in self.plan.keys():\n            if len(self.plan[key]) != len(self.plan[\"position\"]):\n                print(\n                    f\"\\nERROR PlotRecordings: plan value of key '{key}' has not the same length as plan value of key 'position'.\\n\"\n                )\n                quit()\n\n        ### check if plan positions are valid\n        ### check if min and max are valid\n        if get_minimum(self.plan[\"position\"]) < 1:\n            print(\n                f\"\\nERROR PlotRecordings: plan position has to be >= 1.\\n\"\n                f\"plan position: {self.plan['position']}\\n\"\n            )\n            quit()\n        if get_maximum(self.plan[\"position\"]) > self.shape[0] * self.shape[1]:\n            print(\n                f\"\\nERROR PlotRecordings: plan position has to be <= shape[0] * shape[1].\\n\"\n                f\"plan position: {self.plan['position']}\\n\"\n                f\"shape: {self.shape}\\n\"\n            )\n            quit()\n        ### check if plan positions are unique\n        if len(np.unique(self.plan[\"position\"])) != len(self.plan[\"position\"]):\n            print(\n                f\"\\nERROR PlotRecordings: plan position has to be unique.\\n\"\n                f\"plan position: {self.plan['position']}\\n\"\n            )\n            quit()\n\n        ### check if plan compartments are valid\n        for compartment in self.plan[\"compartment\"]:\n            if compartment not in self._compartment_list:\n                print(\n                    f\"\\nERROR PlotRecordings: plan compartment {compartment} is not valid.\\n\"\n                    f\"Valid compartments are {self._compartment_list}.\\n\"\n                )\n                quit()\n\n        ### check if plan variables are valid\n    
    for plot_idx in range(len(self.plan[\"variable\"])):\n            compartment = self.plan[\"compartment\"][plot_idx]\n            variable: str = self.plan[\"variable\"][plot_idx]\n            ### check if variable contains a mathematical expression\n            if \"+\" in variable or \"-\" in variable or \"*\" in variable or \"/\" in variable:\n                ### separate variables\n                variable = variable.replace(\" \", \"\")\n                variable = variable.replace(\"+\", \" \")\n                variable = variable.replace(\"-\", \" \")\n                variable = variable.replace(\"*\", \" \")\n                variable = variable.replace(\"/\", \" \")\n                variables_list = variable.split(\" \")\n                ### remove numbers\n                variables_list = [var for var in variables_list if not var.isdigit()]\n                ### spike and axon_spike are not allowed in equations\n                if \"spike\" in variables_list or \"axon_spike\" in variables_list:\n                    print(\n                        f\"\\nERROR PlotRecordings: plan variable {variable} is not valid.\\n\"\n                        f\"Variables 'spike' and 'axon_spike' are not allowed in equations.\\n\"\n                    )\n                    quit()\n            else:\n                variables_list = [variable]\n            ### check if variables are valid\n            for var in variables_list:\n                if var not in self._compartment_recordings_dict[compartment]:\n                    print(\n                        f\"\\nERROR PlotRecordings: plan variable {var} is not valid for compartment {compartment}.\\n\"\n                        f\"Valid variables are {self._compartment_recordings_dict[compartment]}.\\n\"\n                    )\n                    quit()\n\n        ### check if plan formats are valid\n        valid_formats_spike = [\"raster\", \"mean\", \"hybrid\", \"interspike\", \"cv\"]\n        valid_formats_other = 
[\"line\", \"line_mean\", \"matrix\", \"matrix_mean\"]\n        for plot_idx in range(len(self.plan[\"format\"])):\n            variable = self.plan[\"variable\"][plot_idx]\n            format = self.plan[\"format\"][plot_idx]\n            ### check if format is valid\n            if variable == \"spike\" or variable == \"axon_spike\":\n                if format not in valid_formats_spike:\n                    print(\n                        f\"\\nERROR PlotRecordings: plan format {format} is not valid for variable {variable}.\\n\"\n                        f\"Valid formats are {valid_formats_spike}.\\n\"\n                    )\n                    quit()\n            else:\n                if format not in valid_formats_other:\n                    print(\n                        f\"\\nERROR PlotRecordings: plan format {format} is not valid for variable {variable}.\\n\"\n                        f\"Valid formats are {valid_formats_other}.\\n\"\n                    )\n                    quit()\n\n    def _get_start_end_time(self):\n        \"\"\"\n        Check if time_lim is given and valid. 
If it's not given get it from recordings.\n        Get timestep from recordings.\n\n        Returns:\n            start_time (float):\n                The start time of the recordings.\n            end_time (float):\n                The end time of the recordings.\n            time_step (float):\n                The timestep of the recordings.\n\n        Raises:\n            ValueError: If given time_lim is not within the chunk.\n        \"\"\"\n\n        chunk_time_lims = self.recording_times.time_lims(chunk=self.chunk)\n        ### check if time_lim is given\n        if isinstance(self.time_lim, type(None)):\n            ### get start and end time from recording_times\n            start_time, end_time = chunk_time_lims\n        else:\n            ### check if time_lim is within chunk\n            if (\n                self.time_lim[0] < chunk_time_lims[0]\n                or self.time_lim[1] > chunk_time_lims[1]\n            ):\n                raise ValueError(\n                    f\"\\nERROR PlotRecordings: time_lim {self.time_lim} is not within chunk.\\n\"\n                    f\"chunk time lims: {chunk_time_lims[0]} - {chunk_time_lims[1]}\\n\"\n                )\n            start_time, end_time = self.time_lim\n\n        ### get timestep\n        time_step = self.recordings[self.chunk][\"dt\"]\n\n        return start_time, end_time, time_step\n\n    def _get_time_arr_list(self):\n        \"\"\"\n        Get combined time array for each subplot of plan.\n\n        Returns:\n            time_arr_list (list):\n                List with time arrays for each subplot of plan.\n        \"\"\"\n        ### loop over compartments of plan\n        time_arr_dict = {}\n        for compartment in np.unique(self.plan[\"compartment\"]):\n            actual_period = self.recordings[self.chunk][f\"{compartment};period\"]\n\n            ### get time array for each recording period of the chunk\n            time_arr_period_list = []\n            nr_periods = 
self.recording_times._get_nr_periods(\n                chunk=self.chunk, compartment=compartment\n            )\n            for period in range(nr_periods):\n                time_lims = self.recording_times.time_lims(\n                    chunk=self.chunk, compartment=compartment, period=period\n                )\n                start_time_preiod = time_lims[0]\n                end_time_period = round(\n                    time_lims[1] + actual_period, get_number_of_decimals(actual_period)\n                )\n                time_arr_period_list.append(\n                    np.arange(start_time_preiod, end_time_period, actual_period)\n                )\n\n            ### combine time arrays of periods\n            time_arr_dict[compartment] = np.concatenate(time_arr_period_list)\n\n        ### get time array for each subplot of plan\n        time_arr_list = []\n        for plot_idx in range(len(self.plan[\"position\"])):\n            compartment = self.plan[\"compartment\"][plot_idx]\n            time_arr_list.append(time_arr_dict[compartment])\n\n        return time_arr_list\n\n    def _get_raw_data_list(self):\n        \"\"\"\n        Get raw data for each subplot of plan.\n\n        Returns:\n            data_list (dict):\n                List with data for each subplot of plan.\n        \"\"\"\n        data_list = []\n        ### loop over subplots of plan\n        for plot_idx in range(len(self.plan[\"position\"])):\n            compartment = self.plan[\"compartment\"][plot_idx]\n            variable: str = self.plan[\"variable\"][plot_idx]\n            ### check if variable is equation\n            if \"+\" in variable or \"-\" in variable or \"*\" in variable or \"/\" in variable:\n                ### get the values of the recorded variables of the compartment, store\n                ### them in dict\n                value_dict = {\n                    rec_var_name: self.recordings[self.chunk][\n                        f\"{compartment};{rec_var_name}\"\n   
                 ]\n                    for rec_var_name in self._compartment_recordings_dict[compartment]\n                }\n                ### evaluate equation with these values\n                variable_data = ef.evaluate_expression_with_dict(\n                    expression=variable, value_dict=value_dict\n                )\n            else:\n                ### get data from recordings\n                variable_data = self.recordings[self.chunk][f\"{compartment};{variable}\"]\n            ### append data to data_list\n            data_list.append(variable_data)\n\n        return data_list\n\n    def _plot(self):\n        \"\"\"\n        Create plot.\n        \"\"\"\n        ### create figure\n        plt.figure(figsize=([6.4 * self.shape[1], 4.8 * self.shape[0]]))\n\n        ### loop over subplots of plan\n        for plot_idx in range(len(self.plan[\"position\"])):\n            ### create subplot\n            plt.subplot(self.shape[0], self.shape[1], self.plan[\"position\"][plot_idx])\n\n            ### fill subplot\n            self._fill_subplot(plot_idx)\n\n        ### save figure\n        plt.tight_layout()\n        figname_parts = self.figname.split(\"/\")\n        if len(figname_parts) > 1:\n            save_dir = \"/\".join(figname_parts[:-1])\n            sf.create_dir(save_dir)\n        plt.savefig(self.figname, dpi=self.dpi)\n        plt.close()\n\n    def _fill_subplot(self, plot_idx):\n        \"\"\"\n        Fill subplot with data.\n\n        Args:\n            plot_idx (int):\n                The index of the subplot in the plan.\n        \"\"\"\n        variable: str = self.plan[\"variable\"][plot_idx]\n\n        ### general subplot settings\n        plt.xlabel(\"time [ms]\")\n        plt.xlim(self._start_time, self._end_time)\n\n        if variable == \"spike\" or variable == \"axon_spike\":\n            ### spike recordings\n            self._fill_subplot_spike(plot_idx)\n        else:\n            ### other (array) recordings\n           
 self._fill_subplot_other(plot_idx)\n\n    def _fill_subplot_spike(self, plot_idx):\n        \"\"\"\n        Fill subplot with spike data.\n\n        Args:\n            plot_idx (int):\n                The index of the subplot in the plan.\n        \"\"\"\n        ### get data\n        compartment = self.plan[\"compartment\"][plot_idx]\n        format: str = self.plan[\"format\"][plot_idx]\n        data = self._raw_data_list[plot_idx]\n\n        ### get spike times and ranks\n        spike_times, spike_ranks = my_raster_plot(data)\n        spike_times = spike_times * self._time_step\n\n        ### get spikes within time_lims\n        mask: np.ndarray = (\n            (spike_times >= self._start_time).astype(int)\n            * (spike_times <= self._end_time).astype(int)\n        ).astype(bool)\n\n        ### check if there are no spikes\n        if mask.size == 0:\n            ### set title\n            plt.title(f\"Spikes {compartment}\")\n            ### print warning\n            print(\n                f\"\\n  WARNING PlotRecordings: {compartment} does not contain any spikes in the given time interval.\"\n            )\n            ### plot text\n            plt.text(\n                0.5,\n                0.5,\n                f\"{compartment} does not contain any spikes.\",\n                va=\"center\",\n                ha=\"center\",\n            )\n            plt.xticks([])\n            plt.yticks([])\n            plt.xlim(0, 1)\n            plt.xlabel(\"\")\n            return\n\n        ### plot raster plot\n        if format == \"raster\" or format == \"hybrid\":\n            self._raster_plot(compartment, spike_ranks, spike_times, mask)\n\n        ### plot mean firing rate\n        if format == \"mean\" or format == \"hybrid\":\n            self._mean_firing_rate_plot(compartment, data, format)\n\n        ### plot interspike interval histogram\n        if format == \"interspike\":\n            self._interspike_interval_plot(compartment, data)\n\n     
   ### plot coefficient of variation histogram\n        if format == \"cv\":\n            self._coefficient_of_variation_plot(compartment, data)\n\n    def _raster_plot(self, compartment, spike_ranks, spike_times, mask):\n        \"\"\"\n        Plot raster plot.\n\n        Args:\n            compartment (str):\n                The name of the compartment.\n            spike_ranks (array):\n                The spike ranks.\n            spike_times (array):\n                The spike times.\n            mask (array):\n                The mask for the spike times.\n        \"\"\"\n        ### set title\n        plt.title(f\"Spikes {compartment} ({spike_ranks.max() + 1})\")\n        ### check if there is only one neuron\n        if spike_ranks.max() == 0:\n            marker, size = [\"|\", 3000]\n        else:\n            marker, size = [\".\", 3]\n        ### plot spikes\n        plt.scatter(\n            spike_times[mask],\n            spike_ranks[mask],\n            color=\"k\",\n            marker=marker,\n            s=size,\n            linewidth=0.1,\n        )\n        ### set limits\n        plt.ylim(-0.5, spike_ranks.max() + 0.5)\n        ### set ylabel\n        plt.ylabel(\"# neurons\")\n        ### set yticks\n        if spike_ranks.max() == 0:\n            plt.yticks([0])\n        else:\n            plt.gca().yaxis.set_major_locator(MaxNLocator(integer=True))\n\n    def _mean_firing_rate_plot(self, compartment, data, format):\n        \"\"\"\n        Plot mean firing rate.\n\n        Args:\n            compartment (str):\n                The name of the compartment.\n            data (array):\n                The spike data.\n            format (str):\n                The format of the plot.\n        \"\"\"\n        ### set title\n        plt.title(f\"Activity {compartment} ({len(data)})\")\n        ### set axis\n        ax = plt.gca()\n        color = \"k\"\n        ### for hybrid format plot mean firing rate in second y-axis\n        if format == 
\"hybrid\":\n            ax = plt.gca().twinx()\n            color = \"r\"\n        ### get mean firing rate\n        time_arr, firing_rate = get_pop_rate(\n            spikes=data,\n            t_start=self._start_time,\n            t_end=self._end_time,\n            time_step=self._time_step,\n        )\n        ### plot mean firing rate\n        ax.plot(time_arr, firing_rate, color=color)\n        ### set limits\n        ax.set_xlim(self._start_time, self._end_time)\n        ### set ylabel\n        ax.set_ylabel(\"Mean firing rate [Hz]\", color=color)\n        ax.tick_params(axis=\"y\", colors=color)\n\n    def _interspike_interval_plot(self, compartment, data):\n        \"\"\"\n        Plot interspike interval histogram.\n\n        Args:\n            compartment (str):\n                The name of the compartment.\n            data (dict):\n                The spike data.\n        \"\"\"\n        ### set title\n        plt.title(f\"Interspike interval histogram {compartment} ({len(data)})\")\n        ### get interspike intervals\n        interspike_intervals_list = inter_spike_interval(spikes=data)\n        ### plot histogram\n        plt.hist(\n            interspike_intervals_list,\n            bins=100,\n            range=(0, 200),\n            density=True,\n            color=\"k\",\n        )\n        ### set limits\n        plt.xlim(0, 200)\n        ### set ylabel\n        plt.ylabel(\"Probability\")\n        plt.xlabel(\"Interspike interval [ms]\")\n\n    def _coefficient_of_variation_plot(self, compartment, data):\n        \"\"\"\n        Plot coefficient of variation histogram.\n\n        Args:\n            compartment (str):\n                The name of the compartment.\n            data (dict):\n                The spike data.\n        \"\"\"\n        ### set title\n        plt.title(f\"Coefficient of variation histogram {compartment} ({len(data)})\")\n        ### get coefficient of variation\n        coefficient_of_variation_dict = 
coefficient_of_variation(\n            spikes=data,\n            per_neuron=True,\n        )\n        coefficient_of_variation_list = list(coefficient_of_variation_dict.values())\n        ### plot histogram\n        plt.hist(\n            coefficient_of_variation_list,\n            bins=100,\n            range=(0, 2),\n            density=True,\n            color=\"k\",\n        )\n        ### set limits\n        plt.xlim(0, 2)\n        ### set ylabel\n        plt.ylabel(\"Probability\")\n        plt.xlabel(\"Coefficient of variation\")\n\n    def _fill_subplot_other(self, plot_idx):\n        \"\"\"\n        Fill subplot with array data.\n\n        Args:\n            plot_idx (int):\n                The index of the subplot in the plan.\n        \"\"\"\n        ### get data\n        compartment = self.plan[\"compartment\"][plot_idx]\n        variable: str = self.plan[\"variable\"][plot_idx]\n        format: str = self.plan[\"format\"][plot_idx]\n        data_arr = self._raw_data_list[plot_idx]\n        time_arr = self._time_arr_list[plot_idx]\n\n        ### get data within time_lims\n        mask: np.ndarray = (\n            (time_arr >= self._start_time).astype(int)\n            * (time_arr <= self._end_time).astype(int)\n        ).astype(bool)\n\n        ### fill gaps in time_arr and data_arr with nan\n        time_arr, data_arr = time_data_add_nan(\n            time_arr=time_arr[mask], data_arr=data_arr[mask], axis=0\n        )\n\n        ### plot line plot\n        if \"line\" in format:\n            self._line_plot(\n                compartment,\n                variable,\n                time_arr,\n                data_arr,\n                plot_idx,\n                mean=\"mean\" in format,\n            )\n\n        ### plot matrix plot\n        if \"matrix\" in format:\n            self._matrix_plot(\n                compartment,\n                variable,\n                time_arr,\n                data_arr,\n                plot_idx,\n                
mean=\"mean\" in format,\n            )\n\n    def _line_plot(self, compartment, variable, time_arr, data_arr, plot_idx, mean):\n        \"\"\"\n        Plot line plot.\n\n        Args:\n            compartment (str):\n                The name of the compartment.\n            variable (str):\n                The name of the variable.\n            time_arr (array):\n                The time array.\n            data_arr (array):\n                The data array.\n            plot_idx (int):\n                The index of the subplot in the plan.\n            mean (bool):\n                If True, plot the mean of the data. Population: average over neurons.\n                Projection: average over preneurons (results in one line for each\n                postneuron).\n        \"\"\"\n\n        ### set title\n        plt.title(f\"Variable {variable} of {compartment} ({data_arr.shape[1]})\")\n\n        ### Shape of data defines how to plot\n        ### 2D array where elements are no lists\n        ### = population data [time, neurons]\n        ### --> plot line for each neuron\n        if len(data_arr.shape) == 2 and isinstance(data_arr[0, 0], list) is not True:\n            ### mean -> average over neurons\n            if mean:\n                data_arr = np.mean(data_arr, 1, keepdims=True)\n            ### plot line for each neuron\n            for neuron in range(data_arr.shape[1]):\n                plt.plot(\n                    time_arr,\n                    data_arr[:, neuron],\n                    color=\"k\",\n                )\n\n        ### 2D array where elements are lists\n        ### = projection data [time, postneurons][preneurons]\n        ### 3D array\n        ### = projection data [time, postneurons, preneurons]\n        ### --> plot line for each preneuron postneuron pair\n        elif len(data_arr.shape) == 3 or (\n            len(data_arr.shape) == 2 and isinstance(data_arr[0, 0], list) is True\n        ):\n            ### plot line for each preneuron 
postneuron pair\n            for post_neuron in range(data_arr.shape[1]):\n                ### the post_neuron has a constant number of preneurons\n                ### --> create array with preneuron indices [time, preneurons]\n                post_neuron_data = np.array(data_arr[:, post_neuron])\n                ### mean -> average over preneurons\n                if mean:\n                    post_neuron_data = np.mean(post_neuron_data, 1, keepdims=True)\n                for pre_neuron in range(post_neuron_data.shape[1]):\n                    plt.plot(\n                        time_arr,\n                        post_neuron_data[:, pre_neuron],\n                        color=\"k\",\n                    )\n        else:\n            print(\n                f\"\\nERROR PlotRecordings: shape of data not supported, {compartment}, {variable} in plot {plot_idx}.\\n\"\n            )\n\n    def _matrix_plot(self, compartment, variable, time_arr, data_arr, plot_idx, mean):\n        \"\"\"\n        Plot matrix plot.\n\n        Args:\n            compartment (str):\n                The name of the compartment.\n            variable (str):\n                The name of the variable.\n            time_arr (array):\n                The time array.\n            data_arr (array):\n                The data array.\n            plot_idx (int):\n                The index of the subplot in the plan.\n            mean (bool):\n                If True, plot the mean of the data. Population: average over neurons.\n                Projection: average over preneurons (results in one line for each\n                postneuron).\n        \"\"\"\n        ### number of neurons i.e. 
postneurons\n        nr_neurons = data_arr.shape[1]\n\n        ### Shape of data defines how to plot\n        ### 2D array where elements are no lists\n        ### = population data [time, neurons]\n        ### --> plot matrix row for each neuron\n        ### mean -> average over neurons\n        if len(data_arr.shape) == 2 and isinstance(data_arr[0, 0], list) is not True:\n            ### mean -> average over neurons\n            if mean:\n                data_arr = np.mean(data_arr, 1, keepdims=True)\n\n        ### 2D array where elements are lists\n        ### = projection data [time, postneurons][preneurons]\n        ### 3D array\n        ### = projection data [time, postneurons, preneurons]\n        ### --> plot matrix row for each preneuron postneuron pair (has to reshape to 2D array [time, neuron pair])\n        ### mean -> average over preneurons\n        elif len(data_arr.shape) == 3 or (\n            len(data_arr.shape) == 2 and isinstance(data_arr[0, 0], list) is True\n        ):\n            array_2D_list = []\n            ### loop over postneurons\n            for post_neuron in range(data_arr.shape[1]):\n                ### the post_neuron has a constant number of preneurons\n                ### --> create array with preneuron indices [time, preneurons]\n                post_neuron_data = np.array(data_arr[:, post_neuron])\n                ### mean --> average over preneurons\n                if mean:\n                    post_neuron_data = np.mean(post_neuron_data, 1, keepdims=True)\n                ### append all preneurons arrays to array_2D_list\n                for pre_neuron in range(post_neuron_data.shape[1]):\n                    array_2D_list.append(post_neuron_data[:, pre_neuron])\n                ### append a None array to array_2D_list to separate postneurons\n                array_2D_list.append(np.empty(post_neuron_data.shape[0]) * np.nan)\n\n            ### convert array_2D_list to 2D array, not use last None array\n            data_arr 
= np.array(array_2D_list[:-1]).T\n\n        ### some other shape not supported\n        else:\n            print(\n                f\"\\nERROR PlotRecordings: shape of data not supported, {compartment}, {variable} in plot {plot_idx}.\\n\"\n            )\n\n        ### plot matrix row for each neuron or preneuron postneuron pair\n        plt.imshow(\n            data_arr.T,\n            aspect=\"auto\",\n            vmin=np.nanmin(data_arr),\n            vmax=np.nanmax(data_arr),\n            extent=[\n                time_arr.min()\n                - self.recordings[self.chunk][f\"{compartment};period\"] / 2,\n                time_arr.max()\n                + self.recordings[self.chunk][f\"{compartment};period\"] / 2,\n                data_arr.shape[1] - 0.5,\n                -0.5,\n            ],\n            cmap=\"viridis\",\n            interpolation=\"none\",\n        )\n        if data_arr.shape[1] == 1:\n            plt.yticks([0])\n        else:\n            ### all y ticks\n            y_tick_positions_all_arr = np.arange(data_arr.shape[1])\n            ### boolean array of valid y ticks\n            valid_y_ticks = np.logical_not(np.isnan(data_arr).any(axis=0))\n            ### get y tick labels\n            if False in valid_y_ticks:\n                ### there are nan entries\n                ### split at nan entries\n                y_tick_positions_split_list = np.array_split(\n                    y_tick_positions_all_arr, np.where(np.logical_not(valid_y_ticks))[0]\n                )\n                ### decrease by 1 after each nan entry\n                y_tick_positions_split_list = [\n                    y_tick_positions_split - idx_split\n                    for idx_split, y_tick_positions_split in enumerate(\n                        y_tick_positions_split_list\n                    )\n                ]\n                ### join split arrays\n                y_tick_labels_all_arr = np.concatenate(y_tick_positions_split_list)\n            else:\n     
           y_tick_labels_all_arr = y_tick_positions_all_arr\n\n            valid_y_ticks_selected_idx_arr = np.linspace(\n                0,\n                np.sum(valid_y_ticks),\n                num=min([10, np.sum(valid_y_ticks)]),\n                dtype=int,\n                endpoint=False,\n            )\n            valid_y_ticks_selected_arr = y_tick_positions_all_arr[valid_y_ticks][\n                valid_y_ticks_selected_idx_arr\n            ]\n            valid_y_ticks_labels_selected_arr = y_tick_labels_all_arr[valid_y_ticks][\n                valid_y_ticks_selected_idx_arr\n            ]\n\n            plt.yticks(valid_y_ticks_selected_arr, valid_y_ticks_labels_selected_arr)\n\n        ### set title\n        plt.title(\n            f\"Variable {variable} of {compartment} ({nr_neurons}) [{ef.sci(np.nanmin(data_arr))}, {ef.sci(np.nanmax(data_arr))}]\"\n        )\n
"},{"location":"additional/analysis_functions/#CompNeuroPy.analysis_functions.PlotRecordings.__init__","title":"__init__(figname, recordings, recording_times, shape, plan, chunk=0, time_lim=None, dpi=300)","text":"

Create and save the plot.

Parameters:

Name Type Description Default figname str

The name of the figure to be saved.

required recordings list

A recordings list obtained from CompNeuroMonitors.

required recording_times RecordingTimes

The RecordingTimes object containing the recording times obtained from CompNeuroMonitors.

required shape tuple

The shape of the figure. (number of rows, number of columns)

required plan dict

Defines which recordings are plotted in which subplot and how. The plan has to contain the following keys: \"position\", \"compartment\", \"variable\", \"format\". The values of the keys have to be lists of the same length. The values of the key \"position\" have to be integers between 1 and the number of subplots (defined by shape). The values of the key \"compartment\" have to be the names of the model compartments as strings. The values of the key \"variable\" have to be strings containing the names of the recorded variables or equations using the recorded variables. The values of the key \"format\" have to be strings defining how the recordings are plotted. The following formats are available for spike recordings: \"raster\", \"mean\", \"hybrid\", \"interspike\". The following formats are available for other recordings: \"line\", \"line_mean\", \"matrix\", \"matrix_mean\".

required chunk int

The chunk of the recordings to be plotted. Default: 0.

0 time_lim tuple

Defines the x-axis for all subplots. The tuple contains two numbers: start and end time in ms. The times have to be within the chunk. Default: None, i.e., the whole chunk is plotted.

None dpi int

The dpi of the saved figure. Default: 300.

300 Source code in CompNeuroPy/analysis_functions.py
@check_types()\ndef __init__(\n    self,\n    figname: str,\n    recordings: list[dict],\n    recording_times: RecordingTimes,\n    shape: tuple[int, int],\n    plan: dict,\n    chunk: int = 0,\n    time_lim: None | tuple[float, float] = None,\n    dpi: int = 300,\n) -> None:\n    \"\"\"\n    Create and save the plot.\n\n    Args:\n        figname (str):\n            The name of the figure to be saved.\n        recordings (list):\n            A recordings list obtained from CompNeuroMonitors.\n        recording_times (RecordingTimes):\n            The RecordingTimes object containing the recording times obtained from\n            CompNeuroMonitors.\n        shape (tuple):\n            The shape of the figure. (number of rows, number of columns)\n        plan (dict):\n            Defines which recordings are plotted in which subplot and how. The plan\n            has to contain the following keys: \"position\", \"compartment\",\n            \"variable\", \"format\". The values of the keys have to be lists of the\n            same length. The values of the key \"position\" have to be integers\n            between 1 and the number of subplots (defined by shape). The values of\n            the key \"compartment\" have to be the names of the model compartments as\n            strings. The values of the key \"variable\" have to be strings containing\n            the names of the recorded variables or equations using the recorded\n            variables. The values of the key \"format\" have to be strings defining\n            how the recordings are plotted. The following formats are available for\n            spike recordings: \"raster\", \"mean\", \"hybrid\", \"interspike\". The\n            following formats are available for other recordings: \"line\",\n            \"line_mean\", \"matrix\", \"matrix_mean\".\n        chunk (int, optional):\n            The chunk of the recordings to be plotted. 
Default: 0.\n        time_lim (tuple, optional):\n            Defines the x-axis for all subplots. The tuple contains two\n            numbers: start and end time in ms. The times have to be\n            within the chunk. Default: None, i.e., the whole chunk is plotted.\n        dpi (int, optional):\n            The dpi of the saved figure. Default: 300.\n    \"\"\"\n    ### print start message\n    print(f\"Generate fig {figname}\", end=\"... \", flush=True)\n\n    ### set attributes\n    self.figname = figname\n    self.recordings = recordings\n    self.recording_times = recording_times\n    self.shape = shape\n    self.plan = plan\n    self.chunk = chunk\n    self.time_lim = time_lim\n    self.dpi = dpi\n\n    ### get available compartments (from recordings) and recorded variables for each\n    ### compartment\n    (\n        self._compartment_list,\n        self._compartment_recordings_dict,\n    ) = self._get_compartment_recordings()\n\n    ### check plan keys and values\n    self._check_plan()\n\n    ### get start and end time for plotting and timestep\n    self._start_time, self._end_time, self._time_step = self._get_start_end_time()\n\n    ### get compbined time array for recordings of each compartment\n    self._time_arr_list = self._get_time_arr_list()\n\n    ### get data from recordings for each subplot\n    self._raw_data_list = self._get_raw_data_list()\n\n    ### create plot\n    self._plot()\n\n    ### print end message\n    print(\"Done\\n\")\n
"},{"location":"additional/analysis_functions/#CompNeuroPy.analysis_functions.my_raster_plot","title":"my_raster_plot(spikes)","text":"

Returns two vectors representing for each recorded spike 1) the spike times and 2) the ranks of the neurons. The spike times are always in simulation steps (in contrast to default ANNarchy raster_plot).

Parameters:

Name Type Description Default spikes dict

ANNarchy spike dict of one population

required

Returns:

Name Type Description t array

spike times in simulation steps

n array

ranks of the neurons

Source code in CompNeuroPy/analysis_functions.py
def my_raster_plot(spikes: dict):\n    \"\"\"\n    Returns two vectors representing for each recorded spike 1) the spike times and 2)\n    the ranks of the neurons. The spike times are always in simulation steps (in\n    contrast to default ANNarchy raster_plot).\n\n    Args:\n        spikes (dict):\n            ANNarchy spike dict of one population\n\n    Returns:\n        t (array):\n            spike times in simulation steps\n        n (array):\n            ranks of the neurons\n    \"\"\"\n    t, n = raster_plot(spikes)\n    np.zeros(10)\n    t = np.round(t / dt(), 0).astype(int)\n    return t, n\n
"},{"location":"additional/analysis_functions/#CompNeuroPy.analysis_functions.get_nanmean","title":"get_nanmean(a, axis=None, dtype=None)","text":"

Same as np.nanmean but without printing warnings.

Parameters:

Name Type Description Default a array_like

Array containing numbers whose mean is desired. If a is not an array, a conversion is attempted.

required axis None or int or tuple of ints

Axis or axes along which the means are computed. The default is to compute the mean of the flattened array.

.. numpy versionadded:: 1.7.0

If this is a tuple of ints, a mean is performed over multiple axes, instead of a single axis or all the axes as before.

None dtype data - type

Type to use in computing the mean. For integer inputs, the default is float64; for floating point inputs, it is the same as the input dtype.

None

Returns:

Name Type Description m ndarray, see dtype parameter above

If out=None, returns a new array containing the mean values, otherwise a reference to the output array is returned.

Source code in CompNeuroPy/analysis_functions.py
def get_nanmean(a, axis=None, dtype=None):\n    \"\"\"\n    Same as np.nanmean but without printing warnings.\n\n    Args:\n        a (array_like):\n            Array containing numbers whose mean is desired. If `a` is not an\n            array, a conversion is attempted.\n        axis (None or int or tuple of ints, optional):\n            Axis or axes along which the means are computed. The default is to\n            compute the mean of the flattened array.\n\n            .. numpy versionadded:: 1.7.0\n\n            If this is a tuple of ints, a mean is performed over multiple axes,\n            instead of a single axis or all the axes as before.\n        dtype (data-type, optional):\n            Type to use in computing the mean.  For integer inputs, the default\n            is `float64`; for floating point inputs, it is the same as the\n            input dtype.\n\n    Returns:\n        m (ndarray, see dtype parameter above):\n            If `out=None`, returns a new array containing the mean values,\n            otherwise a reference to the output array is returned.\n    \"\"\"\n    with warnings.catch_warnings():\n        warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n        ret = np.nanmean(a, axis=axis, dtype=dtype)\n    return ret\n
"},{"location":"additional/analysis_functions/#CompNeuroPy.analysis_functions.get_nanstd","title":"get_nanstd(a, axis=None, dtype=None)","text":"

Same as np.nanstd but without printing warnings.

Parameters:

Name Type Description Default a array_like

Calculate the standard deviation of these values.

required axis None or int or tuple of ints

Axis or axes along which the standard deviation is computed. The default is to compute the standard deviation of the flattened array.

.. numpy versionadded:: 1.7.0

If this is a tuple of ints, a standard deviation is performed over multiple axes, instead of a single axis or all the axes as before.

None dtype dtype

Type to use in computing the standard deviation. For arrays of integer type the default is float64, for arrays of float types it is the same as the array type.

None

Returns:

Name Type Description standard_deviation ndarray, see dtype parameter above

If out is None, return a new array containing the standard deviation, otherwise return a reference to the output array.

Source code in CompNeuroPy/analysis_functions.py
def get_nanstd(a, axis=None, dtype=None):\n    \"\"\"\n    Same as np.nanstd but without printing warnings.\n\n    Args:\n        a (array_like):\n            Calculate the standard deviation of these values.\n        axis (None or int or tuple of ints, optional):\n            Axis or axes along which the standard deviation is computed. The\n            default is to compute the standard deviation of the flattened array.\n\n            .. numpy versionadded:: 1.7.0\n\n            If this is a tuple of ints, a standard deviation is performed over\n            multiple axes, instead of a single axis or all the axes as before.\n        dtype (dtype, optional):\n            Type to use in computing the standard deviation. For arrays of\n            integer type the default is float64, for arrays of float types it is\n            the same as the array type.\n\n    Returns:\n        standard_deviation (ndarray, see dtype parameter above):\n            If `out` is None, return a new array containing the standard deviation,\n            otherwise return a reference to the output array.\n    \"\"\"\n    with warnings.catch_warnings():\n        warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n        ret = np.nanstd(a, axis=axis, dtype=dtype)\n    return ret\n
"},{"location":"additional/analysis_functions/#CompNeuroPy.analysis_functions.get_population_power_spectrum","title":"get_population_power_spectrum(spikes, time_step, t_start=None, t_end=None, fft_size=None)","text":"

Generates power spectrum of population spikes, returns frequency_arr and power_spectrum_arr. Using the Welch method from: Welch, P. (1967). The use of fast Fourier transform for the estimation of power spectra: a method based on time averaging over short, modified periodograms. IEEE Transactions on audio and electroacoustics, 15(2), 70-73.

The spike arrays are split into multiple arrays and then multiple FFTs are performed and the results are averaged.

Size of split signals and the time step of the simulation determine the frequency resolution and the maximum frequency: maximum frequency [Hz] = 500 / time_step frequency resolution [Hz] = 1000 / (time_step * fftSize)

Parameters:

Name Type Description Default spikes dictionary

ANNarchy spike dict of one population

required time_step float

time step of the simulation in ms

required t_start float or int

start time of analyzed data in ms. Default: time of first spike

None t_end float or int

end time of analyzed data in ms. Default: time of last spike

None fft_size int

signal size for the FFT (size of split arrays) has to be a power of 2. Default: maximum

None

Returns:

Name Type Description frequency_arr array

array with frequencies

spectrum array

array with power spectrum

Source code in CompNeuroPy/analysis_functions.py
def get_population_power_spectrum(\n    spikes,\n    time_step,\n    t_start=None,\n    t_end=None,\n    fft_size=None,\n):\n    \"\"\"\n    Generates power spectrum of population spikes, returns frequency_arr and\n    power_spectrum_arr. Using the Welch methode from: Welch, P. (1967). The use of fast\n    Fourier transform for the estimation of power spectra: a method based on time\n    averaging over short, modified periodograms. IEEE Transactions on audio and\n    electroacoustics, 15(2), 70-73.\n\n    The spike arrays are splitted into multiple arrays and then multiple FFTs are\n    performed and the results are averaged.\n\n    Size of splitted signals and the time step of the simulation determine the frequency\n    resolution and the maximum frequency:\n        maximum frequency [Hz] = 500 / time_step\n        frequency resolution [Hz] = 1000 / (time_step * fftSize)\n\n    Args:\n        spikes (dicitonary):\n            ANNarchy spike dict of one population\n        time_step (float):\n            time step of the simulation in ms\n        t_start (float or int, optional):\n            start time of analyzed data in ms. Default: time of first spike\n        t_end (float or int, optional):\n            end time of analyzed data in ms. Default: time of last spike\n        fft_size (int, optional):\n            signal size for the FFT (size of splitted arrays)\n            has to be a power of 2. 
Default: maximum\n\n    Returns:\n        frequency_arr (array):\n            array with frequencies\n        spectrum (array):\n            array with power spectrum\n    \"\"\"\n\n    def ms_to_s(x):\n        return x / 1000\n\n    ### get population_size / sampling_frequency\n    populations_size = len(list(spikes.keys()))\n    sampling_frequency = 1 / ms_to_s(time_step)  # in Hz\n\n    ### check if there are spikes in data\n    t, _ = my_raster_plot(spikes)\n    if len(t) < 2:\n        ### there are no 2 spikes\n        print(\"WARNING: get_population_power_spectrum: <2 spikes!\")\n        ### --> return None or zeros\n        if fft_size == None:\n            print(\n                \"ERROR: get_population_power_spectrum: <2 spikes and no fft_size given!\"\n            )\n            quit()\n        else:\n            frequency_arr = np.fft.fftfreq(fft_size, 1.0 / sampling_frequency)\n            frequency_arr_ret = frequency_arr[2 : int(fft_size / 2)]\n            spectrum_ret = np.zeros(frequency_arr_ret.shape)\n            return [frequency_arr_ret, spectrum_ret]\n\n    ### check if t_start / t_end are None\n    if t_start == None:\n        t_start = round(t.min() * time_step, get_number_of_decimals(time_step))\n    if t_end == None:\n        t_end = round(t.max() * time_step, get_number_of_decimals(time_step))\n\n    ### calculate time\n    simulation_time = round(t_end - t_start, get_number_of_decimals(time_step))  # in ms\n\n    ### get fft_size\n    ### if None --> as large as possible\n    if fft_size is None:\n        pow = 1\n        while (2 ** (pow + 1)) / sampling_frequency < ms_to_s(simulation_time):\n            pow = pow + 1\n        fft_size = 2**pow\n\n    if ms_to_s(simulation_time) < (fft_size / sampling_frequency):\n        ### catch a too large fft_size\n        print(\n            f\"Too large fft_size {fft_size} for duration {simulation_time} ms. 
FFT_size has to be smaller than {int(ms_to_s(simulation_time)*sampling_frequency)}!\"\n        )\n        return [np.zeros(int(fft_size / 2 - 2)), np.zeros(int(fft_size / 2 - 2))]\n    elif (np.log2(fft_size) - int(np.log2(fft_size))) != 0:\n        ### catch fft_size if its not power of 2\n        print(\"FFT_size hast to be power of 2!\")\n        return [np.zeros(int(fft_size / 2 - 2)), np.zeros(int(fft_size / 2 - 2))]\n    else:\n        print(\n            f\"power sepctrum, min = {1000 / (time_step * fft_size)}, max = {500 / time_step}\"\n        )\n        ### calculate frequency powers\n        spectrum = np.zeros((populations_size, fft_size))\n        for neuron in range(populations_size):\n            ### sampling steps array\n            spiketrain = np.zeros(\n                int(np.round(ms_to_s(simulation_time) * sampling_frequency))\n            )\n            ### spike times as sampling steps\n            idx = (\n                np.round(\n                    ms_to_s((np.array(spikes[neuron]) * time_step)) * sampling_frequency\n                )\n            ).astype(np.int32)\n            ### cut the spikes before t_start and after t_end\n            idx_start = ms_to_s(t_start) * sampling_frequency\n            idx_end = ms_to_s(t_end) * sampling_frequency\n            mask = ((idx > idx_start).astype(int) * (idx < idx_end).astype(int)).astype(\n                bool\n            )\n            idx = (idx[mask] - idx_start).astype(np.int32)\n\n            ### set spiketrain array to one if there was a spike at sampling step\n            spiketrain[idx] = 1\n\n            ### generate multiple overlapping sequences out of the spike trains\n            spiketrain_sequences = _hanning_split_overlap(\n                spiketrain, fft_size, int(fft_size / 2)\n            )\n\n            ### generate power spectrum\n            spectrum[neuron] = get_nanmean(\n                np.abs(np.fft.fft(spiketrain_sequences)) ** 2, 0\n            )\n\n        ### 
mean spectrum over all neurons\n        spectrum = get_nanmean(spectrum, 0)\n\n        frequency_arr = np.fft.fftfreq(fft_size, 1.0 / sampling_frequency)\n\n        return (frequency_arr[2 : int(fft_size / 2)], spectrum[2 : int(fft_size / 2)])\n
"},{"location":"additional/analysis_functions/#CompNeuroPy.analysis_functions.get_power_spektrum_from_time_array","title":"get_power_spektrum_from_time_array(arr, presimulationTime, simulationTime, simulation_dt, samplingfrequency=250, fftSize=1024)","text":"

Generates power spectrum of time signal (returns frequencies_arr and power_arr). Using the Welch method (Welch, 1967).

samplingfrequency: to sample the arr, in Hz --> max frequency = samplingfrequency / 2 fftSize: signal size for FFT, duration (in s) = fftSize / samplingfrequency --> frequency resolution = samplingfrequency / fftSize

Parameters:

Name Type Description Default arr array

time array, value for each timestep

required presimulationTime float or int

simulation time which will not be analyzed

required simulationTime float or int

analyzed simulation time

required simulation_dt float or int

simulation timestep

required samplingfrequency float or int

sampling frequency for sampling the time array. Default: 250

250 fftSize int

signal size for the FFT (size of split arrays) has to be a power of 2. Default: 1024

1024

Returns:

Name Type Description frequency_arr array

array with frequencies

spectrum array

array with power spectrum

Source code in CompNeuroPy/analysis_functions.py
def get_power_spektrum_from_time_array(\n    arr,\n    presimulationTime,\n    simulationTime,\n    simulation_dt,\n    samplingfrequency=250,\n    fftSize=1024,\n):\n    \"\"\"\n    Generates power spectrum of time signal (returns frequencies_arr and power_arr).\n    Using the Welch methode (Welch,1967).\n\n    amplingfrequency: to sample the arr, in Hz --> max frequency = samplingfrequency / 2\n    fftSize: signal size for FFT, duration (in s) = fftSize / samplingfrequency\n    --> frequency resolution = samplingfrequency / fftSize\n\n    Args:\n        arr (array):\n            time array, value for each timestep\n        presimulationTime (float or int):\n            simulation time which will not be analyzed\n        simulationTime (float or int):\n            analyzed simulation time\n        simulation_dt (float or int):\n            simulation timestep\n        samplingfrequency (float or int, optional):\n            sampling frequency for sampling the time array. Default: 250\n        fftSize (int, optional):\n            signal size for the FFT (size of splitted arrays)\n            has to be a power of 2. 
Default: 1024\n\n    Returns:\n        frequency_arr (array):\n            array with frequencies\n        spectrum (array):\n            array with power spectrum\n    \"\"\"\n\n    if (simulationTime / 1000) < (fftSize / samplingfrequency):\n        print(\"Simulation time has to be >=\", fftSize / samplingfrequency, \"s for FFT!\")\n        return [np.zeros(int(fftSize / 2 - 2)), np.zeros(int(fftSize / 2 - 2))]\n    else:\n        ### sampling steps array\n        sampling_arr = arr[0 :: int((1 / samplingfrequency) * 1000 / simulation_dt)]\n\n        ### generate multiple overlapping sequences\n        sampling_arr_sequences = _hanning_split_overlap(\n            sampling_arr, fftSize, int(fftSize / 2)\n        )\n\n        ### generate power spectrum\n        spektrum = get_nanmean(np.abs(np.fft.fft(sampling_arr_sequences)) ** 2, 0)\n\n        frequenzen = np.fft.fftfreq(fftSize, 1.0 / samplingfrequency)\n\n        return (frequenzen[2 : int(fftSize / 2)], spektrum[2 : int(fftSize / 2)])\n
"},{"location":"additional/analysis_functions/#CompNeuroPy.analysis_functions.get_pop_rate","title":"get_pop_rate(spikes, t_start=None, t_end=None, time_step=1, t_smooth_ms=-1)","text":"

Generates a smoothed population firing rate. Returns a time array and a firing rate array.

Parameters:

Name Type Description Default spikes dictionary

ANNarchy spike dict of one population

required t_start float or int

start time of analyzed data in ms. Default: time of first spike

None t_end float or int

end time of analyzed data in ms. Default: time of last spike

None time_step float or int

time step of the simulation in ms. Default: 1

1 t_smooth_ms float or int

time window for firing rate calculation in ms, if -1 --> time window sizes are automatically detected. Default: -1

-1

Returns:

Name Type Description time_arr array

array with time steps in ms

rate array

array with population rate in Hz for each time step

Source code in CompNeuroPy/analysis_functions.py
def get_pop_rate(\n    spikes: dict,\n    t_start: float | int | None = None,\n    t_end: float | int | None = None,\n    time_step: float | int = 1,\n    t_smooth_ms: float | int = -1,\n):\n    \"\"\"\n    Generates a smoothed population firing rate. Returns a time array and a firing rate\n    array.\n\n    Args:\n        spikes (dictionary):\n            ANNarchy spike dict of one population\n        t_start (float or int, optional):\n            start time of analyzed data in ms. Default: time of first spike\n        t_end (float or int, optional):\n            end time of analyzed data in ms. Default: time of last spike\n        time_step (float or int, optional):\n            time step of the simulation in ms. Default: 1\n        t_smooth_ms (float or int, optional):\n            time window for firing rate calculation in ms, if -1 --> time window sizes\n            are automatically detected. Default: -1\n\n    Returns:\n        time_arr (array):\n            array with time steps in ms\n        rate (array):\n            array with population rate in Hz for each time step\n    \"\"\"\n    dt = time_step\n\n    t, _ = my_raster_plot(spikes)\n\n    ### check if there are spikes in population at all\n    if len(t) > 1:\n        if t_start == None:\n            t_start = round(t.min() * time_step, get_number_of_decimals(time_step))\n        if t_end == None:\n            t_end = round(t.max() * time_step, get_number_of_decimals(time_step))\n\n        duration = round(t_end - t_start, get_number_of_decimals(time_step))\n\n        ### if t_smooth is given --> use classic time_window method\n        if t_smooth_ms > 0:\n            return _get_pop_rate_old(\n                spikes, duration, dt=dt, t_start=t_start, t_smooth_ms=t_smooth_ms\n            )\n        else:\n            ### concatenate all spike times and sort them\n            spike_arr = dt * np.sort(\n                np.concatenate(\n                    [np.array(spikes[neuron]).astype(int) for neuron 
in spikes.keys()]\n                )\n            )\n            nr_neurons = len(list(spikes.keys()))\n            nr_spikes = spike_arr.size\n\n            ### use _recursive_rate to get firing rate\n            ### spike array is splitted in time bins\n            ### time bins widths are automatically found\n            time_population_rate, population_rate = _recursive_rate(\n                spike_arr / 1000.0,\n                t0=t_start / 1000.0,\n                t1=(t_start + duration) / 1000.0,\n                duration_init=duration / 1000.0,\n                nr_neurons=nr_neurons,\n                nr_spikes=nr_spikes,\n            )\n            ### time_population_rate was returned in s --> transform it into ms\n            time_population_rate = time_population_rate * 1000\n            time_arr0 = np.arange(t_start, t_start + duration, dt)\n            if len(time_population_rate) > 1:\n                ### interpolate\n                interpolate_func = interp1d(\n                    time_population_rate,\n                    population_rate,\n                    kind=\"linear\",\n                    bounds_error=False,\n                    fill_value=(population_rate[0], population_rate[-1]),\n                )\n                population_rate_arr = interpolate_func(time_arr0)\n            else:\n                population_rate_arr = np.zeros(len(time_arr0))\n                mask = time_arr0 == time_population_rate[0]\n                population_rate_arr[mask] = population_rate[0]\n\n            ret = population_rate_arr\n    else:\n        if t_start == None or t_end == None:\n            return None\n        else:\n            duration = t_end - t_start\n            ret = np.zeros(int(duration / dt))\n\n    return (np.arange(t_start, t_start + duration, dt), ret)\n
"},{"location":"additional/analysis_functions/#CompNeuroPy.analysis_functions.plot_recordings","title":"plot_recordings(figname, recordings, recording_times, chunk, shape, plan, time_lim=None, dpi=300)","text":"

Plots the recordings of a single chunk from recordings. Plotted variables are specified in plan.

Parameters:

Name Type Description Default figname str

path + name of figure (e.g. \"figures/my_figure.png\")

required recordings list

a recordings list from CompNeuroPy obtained with the function get_recordings() from a CompNeuroMonitors object.

required recording_times object

recording_times object from CompNeuroPy obtained with the function get_recording_times() from a CompNeuroMonitors object.

required chunk int

which chunk of recordings should be used (the index of chunk)

required shape tuple

Defines the subplot arrangement e.g. (3,2) = 3 rows, 2 columns

required plan list of strings

Defines which recordings are plotted in which subplot and how. Entries of the list have the structure: \"subplot_nr;model_component_name;variable_to_plot;format\", e.g. \"1,my_pop1;v;line\". mode: defines how the data is plotted, available modes: - for spike data: raster, mean, hybrid - for other data: line, mean, matrix - only for projection data: matrix_mean

required time_lim tuple

Defines the x-axis for all subplots. The list contains two numbers: start and end time in ms. The times have to be within the chunk. Default: None, i.e., time lims of chunk

None dpi int

The dpi of the saved figure. Default: 300

300 Source code in CompNeuroPy/analysis_functions.py
@check_types()\ndef plot_recordings(\n    figname: str,\n    recordings: list,\n    recording_times: RecordingTimes,\n    chunk: int,\n    shape: tuple,\n    plan: list[str],\n    time_lim: None | tuple = None,\n    dpi: int = 300,\n):\n    \"\"\"\n    Plots the recordings of a single chunk from recordings. Plotted variables are\n    specified in plan.\n\n    Args:\n        figname (str):\n            path + name of figure (e.g. \"figures/my_figure.png\")\n        recordings (list):\n            a recordings list from CompNeuroPy obtained with the function\n            get_recordings() from a CompNeuroMonitors object.\n        recording_times (object):\n            recording_times object from CompNeuroPy obtained with the\n            function get_recording_times() from a CompNeuroMonitors object.\n        chunk (int):\n            which chunk of recordings should be used (the index of chunk)\n        shape (tuple):\n            Defines the subplot arrangement e.g. (3,2) = 3 rows, 2 columns\n        plan (list of strings):\n            Defines which recordings are plotted in which subplot and how.\n            Entries of the list have the structure:\n                \"subplot_nr;model_component_name;variable_to_plot;format\",\n                e.g. \"1,my_pop1;v;line\".\n                mode: defines how the data is plotted, available modes:\n                    - for spike data: raster, mean, hybrid\n                    - for other data: line, mean, matrix\n                    - only for projection data: matrix_mean\n        time_lim (tuple, optional):\n            Defines the x-axis for all subplots. The list contains two\n            numbers: start and end time in ms. The times have to be\n            within the chunk. Default: None, i.e., time lims of chunk\n        dpi (int, optional):\n            The dpi of the saved figure. 
Default: 300\n    \"\"\"\n    proc = Process(\n        target=_plot_recordings,\n        args=(figname, recordings, recording_times, chunk, shape, plan, time_lim, dpi),\n    )\n    proc.start()\n    proc.join()\n    if proc.exitcode != 0:\n        quit()\n
"},{"location":"additional/analysis_functions/#CompNeuroPy.analysis_functions.get_number_of_zero_decimals","title":"get_number_of_zero_decimals(nr)","text":"

For numbers which are smaller than one get the number of digits after the decimal point which are zero (plus 1). For the number 0 or numbers >=1 return zero, e.g.:

Parameters:

Name Type Description Default nr float or int

the number from which the number of digits are obtained

required

Returns:

Name Type Description decimals int

number of digits after the decimal point which are zero (plus 1)

Examples:

>>> get_number_of_zero_decimals(0.12)\n1\n>>> get_number_of_zero_decimals(0.012)\n2\n>>> get_number_of_zero_decimals(1.012)\n0\n
Source code in CompNeuroPy/analysis_functions.py
def get_number_of_zero_decimals(nr):\n    \"\"\"\n    For numbers which are smaller than zero get the number of digits after the decimal\n    point which are zero (plus 1). For the number 0 or numbers >=1 return zero, e.g.:\n\n    Args:\n        nr (float or int):\n            the number from which the number of digits are obtained\n\n    Returns:\n        decimals (int):\n            number of digits after the decimal point which are zero (plus 1)\n\n    Examples:\n        >>> get_number_of_zero_decimals(0.12)\n        1\n        >>> get_number_of_zero_decimals(0.012)\n        2\n        >>> get_number_of_zero_decimals(1.012)\n        0\n    \"\"\"\n    decimals = 0\n    if nr != 0:\n        while abs(nr) < 1:\n            nr = nr * 10\n            decimals = decimals + 1\n\n    return decimals\n
"},{"location":"additional/analysis_functions/#CompNeuroPy.analysis_functions.get_number_of_decimals","title":"get_number_of_decimals(nr)","text":"

Get number of digits after the decimal point.

Parameters:

Name Type Description Default nr float or int

the number from which the number of digits are obtained

required

Returns:

Name Type Description decimals int

number of digits after the decimal point

Examples:

>>> get_number_of_decimals(5)\n0\n>>> get_number_of_decimals(5.1)\n1\n>>> get_number_of_decimals(0.0101)\n4\n
Source code in CompNeuroPy/analysis_functions.py
def get_number_of_decimals(nr):\n    \"\"\"\n    Get number of digits after the decimal point.\n\n    Args:\n        nr (float or int):\n            the number from which the number of digits are obtained\n\n    Returns:\n        decimals (int):\n            number of digits after the decimal point\n\n    Examples:\n        >>> get_number_of_decimals(5)\n        0\n        >>> get_number_of_decimals(5.1)\n        1\n        >>> get_number_of_decimals(0.0101)\n        4\n    \"\"\"\n\n    if nr != int(nr):\n        decimals = len(str(nr).split(\".\")[1])\n    else:\n        decimals = 0\n\n    return decimals\n
"},{"location":"additional/analysis_functions/#CompNeuroPy.analysis_functions.sample_data_with_timestep","title":"sample_data_with_timestep(time_arr, data_arr, timestep)","text":"

Samples a data array each timestep using interpolation

Parameters:

Name Type Description Default time_arr array

times of data_arr in ms

required data_arr array

array with data values from which will be sampled

required timestep float or int

timestep in ms for sampling

required

Returns:

Name Type Description time_arr array

sampled time array

data_arr array

sampled data array

Source code in CompNeuroPy/analysis_functions.py
def sample_data_with_timestep(time_arr, data_arr, timestep):\n    \"\"\"\n    Samples a data array each timestep using interpolation\n\n    Args:\n        time_arr (array):\n            times of data_arr in ms\n        data_arr (array):\n            array with data values from which will be sampled\n        timestep (float or int):\n            timestep in ms for sampling\n\n    Returns:\n        time_arr (array):\n            sampled time array\n        data_arr (array):\n            sampled data array\n    \"\"\"\n    interpolate_func = interp1d(\n        time_arr, data_arr, bounds_error=False, fill_value=\"extrapolate\"\n    )\n    min_time = round(\n        round(time_arr[0] / timestep, 0) * timestep,\n        get_number_of_decimals(timestep),\n    )\n    max_time = round(\n        round(time_arr[-1] / timestep, 0) * timestep,\n        get_number_of_decimals(timestep),\n    )\n    new_time_arr = np.arange(min_time, max_time + timestep, timestep)\n    new_data_arr = interpolate_func(new_time_arr)\n\n    return (new_time_arr, new_data_arr)\n
"},{"location":"additional/analysis_functions/#CompNeuroPy.analysis_functions.time_data_add_nan","title":"time_data_add_nan(time_arr, data_arr, fill_time_step=None, axis=0)","text":"

If there are gaps in time_arr --> fill them with respective time values. Fill the corresponding data_arr values with nan.

By default it is tried to fill the time array with continuously increasing times based on the smallest time difference found there can still be discontinuities after filling the arrays (because existing time values are not changed).

But one can also give a fixed fill time step.

Parameters:

Name Type Description Default time_arr 1D array

times of data_arr in ms

required data_arr nD array

the size of the specified dimension of data array must have the same length as time_arr

required fill_time_step number, optional, default=None

if there are gaps they are filled with this time step

None axis int

which dimension of the data_arr belongs to the time_arr

0

Returns:

Name Type Description time_arr 1D array

time array with gaps filled

data_arr nD array

data array with gaps filled

Source code in CompNeuroPy/analysis_functions.py
def time_data_add_nan(time_arr, data_arr, fill_time_step=None, axis=0):\n    \"\"\"\n    If there are gaps in time_arr --> fill them with respective time values.\n    Fill the corresponding data_arr values with nan.\n\n    By default it is tried to fill the time array with continuously increasing times\n    based on the smallest time difference found there can still be discontinuities after\n    filling the arrays (because existing time values are not changed).\n\n    But one can also give a fixed fill time step.\n\n    Args:\n        time_arr (1D array):\n            times of data_arr in ms\n        data_arr (nD array):\n            the size of the specified dimension of data array must have the same length\n            as time_arr\n        fill_time_step (number, optional, default=None):\n            if there are gaps they are filled with this time step\n        axis (int):\n            which dimension of the data_arr belongs to the time_arr\n\n    Returns:\n        time_arr (1D array):\n            time array with gaps filled\n        data_arr (nD array):\n            data array with gaps filled\n    \"\"\"\n    time_arr = time_arr.astype(float)\n    data_arr = data_arr.astype(float)\n    data_arr_shape = data_arr.shape\n\n    if data_arr_shape[axis] != time_arr.size:\n        print(\n            \"ERROR time_data_add_nan: time_arr must have same length as specified axis (default=0) of data_arr!\"\n        )\n        quit()\n\n    ### find gaps\n    time_diff_arr = np.round(np.diff(time_arr), 6)\n    if isinstance(fill_time_step, type(None)):\n        time_diff_min = time_diff_arr.min()\n    else:\n        time_diff_min = fill_time_step\n    gaps_arr = time_diff_arr > time_diff_min\n\n    ### split arrays at gaps\n    time_arr_split = np.split(\n        time_arr, indices_or_sections=np.where(gaps_arr)[0] + 1, axis=0\n    )\n    data_arr_split = np.split(\n        data_arr, indices_or_sections=np.where(gaps_arr)[0] + 1, axis=axis\n    )\n\n    ### fill gaps 
between splits\n    data_arr_append_shape = list(data_arr_shape)\n    for split_arr_idx in range(len(time_arr_split) - 1):\n        ### get gaps boundaries\n        current_end = time_arr_split[split_arr_idx][-1]\n        next_start = time_arr_split[split_arr_idx + 1][0]\n        ### create gap filling arrays\n        time_arr_append = np.arange(\n            current_end + time_diff_min, next_start, time_diff_min\n        )\n        data_arr_append_shape[axis] = time_arr_append.size\n        data_arr_append = np.zeros(tuple(data_arr_append_shape)) * np.nan\n        ### append gap filling arrays to splitted arrays\n        time_arr_split[split_arr_idx] = np.append(\n            arr=time_arr_split[split_arr_idx],\n            values=time_arr_append,\n            axis=0,\n        )\n        data_arr_split[split_arr_idx] = np.append(\n            arr=data_arr_split[split_arr_idx],\n            values=data_arr_append,\n            axis=axis,\n        )\n\n    ### combine splitted arrays again\n    time_arr = np.concatenate(time_arr_split, axis=0)\n    data_arr = np.concatenate(data_arr_split, axis=axis)\n\n    return (time_arr, data_arr)\n
"},{"location":"additional/analysis_functions/#CompNeuroPy.analysis_functions.rmse","title":"rmse(a, b)","text":"

Calculates the root-mean-square error between two arrays.

Parameters:

Name Type Description Default a array

first array

required b array

second array

required

Returns:

Name Type Description rmse float

root-mean-square error

Source code in CompNeuroPy/analysis_functions.py
def rmse(a, b):\n    \"\"\"\n    Calculates the root-mean-square error between two arrays.\n\n    Args:\n        a (array):\n            first array\n        b (array):\n            second array\n\n    Returns:\n        rmse (float):\n            root-mean-square error\n    \"\"\"\n\n    return np.sqrt(np.mean((a - b) ** 2))\n
"},{"location":"additional/analysis_functions/#CompNeuroPy.analysis_functions.rsse","title":"rsse(a, b)","text":"

Calculates the root-sum-square error between two arrays.

Parameters:

Name Type Description Default a array

first array

required b array

second array

required

Returns:

Name Type Description rsse float

root-sum-square error

Source code in CompNeuroPy/analysis_functions.py
def rsse(a, b):\n    \"\"\"\n    Calculates the root-sum-square error between two arrays.\n\n    Args:\n        a (array):\n            first array\n        b (array):\n            second array\n\n    Returns:\n        rsse (float):\n            root-sum-square error\n    \"\"\"\n\n    return np.sqrt(np.sum((a - b) ** 2))\n
"},{"location":"additional/analysis_functions/#CompNeuroPy.analysis_functions.get_minimum","title":"get_minimum(input_data)","text":"

Returns the minimum of the input data.

Parameters:

Name Type Description Default input_data list, np.ndarray, tuple, or float

The input data from which the minimum is to be obtained.

required

Returns:

Name Type Description minimum float

The minimum of the input data.

Source code in CompNeuroPy/analysis_functions.py
def get_minimum(input_data: list | np.ndarray | tuple | float):\n    \"\"\"\n    Returns the minimum of the input data.\n\n    Args:\n        input_data (list, np.ndarray, tuple, or float):\n            The input data from which the minimum is to be obtained.\n\n    Returns:\n        minimum (float):\n            The minimum of the input data.\n    \"\"\"\n    if isinstance(input_data, (list, np.ndarray, tuple)):\n        # If the input is a list, numpy array, or tuple, we handle them as follows\n        flattened_list = [\n            item\n            for sublist in input_data\n            for item in (\n                sublist if isinstance(sublist, (list, np.ndarray, tuple)) else [sublist]\n            )\n        ]\n        return float(min(flattened_list))\n    else:\n        # If the input is a single value, return it as the minimum\n        return float(input_data)\n
"},{"location":"additional/analysis_functions/#CompNeuroPy.analysis_functions.get_maximum","title":"get_maximum(input_data)","text":"

Returns the maximum of the input data.

Parameters:

Name Type Description Default input_data list, np.ndarray, tuple, or float

The input data from which the maximum is to be obtained.

required

Returns:

Name Type Description maximum float

The maximum of the input data.

Source code in CompNeuroPy/analysis_functions.py
def get_maximum(input_data: list | np.ndarray | tuple | float):\n    \"\"\"\n    Returns the maximum of the input data.\n\n    Args:\n        input_data (list, np.ndarray, tuple, or float):\n            The input data from which the maximum is to be obtained.\n\n    Returns:\n        maximum (float):\n            The maximum of the input data.\n    \"\"\"\n\n    if isinstance(input_data, (list, np.ndarray, tuple)):\n        # If the input is a list, numpy array, or tuple, we handle them as follows\n        flattened_list = [\n            item\n            for sublist in input_data\n            for item in (\n                sublist if isinstance(sublist, (list, np.ndarray, tuple)) else [sublist]\n            )\n        ]\n        return float(max(flattened_list))\n    else:\n        # If the input is a single value, return it as the maximum\n        return float(input_data)\n
"},{"location":"additional/extra_functions/","title":"Extra Functions","text":""},{"location":"additional/extra_functions/#CompNeuroPy.extra_functions.Cmap","title":"Cmap","text":"

Class to create a colormap with a given name and range. The colormap can be called with a value between 0 and 1 to get the corresponding rgb value.

Source code in CompNeuroPy/extra_functions.py
class Cmap:\n    \"\"\"\n    Class to create a colormap with a given name and range. The colormap can be called\n    with a value between 0 and 1 to get the corresponding rgb value.\n    \"\"\"\n\n    def __init__(self, cmap_name, vmin, vmax):\n        \"\"\"\n        Args:\n            cmap_name (str):\n                Name of the colormap\n            vmin (float):\n                Lower limit of the colormap\n            vmax (float):\n                Upper limit of the colormap\n        \"\"\"\n        self.cmap_name = cmap_name\n        self.cmap = plt.get_cmap(cmap_name)\n        self.norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)\n        self.scalarMap = cm.ScalarMappable(norm=self.norm, cmap=self.cmap)\n\n    def __call__(self, x, alpha=1):\n        \"\"\"\n        Returns the rgba value of the colormap at the given value.\n\n        Args:\n            x (float):\n                Value between 0 and 1\n            alpha (float):\n                Alpha value of the rgba value\n\n        Returns:\n            rgba (tuple):\n                RGBA value of the colormap at the given value\n        \"\"\"\n        vals = self.get_rgb(x)\n        if isinstance(vals, tuple):\n            vals = vals[:3] + (alpha,)\n        else:\n            vals[:, -1] = alpha\n        return vals\n\n    def get_rgb(self, val):\n        \"\"\"\n        Returns the rgb value of the colormap at the given value.\n\n        Args:\n            val (float):\n                Value between 0 and 1\n\n        Returns:\n            rgb (tuple):\n                RGB value of the colormap at the given value\n        \"\"\"\n        return self.scalarMap.to_rgba(val)\n
"},{"location":"additional/extra_functions/#CompNeuroPy.extra_functions.Cmap.__init__","title":"__init__(cmap_name, vmin, vmax)","text":"

Parameters:

Name Type Description Default cmap_name str

Name of the colormap

required vmin float

Lower limit of the colormap

required vmax float

Upper limit of the colormap

required Source code in CompNeuroPy/extra_functions.py
def __init__(self, cmap_name, vmin, vmax):\n    \"\"\"\n    Args:\n        cmap_name (str):\n            Name of the colormap\n        vmin (float):\n            Lower limit of the colormap\n        vmax (float):\n            Upper limit of the colormap\n    \"\"\"\n    self.cmap_name = cmap_name\n    self.cmap = plt.get_cmap(cmap_name)\n    self.norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)\n    self.scalarMap = cm.ScalarMappable(norm=self.norm, cmap=self.cmap)\n
"},{"location":"additional/extra_functions/#CompNeuroPy.extra_functions.Cmap.__call__","title":"__call__(x, alpha=1)","text":"

Returns the rgba value of the colormap at the given value.

Parameters:

Name Type Description Default x float

Value between 0 and 1

required alpha float

Alpha value of the rgba value

1

Returns:

Name Type Description rgba tuple

RGBA value of the colormap at the given value

Source code in CompNeuroPy/extra_functions.py
def __call__(self, x, alpha=1):\n    \"\"\"\n    Returns the rgba value of the colormap at the given value.\n\n    Args:\n        x (float):\n            Value between 0 and 1\n        alpha (float):\n            Alpha value of the rgba value\n\n    Returns:\n        rgba (tuple):\n            RGBA value of the colormap at the given value\n    \"\"\"\n    vals = self.get_rgb(x)\n    if isinstance(vals, tuple):\n        vals = vals[:3] + (alpha,)\n    else:\n        vals[:, -1] = alpha\n    return vals\n
"},{"location":"additional/extra_functions/#CompNeuroPy.extra_functions.Cmap.get_rgb","title":"get_rgb(val)","text":"

Returns the rgb value of the colormap at the given value.

Parameters:

Name Type Description Default val float

Value between 0 and 1

required

Returns:

Name Type Description rgb tuple

RGB value of the colormap at the given value

Source code in CompNeuroPy/extra_functions.py
def get_rgb(self, val):\n    \"\"\"\n    Returns the rgb value of the colormap at the given value.\n\n    Args:\n        val (float):\n            Value between 0 and 1\n\n    Returns:\n        rgb (tuple):\n            RGB value of the colormap at the given value\n    \"\"\"\n    return self.scalarMap.to_rgba(val)\n
"},{"location":"additional/extra_functions/#CompNeuroPy.extra_functions.DecisionTree","title":"DecisionTree","text":"

Class to create a decision tree.

Source code in CompNeuroPy/extra_functions.py
class DecisionTree:\n    \"\"\"\n    Class to create a decision tree.\n    \"\"\"\n\n    def __init__(self):\n        \"\"\"\n        Create a new empty decision tree.\n        \"\"\"\n        ### node list is a list of lists\n        ### first idx = level of tree\n        ### second idx = all nodes in the level\n        self.node_list = [[]]\n\n    def node(self, parent=None, prob=0, name=None):\n        \"\"\"\n        Create a new node in the decision tree.\n\n        Args:\n            parent (node object):\n                Parent node of the new node\n            prob (float):\n                Probability of the new node\n            name (str):\n                Name of the new node\n\n        Returns:\n            new_node (node object):\n                The new node\n        \"\"\"\n\n        ### create new node\n        new_node = DecisionTreeNode(tree=self, parent=parent, prob=prob, name=name)\n        ### add it to node_list\n        if len(self.node_list) == new_node.level:\n            self.node_list.append([])\n        self.node_list[new_node.level].append(new_node)\n        ### return the node object\n        return new_node\n\n    def get_path_prod(self, name):\n        \"\"\"\n        Get the path and path product of a node with a given name.\n\n        Args:\n            name (str):\n                Name of the node\n\n        Returns:\n            path (str):\n                Path to the node\n            path_prod (float):\n                Path product of the node\n        \"\"\"\n\n        ### search for all nodes with name\n        ### start from behind\n        search_node_list = []\n        path_list = []\n        path_prod_list = []\n        for level in range(len(self.node_list) - 1, -1, -1):\n            for node in self.node_list[level]:\n                if node.name == name:\n                    search_node_list.append(node)\n        ### get the paths and path products for the found nodes\n        for node in search_node_list:\n          
  path, path_prod = self._get_path_prod_rec(node)\n            path_list.append(path)\n            path_prod_list.append(path_prod)\n        ### return the paths and path products\n        return [\n            [path_list[idx], path_prod_list[idx]]\n            for idx in range(len(search_node_list))\n        ]\n\n    def _get_path_prod_rec(self, node):\n        \"\"\"\n        Recursive function to get the path and path product of a node.\n\n        Args:\n            node (node object):\n                Node to get the path and path product of\n\n        Returns:\n            path_str (str):\n                Path to the node\n            prob (float):\n                Path product of the node\n        \"\"\"\n        node: DecisionTreeNode = node\n\n        if node.parent == None:\n            return [\"/\" + node.name, node.prob]\n        else:\n            path_str, prob = self._get_path_prod_rec(node.parent)\n            return [path_str + \"/\" + node.name, prob * node.prob]\n
"},{"location":"additional/extra_functions/#CompNeuroPy.extra_functions.DecisionTree.__init__","title":"__init__()","text":"

Create a new empty decision tree.

Source code in CompNeuroPy/extra_functions.py
def __init__(self):\n    \"\"\"\n    Create a new empty decision tree.\n    \"\"\"\n    ### node list is a list of lists\n    ### first idx = level of tree\n    ### second idx = all nodes in the level\n    self.node_list = [[]]\n
"},{"location":"additional/extra_functions/#CompNeuroPy.extra_functions.DecisionTree.node","title":"node(parent=None, prob=0, name=None)","text":"

Create a new node in the decision tree.

Parameters:

Name Type Description Default parent node object

Parent node of the new node

None prob float

Probability of the new node

0 name str

Name of the new node

None

Returns:

Name Type Description new_node node object

The new node

Source code in CompNeuroPy/extra_functions.py
def node(self, parent=None, prob=0, name=None):\n    \"\"\"\n    Create a new node in the decision tree.\n\n    Args:\n        parent (node object):\n            Parent node of the new node\n        prob (float):\n            Probability of the new node\n        name (str):\n            Name of the new node\n\n    Returns:\n        new_node (node object):\n            The new node\n    \"\"\"\n\n    ### create new node\n    new_node = DecisionTreeNode(tree=self, parent=parent, prob=prob, name=name)\n    ### add it to node_list\n    if len(self.node_list) == new_node.level:\n        self.node_list.append([])\n    self.node_list[new_node.level].append(new_node)\n    ### return the node object\n    return new_node\n
"},{"location":"additional/extra_functions/#CompNeuroPy.extra_functions.DecisionTree.get_path_prod","title":"get_path_prod(name)","text":"

Get the path and path product of a node with a given name.

Parameters:

Name Type Description Default name str

Name of the node

required

Returns:

Name Type Description path str

Path to the node

path_prod float

Path product of the node

Source code in CompNeuroPy/extra_functions.py
def get_path_prod(self, name):\n    \"\"\"\n    Get the path and path product of a node with a given name.\n\n    Args:\n        name (str):\n            Name of the node\n\n    Returns:\n        path (str):\n            Path to the node\n        path_prod (float):\n            Path product of the node\n    \"\"\"\n\n    ### search for all nodes with name\n    ### start from behind\n    search_node_list = []\n    path_list = []\n    path_prod_list = []\n    for level in range(len(self.node_list) - 1, -1, -1):\n        for node in self.node_list[level]:\n            if node.name == name:\n                search_node_list.append(node)\n    ### get the paths and path products for the found nodes\n    for node in search_node_list:\n        path, path_prod = self._get_path_prod_rec(node)\n        path_list.append(path)\n        path_prod_list.append(path_prod)\n    ### return the paths and path products\n    return [\n        [path_list[idx], path_prod_list[idx]]\n        for idx in range(len(search_node_list))\n    ]\n
"},{"location":"additional/extra_functions/#CompNeuroPy.extra_functions.DecisionTreeNode","title":"DecisionTreeNode","text":"

Class to create a node in a decision tree.

Source code in CompNeuroPy/extra_functions.py
class DecisionTreeNode:\n    \"\"\"\n    Class to create a node in a decision tree.\n    \"\"\"\n\n    id_counter = 0\n\n    def __init__(self, tree: DecisionTree, parent=None, prob=0, name=\"\"):\n        \"\"\"\n        Create a new node in a decision tree.\n\n        Args:\n            tree (DecisionTree object):\n                Decision tree the node belongs to\n            parent (node object):\n                Parent node of the new node\n            prob (float):\n                Probability of the new node\n            name (str):\n                Name of the new node\n        \"\"\"\n        self.tree = tree\n        parent: DecisionTreeNode = parent\n        self.parent = parent\n        self.prob = prob\n        self.name = name\n        self.id = int(self.id_counter)\n        self.id_counter += 1\n        if parent != None:\n            self.level = int(parent.level + 1)\n        else:\n            self.level = int(0)\n\n    def add(self, name, prob):\n        \"\"\"\n        Add a child node to the node.\n\n        Args:\n            name (str):\n                Name of the new node\n            prob (float):\n                Probability of the new node\n\n        Returns:\n            new_node (node object):\n                The new node\n        \"\"\"\n\n        return self.tree.node(parent=self, prob=prob, name=name)\n\n    def get_path_prod(self):\n        \"\"\"\n        Get the path and path product of the node.\n\n        Returns:\n            path (str):\n                Path to the node\n            path_prod (float):\n                Path product of the node\n        \"\"\"\n        return self.tree._get_path_prod_rec(self)\n
"},{"location":"additional/extra_functions/#CompNeuroPy.extra_functions.DecisionTreeNode.__init__","title":"__init__(tree, parent=None, prob=0, name='')","text":"

Create a new node in a decision tree.

Parameters:

Name Type Description Default tree DecisionTree object

Decision tree the node belongs to

required parent node object

Parent node of the new node

None prob float

Probability of the new node

0 name str

Name of the new node

'' Source code in CompNeuroPy/extra_functions.py
def __init__(self, tree: DecisionTree, parent=None, prob=0, name=\"\"):\n    \"\"\"\n    Create a new node in a decision tree.\n\n    Args:\n        tree (DecisionTree object):\n            Decision tree the node belongs to\n        parent (node object):\n            Parent node of the new node\n        prob (float):\n            Probability of the new node\n        name (str):\n            Name of the new node\n    \"\"\"\n    self.tree = tree\n    parent: DecisionTreeNode = parent\n    self.parent = parent\n    self.prob = prob\n    self.name = name\n    self.id = int(self.id_counter)\n    self.id_counter += 1\n    if parent != None:\n        self.level = int(parent.level + 1)\n    else:\n        self.level = int(0)\n
"},{"location":"additional/extra_functions/#CompNeuroPy.extra_functions.DecisionTreeNode.add","title":"add(name, prob)","text":"

Add a child node to the node.

Parameters:

Name Type Description Default name str

Name of the new node

required prob float

Probability of the new node

required

Returns:

Name Type Description new_node node object

The new node

Source code in CompNeuroPy/extra_functions.py
def add(self, name, prob):\n    \"\"\"\n    Add a child node to the node.\n\n    Args:\n        name (str):\n            Name of the new node\n        prob (float):\n            Probability of the new node\n\n    Returns:\n        new_node (node object):\n            The new node\n    \"\"\"\n\n    return self.tree.node(parent=self, prob=prob, name=name)\n
"},{"location":"additional/extra_functions/#CompNeuroPy.extra_functions.DecisionTreeNode.get_path_prod","title":"get_path_prod()","text":"

Get the path and path product of the node.

Returns:

Name Type Description path str

Path to the node

path_prod float

Path product of the node

Source code in CompNeuroPy/extra_functions.py
def get_path_prod(self):\n    \"\"\"\n    Get the path and path product of the node.\n\n    Returns:\n        path (str):\n            Path to the node\n        path_prod (float):\n            Path product of the node\n    \"\"\"\n    return self.tree._get_path_prod_rec(self)\n
"},{"location":"additional/extra_functions/#CompNeuroPy.extra_functions.print_df","title":"print_df(df)","text":"

Prints the complete dataframe df

Parameters:

Name Type Description Default df pandas dataframe

Dataframe to be printed

required Source code in CompNeuroPy/extra_functions.py
def print_df(df):\n    \"\"\"\n    Prints the complete dataframe df\n\n    Args:\n        df (pandas dataframe):\n            Dataframe to be printed\n    \"\"\"\n    with pd.option_context(\n        \"display.max_rows\", None\n    ):  # more options can be specified also\n        print(df)\n
"},{"location":"additional/extra_functions/#CompNeuroPy.extra_functions.flatten_list","title":"flatten_list(lst)","text":"

Retuns flattened list

Parameters:

Name Type Description Default lst list of lists or mixed

values and lists): List to be flattened

required

Returns:

Name Type Description new_list list

Flattened list

Source code in CompNeuroPy/extra_functions.py
def flatten_list(lst):\n    \"\"\"\n    Retuns flattened list\n\n    Args:\n        lst (list of lists or mixed: values and lists):\n            List to be flattened\n\n    Returns:\n        new_list (list):\n            Flattened list\n    \"\"\"\n\n    ### if lists in lst --> upack them and retunr flatten_list of new list\n    new_lst = []\n    list_in_lst = False\n    for val in lst:\n        if isinstance(val, list):\n            list_in_lst = True\n            for sub_val in val:\n                new_lst.append(sub_val)\n        else:\n            new_lst.append(val)\n\n    if list_in_lst:\n        return flatten_list(new_lst)\n    ### else return lst\n    else:\n        return lst\n
"},{"location":"additional/extra_functions/#CompNeuroPy.extra_functions.remove_key","title":"remove_key(d, key)","text":"

Removes an element from a dict, returns the new dict

Parameters:

Name Type Description Default d dict

Dict to be modified

required key str

Key to be removed

required

Returns:

Name Type Description r dict

Modified dict

Source code in CompNeuroPy/extra_functions.py
def remove_key(d, key):\n    \"\"\"\n    Removes an element from a dict, returns the new dict\n\n    Args:\n        d (dict):\n            Dict to be modified\n        key (str):\n            Key to be removed\n\n    Returns:\n        r (dict):\n            Modified dict\n    \"\"\"\n    r = dict(d)\n    del r[key]\n    return r\n
"},{"location":"additional/extra_functions/#CompNeuroPy.extra_functions.suppress_stdout","title":"suppress_stdout()","text":"

Suppresses the print output of a function

Examples:

with suppress_stdout():\n    print(\"this will not be printed\")\n
Source code in CompNeuroPy/extra_functions.py
@contextmanager\ndef suppress_stdout():\n    \"\"\"\n    Suppresses the print output of a function\n\n    Examples:\n        ```python\n        with suppress_stdout():\n            print(\"this will not be printed\")\n        ```\n    \"\"\"\n    with open(os.devnull, \"w\") as devnull:\n        old_stdout = sys.stdout\n        sys.stdout = devnull\n        try:\n            yield\n        finally:\n            sys.stdout = old_stdout\n
"},{"location":"additional/extra_functions/#CompNeuroPy.extra_functions.sci","title":"sci(nr)","text":"

Rounds a number to a single decimal. If number is smaller than 0 it is converted to scientific notation with 1 decimal.

Parameters:

Name Type Description Default nr float or int

Number to be converted

required

Returns:

Name Type Description str str

String of the number in scientific notation

Examples:

>>> sci(0.0001)\n'1.0e-4'\n>>> sci(1.77)\n'1.8'\n>>> sci(1.77e-5)\n'1.8e-5'\n>>> sci(177.22)\n'177.2'\n
Source code in CompNeuroPy/extra_functions.py
def sci(nr):\n    \"\"\"\n    Rounds a number to a single decimal.\n    If number is smaller than 0 it is converted to scientific notation with 1 decimal.\n\n    Args:\n        nr (float or int):\n            Number to be converted\n\n    Returns:\n        str (str):\n            String of the number in scientific notation\n\n    Examples:\n        >>> sci(0.0001)\n        '1.0e-4'\n        >>> sci(1.77)\n        '1.8'\n        >>> sci(1.77e-5)\n        '1.8e-5'\n        >>> sci(177.22)\n        '177.2'\n    \"\"\"\n    if af.get_number_of_zero_decimals(nr) == 0:\n        return str(round(nr, 1))\n    else:\n        return f\"{nr*10**af.get_number_of_zero_decimals(nr):.1f}e-{af.get_number_of_zero_decimals(nr)}\"\n
"},{"location":"additional/extra_functions/#CompNeuroPy.extra_functions.create_cm","title":"create_cm(colors, name='my_cmap', N=256, gamma=1.0, vmin=0, vmax=1)","text":"

Create a LinearSegmentedColormap from a list of colors.

Parameters:

Name Type Description Default colors array-like of colors or array-like of (value, color

If only colors are given, they are equidistantly mapped from the range :math:[0, 1]; i.e. 0 maps to colors[0] and 1 maps to colors[-1]. If (value, color) pairs are given, the mapping is from value to color. This can be used to divide the range unevenly.

required name str

The name of the colormap, by default 'my_cmap'.

'my_cmap' N int

The number of rgb quantization levels, by default 256.

256 gamma float

Gamma correction value, by default 1.0.

1.0 vmin float

The minimum value of the colormap, by default 0.

0 vmax float

The maximum value of the colormap, by default 1.

1

Returns:

Name Type Description linear_colormap _LinearColormapClass

The colormap object

Source code in CompNeuroPy/extra_functions.py
def create_cm(colors, name=\"my_cmap\", N=256, gamma=1.0, vmin=0, vmax=1):\n    \"\"\"\n    Create a `LinearSegmentedColormap` from a list of colors.\n\n    Args:\n        colors (array-like of colors or array-like of (value, color)):\n            If only colors are given, they are equidistantly mapped from the\n            range :math:`[0, 1]`; i.e. 0 maps to ``colors[0]`` and 1 maps to\n            ``colors[-1]``.\n            If (value, color) pairs are given, the mapping is from *value*\n            to *color*. This can be used to divide the range unevenly.\n        name (str, optional):\n            The name of the colormap, by default 'my_cmap'.\n        N (int, optional):\n            The number of rgb quantization levels, by default 256.\n        gamma (float, optional):\n            Gamma correction value, by default 1.0.\n        vmin (float, optional):\n            The minimum value of the colormap, by default 0.\n        vmax (float, optional):\n            The maximum value of the colormap, by default 1.\n\n    Returns:\n        linear_colormap (_LinearColormapClass):\n            The colormap object\n    \"\"\"\n    if not np.iterable(colors):\n        raise ValueError(\"colors must be iterable\")\n\n    if (\n        isinstance(colors[0], Sized)\n        and len(colors[0]) == 2\n        and not isinstance(colors[0], str)\n    ):\n        # List of value, color pairs\n        vals, colors = zip(*colors)\n        vals = np.array(vals).astype(float)\n        colors = list(colors)\n        ### insert values for 0 and 1 if not given\n        ### they equal the colors of the borders of the given range\n        if vals.min() != 0.0:\n            colors = [colors[np.argmin(vals)]] + colors\n            vals = np.insert(vals, 0, 0.0)\n        if vals.max() != 1.0:\n            colors = colors + [colors[np.argmax(vals)]]\n            vals = np.insert(vals, len(vals), 1.0)\n    else:\n        vals = np.linspace(0, 1, len(colors))\n\n    ### sort values and 
colors, they have to increase\n    sort_idx = np.argsort(vals)\n    vals = vals[sort_idx]\n    colors = [colors[idx] for idx in sort_idx]\n\n    r_g_b_a = np.zeros((len(colors), 4))\n    for color_idx, color in enumerate(colors):\n        if isinstance(color, str):\n            ### color given by name\n            r_g_b_a[color_idx] = to_rgba_array(color)\n        else:\n            ### color given by rgb(maybe a) value\n            color = np.array(color).astype(float)\n            ### check color size\n            if len(color) != 3 and len(color) != 4:\n                raise ValueError(\n                    \"colors must be names or consist of 3 (rgb) or 4 (rgba) numbers\"\n                )\n            if color.max() > 1:\n                ### assume that max value is 255\n                color[:3] = color[:3] / 255\n            if len(color) == 4:\n                ### gamma already given\n                r_g_b_a[color_idx] = color\n            else:\n                ### add gamma\n                r_g_b_a[color_idx] = np.concatenate([color, np.array([gamma])])\n    r = r_g_b_a[:, 0]\n    g = r_g_b_a[:, 1]\n    b = r_g_b_a[:, 2]\n    a = r_g_b_a[:, 3]\n\n    cdict = {\n        \"red\": np.column_stack([vals, r, r]),\n        \"green\": np.column_stack([vals, g, g]),\n        \"blue\": np.column_stack([vals, b, b]),\n        \"alpha\": np.column_stack([vals, a, a]),\n    }\n\n    return _LinearColormapClass(name, cdict, N, gamma, vmin, vmax)\n
"},{"location":"additional/extra_functions/#CompNeuroPy.extra_functions.evaluate_expression_with_dict","title":"evaluate_expression_with_dict(expression, value_dict)","text":"

Evaluate a mathematical expression using values from a dictionary.

This function takes a mathematical expression as a string and a dictionary containing variable names as keys and corresponding values as numpy arrays. It replaces the variable names in the expression with their corresponding values from the dictionary and evaluates the expression.

Parameters:

Name Type Description Default expression str

A mathematical expression to be evaluated. Variable names in the expression should match the keys in the value_dict.

required value_dict dict

A dictionary containing variable names (strings) as keys and corresponding numpy arrays or numbers as values.

required

Returns:

Name Type Description result value or array

The result of evaluating the expression using the provided values.

Examples:

>>> my_dict = {\"a\": np.ones(10), \"b\": np.arange(10)}\n>>> my_string = \"a*2-b+10\"\n>>> evaluate_expression_with_dict(my_string, my_dict)\narray([12., 11., 10.,  9.,  8.,  7.,  6.,  5.,  4.,  3.])\n
Source code in CompNeuroPy/extra_functions.py
def evaluate_expression_with_dict(expression, value_dict):\n    \"\"\"\n    Evaluate a mathematical expression using values from a dictionary.\n\n    This function takes a mathematical expression as a string and a dictionary\n    containing variable names as keys and corresponding values as numpy arrays.\n    It replaces the variable names in the expression with their corresponding\n    values from the dictionary and evaluates the expression.\n\n    Args:\n        expression (str):\n            A mathematical expression to be evaluated. Variable\n            names in the expression should match the keys in the value_dict.\n        value_dict (dict):\n            A dictionary containing variable names (strings) as\n            keys and corresponding numpy arrays or numbers as values.\n\n    Returns:\n        result (value or array):\n            The result of evaluating the expression using the provided values.\n\n    Examples:\n        >>> my_dict = {\"a\": np.ones(10), \"b\": np.arange(10)}\n        >>> my_string = \"a*2-b+10\"\n        >>> evaluate_expression_with_dict(my_string, my_dict)\n        array([12., 11., 10.,  9.,  8.,  7.,  6.,  5.,  4.,  3.])\n    \"\"\"\n    # Replace dictionary keys in the expression with their corresponding values\n    ### replace names with dict entries\n    expression = _replace_names_with_dict(\n        expression=expression, name_of_dict=\"value_dict\", dictionary=value_dict\n    )\n\n    ### evaluate the new expression\n    try:\n        result = eval(expression)\n        return result\n    except Exception as e:\n        raise ValueError(f\"Error while evaluating expression: {str(e)}\")\n
"},{"location":"additional/model_functions/","title":"Model Functions","text":""},{"location":"additional/model_functions/#CompNeuroPy.model_functions.compile_in_folder","title":"compile_in_folder(folder_name, net=None, clean=False, silent=False)","text":"

Creates the compilation folder in annarchy_folders/ or uses existing ones. Compiles the current network.

Parameters:

Name Type Description Default folder_name str

Name of the folder within annarchy_folders/

required net ANNarchy network

ANNarchy network. Default: None.

None clean bool

If True, the library is recompiled entirely, else only the changes since last compilation are compiled. Default: False.

False silent bool

Suppress output. Defaults to False.

False Source code in CompNeuroPy/model_functions.py
def compile_in_folder(folder_name, net=None, clean=False, silent=False):\n    \"\"\"\n    Creates the compilation folder in annarchy_folders/ or uses existing ones. Compiles\n    the current network.\n\n    Args:\n        folder_name (str):\n            Name of the folder within annarchy_folders/\n        net (ANNarchy network, optional):\n            ANNarchy network. Default: None.\n        clean (bool, optional):\n            If True, the library is recompiled entirely, else only the changes since\n            last compilation are compiled. Default: False.\n        silent (bool, optional):\n            Suppress output. Defaults to False.\n    \"\"\"\n    sf.create_dir(\"annarchy_folders/\" + folder_name, print_info=False)\n    if isinstance(net, type(None)):\n        compile(\"annarchy_folders/\" + folder_name, clean=clean, silent=silent)\n    else:\n        net.compile(\"annarchy_folders/\" + folder_name, clean=clean, silent=silent)\n    if os.getcwd().split(\"/\")[-1] == \"annarchy_folders\":\n        os.chdir(\"../\")\n
"},{"location":"additional/model_functions/#CompNeuroPy.model_functions.annarchy_compiled","title":"annarchy_compiled(net_id=0)","text":"

Check if ANNarchy network was compiled.

Parameters:

Name Type Description Default net_id int

Network ID. Default: 0.

0 Source code in CompNeuroPy/model_functions.py
def annarchy_compiled(net_id=0):\n    \"\"\"\n    Check if ANNarchy network was compiled.\n\n    Args:\n        net_id (int, optional):\n            Network ID. Default: 0.\n    \"\"\"\n    return Global._network[net_id][\"compiled\"]\n
"},{"location":"additional/model_functions/#CompNeuroPy.model_functions.get_full_model","title":"get_full_model()","text":"

Return all current population and projection names.

Returns:

Name Type Description model_dict dict

Dictionary with keys \"populations\" and \"projections\" and values lists of population and projection names, respectively.

Source code in CompNeuroPy/model_functions.py
def get_full_model():\n    \"\"\"\n    Return all current population and projection names.\n\n    Returns:\n        model_dict (dict):\n            Dictionary with keys \"populations\" and \"projections\" and values lists of\n            population and projection names, respectively.\n    \"\"\"\n    return {\n        \"populations\": [pop.name for pop in populations()],\n        \"projections\": [proj.name for proj in projections()],\n    }\n
"},{"location":"additional/model_functions/#CompNeuroPy.model_functions.cnp_clear","title":"cnp_clear(functions=True, neurons=True, synapses=True, constants=True)","text":"

Like clear with ANNarchy, but CompNeuroModel objects are also cleared.

Parameters:

Name Type Description Default functions bool

If True, all functions are cleared. Default: True.

True neurons bool

If True, all neurons are cleared. Default: True.

True synapses bool

If True, all synapses are cleared. Default: True.

True constants bool

If True, all constants are cleared. Default: True.

True Source code in CompNeuroPy/model_functions.py
def cnp_clear(functions=True, neurons=True, synapses=True, constants=True):\n    \"\"\"\n    Like clear with ANNarchy, but CompNeuroModel objects are also cleared.\n\n    Args:\n        functions (bool, optional):\n            If True, all functions are cleared. Default: True.\n        neurons (bool, optional):\n            If True, all neurons are cleared. Default: True.\n        synapses (bool, optional):\n            If True, all synapses are cleared. Default: True.\n        constants (bool, optional):\n            If True, all constants are cleared. Default: True.\n    \"\"\"\n    clear(functions=functions, neurons=neurons, synapses=synapses, constants=constants)\n    for model_name in CompNeuroModel._initialized_models.keys():\n        CompNeuroModel._initialized_models[model_name] = False\n    for model_name in CompNeuroModel._compiled_models.keys():\n        CompNeuroModel._compiled_models[model_name] = False\n
"},{"location":"additional/simulation_functions/","title":"Simulation Functions","text":""},{"location":"additional/simulation_functions/#CompNeuroPy.simulation_functions.current_step","title":"current_step(pop, t1=500, t2=500, a1=0, a2=100)","text":"

Stimulates a given population in two periods with two input currents.

Parameters:

Name Type Description Default pop str

population name of population, which should be stimulated with input current neuron model of population has to contain \"I_app\" as input current

required t1 int

time in ms before current step

500 t2 int

time in ms after current step

500 a1 int

current amplitude before current step

0 a2 int

current amplitude after current step

100

Returns:

Name Type Description return_dict dict

dictionary containing:

  • duration (int): duration of the simulation
Source code in CompNeuroPy/simulation_functions.py
def current_step(pop, t1=500, t2=500, a1=0, a2=100):\n    \"\"\"\n    Stimulates a given population in two periods with two input currents.\n\n    Args:\n        pop (str):\n            population name of population, which should be stimulated with input current\n            neuron model of population has to contain \"I_app\" as input current\n        t1 (int):\n            time in ms before current step\n        t2 (int):\n            time in ms after current step\n        a1 (int):\n            current amplitude before current step\n        a2 (int):\n            current amplitude after current step\n\n    Returns:\n        return_dict (dict):\n            dictionary containing:\n\n            - duration (int): duration of the simulation\n    \"\"\"\n\n    ### save prev input current\n    I_prev = get_population(pop).I_app\n\n    ### first/pre current step simulation\n    get_population(pop).I_app = a1\n    simulate(t1)\n\n    ### second/post current step simulation\n    get_population(pop).I_app = a2\n    simulate(t2)\n\n    ### reset input current to previous value\n    get_population(pop).I_app = I_prev\n\n    ### return some additional information which could be usefull\n    return {\"duration\": t1 + t2}\n
"},{"location":"additional/simulation_functions/#CompNeuroPy.simulation_functions.current_stim","title":"current_stim(pop, t=500, a=100)","text":"

Stimulates a given population during specified period 't' with input current with amplitude 'a', after this stimulation the current is reset to initial value (before stimulation).

Parameters:

Name Type Description Default pop str

population name of population, which should be stimulated with input current neuron model of population has to contain \"I_app\" as input current

required t int

duration in ms

500 a int

current amplitude

100 Source code in CompNeuroPy/simulation_functions.py
def current_stim(pop, t=500, a=100):\n    \"\"\"\n    Stimulates a given population during specified period 't' with input current with\n    amplitude 'a', after this stimulation the current is reset to initial value\n    (before stimulation).\n\n    Args:\n        pop (str):\n            population name of population, which should be stimulated with input current\n            neuron model of population has to contain \"I_app\" as input current\n        t (int):\n            duration in ms\n        a (int):\n            current amplitude\n    \"\"\"\n\n    return current_step(pop, t1=t, t2=0, a1=a, a2=0)\n
"},{"location":"additional/simulation_functions/#CompNeuroPy.simulation_functions.current_ramp","title":"current_ramp(pop, a0, a1, dur, n)","text":"

Conducts multiple current stimulations with constantly changing current inputs. After this current_ramp stimulation the current amplitude is reset to the initial value (before current ramp).

Parameters:

Name Type Description Default pop str

population name of population, which should be stimulated with input current neuron model of population has to contain \"I_app\" as input current

required a0 int

initial current amplitude (of first stimulation)

required a1 int

final current amplitude (of last stimulation)

required dur int

duration of the complete current ramp (all stimulations)

required n int

number of stimulations

required

Warning

dur/n should be divisible by the simulation time step without remainder

Returns:

Name Type Description return_dict dict

dictionary containing:

  • da (int): current step size
  • dur_stim (int): duration of one stimulation

Raises:

Type Description AssertionError

if resulting duration of one stimulation is not divisible by the simulation time step without remainder

Source code in CompNeuroPy/simulation_functions.py
def current_ramp(pop, a0, a1, dur, n):\n    \"\"\"\n    Conducts multiple current stimulations with constantly changing current inputs.\n    After this current_ramp stimulation the current amplitude is reset to the initial\n    value (before current ramp).\n\n\n    Args:\n        pop (str):\n            population name of population, which should be stimulated with input current\n            neuron model of population has to contain \"I_app\" as input current\n        a0 (int):\n            initial current amplitude (of first stimulation)\n        a1 (int):\n            final current amplitude (of last stimulation)\n        dur (int):\n            duration of the complete current ramp (all stimulations)\n        n (int):\n            number of stimulations\n\n    !!! warning\n        dur/n should be divisible by the simulation time step without remainder\n\n    Returns:\n        return_dict (dict):\n            dictionary containing:\n\n            - da (int): current step size\n            - dur_stim (int): duration of one stimulation\n\n    Raises:\n        AssertionError: if resulting duration of one stimulation is not divisible by the\n            simulation time step without remainder\n    \"\"\"\n\n    assert (dur / n) / dt() % 1 == 0, (\n        \"ERROR current_ramp: dur/n should result in a duration (for a single stimulation) which is divisible by the simulation time step (without remainder)\\ncurrent duration = \"\n        + str(dur / n)\n        + \", timestep = \"\n        + str(dt())\n        + \"!\\n\"\n    )\n\n    da = (a1 - a0) / (n - 1)  # for n stimulations only n-1 steps occur\n    dur_stim = dur / n\n    amp = a0\n    for _ in range(n):\n        current_stim(pop, t=dur_stim, a=amp)\n        amp = amp + da\n\n    return {\"da\": da, \"dur_stim\": dur_stim}\n
"},{"location":"additional/simulation_functions/#CompNeuroPy.simulation_functions.increasing_current","title":"increasing_current(pop, a0, da, nr_steps, dur_step)","text":"

Conducts multiple current stimulations with constantly increasing current inputs. After this increasing_current stimulation the current amplitude is reset to the initial value (before increasing_current).

Parameters:

Name Type Description Default pop str

population name of population, which should be stimulated with input current neuron model of population has to contain \"I_app\" as input current

required a0 int

initial current amplitude (of first stimulation)

required da int

current step size

required nr_steps int

number of stimulations

required dur_step int

duration of one stimulation

required

Returns:

Name Type Description return_dict dict

dictionary containing:

  • current_list (list): list of current amplitudes for each stimulation
Source code in CompNeuroPy/simulation_functions.py
def increasing_current(pop, a0, da, nr_steps, dur_step):\n    \"\"\"\n    Conducts multiple current stimulations with constantly increasing current inputs.\n    After this increasing_current stimulation the current amplitude is reset to the\n    initial value (before increasing_current).\n\n    Args:\n        pop (str):\n            population name of population, which should be stimulated with input current\n            neuron model of population has to contain \"I_app\" as input current\n        a0 (int):\n            initial current amplitude (of first stimulation)\n        da (int):\n            current step size\n        nr_steps (int):\n            number of stimulations\n        dur_step (int):\n            duration of one stimulation\n\n    Returns:\n        return_dict (dict):\n            dictionary containing:\n\n            - current_list (list): list of current amplitudes for each stimulation\n    \"\"\"\n    current_list = []\n    a = a0\n    for _ in range(nr_steps):\n        current_list.append(a)\n        current_stim(pop, t=dur_step, a=a)\n        a += da\n\n    return {\"current_list\": current_list}\n
"},{"location":"additional/simulation_requirements/","title":"Simulation Requirements","text":""},{"location":"additional/simulation_requirements/#CompNeuroPy.simulation_requirements.ReqPopHasAttr","title":"ReqPopHasAttr","text":"

Checks if population(s) contains the attribute(s) (parameters or variables)

Source code in CompNeuroPy/simulation_requirements.py
class ReqPopHasAttr:\n    \"\"\"\n    Checks if population(s) contains the attribute(s) (parameters or variables)\n    \"\"\"\n\n    def __init__(self, pop, attr):\n        \"\"\"\n        Args:\n            pop (str or list of strings):\n                population name(s)\n            attr (str or list of strings):\n                attribute name(s)\n        \"\"\"\n        self.pop_name_list = pop\n        self.attr_name_list = attr\n        ### convert single strings into list\n        if not (isinstance(pop, list)):\n            self.pop_name_list = [pop]\n        if not (isinstance(attr, list)):\n            self.attr_name_list = [attr]\n\n    def run(self):\n        \"\"\"\n        Checks if population(s) contains the attribute(s) (parameters or variables)\n\n        Raises:\n            ValueError: if population(s) does not contain the attribute(s)\n        \"\"\"\n        for attr_name in self.attr_name_list:\n            for pop_name in self.pop_name_list:\n                pop: Population = get_population(pop_name)\n                if not (attr_name in pop.attributes):\n                    raise ValueError(\n                        \"Population \"\n                        + pop_name\n                        + \" does not contain attribute \"\n                        + attr_name\n                        + \"!\\n\"\n                    )\n
"},{"location":"additional/simulation_requirements/#CompNeuroPy.simulation_requirements.ReqPopHasAttr.__init__","title":"__init__(pop, attr)","text":"

Parameters:

Name Type Description Default pop str or list of strings

population name(s)

required attr str or list of strings

attribute name(s)

required Source code in CompNeuroPy/simulation_requirements.py
def __init__(self, pop, attr):\n    \"\"\"\n    Args:\n        pop (str or list of strings):\n            population name(s)\n        attr (str or list of strings):\n            attribute name(s)\n    \"\"\"\n    self.pop_name_list = pop\n    self.attr_name_list = attr\n    ### convert single strings into list\n    if not (isinstance(pop, list)):\n        self.pop_name_list = [pop]\n    if not (isinstance(attr, list)):\n        self.attr_name_list = [attr]\n
"},{"location":"additional/simulation_requirements/#CompNeuroPy.simulation_requirements.ReqPopHasAttr.run","title":"run()","text":"

Checks if population(s) contains the attribute(s) (parameters or variables)

Raises:

Type Description ValueError

if population(s) does not contain the attribute(s)

Source code in CompNeuroPy/simulation_requirements.py
def run(self):\n    \"\"\"\n    Checks if population(s) contains the attribute(s) (parameters or variables)\n\n    Raises:\n        ValueError: if population(s) does not contain the attribute(s)\n    \"\"\"\n    for attr_name in self.attr_name_list:\n        for pop_name in self.pop_name_list:\n            pop: Population = get_population(pop_name)\n            if not (attr_name in pop.attributes):\n                raise ValueError(\n                    \"Population \"\n                    + pop_name\n                    + \" does not contain attribute \"\n                    + attr_name\n                    + \"!\\n\"\n                )\n
"},{"location":"additional/system_functions/","title":"System Functions","text":""},{"location":"additional/system_functions/#CompNeuroPy.system_functions.clear_dir","title":"clear_dir(path)","text":"

Deletes all files and subdirectories in the specified folder.

Parameters:

Name Type Description Default path str

Path to the folder to clear.

required Source code in CompNeuroPy/system_functions.py
def clear_dir(path):\n    \"\"\"\n    Deletes all files and subdirectories in the specified folder.\n\n    Args:\n        path (str):\n            Path to the folder to clear.\n    \"\"\"\n    try:\n        if not os.path.exists(path):\n            print(f\"The folder '{path}' does not exist.\")\n            return\n\n        for filename in os.listdir(path):\n            file_path = os.path.join(path, filename)\n            try:\n                if os.path.isfile(file_path) or os.path.islink(file_path):\n                    os.unlink(file_path)\n                elif os.path.isdir(file_path):\n                    shutil.rmtree(file_path)\n            except Exception:\n                print(traceback.format_exc())\n                print(f\"Failed to delete {file_path}\")\n    except Exception:\n        print(traceback.format_exc())\n        print(f\"Failed to clear {path}\")\n
"},{"location":"additional/system_functions/#CompNeuroPy.system_functions.create_dir","title":"create_dir(path, print_info=False, clear=False)","text":"

Creates a directory.

Parameters:

Name Type Description Default path str

Path to the directory to create.

required print_info bool

Whether to print information about the directory creation. Default: False.

False clear bool

Whether to clear the directory if it already exists. Default: False.

False Source code in CompNeuroPy/system_functions.py
def create_dir(path, print_info=False, clear=False):\n    \"\"\"\n    Creates a directory.\n\n    Args:\n        path (str):\n            Path to the directory to create.\n\n        print_info (bool, optional):\n            Whether to print information about the directory creation. Default: False.\n\n        clear (bool, optional):\n            Whether to clear the directory if it already exists. Default: False.\n    \"\"\"\n    try:\n        if isinstance(path, str):\n            if len(path) > 0:\n                os.makedirs(path)\n        else:\n            print(\"create_dir, ERROR: path is no str\")\n    except Exception:\n        if os.path.isdir(path):\n            if print_info:\n                print(path + \" already exists\")\n            if clear:\n                ### clear folder\n                ### do you really want?\n                answer = input(f\"Do you really want to clear {path} (y/n):\")\n                while answer != \"y\" and answer != \"n\":\n                    print(\"please enter y or n\")\n                    answer = input(f\"Do you really want to clear {path} (y/n):\")\n                ### clear or not depending on answer\n                if answer == \"y\":\n                    clear_dir(path)\n                    if print_info:\n                        print(path + \" already exists and was cleared.\")\n                else:\n                    if print_info:\n                        print(path + \" already exists and was not cleared.\")\n        else:\n            print(traceback.format_exc())\n            print(\"could not create \" + path + \" folder\")\n            quit()\n
"},{"location":"additional/system_functions/#CompNeuroPy.system_functions.save_variables","title":"save_variables(variable_list, name_list, path='./')","text":"

Parameters:

Name Type Description Default variable_list list

variables to save

required name_list list

names of the save files of the variables

required path str or list

save path for all variables, or save path for each variable of the variable_list. Default: \"./\"

'./'

Examples:

import numpy as np\nfrom CompNeuroPy import save_variables, load_variables\n\n### create variables\nvar1 = np.random.rand(10)\nvar2 = np.random.rand(10)\n\n### save variables\nsave_variables([var1, var2], [\"var1_file\", \"var2_file\"], \"my_variables_folder\")\n\n### load variables\nloaded_variables = load_variables([\"var1\", \"var2\"], \"my_variables_folder\")\n\n### use loaded variables\nprint(loaded_variables[\"var1_file\"])\nprint(loaded_variables[\"var2_file\"])\n
Source code in CompNeuroPy/system_functions.py
def save_variables(variable_list: list, name_list: list, path: str | list = \"./\"):\n    \"\"\"\n    Args:\n        variable_list (list):\n            variables to save\n        name_list (list):\n            names of the save files of the variables\n        path (str or list):\n            save path for all variables, or save path for each variable of the\n            variable_list. Default: \"./\"\n\n    Examples:\n        ```python\n        import numpy as np\n        from CompNeuroPy import save_variables, load_variables\n\n        ### create variables\n        var1 = np.random.rand(10)\n        var2 = np.random.rand(10)\n\n        ### save variables\n        save_variables([var1, var2], [\"var1_file\", \"var2_file\"], \"my_variables_folder\")\n\n        ### load variables\n        loaded_variables = load_variables([\"var1\", \"var2\"], \"my_variables_folder\")\n\n        ### use loaded variables\n        print(loaded_variables[\"var1_file\"])\n        print(loaded_variables[\"var2_file\"])\n        ```\n    \"\"\"\n    for idx in range(len(variable_list)):\n        ### set save path\n        if isinstance(path, str):\n            save_path = path\n        else:\n            save_path = path[idx]\n        if save_path.endswith(\"/\"):\n            save_path = save_path[:-1]\n        ### set file name\n        file_name = f\"{name_list[idx]}.pkl\"\n        ### set variable\n        variable = variable_list[idx]\n        ### generate save folder\n        create_dir(save_path)\n        ### Saving a variable to a file\n        with open(f\"{save_path}/{file_name}\", \"wb\") as file:\n            pickle.dump(variable, file)\n
"},{"location":"additional/system_functions/#CompNeuroPy.system_functions.load_variables","title":"load_variables(name_list, path='./')","text":"

Parameters:

Name Type Description Default name_list list

names of the save files of the variables

required path str or list

save path for all variables, or save path for each variable of the variable_list. Default: \"./\"

'./'

Returns:

Name Type Description variable_dict dict

dictionary with the loaded variables, keys are the names of the files, values are the loaded variables

Examples:

import numpy as np\nfrom CompNeuroPy import save_variables, load_variables\n\n### create variables\nvar1 = np.random.rand(10)\nvar2 = np.random.rand(10)\n\n### save variables\nsave_variables([var1, var2], [\"var1_file\", \"var2_file\"], \"my_variables_folder\")\n\n### load variables\nloaded_variables = load_variables([\"var1\", \"var2\"], \"my_variables_folder\")\n\n### use loaded variables\nprint(loaded_variables[\"var1_file\"])\nprint(loaded_variables[\"var2_file\"])\n
Source code in CompNeuroPy/system_functions.py
def load_variables(name_list: list, path: str | list = \"./\"):\n    \"\"\"\n    Args:\n        name_list (list):\n            names of the save files of the variables\n        path (str or list, optional):\n            save path for all variables, or save path for each variable of the\n            variable_list. Default: \"./\"\n\n    Returns:\n        variable_dict (dict):\n            dictionary with the loaded variables, keys are the names of the\n            files, values are the loaded variables\n\n    Examples:\n        ```python\n        import numpy as np\n        from CompNeuroPy import save_variables, load_variables\n\n        ### create variables\n        var1 = np.random.rand(10)\n        var2 = np.random.rand(10)\n\n        ### save variables\n        save_variables([var1, var2], [\"var1_file\", \"var2_file\"], \"my_variables_folder\")\n\n        ### load variables\n        loaded_variables = load_variables([\"var1\", \"var2\"], \"my_variables_folder\")\n\n        ### use loaded variables\n        print(loaded_variables[\"var1_file\"])\n        print(loaded_variables[\"var2_file\"])\n        ```\n    \"\"\"\n    variable_dict = {}\n    for idx in range(len(name_list)):\n        ### set save path\n        if isinstance(path, str):\n            save_path = path\n        else:\n            save_path = path[idx]\n        if save_path.endswith(\"/\"):\n            save_path = save_path[:-1]\n        ### set file name\n        file_name = f\"{name_list[idx]}.pkl\"\n        ### Loading the variable from the file\n        with open(f\"{save_path}/{file_name}\", \"rb\") as file:\n            loaded_variable = pickle.load(file)\n        ### store variable in variable_dict\n        variable_dict[name_list[idx]] = loaded_variable\n\n    return variable_dict\n
"},{"location":"additional/system_functions/#CompNeuroPy.system_functions.timing_decorator","title":"timing_decorator(threshold=0.1)","text":"

Decorator to measure the execution time of a function.

Parameters:

Name Type Description Default threshold float

Threshold in seconds. If the execution time of the function is larger than this threshold, the execution time is printed. Default: 0.1.

0.1 Source code in CompNeuroPy/system_functions.py
def timing_decorator(threshold=0.1):\n    \"\"\"\n    Decorator to measure the execution time of a function.\n\n    Args:\n        threshold (float, optional):\n            Threshold in seconds. If the execution time of the function is\n            larger than this threshold, the execution time is printed. Default: 0.1.\n    \"\"\"\n\n    def decorator(func):\n        @wraps(func)\n        def wrapper(*args, **kwargs):\n            start_time = time()\n            result = func(*args, **kwargs)\n            end_time = time()\n            execution_time = end_time - start_time\n            if execution_time >= threshold:\n                print(f\"{func.__name__} took {execution_time:.4f} seconds\")\n            return result\n\n        return wrapper\n\n    return decorator\n
"},{"location":"built_in/models/","title":"Full Models","text":""},{"location":"built_in/models/#CompNeuroPy.full_models.BGM","title":"CompNeuroPy.full_models.BGM","text":"

Bases: CompNeuroModel

The basal ganglia model based on the model from Goenner et al. (2021).

Attributes:

Name Type Description name str

name of the model

description str

description of the model

model_creation_function function

function which creates the model

compile_folder_name str

name of the folder in which the model is compiled

model_kwargs dict

keyword arguments for model_creation_function

populations list

list of names of all populations of the model

projections list

list of names of all projections of the model

created bool

True if the model is created

compiled bool

True if the model is compiled

attribute_df pandas dataframe

dataframe containing all attributes of the model compartments

params dict

dictionary containing all parameters of the model

name_appendix str

string which is appended to all model compartments and parameters

Source code in CompNeuroPy/full_models/bgm_22/bgm.py
class BGM(CompNeuroModel):\n    \"\"\"\n    The basal ganglia model based on the model from [Goenner et al. (2021)](https://doi.org/10.1111/ejn.15082).\n\n    Attributes:\n        name (str):\n            name of the model\n        description (str):\n            description of the model\n        model_creation_function (function):\n            function which creates the model\n        compile_folder_name (str):\n            name of the folder in which the model is compiled\n        model_kwargs (dict):\n            keyword arguments for model_creation_function\n        populations (list):\n            list of names of all populations of the model\n        projections (list):\n            list of names of all projections of the model\n        created (bool):\n            True if the model is created\n        compiled (bool):\n            True if the model is compiled\n        attribute_df (pandas dataframe):\n            dataframe containing all attributes of the model compartments\n        params (dict):\n            dictionary containing all parameters of the model\n        name_appendix (str):\n            string which is appended to all model compartments and parameters\n    \"\"\"\n\n    @check_types()\n    def __init__(\n        self,\n        name: str = \"BGM_v01_p01\",\n        do_create: bool = True,\n        do_compile: bool = True,\n        compile_folder_name: str | None = None,\n        seed: int | None = None,\n        name_appendix: str = \"\",\n    ):\n        \"\"\"\n        Args:\n            name (str, optional):\n                name of the model, syntax: \"BGM_v<model_version>_p<parameters_version>\"\n                replace <model_version> and <parameters_version> with the versions you\n                want to use, see CompNeuroPy.full_models.BGM_22.parameters for available\n                versions. Default: \"BGM_v01_p01\"\n            do_create (bool, optional):\n                if True, the model is created after initialization. 
Default: True\n            do_compile (bool, optional):\n                if True, the model is compiled after creation. Default: True\n            compile_folder_name (str, optional):\n                name of the folder in which the compiled model is saved. Default: None,\n                i.e. \"annarchy_BGM_v<model_version>\" is used\n            seed (int, optional):\n                the seed for the random number generator used during model creation.\n                Default: None, i.e. random seed is used\n            name_appendix (str, optional):\n                string which is appended to all model compartments and parameters.\n                Allows to create multiple models with the same name and keep names of\n                compartments and parameters unique. Default: \"\"\n        \"\"\"\n        ### check if name is correct, otherwise raise ValueError\n        if not (\n            len(name.split(\"_\")) == 3\n            and name.split(\"_\")[0] == \"BGM\"\n            and name.split(\"_\")[1][0] == \"v\"\n            and name.split(\"_\")[2][0] == \"p\"\n        ):\n            raise ValueError(\n                \"name has to be of the form 'BGM_v<model_version>_p<parameters_version>'\"\n            )\n\n        ### set attributes (except the ones which are set in the super().__init__())\n        self.name_appendix = name_appendix\n        self.seed = seed\n        if len(self.name_appendix) > 0:\n            self._name_appendix_to_add = \":\" + name_appendix\n        else:\n            self._name_appendix_to_add = \"\"\n\n        ### set model_version_name\n        self._model_version_name = \"_\".join(name.split(\"_\")[:2])\n\n        ### update name with name_appendix\n        name = name + self._name_appendix_to_add\n\n        ### init default compile_folder_name\n        if compile_folder_name == None:\n            compile_folder_name = \"annarchy_\" + self._model_version_name\n\n        ### set description\n        description = (\n           
 \"The basal ganglia model based on the model from Goenner et al. (2021)\"\n        )\n\n        ### init random number generator\n        self._rng = np.random.default_rng(seed)\n\n        ### get model parameters before init, ignore name_appendix\n        self.params = self._get_params(name.split(\":\")[0])\n\n        ### init\n        super().__init__(\n            model_creation_function=self._model_creation_function,\n            name=name,\n            description=description,\n            do_create=do_create,\n            do_compile=do_compile,\n            compile_folder_name=compile_folder_name,\n        )\n\n    def _add_name_appendix(self):\n        \"\"\"\n        Rename all model compartments, keys (except general) in params dict and\n        names in attribute_df by appending the name_appendix to the original name.\n        \"\"\"\n\n        ### update the attribute_df of the model object (it still contains the original\n        ### names of the model creation)\n        self.attribute_df[\"compartment_name\"] = (\n            self.attribute_df[\"compartment_name\"] + self._name_appendix_to_add\n        )\n        ### rename populations and projections\n        populations_new = []\n        for pop_name in self.populations:\n            populations_new.append(pop_name + self._name_appendix_to_add)\n            get_population(pop_name).name = pop_name + self._name_appendix_to_add\n        self.populations = populations_new\n        projections_new = []\n        for proj_name in self.projections:\n            projections_new.append(proj_name + self._name_appendix_to_add)\n            get_projection(proj_name).name = proj_name + self._name_appendix_to_add\n        self.projections = projections_new\n        ### rename parameter keys except general\n        params_new = {}\n        for key, param_val in self.params.items():\n            param_object = key.split(\".\")[0]\n            param_name = key.split(\".\")[1]\n\n            if param_object == 
\"general\":\n                params_new[key] = param_val\n                continue\n\n            param_object = param_object + self._name_appendix_to_add\n            key_new = param_object + \".\" + param_name\n            params_new[key_new] = param_val\n        self.params = params_new\n\n    def _model_creation_function(self):\n        \"\"\"\n        Creates the model using the model_creation_function from the\n        model_creation_functions.py file. The function is defined by the\n        model_version_name.\n        \"\"\"\n        model_creation_function = eval(\n            \"importlib.import_module('CompNeuroPy.full_models.bgm_22.model_creation_functions').\"\n            + self._model_version_name\n        )\n        model_creation_function(self)\n\n    def create(self, do_compile=True, compile_folder_name=None):\n        \"\"\"\n        Creates the model and optionally compiles it directly.\n\n        Args:\n            do_compile (bool, optional):\n                If True the model is compiled directly. Default: True.\n            compile_folder_name (str, optional):\n                Name of the folder in which the model is compiled. 
Default: value from\n                initialization.\n        \"\"\"\n        ### create the model, but do not compile to set parameters before compilation\n        super().create(do_compile=False, compile_folder_name=compile_folder_name)\n\n        ### update names of compartments and parameters\n        self._add_name_appendix()\n\n        ### set parameters and connectivity of projections\n        ### for each projection the connectivity has to be defined in the params\n        self._set_params()\n        self._set_noise_values()\n        self._set_connections()\n\n        ### compile the model, after setting all parameters (included in compile state)\n        if do_compile:\n            self.compile(compile_folder_name)\n\n    def _set_params(self):\n        \"\"\"\n        sets params of all populations\n        \"\"\"\n\n        ### loop over all params\n        for key, param_val in self.params.items():\n            ### split key in param object and param name\n            param_object = key.split(\".\")[0]\n            param_name = key.split(\".\")[1]\n\n            ### if param is a noise param --> skip (separate function)\n            if param_name.split(\"_\")[-1] == \"noise\":\n                continue\n\n            ### if param name ends with init --> actual param_name (in pop) is without init\n            if param_name.split(\"_\")[-1] == \"init\":\n                param_name = \"_\".join(param_name.split(\"_\")[:-1])\n\n            ### if param_object is a pop in network\n            if param_object in self.populations:\n                ### and the param_name is an attribute of the pop --> set param of pop\n                if param_name in vars(get_population(param_object))[\"attributes\"]:\n                    ### if parameter values are given as distribution --> get numpy array\n                    if isinstance(param_val, str):\n                        if (\n                            \"Uniform\" in param_val\n                            or 
\"DiscreteUniform\" in param_val\n                            or \"Normal\" in param_val\n                            or \"LogNormal\" in param_val\n                            or \"Exponential\" in param_val\n                            or \"Gamma\" in param_val\n                        ):\n                            distribution = eval(param_val)\n                            param_val = distribution.get_values(\n                                shape=get_population(param_object).geometry\n                            )\n                    self.set_param(\n                        compartment=param_object,\n                        parameter_name=param_name,\n                        parameter_value=param_val,\n                    )\n                    ### if parameter base_mean --> also set I_base\n                    if param_name == \"base_mean\":\n                        self.set_param(\n                            compartment=param_object,\n                            parameter_name=\"I_base\",\n                            parameter_value=param_val,\n                        )\n\n    def _set_noise_values(self):\n        \"\"\"\n        sets noise params of all populations\n        \"\"\"\n\n        ### loop over all params\n        for key, param_val in self.params.items():\n            ### split key in param object and param name\n            param_object = key.split(\".\")[0]\n            param_name = key.split(\".\")[1]\n\n            ### if param_object is a pop in network and param_name ends with noise --> set noise param of pop\n            if (\n                param_object in self.populations\n                and param_name.split(\"_\")[-1] == \"noise\"\n            ):\n                if param_name == \"mean_rate_noise\":\n                    ### for mean and sd the actual parameter of the pop has to be calculated\n                    mean = param_val\n                    try:\n                        ### noise values defined by mean and sd\n           
             sd = self.params[param_object + \".rate_sd_noise\"]\n                    except:\n                        ### if only mean is available, only set mean\n                        sd = 0\n                    if sd != 0:\n                        self.set_param(\n                            compartment=param_object,\n                            parameter_name=\"rates_noise\",\n                            parameter_value=self._rng.normal(\n                                mean, sd, get_population(param_object).size\n                            ),\n                        )\n                    else:\n                        self.set_param(\n                            compartment=param_object,\n                            parameter_name=\"rates_noise\",\n                            parameter_value=mean,\n                        )\n                elif param_name in vars(get_population(param_object))[\"attributes\"]:\n                    ### noise parameters which are actual attributes of the pop are simply set\n                    self.set_param(\n                        compartment=param_object,\n                        parameter_name=param_name,\n                        parameter_value=param_val,\n                    )\n                else:\n                    continue\n\n    def _set_connections(self):\n        \"\"\"\n        sets the connectivity and parameters of all projections\n        \"\"\"\n\n        ### dict for each projection, which params were already set during connectivity definition\n        already_set_params = {}\n\n        ### set connectivity\n        ### loop over all projections\n        set_con_failed = False\n        error_message_list = []\n        for proj_name in self.projections:\n            ### get the type of connectivity for projection\n            try:\n                connectivity = self.params[proj_name + \".connectivity\"]\n            except:\n                print(\n                    \"\\nERROR: missing connectivity 
parameter for\",\n                    proj_name,\n                    \"\\n\",\n                    proj_name + \".connectivity\",\n                    \"needed!\\n\",\n                    \"parameters id:\",\n                    self.params[\"general.id\"],\n                    \"\\n\",\n                )\n                quit()\n\n            possible_con_list = [\n                \"connect_fixed_number_pre\",\n                \"connect_all_to_all\",\n                \"connect_one_to_one\",\n                \"connect_fixed_probability\",\n            ]\n            if connectivity in possible_con_list:\n                try:\n                    # get all possible parameters of the connectivity function\n                    con_func = eval(f\"get_projection(proj_name).{connectivity}\")\n                    possible_con_params_list = list(\n                        inspect.signature(con_func).parameters.keys()\n                    )\n                    # check if paramters are given in the params dict and create the kwargs for the connectivity function\n                    con_kwargs = {}\n                    for con_param_key in possible_con_params_list:\n                        if proj_name + \".\" + con_param_key in self.params:\n                            con_kwargs[con_param_key] = eval(\n                                str(self.params[proj_name + \".\" + con_param_key])\n                            )\n                    # call the connectivity function with the obtained kwargs\n                    con_func(**con_kwargs)\n                    # store which parameters have been set\n                    already_set_params[proj_name] = list(con_kwargs.keys())\n                except:\n                    exc_type, exc_value, exc_traceback = sys.exc_info()\n                    error_message = traceback.format_exception_only(exc_type, exc_value)\n                    error_message_list.append([f\"ERROR: {proj_name}\"] + error_message)\n                    
set_con_failed = True\n            else:\n                print(\n                    \"\\nERROR: wrong connectivity parameter for\",\n                    proj_name + \".connectivity!\\n\",\n                    \"parameters id:\",\n                    self.params[\"general.id\"],\n                    \"possible:\",\n                    possible_con_list,\n                    \"\\n\",\n                )\n                quit()\n        if set_con_failed:\n            print(\"\\n\")\n            for error_message in error_message_list:\n                print(\" \".join(error_message))\n            raise TypeError(\"Setting connectivities failed\")\n\n        ### set parameters\n        ### loop over all params\n        for key, param_val in self.params.items():\n            ### split key in param object and param name\n            param_object = key.split(\".\")[0]\n            param_name = key.split(\".\")[1]\n\n            if param_object == \"general\":\n                continue\n\n            ### if param_object is proj in network and param not already used and param is an attribute of proj --> set param of proj\n            if (\n                param_object in self.projections\n                and not (param_name in already_set_params[param_object])\n                and param_name in vars(get_projection(param_object))[\"attributes\"]\n            ):\n                self.set_param(\n                    compartment=param_object,\n                    parameter_name=param_name,\n                    parameter_value=param_val,\n                )\n\n    def _get_params(self, name):\n        \"\"\"\n        read all parameters for specified model name\n\n        Args:\n            name (str):\n                name of the model, specifies which column in the csv file is used\n        \"\"\"\n\n        csvPath = os.path.dirname(os.path.realpath(__file__)) + \"/parameters.csv\"\n        csvfile = open(csvPath, newline=\"\")\n\n        params = {}\n        reader = 
csv.reader(csvfile, delimiter=\",\")\n        fileRows = []\n        idx = -1\n        ### check if name is in the .csv file\n        for row in reader:\n            if row[0] == \"\":\n                continue\n            fileRows.append(row)\n            if \"general.id\" == row[0] and True in [\n                name == row[i] for i in range(1, len(row))\n            ]:\n                idx = [name == row[i] for i in range(1, len(row))].index(True) + 1\n            elif \"general.id\" == row[0]:\n                print(\n                    \"No Parameters available for given model name \"\n                    + name\n                    + \"! (file \"\n                    + csvPath\n                    + \")\"\n                )\n                quit()\n        if idx == -1:\n            print(\"No general.id in parameter csv file!\")\n            quit()\n        ### read the column corresponding to name\n        for row in fileRows:\n            if \"###\" in row[0]:\n                continue\n            if row[idx] == \"\":\n                continue\n\n            value = row[idx]\n            try:\n                ### if float(value) works value is a number --> check if it is int\n                if float(value) - int(float(value)) == 0:\n                    params[row[0]] = int(float(value))\n                else:\n                    params[row[0]] = float(value)\n            except:\n                ### value is a string\n                if value[0] == \"$\" and value[-1] == \"$\":\n                    ### value is a formula\n                    params[row[0]] = float(eval(value[1:-1]))\n                else:\n                    ### value is some other string\n                    params[row[0]] = value\n        csvfile.close()\n\n        return params\n\n    def _needed_imports(self):\n        for import_val in [\n            Uniform,\n            DiscreteUniform,\n            Normal,\n            LogNormal,\n            Exponential,\n            
Gamma,\n            importlib,\n        ]:\n            print(import_val)\n
"},{"location":"built_in/models/#CompNeuroPy.full_models.BGM.__init__","title":"__init__(name='BGM_v01_p01', do_create=True, do_compile=True, compile_folder_name=None, seed=None, name_appendix='')","text":"

Parameters:

Name Type Description Default name str

name of the model, syntax: \"BGM_v_p\" replace and with the versions you want to use, see CompNeuroPy.full_models.BGM_22.parameters for available versions. Default: \"BGM_v01_p01\" 'BGM_v01_p01' do_create bool

if True, the model is created after initialization. Default: True

True do_compile bool

if True, the model is compiled after creation. Default: True

True compile_folder_name str

name of the folder in which the compiled model is saved. Default: None, i.e. \"annarchy_BGM_v\" is used None seed int

the seed for the random number generator used during model creation. Default: None, i.e. random seed is used

None name_appendix str

string which is appended to all model compartments and parameters. Allows to create multiple models with the same name and keep names of compartments and parameters unique. Default: \"\"

'' Source code in CompNeuroPy/full_models/bgm_22/bgm.py
@check_types()\ndef __init__(\n    self,\n    name: str = \"BGM_v01_p01\",\n    do_create: bool = True,\n    do_compile: bool = True,\n    compile_folder_name: str | None = None,\n    seed: int | None = None,\n    name_appendix: str = \"\",\n):\n    \"\"\"\n    Args:\n        name (str, optional):\n            name of the model, syntax: \"BGM_v<model_version>_p<parameters_version>\"\n            replace <model_version> and <parameters_version> with the versions you\n            want to use, see CompNeuroPy.full_models.BGM_22.parameters for available\n            versions. Default: \"BGM_v01_p01\"\n        do_create (bool, optional):\n            if True, the model is created after initialization. Default: True\n        do_compile (bool, optional):\n            if True, the model is compiled after creation. Default: True\n        compile_folder_name (str, optional):\n            name of the folder in which the compiled model is saved. Default: None,\n            i.e. \"annarchy_BGM_v<model_version>\" is used\n        seed (int, optional):\n            the seed for the random number generator used during model creation.\n            Default: None, i.e. random seed is used\n        name_appendix (str, optional):\n            string which is appended to all model compartments and parameters.\n            Allows to create multiple models with the same name and keep names of\n            compartments and parameters unique. 
Default: \"\"\n    \"\"\"\n    ### check if name is correct, otherwise raise ValueError\n    if not (\n        len(name.split(\"_\")) == 3\n        and name.split(\"_\")[0] == \"BGM\"\n        and name.split(\"_\")[1][0] == \"v\"\n        and name.split(\"_\")[2][0] == \"p\"\n    ):\n        raise ValueError(\n            \"name has to be of the form 'BGM_v<model_version>_p<parameters_version>'\"\n        )\n\n    ### set attributes (except the ones which are set in the super().__init__())\n    self.name_appendix = name_appendix\n    self.seed = seed\n    if len(self.name_appendix) > 0:\n        self._name_appendix_to_add = \":\" + name_appendix\n    else:\n        self._name_appendix_to_add = \"\"\n\n    ### set model_version_name\n    self._model_version_name = \"_\".join(name.split(\"_\")[:2])\n\n    ### update name with name_appendix\n    name = name + self._name_appendix_to_add\n\n    ### init default compile_folder_name\n    if compile_folder_name == None:\n        compile_folder_name = \"annarchy_\" + self._model_version_name\n\n    ### set description\n    description = (\n        \"The basal ganglia model based on the model from Goenner et al. (2021)\"\n    )\n\n    ### init random number generator\n    self._rng = np.random.default_rng(seed)\n\n    ### get model parameters before init, ignore name_appendix\n    self.params = self._get_params(name.split(\":\")[0])\n\n    ### init\n    super().__init__(\n        model_creation_function=self._model_creation_function,\n        name=name,\n        description=description,\n        do_create=do_create,\n        do_compile=do_compile,\n        compile_folder_name=compile_folder_name,\n    )\n
"},{"location":"built_in/models/#CompNeuroPy.full_models.BGM.create","title":"create(do_compile=True, compile_folder_name=None)","text":"

Creates the model and optionally compiles it directly.

Parameters:

Name Type Description Default do_compile bool

If True the model is compiled directly. Default: True.

True compile_folder_name str

Name of the folder in which the model is compiled. Default: value from initialization.

None Source code in CompNeuroPy/full_models/bgm_22/bgm.py
def create(self, do_compile=True, compile_folder_name=None):\n    \"\"\"\n    Creates the model and optionally compiles it directly.\n\n    Args:\n        do_compile (bool, optional):\n            If True the model is compiled directly. Default: True.\n        compile_folder_name (str, optional):\n            Name of the folder in which the model is compiled. Default: value from\n            initialization.\n    \"\"\"\n    ### create the model, but do not compile to set parameters before compilation\n    super().create(do_compile=False, compile_folder_name=compile_folder_name)\n\n    ### update names of compartments and parameters\n    self._add_name_appendix()\n\n    ### set parameters and connectivity of projections\n    ### for each projection the connectivity has to be defined in the params\n    self._set_params()\n    self._set_noise_values()\n    self._set_connections()\n\n    ### compile the model, after setting all parameters (included in compile state)\n    if do_compile:\n        self.compile(compile_folder_name)\n
"},{"location":"built_in/models/#CompNeuroPy.full_models.HHmodelBischop","title":"CompNeuroPy.full_models.HHmodelBischop","text":"

Bases: CompNeuroModel

Generates a single population of the Hodgkin & Huxley neuron model of Bischop et al. (2012) and optionally creates/compiles the network.

Source code in CompNeuroPy/full_models/hodgkin_huxley_single_pop.py
class HHmodelBischop(CompNeuroModel):\n    \"\"\"\n    Generates a single population of the Hodgkin & Huxley neuron model of\n    [Bischop et al. (2012)](https://doi.org/10.3389/fnmol.2012.00078) and optionally\n    creates/compiles the network.\n    \"\"\"\n\n    def __init__(\n        self,\n        pop_size=1,\n        conductance_based_synapses=False,\n        name=\"single_HH_Bischop\",\n        do_create=True,\n        do_compile=True,\n        compile_folder_name=\"annarchy_single_HH_Bischop\",\n    ):\n        \"\"\"\n        Args:\n            pop_size (int, optional):\n                Number of neurons in the population. Default: 1.\n            conductance_based_synapses (bool, optional):\n                Whether the equations contain conductance based synapses for AMPA and\n                GABA. Default: False.\n            name (str, optional):\n                Name of the model. Default: \"single_HH_Bischop\".\n            do_create (bool, optional):\n                Whether to create the model. Default: True.\n            do_compile (bool, optional):\n                Whether to compile the model. Default: True.\n            compile_folder_name (str, optional):\n                Name of the folder for the compiled model.\n                Default: \"annarchy_single_HH_Bischop\".\n        \"\"\"\n        ### set attributes\n        self.pop_size = pop_size\n        self.conductance_based_synapses = conductance_based_synapses\n        # define description\n        description = \"\"\"\n            One population \"HH_Bischop\" with a single neuron of the Hodgkin\n            & Huxley neuron model of Bischop et al. 
(2012).\n        \"\"\"\n        # initialize CompNeuroModel\n        super().__init__(\n            model_creation_function=self._bischop_2012_creation_function,\n            name=name,\n            description=description,\n            do_create=do_create,\n            do_compile=do_compile,\n            compile_folder_name=compile_folder_name,\n        )\n\n    def _bischop_2012_creation_function(self):\n        if self.conductance_based_synapses:\n            Population(self.pop_size, neuron=HHneuronBischopSyn, name=\"HH_Bischop_syn\")\n        else:\n            Population(self.pop_size, neuron=HHneuronBischop, name=\"HH_Bischop\")\n
"},{"location":"built_in/models/#CompNeuroPy.full_models.HHmodelBischop.__init__","title":"__init__(pop_size=1, conductance_based_synapses=False, name='single_HH_Bischop', do_create=True, do_compile=True, compile_folder_name='annarchy_single_HH_Bischop')","text":"

Parameters:

Name Type Description Default pop_size int

Number of neurons in the population. Default: 1.

1 conductance_based_synapses bool

Whether the equations contain conductance based synapses for AMPA and GABA. Default: False.

False name str

Name of the model. Default: \"single_HH_Bischop\".

'single_HH_Bischop' do_create bool

Whether to create the model. Default: True.

True do_compile bool

Whether to compile the model. Default: True.

True compile_folder_name str

Name of the folder for the compiled model. Default: \"annarchy_single_HH_Bischop\".

'annarchy_single_HH_Bischop' Source code in CompNeuroPy/full_models/hodgkin_huxley_single_pop.py
def __init__(\n    self,\n    pop_size=1,\n    conductance_based_synapses=False,\n    name=\"single_HH_Bischop\",\n    do_create=True,\n    do_compile=True,\n    compile_folder_name=\"annarchy_single_HH_Bischop\",\n):\n    \"\"\"\n    Args:\n        pop_size (int, optional):\n            Number of neurons in the population. Default: 1.\n        conductance_based_synapses (bool, optional):\n            Whether the equations contain conductance based synapses for AMPA and\n            GABA. Default: False.\n        name (str, optional):\n            Name of the model. Default: \"single_HH_Bischop\".\n        do_create (bool, optional):\n            Whether to create the model. Default: True.\n        do_compile (bool, optional):\n            Whether to compile the model. Default: True.\n        compile_folder_name (str, optional):\n            Name of the folder for the compiled model.\n            Default: \"annarchy_single_HH_Bischop\".\n    \"\"\"\n    ### set attributes\n    self.pop_size = pop_size\n    self.conductance_based_synapses = conductance_based_synapses\n    # define description\n    description = \"\"\"\n        One population \"HH_Bischop\" with a single neuron of the Hodgkin\n        & Huxley neuron model of Bischop et al. (2012).\n    \"\"\"\n    # initialize CompNeuroModel\n    super().__init__(\n        model_creation_function=self._bischop_2012_creation_function,\n        name=name,\n        description=description,\n        do_create=do_create,\n        do_compile=do_compile,\n        compile_folder_name=compile_folder_name,\n    )\n
"},{"location":"built_in/models/#CompNeuroPy.full_models.HHmodelCorbit","title":"CompNeuroPy.full_models.HHmodelCorbit","text":"

Bases: CompNeuroModel

Generates a single population of the Hodgkin & Huxley neuron model of Corbit et al. (2016) and optionally creates/compiles the network.

Source code in CompNeuroPy/full_models/hodgkin_huxley_single_pop.py
class HHmodelCorbit(CompNeuroModel):\n    \"\"\"\n    Generates a single population of the Hodgkin & Huxley neuron model of\n    [Corbit et al. (2016)](https://doi.org/10.1523/JNEUROSCI.0339-16.2016) and\n    optionally creates/compiles the network.\n    \"\"\"\n\n    def __init__(\n        self,\n        pop_size=1,\n        conductance_based_synapses=False,\n        name=\"single_HH_Corbit\",\n        do_create=True,\n        do_compile=True,\n        compile_folder_name=\"annarchy_single_HH_Corbit\",\n    ):\n        \"\"\"\n        Args:\n            pop_size (int, optional):\n                Number of neurons in the population. Default: 1.\n            conductance_based_synapses (bool, optional):\n                Whether the equations contain conductance based synapses for AMPA and\n                GABA. Default: False.\n            name (str, optional):\n                Name of the model. Default: \"single_HH_Corbit\".\n            do_create (bool, optional):\n                Whether to create the model. Default: True.\n            do_compile (bool, optional):\n                Whether to compile the model. Default: True.\n            compile_folder_name (str, optional):\n                Name of the folder for the compiled model.\n                Default: \"annarchy_single_HH_Corbit\".\n        \"\"\"\n        ### set attributes\n        self.pop_size = pop_size\n        self.conductance_based_synapses = conductance_based_synapses\n        # define description\n        description = \"\"\"\n            One population \"HH_Bischop\" with a single neuron of the Hodgkin\n            & Huxley neuron model of Bischop et al. 
(2012).\n        \"\"\"\n        # initialize CompNeuroModel\n        super().__init__(\n            model_creation_function=self._model_creation_function,\n            name=name,\n            description=description,\n            do_create=do_create,\n            do_compile=do_compile,\n            compile_folder_name=compile_folder_name,\n        )\n\n    def _model_creation_function(self):\n        if self.conductance_based_synapses:\n            Population(self.pop_size, neuron=HHneuronCorbitSyn, name=\"HH_Corbit_syn\")\n        else:\n            Population(self.pop_size, neuron=HHneuronCorbit, name=\"HH_Corbit\")\n
"},{"location":"built_in/models/#CompNeuroPy.full_models.HHmodelCorbit.__init__","title":"__init__(pop_size=1, conductance_based_synapses=False, name='single_HH_Corbit', do_create=True, do_compile=True, compile_folder_name='annarchy_single_HH_Corbit')","text":"

Parameters:

Name Type Description Default pop_size int

Number of neurons in the population. Default: 1.

1 conductance_based_synapses bool

Whether the equations contain conductance based synapses for AMPA and GABA. Default: False.

False name str

Name of the model. Default: \"single_HH_Corbit\".

'single_HH_Corbit' do_create bool

Whether to create the model. Default: True.

True do_compile bool

Whether to compile the model. Default: True.

True compile_folder_name str

Name of the folder for the compiled model. Default: \"annarchy_single_HH_Corbit\".

'annarchy_single_HH_Corbit' Source code in CompNeuroPy/full_models/hodgkin_huxley_single_pop.py
def __init__(\n    self,\n    pop_size=1,\n    conductance_based_synapses=False,\n    name=\"single_HH_Corbit\",\n    do_create=True,\n    do_compile=True,\n    compile_folder_name=\"annarchy_single_HH_Corbit\",\n):\n    \"\"\"\n    Args:\n        pop_size (int, optional):\n            Number of neurons in the population. Default: 1.\n        conductance_based_synapses (bool, optional):\n            Whether the equations contain conductance based synapses for AMPA and\n            GABA. Default: False.\n        name (str, optional):\n            Name of the model. Default: \"single_HH_Corbit\".\n        do_create (bool, optional):\n            Whether to create the model. Default: True.\n        do_compile (bool, optional):\n            Whether to compile the model. Default: True.\n        compile_folder_name (str, optional):\n            Name of the folder for the compiled model.\n            Default: \"annarchy_single_HH_Corbit\".\n    \"\"\"\n    ### set attributes\n    self.pop_size = pop_size\n    self.conductance_based_synapses = conductance_based_synapses\n    # define description\n    description = \"\"\"\n        One population \"HH_Bischop\" with a single neuron of the Hodgkin\n        & Huxley neuron model of Bischop et al. (2012).\n    \"\"\"\n    # initialize CompNeuroModel\n    super().__init__(\n        model_creation_function=self._model_creation_function,\n        name=name,\n        description=description,\n        do_create=do_create,\n        do_compile=do_compile,\n        compile_folder_name=compile_folder_name,\n    )\n
"},{"location":"built_in/neuron_models/","title":"Neuron Models","text":""},{"location":"built_in/neuron_models/#artificial-neurons","title":"Artificial Neurons","text":""},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.artificial_nm.IntegratorNeuron","title":"IntegratorNeuron","text":"

Bases: Neuron

TEMPLATE

Integrator Neuron for stop_condition in spiking models.

The variable g_ampa increases for incoming spikes (target ampa) and decreases exponentially with time constant tau. If g_ampa reaches a threshold, the neuron's variable decision, which is by default -1, changes to the neuron_id. This can be used to cause the stop_condition of ANNarchy's simulate_until() function (stop_condition=\"decision>=0 : any\"). In case of multiple integrator neurons, the neuron_id can be used to identify the neuron that reached the threshold.

Warning

You have to define the variable neuron_id for each neuron in the Integrator population.

Parameters:

Name Type Description Default tau float

Time constant in ms of the neuron. Default: 1.

1 threshold float

Threshold for the decision g_ampa has to reach. Default: 1.

1

Examples:

from ANNarchy import Population, simulate_until\nfrom CompNeuroPy.neuron_models import IntegratorNeuron\n\n# Create a population of 10 integrator neurons\nintegrator_neurons = Population(\n    geometry=10,\n    neuron=IntegratorNeuron(tau=1, threshold=1),\n    stop_condition=\"decision>=0 : any\",\n    name=\"integrator_neurons\",)\n\n# set the neuron_id for each neuron\nintegrator_neurons.neuron_id = range(10)\n\n# simulate until one neuron reaches the threshold\nsimulate_until(max_duration=1000, population=integrator_neurons)\n\n# check if simulation stopped due to stop_condition and which neuron reached the\n# threshold\nif (integrator_neurons.decision >= 0).any():\n    neurons_reached_thresh = integrator_neurons.neuron_id[\n        integrator_neurons.decision >= 0\n    ]\n    print(f\"Neuron(s) {neurons_reached_thresh} reached threshold.\")\nelse:\n    print(\"No neuron reached threshold.\")\n
Variables to record
  • g_ampa
  • decision
  • r
Source code in CompNeuroPy/neuron_models/final_models/artificial_nm.py
class IntegratorNeuron(Neuron):\n    \"\"\"\n    TEMPLATE\n\n    Integrator Neuron for stop_condition in spiking models.\n\n    The variable g_ampa increases for incoming spikes (target ampa) and decreases\n    exponentially with time constant tau. If g_ampa reaches a threshold, the neuron's\n    variable decision, which is by default -1, changes to the neuron_id. This can be\n    used to cause the stop_condition of ANNarchy's simulate_until() function\n    (stop_codnition=\"decision>=0 : any\"). In case of multiple integrator neurons,\n    the neuron_id can be used to identify the neuron that reached the threshold.\n\n    !!! warning\n        You have to define the variable neuron_id for each neuron in the Integrator\n        population.\n\n    Parameters:\n        tau (float, optional):\n            Time constant in ms of the neuron. Default: 1.\n        threshold (float, optional):\n            Threshold for the decision g_ampa has to reach. Default: 1.\n\n    Examples:\n        ```python\n        from ANNarchy import Population, simulate_until\n        from CompNeuroPy.neuron_models import Integrator\n\n        # Create a population of 10 integrator neurons\n        integrator_neurons = Population(\n            geometry=10,\n            neuron=IntegratorNeuron(tau=1, threshold=1),\n            stop_condition=\"decision>=0 : any\",\n            name=\"integrator_neurons\",)\n\n        # set the neuron_id for each neuron\n        integrator_neurons.neuron_id = range(10)\n\n        # simulate until one neuron reaches the threshold\n        simulate_until(max_duration=1000, population=integrator_neurons)\n\n        # check if simulation stop due to stop_codnition and which neuron reached the\n        # threshold\n        if (integrator_neurons.decision >= 0).any():\n            neurons_reached_thresh = integrator_neurons.neuron_id[\n                integrator_neurons.decision >= 0\n            ]\n            print(f\"Neuron(s) {neurons_reached_thresh} reached 
threshold.\")\n        else:\n            print(\"No neuron reached threshold.\")\n        ```\n\n    Variables to record:\n        - g_ampa\n        - decision\n        - r\n    \"\"\"\n\n    # For reporting\n    _instantiated = []\n\n    def __init__(self, tau: float = 1, threshold: float = 1):\n        # Create the arguments\n        parameters = f\"\"\"\n            tau = {tau} : population\n            threshold = {threshold} : population\n            neuron_id = 0\n        \"\"\"\n\n        super().__init__(\n            parameters=parameters,\n            equations=\"\"\"\n                dg_ampa/dt = - g_ampa / tau\n                ddecision/dt = 0 : init = -1\n            \"\"\",\n            spike=\"\"\"\n                g_ampa >= threshold\n            \"\"\",\n            reset=\"\"\"\n                decision = neuron_id\n            \"\"\",\n            name=\"integrator_neuron\",\n            description=\"\"\"\n                Integrator Neuron, which integrates incoming spikes with value g_ampa\n                and emits a spike when reaching a threshold. After spike decision\n                changes, which can be used as for stop condition\"\"\",\n        )\n\n        # For reporting\n        self._instantiated.append(True)\n
"},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.artificial_nm.IntegratorNeuronSimple","title":"IntegratorNeuronSimple","text":"

Bases: Neuron

TEMPLATE

Integrator Neuron for stop_condition in spiking models.

The variable g_ampa increases for incoming spikes (target ampa) and decreases exponentially with time constant tau. You can check g_ampa and use it for the stop_condition of ANNarchy's simulate_until() function (stop_condition=\"g_ampa>=some_value : any\"). In case of multiple integrator neurons, the neuron_id can be used to identify the neuron that reached the threshold.

Warning

You have to define the variable neuron_id for each neuron in the Integrator population.

Parameters:

Name Type Description Default tau float

Time constant in ms of the neuron. Default: 1.

1

Examples:

from ANNarchy import Population, simulate_until\nfrom CompNeuroPy.neuron_models import IntegratorNeuronSimple\n\n# Create a population of 10 integrator neurons\nintegrator_neurons = Population(\n    geometry=10,\n    neuron=IntegratorNeuronSimple(tau=1),\n    stop_condition=\"g_ampa>=5 : any\",\n    name=\"integrator_neurons\",)\n\n# set the neuron_id for each neuron\nintegrator_neurons.neuron_id = range(10)\n\n# simulate until one neuron reaches the threshold\nsimulate_until(max_duration=1000, population=integrator_neurons)\n\n# check if simulation stopped due to stop_condition and which neuron reached the\n# threshold\nif (integrator_neurons.g_ampa >= 5).any():\n    neurons_reached_thresh = integrator_neurons.neuron_id[\n        integrator_neurons.g_ampa >= 5\n    ]\n    print(f\"Neuron(s) {neurons_reached_thresh} reached threshold.\")\nelse:\n    print(\"No neuron reached threshold.\")\n
Variables to record
  • g_ampa
  • r
Source code in CompNeuroPy/neuron_models/final_models/artificial_nm.py
class IntegratorNeuronSimple(Neuron):\n    \"\"\"\n    TEMPLATE\n\n    Integrator Neuron for stop_condition in spiking models.\n\n    The variable g_ampa increases for incoming spikes (target ampa) and decreases\n    exponentially with time constant tau. You can check g_ampa and use it for the\n    stop_condition of ANNarchy's simulate_until() function\n    (stop_codnition=\"g_ampa>=some_value : any\"). In case of multiple integrator neurons,\n    the neuron_id can be used to identify the neuron that reached the threshold.\n\n    !!! warning\n        You have to define the variable neuron_id for each neuron in the Integrator\n        population.\n\n    Parameters:\n        tau (float, optional):\n            Time constant in ms of the neuron. Default: 1.\n\n    Examples:\n        ```python\n        from ANNarchy import Population, simulate_until\n        from CompNeuroPy.neuron_models import Integrator\n\n        # Create a population of 10 integrator neurons\n        integrator_neurons = Population(\n            geometry=10,\n            neuron=IntegratorNeuronSimple(tau=1),\n            stop_condition=\"g_ampa>=5 : any\",\n            name=\"integrator_neurons\",)\n\n        # set the neuron_id for each neuron\n        integrator_neurons.neuron_id = range(10)\n\n        # simulate until one neuron reaches the threshold\n        simulate_until(max_duration=1000, population=integrator_neurons)\n\n        # check if simulation stop due to stop_codnition and which neuron reached the\n        # threshold\n        if (integrator_neurons.g_ampa >= 5).any():\n            neurons_reached_thresh = integrator_neurons.neuron_id[\n                integrator_neurons.g_ampa >= 5\n            ]\n            print(f\"Neuron(s) {neurons_reached_thresh} reached threshold.\")\n        else:\n            print(\"No neuron reached threshold.\")\n        ```\n\n    Variables to record:\n        - g_ampa\n        - r\n    \"\"\"\n\n    # For reporting\n    _instantiated = []\n\n    def 
__init__(self, tau: float = 1):\n        # Create the arguments\n        parameters = f\"\"\"\n            tau = {tau} : population\n            neuron_id = 0\n        \"\"\"\n\n        super().__init__(\n            parameters=parameters,\n            equations=\"\"\"\n                dg_ampa/dt = - g_ampa / tau\n                r = 0\n            \"\"\",\n            name=\"integrator_neuron_simple\",\n            description=\"\"\"\n                Integrator Neuron, which integrates incoming spikes with value g_ampa,\n                which can be used as a stop condition\n            \"\"\",\n        )\n\n        # For reporting\n        self._instantiated.append(True)\n
"},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.artificial_nm.PoissonNeuron","title":"PoissonNeuron","text":"

Bases: Neuron

TEMPLATE

Poisson neuron whose rate can be specified and is reached instantaneously. The neuron emits spikes following a Poisson distribution; the average firing rate is given by the parameter rates.

Parameters:

Name Type Description Default rates float

The average firing rate of the neuron in Hz. Default: 0.

0 Variables to record
  • p
  • r
Source code in CompNeuroPy/neuron_models/final_models/artificial_nm.py
class PoissonNeuron(Neuron):\n    \"\"\"\n    TEMPLATE\n\n    Poisson neuron whose rate can be specified and is reached instantaneous. The\n    neuron emits spikes following a Poisson distribution, the average firing rate\n    is given by the parameter rates.\n\n    Parameters:\n        rates (float, optional):\n            The average firing rate of the neuron in Hz. Default: 0.\n\n    Variables to record:\n        - p\n        - r\n    \"\"\"\n\n    # For reporting\n    _instantiated = []\n\n    def __init__(self, rates: float = 0):\n        # Create the arguments\n        parameters = f\"\"\"\n            rates = {rates}\n        \"\"\"\n\n        super().__init__(\n            parameters=parameters,\n            equations=\"\"\"\n                p = Uniform(0.0, 1.0) * 1000.0 / dt\n            \"\"\",\n            spike=\"\"\"\n                p <= rates\n            \"\"\",\n            reset=\"\"\"\n                p = 0.0\n            \"\"\",\n            name=\"poisson_neuron\",\n            description=\"\"\"\n                Poisson neuron whose rate can be specified and is reached instantaneous.\n            \"\"\",\n        )\n\n        # For reporting\n        self._instantiated.append(True)\n
"},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.artificial_nm.PoissonNeuronUpDown","title":"PoissonNeuronUpDown","text":"

Bases: Neuron

TEMPLATE

The neuron emits spikes following a Poisson distribution, the average firing rate is given by the parameter rates and is reached with time constants tau_up and tau_down.

Attributes:

Name Type Description rates float

The average firing rate of the neuron in Hz. Default: 0.

tau_up float

Time constant in ms for increasing the firing rate. Default: 1.

tau_down float

Time constant in ms for decreasing the firing rate. Default: 1.

Source code in CompNeuroPy/neuron_models/final_models/artificial_nm.py
class PoissonNeuronUpDown(Neuron):\n    \"\"\"\n    TEMPLATE\n\n    The neuron emits spikes following a Poisson distribution, the average firing rate is\n    given by the parameter rates and is reached with time constants tau_up and tau_down.\n\n    Attributes:\n        rates (float, optional):\n            The average firing rate of the neuron in Hz. Default: 0.\n        tau_up (float, optional):\n            Time constant in ms for increasing the firing rate. Default: 1.\n        tau_down (float, optional):\n            Time constant in ms for decreasing the firing rate. Default: 1.\n    \"\"\"\n\n    # For reporting\n    _instantiated = []\n\n    def __init__(self, rates: float = 0, tau_up: float = 1, tau_down: float = 1):\n        # Create the arguments\n        parameters = f\"\"\"\n            rates = {rates}\n            tau_up = {tau_up}\n            tau_down = {tau_down}\n        \"\"\"\n\n        super().__init__(\n            parameters=parameters,\n            equations=\"\"\"\n                p = Uniform(0.0, 1.0) * 1000.0 / dt\n                dact/dt = if (rates - act) > 0:\n                              (rates - act) / tau_up\n                          else:\n                              (rates - act) / tau_down\n            \"\"\",\n            spike=\"\"\"\n                p <= act\n            \"\"\",\n            reset=\"\"\"\n                p = 0.0\n            \"\"\",\n            name=\"poisson_neuron_up_down\",\n            description=\"\"\"Poisson neuron whose rate can be specified and is reached\n                with time constants tau_up and tau_down.\n            \"\"\",\n        )\n\n        # For reporting\n        self._instantiated.append(True)\n
"},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.artificial_nm.PoissonNeuronSin","title":"PoissonNeuronSin","text":"

Bases: Neuron

TEMPLATE

Neuron emitting spikes following a Poisson distribution, the average firing rate is given by a sinus function.

Parameters:

Name Type Description Default amplitude float

Amplitude of the sinus function. Default: 0.

0 base float

Base (offset) of the sinus function. Default: 0.

0 frequency float

Frequency of the sinus function. Default: 0.

0 phase float

Phase of the sinus function. Default: 0.

0 Variables to record
  • rates
  • p
  • r
Source code in CompNeuroPy/neuron_models/final_models/artificial_nm.py
class PoissonNeuronSin(Neuron):\n    \"\"\"\n    TEMPLATE\n\n    Neuron emitting spikes following a Poisson distribution, the average firing rate\n    is given by a sinus function.\n\n    Parameters:\n        amplitude (float, optional):\n            Amplitude of the sinus function. Default: 0.\n        base (float, optional):\n            Base (offset) of the sinus function. Default: 0.\n        frequency (float, optional):\n            Frequency of the sinus function. Default: 0.\n        phase (float, optional):\n            Phase of the sinus function. Default: 0.\n\n    Variables to record:\n        - rates\n        - p\n        - r\n    \"\"\"\n\n    # For reporting\n    _instantiated = []\n\n    def __init__(\n        self,\n        amplitude: float = 0,\n        base: float = 0,\n        frequency: float = 0,\n        phase: float = 0,\n    ):\n        # Create the arguments\n        parameters = f\"\"\"\n            amplitude = {amplitude}\n            base = {base}\n            frequency = {frequency}\n            phase = {phase}\n        \"\"\"\n\n        super().__init__(\n            parameters=parameters,\n            equations=\"\"\"\n                rates = amplitude * sin((2*pi*frequency)*(t/1000-phase)) + base\n                p     = Uniform(0.0, 1.0) * 1000.0 / dt\n            \"\"\",\n            spike=\"\"\"\n                p <= rates\n            \"\"\",\n            reset=\"\"\"\n                p = 0.0\n            \"\"\",\n            name=\"poisson_neuron_sin\",\n            description=\"Poisson neuron whose rate varies with a sinus function.\",\n        )\n\n        # For reporting\n        self._instantiated.append(True)\n
"},{"location":"built_in/neuron_models/#hodgkin-huxley-neurons","title":"Hodgkin Huxley Neurons","text":""},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.H_and_H_like_nm.HHneuronBischop","title":"HHneuronBischop","text":"

Bases: _HHneuron

PREDEFINED

Hodgkin Huxley neuron model for striatal FSI from Bischop et al. (2012).

Variables to record
  • prev_v
  • I_L
  • alpha_h
  • beta_h
  • h_inf
  • tau_h
  • h
  • alpha_m
  • beta_m
  • m_inf
  • m
  • I_Na
  • alpha_n1
  • beta_n1
  • n1_inf
  • tau_n1
  • n1
  • I_Kv1
  • alpha_n3
  • beta_n3
  • n3_inf
  • tau_n3
  • n3
  • I_Kv3
  • PV
  • PV_Mg
  • dPV_Ca_dt
  • PV_Ca
  • Ca
  • k_inf
  • tau_k
  • k
  • I_SK
  • a_inf
  • a
  • I_Ca
  • v
  • r
Source code in CompNeuroPy/neuron_models/final_models/H_and_H_like_nm.py
class HHneuronBischop(_HHneuron):\n    \"\"\"\n    PREDEFINED\n\n    Hodgkin Huxley neuron model for striatal FSI from\n    [Bischop et al. (2012)](https://doi.org/10.3389/fnmol.2012.00078).\n\n    Variables to record:\n        - prev_v\n        - I_L\n        - alpha_h\n        - beta_h\n        - h_inf\n        - tau_h\n        - h\n        - alpha_m\n        - beta_m\n        - m_inf\n        - m\n        - I_Na\n        - alpha_n1\n        - beta_n1\n        - n1_inf\n        - tau_n1\n        - n1\n        - I_Kv1\n        - alpha_n3\n        - beta_n3\n        - n3_inf\n        - tau_n3\n        - n3\n        - I_Kv3\n        - PV\n        - PV_Mg\n        - dPV_Ca_dt\n        - PV_Ca\n        - Ca\n        - k_inf\n        - tau_k\n        - k\n        - I_SK\n        - a_inf\n        - a\n        - I_Ca\n        - v\n        - r\n    \"\"\"\n\n    def __init__(self):\n        self.bischop = _BischopStrings()\n\n        super().__init__()\n\n    def _get_parameters(self):\n        return self.bischop.parameters_base\n\n    def _get_equations(self):\n        return self.bischop.equations_base + self.bischop.membrane_base\n\n    def _get_name(self):\n        return \"H_and_H_Bischop\"\n\n    def _get_description(self):\n        return (\n            \"Hodgkin Huxley neuron model for striatal FSI from Bischop et al. (2012).\"\n        )\n
"},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.H_and_H_like_nm.HHneuronBischopSyn","title":"HHneuronBischopSyn","text":"

Bases: _HHneuron

PREDEFINED

Hodgkin Huxley neuron model for striatal FSI from Bischop et al. (2012) with conductance-based synapses/currents for AMPA and GABA.

Variables to record
  • g_ampa
  • g_gaba
  • prev_v
  • I_L
  • alpha_h
  • beta_h
  • h_inf
  • tau_h
  • h
  • alpha_m
  • beta_m
  • m_inf
  • m
  • I_Na
  • alpha_n1
  • beta_n1
  • n1_inf
  • tau_n1
  • n1
  • I_Kv1
  • alpha_n3
  • beta_n3
  • n3_inf
  • tau_n3
  • n3
  • I_Kv3
  • PV
  • PV_Mg
  • dPV_Ca_dt
  • PV_Ca
  • Ca
  • k_inf
  • tau_k
  • k
  • I_SK
  • a_inf
  • a
  • I_Ca
  • v
  • r
Source code in CompNeuroPy/neuron_models/final_models/H_and_H_like_nm.py
class HHneuronBischopSyn(_HHneuron):\n    \"\"\"\n    PREDEFINED\n\n    Hodgkin Huxley neuron model for striatal FSI from\n    [Bischop et al. (2012)](https://doi.org/10.3389/fnmol.2012.00078) with\n    conductance-based synapses/currents for AMPA and GABA.\n\n    Variables to record:\n        - g_ampa\n        - g_gaba\n        - prev_v\n        - I_L\n        - alpha_h\n        - beta_h\n        - h_inf\n        - tau_h\n        - h\n        - alpha_m\n        - beta_m\n        - m_inf\n        - m\n        - I_Na\n        - alpha_n1\n        - beta_n1\n        - n1_inf\n        - tau_n1\n        - n1\n        - I_Kv1\n        - alpha_n3\n        - beta_n3\n        - n3_inf\n        - tau_n3\n        - n3\n        - I_Kv3\n        - PV\n        - PV_Mg\n        - dPV_Ca_dt\n        - PV_Ca\n        - Ca\n        - k_inf\n        - tau_k\n        - k\n        - I_SK\n        - a_inf\n        - a\n        - I_Ca\n        - v\n        - r\n    \"\"\"\n\n    def __init__(self):\n        self.bischop = _BischopStrings()\n\n        super().__init__()\n\n    def _get_parameters(self):\n        return self.bischop.parameters_conductance\n\n    def _get_equations(self):\n        return self.bischop.equations_conductance + self.bischop.membrane_conductance\n\n    def _get_name(self):\n        return \"H_and_H_Bischop_syn\"\n\n    def _get_description(self):\n        return \"\"\"\n                Hodgkin Huxley neuron model for striatal FSI from Bischop et al. (2012)\n                with conductance-based synapses/currents for AMPA and GABA.\n            \"\"\"\n
"},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.H_and_H_like_nm.HHneuronCorbit","title":"HHneuronCorbit","text":"

Bases: _HHneuron

PREDEFINED

Hodgkin Huxley neuron model for striatal FSI from Corbit et al. (2016).

Variables to record
  • prev_v
  • I_L
  • m_Na
  • h_Na
  • I_Na
  • n_Kv3_inf
  • tau_n_Kv3_inf
  • n_Kv3
  • I_Kv3
  • m_Kv1
  • h_Kv1
  • I_Kv1
  • v
  • r
Source code in CompNeuroPy/neuron_models/final_models/H_and_H_like_nm.py
class HHneuronCorbit(_HHneuron):\n    \"\"\"\n    PREDEFINED\n\n    Hodgkin Huxley neuron model for striatal FSI from\n    [Corbit et al. (2016)](https://doi.org/10.1523/JNEUROSCI.0339-16.2016).\n\n    Variables to record:\n        - prev_v\n        - I_L\n        - m_Na\n        - h_Na\n        - I_Na\n        - n_Kv3_inf\n        - tau_n_Kv3_inf\n        - n_Kv3\n        - I_Kv3\n        - m_Kv1\n        - h_Kv1\n        - I_Kv1\n        - v\n        - r\n    \"\"\"\n\n    def __init__(self):\n        self.corbit = _CorbitStrings()\n\n        super().__init__()\n\n    def _get_parameters(self):\n        return self.corbit.parameters_base\n\n    def _get_equations(self):\n        return self.corbit.equations_base + self.corbit.membrane_base\n\n    def _get_name(self):\n        return \"H_and_H_Corbit\"\n\n    def _get_description(self):\n        return \"Hodgkin Huxley neuron model for striatal FSI from Corbit et al. (2016).\"\n
"},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.H_and_H_like_nm.HHneuronCorbitSyn","title":"HHneuronCorbitSyn","text":"

Bases: _HHneuron

PREDEFINED

Hodgkin Huxley neuron model for striatal FSI from Corbit et al. (2016) with conductance-based synapses/currents for AMPA and GABA.

Variables to record
  • g_ampa
  • g_gaba
  • prev_v
  • I_L
  • m_Na
  • h_Na
  • I_Na
  • n_Kv3_inf
  • tau_n_Kv3_inf
  • n_Kv3
  • I_Kv3
  • m_Kv1
  • h_Kv1
  • I_Kv1
  • v
  • r
Source code in CompNeuroPy/neuron_models/final_models/H_and_H_like_nm.py
class HHneuronCorbitSyn(_HHneuron):\n    \"\"\"\n    PREDEFINED\n\n    Hodgkin Huxley neuron model for striatal FSI from\n    [Corbit et al. (2016)](https://doi.org/10.1523/JNEUROSCI.0339-16.2016) with\n    conductance-based synapses/currents for AMPA and GABA.\n\n    Variables to record:\n        - g_ampa\n        - g_gaba\n        - prev_v\n        - I_L\n        - m_Na\n        - h_Na\n        - I_Na\n        - n_Kv3_inf\n        - tau_n_Kv3_inf\n        - n_Kv3\n        - I_Kv3\n        - m_Kv1\n        - h_Kv1\n        - I_Kv1\n        - v\n        - r\n    \"\"\"\n\n    def __init__(self):\n        self.corbit = _CorbitStrings()\n\n        super().__init__()\n\n    def _get_parameters(self):\n        return self.corbit.parameters_conductance\n\n    def _get_equations(self):\n        return self.corbit.equations_conductance + self.corbit.membrane_conductance\n\n    def _get_name(self):\n        return \"H_and_H_Corbit_syn\"\n\n    def _get_description(self):\n        return \"\"\"\n                Hodgkin Huxley neuron model for striatal FSI from Corbit et al. (2016)\n                with conductance-based synapses/currents for AMPA and GABA.\n            \"\"\"\n
"},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.H_and_H_like_nm.HHneuronCorbitVoltageClamp","title":"HHneuronCorbitVoltageClamp","text":"

Bases: _HHneuron

PREDEFINED

Hodgkin Huxley neuron model for striatal FSI from Corbit et al. (2016) with voltage clamp. Membrane potential v is clamped and I_inf can be recorded.

Variables to record
  • prev_v
  • I_L
  • m_Na
  • h_Na
  • I_Na
  • n_Kv3_inf
  • tau_n_Kv3_inf
  • n_Kv3
  • I_Kv3
  • m_Kv1
  • h_Kv1
  • I_Kv1
  • v
  • I_inf
  • r
Source code in CompNeuroPy/neuron_models/final_models/H_and_H_like_nm.py
class HHneuronCorbitVoltageClamp(_HHneuron):\n    \"\"\"\n    PREDEFINED\n\n    Hodgkin Huxley neuron model for striatal FSI from\n    [Corbit et al. (2016)](https://doi.org/10.1523/JNEUROSCI.0339-16.2016) with\n    voltage clamp. Membrane potential v is clamped and I_inf can be recorded.\n\n    Variables to record:\n        - prev_v\n        - I_L\n        - m_Na\n        - h_Na\n        - I_Na\n        - n_Kv3_inf\n        - tau_n_Kv3_inf\n        - n_Kv3\n        - I_Kv3\n        - m_Kv1\n        - h_Kv1\n        - I_Kv1\n        - v\n        - I_inf\n        - r\n    \"\"\"\n\n    def __init__(self):\n        self.corbit = _CorbitStrings()\n\n        super().__init__()\n\n    def _get_parameters(self):\n        return self.corbit.parameters_base\n\n    def _get_equations(self):\n        return self.corbit.equations_base + self.corbit.membrane_voltage_clamp\n\n    def _get_name(self):\n        return \"H_and_H_Corbit_voltage_clamp\"\n\n    def _get_description(self):\n        return \"\"\"\n                Hodgkin Huxley neuron model for striatal FSI from Corbit et al. (2016)\n                with voltage clamp.\n            \"\"\"\n
"},{"location":"built_in/neuron_models/#izhikevich-2003-like-neurons","title":"Izhikevich (2003)-like Neurons","text":""},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.izhikevich_2003_like_nm.Izhikevich2003FixedNoisyAmpa","title":"Izhikevich2003FixedNoisyAmpa","text":"

Bases: Neuron

TEMPLATE

Izhikevich (2003)-like neuron model with additional conductance based synapses for AMPA and GABA currents with noise in AMPA conductance. Fixed means, the 3 factors of the quadratic equation cannot be changed.

Parameters:

Name Type Description Default a float

Time constant of the recovery variable u.

0 b float

Sensitivity of the recovery variable u to the membrane potential v.

0 c float

After-spike reset value of the membrane potential v.

0 d float

After-spike change of the recovery variable u.

0 tau_ampa float

Time constant of the AMPA conductance.

1 tau_gaba float

Time constant of the GABA conductance.

1 E_ampa float

Reversal potential of the AMPA conductance.

0 E_gaba float

Reversal potential of the GABA conductance.

0 I_app float

External applied current.

0 increase_noise float

Increase of the Poisson distributed (equivalent to a Poisson distributed spike train as input) noise in the AMPA conductance.

0 rates_noise float

Rate of the Poisson distributed noise in the AMPA conductance.

0 Variables to record
  • g_ampa
  • g_gaba
  • v
  • u
  • r
Source code in CompNeuroPy/neuron_models/final_models/izhikevich_2003_like_nm.py
class Izhikevich2003FixedNoisyAmpa(Neuron):\n    \"\"\"\n    TEMPLATE\n\n    [Izhikevich (2003)](https://doi.org/10.1109/TNN.2003.820440)-like neuron model with\n    additional conductance based synapses for AMPA and GABA currents with noise in AMPA\n    conductance. Fixed means, the 3 factors of the quadratic equation cannot be changed.\n\n    Parameters:\n        a (float, optional):\n            Time constant of the recovery variable u.\n        b (float, optional):\n            Sensitivity of the recovery variable u to the membrane potential v.\n        c (float, optional):\n            After-spike reset value of the membrane potential v.\n        d (float, optional):\n            After-spike change of the recovery variable u.\n        tau_ampa (float, optional):\n            Time constant of the AMPA conductance.\n        tau_gaba (float, optional):\n            Time constant of the GABA conductance.\n        E_ampa (float, optional):\n            Reversal potential of the AMPA conductance.\n        E_gaba (float, optional):\n            Reversal potential of the GABA conductance.\n        I_app (float, optional):\n            External applied current.\n        increase_noise (float, optional):\n            Increase of the Poisson distributed (equivalent to a Poisson distributed\n            spike train as input) noise in the AMPA conductance.\n        rates_noise (float, optional):\n            Rate of the Poisson distributed noise in the AMPA conductance.\n\n    Variables to record:\n        - g_ampa\n        - g_gaba\n        - v\n        - u\n        - r\n    \"\"\"\n\n    # For reporting\n    _instantiated = []\n\n    def __init__(\n        self,\n        a: float = 0,\n        b: float = 0,\n        c: float = 0,\n        d: float = 0,\n        tau_ampa: float = 1,\n        tau_gaba: float = 1,\n        E_ampa: float = 0,\n        E_gaba: float = 0,\n        I_app: float = 0,\n        increase_noise: float = 0,\n        rates_noise: float = 0,\n    ):\n  
      # Create the arguments\n        parameters = f\"\"\"\n            a              = {a} : population\n            b              = {b} : population\n            c              = {c} : population\n            d              = {d} : population\n            tau_ampa       = {tau_ampa} : population\n            tau_gaba       = {tau_gaba} : population\n            E_ampa         = {E_ampa} : population\n            E_gaba         = {E_gaba} : population\n            I_app          = {I_app}\n            increase_noise = {increase_noise} : population\n            rates_noise    = {rates_noise}\n        \"\"\"\n\n        super().__init__(\n            parameters=parameters,\n            equations=\"\"\"\n                dg_ampa/dt = ite(Uniform(0.0, 1.0) * 1000.0 / dt > rates_noise, -g_ampa/tau_ampa, -g_ampa/tau_ampa + increase_noise/dt)\n                dg_gaba/dt = -g_gaba / tau_gaba\n                dv/dt      = 0.04 * v * v + 5 * v + 140 - u + I_app - neg(g_ampa*(v - E_ampa)) - pos(g_gaba*(v - E_gaba))\n                du/dt      = a * (b * v - u)\n            \"\"\",\n            spike=\"\"\"\n                v >= 30\n            \"\"\",\n            reset=\"\"\"\n                v = c\n                u = u + d\n            \"\"\",\n            name=\"Izhikevich2003_fixed_noisy_AMPA\",\n            description=\"\"\"\n                Standard neuron model from Izhikevich (2003) with additional\n                conductance-based synapses for AMPA and GABA currents with noise in AMPA\n                conductance.\n            \"\"\",\n        )\n\n        # For reporting\n        self._instantiated.append(True)\n
"},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.izhikevich_2003_like_nm.Izhikevich2003NoisyAmpa","title":"Izhikevich2003NoisyAmpa","text":"

Bases: Neuron

TEMPLATE

Izhikevich (2003)-like neuron model with additional conductance based synapses for AMPA and GABA currents with noise in AMPA conductance.

Parameters:

Name Type Description Default a float

Time constant of the recovery variable u.

0 b float

Sensitivity of the recovery variable u to the membrane potential v.

0 c float

After-spike reset value of the membrane potential v.

0 d float

After-spike change of the recovery variable u.

0 n2 float

Factor of the quadratic equation of the membrane potential v.

0 n1 float

Factor of the quadratic equation of the membrane potential v.

0 n0 float

Factor of the quadratic equation of the membrane potential v.

0 tau_ampa float

Time constant of the AMPA conductance.

1 tau_gaba float

Time constant of the GABA conductance.

1 E_ampa float

Reversal potential of the AMPA conductance.

0 E_gaba float

Reversal potential of the GABA conductance.

0 I_app float

External applied current.

0 increase_noise float

Increase of the Poisson distributed (equivalent to a Poisson distributed spike train as input) noise in the AMPA conductance.

0 rates_noise float

Rate of the Poisson distributed noise in the AMPA conductance.

0 Variables to record
  • g_ampa
  • g_gaba
  • v
  • u
  • r
Source code in CompNeuroPy/neuron_models/final_models/izhikevich_2003_like_nm.py
class Izhikevich2003NoisyAmpa(Neuron):\n    \"\"\"\n    TEMPLATE\n\n    [Izhikevich (2003)](https://doi.org/10.1109/TNN.2003.820440)-like neuron model with\n    additional conductance based synapses for AMPA and GABA currents with noise in AMPA\n    conductance.\n\n    Parameters:\n        a (float, optional):\n            Time constant of the recovery variable u.\n        b (float, optional):\n            Sensitivity of the recovery variable u to the membrane potential v.\n        c (float, optional):\n            After-spike reset value of the membrane potential v.\n        d (float, optional):\n            After-spike change of the recovery variable u.\n        n2 (float, optional):\n            Factor of the quadratic equation of the membrane potential v.\n        n1 (float, optional):\n            Factor of the quadratic equation of the membrane potential v.\n        n0 (float, optional):\n            Factor of the quadratic equation of the membrane potential v.\n        tau_ampa (float, optional):\n            Time constant of the AMPA conductance.\n        tau_gaba (float, optional):\n            Time constant of the GABA conductance.\n        E_ampa (float, optional):\n            Reversal potential of the AMPA conductance.\n        E_gaba (float, optional):\n            Reversal potential of the GABA conductance.\n        I_app (float, optional):\n            External applied current.\n        increase_noise (float, optional):\n            Increase of the Poisson distributed (equivalent to a Poisson distributed\n            spike train as input) noise in the AMPA conductance.\n        rates_noise (float, optional):\n            Rate of the Poisson distributed noise in the AMPA conductance.\n\n    Variables to record:\n        - g_ampa\n        - g_gaba\n        - v\n        - u\n        - r\n    \"\"\"\n\n    # For reporting\n    _instantiated = []\n\n    def __init__(\n        self,\n        a: float = 0,\n        b: float = 0,\n        c: float = 0,\n    
    d: float = 0,\n        n2: float = 0,\n        n1: float = 0,\n        n0: float = 0,\n        tau_ampa: float = 1,\n        tau_gaba: float = 1,\n        E_ampa: float = 0,\n        E_gaba: float = 0,\n        I_app: float = 0,\n        increase_noise: float = 0,\n        rates_noise: float = 0,\n    ):\n        # Create the arguments\n        parameters = f\"\"\"\n            a              = {a} : population\n            b              = {b} : population\n            c              = {c} : population\n            d              = {d} : population\n            n2             = {n2} : population\n            n1             = {n1} : population\n            n0             = {n0} : population\n            tau_ampa       = {tau_ampa} : population\n            tau_gaba       = {tau_gaba} : population\n            E_ampa         = {E_ampa} : population\n            E_gaba         = {E_gaba} : population\n            I_app          = {I_app}\n            increase_noise = {increase_noise} : population\n            rates_noise    = {rates_noise}\n        \"\"\"\n\n        super().__init__(\n            parameters=parameters,\n            equations=\"\"\"\n                dg_ampa/dt = ite(Uniform(0.0, 1.0) * 1000.0 / dt > rates_noise, -g_ampa/tau_ampa, -g_ampa/tau_ampa + increase_noise/dt)\n                dg_gaba/dt = -g_gaba / tau_gaba\n                dv/dt      = n2 * v * v + n1 * v + n0 - u + I_app - neg(g_ampa*(v - E_ampa)) - pos(g_gaba*(v - E_gaba))\n                du/dt      = a * (b * v - u)\n            \"\"\",\n            spike=\"\"\"\n                v >= 30\n            \"\"\",\n            reset=\"\"\"\n                v = c\n                u = u + d\n            \"\"\",\n            name=\"Izhikevich2003_noisy_AMPA\",\n            description=\"\"\"\n                Neuron model from Izhikevich (2003). 
With additional conductance based\n                synapses for AMPA and GABA currents with noise in AMPA conductance.\n            \"\"\",\n        )\n\n        # For reporting\n        self._instantiated.append(True)\n
"},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.izhikevich_2003_like_nm.Izhikevich2003NoisyAmpaNonlin","title":"Izhikevich2003NoisyAmpaNonlin","text":"

Bases: Neuron

TEMPLATE

Izhikevich (2003)-like neuron model with additional conductance based synapses for AMPA and GABA currents with noise in AMPA conductance. With nonlinear function for external current.

Parameters:

Name Type Description Default a float

Time constant of the recovery variable u.

0 b float

Sensitivity of the recovery variable u to the membrane potential v.

0 c float

After-spike reset value of the membrane potential v.

0 d float

After-spike change of the recovery variable u.

0 n2 float

Factor of the quadratic equation of the membrane potential v.

0 n1 float

Factor of the quadratic equation of the membrane potential v.

0 n0 float

Factor of the quadratic equation of the membrane potential v.

0 tau_ampa float

Time constant of the AMPA conductance.

1 tau_gaba float

Time constant of the GABA conductance.

1 E_ampa float

Reversal potential of the AMPA conductance.

0 E_gaba float

Reversal potential of the GABA conductance.

0 I_app float

External applied current.

0 increase_noise float

Increase of the Poisson distributed (equivalent to a Poisson distributed spike train as input) noise in the AMPA conductance.

0 rates_noise float

Rate of the Poisson distributed noise in the AMPA conductance.

0 nonlin float

Exponent of the nonlinear function for the external current.

1 Variables to record
  • g_ampa
  • g_gaba
  • I
  • v
  • u
  • r
Source code in CompNeuroPy/neuron_models/final_models/izhikevich_2003_like_nm.py
class Izhikevich2003NoisyAmpaNonlin(Neuron):\n    \"\"\"\n    TEMPLATE\n\n    [Izhikevich (2003)](https://doi.org/10.1109/TNN.2003.820440)-like neuron model with\n    additional conductance based synapses for AMPA and GABA currents with noise in AMPA\n    conductance. With nonlinear function for external current.\n\n    Parameters:\n        a (float, optional):\n            Time constant of the recovery variable u.\n        b (float, optional):\n            Sensitivity of the recovery variable u to the membrane potential v.\n        c (float, optional):\n            After-spike reset value of the membrane potential v.\n        d (float, optional):\n            After-spike change of the recovery variable u.\n        n2 (float, optional):\n            Factor of the quadratic equation of the membrane potential v.\n        n1 (float, optional):\n            Factor of the quadratic equation of the membrane potential v.\n        n0 (float, optional):\n            Factor of the quadratic equation of the membrane potential v.\n        tau_ampa (float, optional):\n            Time constant of the AMPA conductance.\n        tau_gaba (float, optional):\n            Time constant of the GABA conductance.\n        E_ampa (float, optional):\n            Reversal potential of the AMPA conductance.\n        E_gaba (float, optional):\n            Reversal potential of the GABA conductance.\n        I_app (float, optional):\n            External applied current.\n        increase_noise (float, optional):\n            Increase of the Poisson distributed (equivalent to a Poisson distributed\n            spike train as input) noise in the AMPA conductance.\n        rates_noise (float, optional):\n            Rate of the Poisson distributed noise in the AMPA conductance.\n        nonlin (float, optional):\n            Exponent of the nonlinear function for the external current.\n\n    Variables to record:\n        - g_ampa\n        - g_gaba\n        - I\n        - v\n        - u\n       
 - r\n    \"\"\"\n\n    # For reporting\n    _instantiated = []\n\n    def __init__(\n        self,\n        a: float = 0,\n        b: float = 0,\n        c: float = 0,\n        d: float = 0,\n        n2: float = 0,\n        n1: float = 0,\n        n0: float = 0,\n        tau_ampa: float = 1,\n        tau_gaba: float = 1,\n        E_ampa: float = 0,\n        E_gaba: float = 0,\n        I_app: float = 0,\n        increase_noise: float = 0,\n        rates_noise: float = 0,\n        nonlin: float = 1,\n    ):\n        # Create the arguments\n        parameters = f\"\"\"\n            a              = {a} : population\n            b              = {b} : population\n            c              = {c} : population\n            d              = {d} : population\n            n2             = {n2} : population\n            n1             = {n1} : population\n            n0             = {n0} : population\n            tau_ampa       = {tau_ampa} : population\n            tau_gaba       = {tau_gaba} : population\n            E_ampa         = {E_ampa} : population\n            E_gaba         = {E_gaba} : population\n            I_app          = {I_app}\n            increase_noise = {increase_noise} : population\n            rates_noise    = {rates_noise}\n            nonlin         = {nonlin} : population\n        \"\"\"\n\n        super().__init__(\n            parameters=parameters,\n            equations=\"\"\"\n                dg_ampa/dt = ite(Uniform(0.0, 1.0) * 1000.0 / dt > rates_noise, -g_ampa/tau_ampa, -g_ampa/tau_ampa + increase_noise/dt)\n                dg_gaba/dt = -g_gaba / tau_gaba\n                I = I_app - neg(g_ampa*(v - E_ampa)) - pos(g_gaba*(v - E_gaba))\n                dv/dt      = n2 * v * v + n1 * v + n0 - u + f(I,nonlin)\n                du/dt      = a * (b * v - u)\n            \"\"\",\n            spike=\"\"\"\n                v >= 30\n            \"\"\",\n            reset=\"\"\"\n                v = c\n                u = u + d\n            
\"\"\",\n            functions=\"\"\"\n                f(x,y)=((abs(x))**(1/y))/((x+1e-20)/(abs(x)+ 1e-20))\n            \"\"\",\n            name=\"Izhikevich2003_noisy_AMPA_nonlin\",\n            description=\"\"\"\n                Neuron model from Izhikevich (2003). With additional conductance based\n                synapses for AMPA and GABA currents with noise in AMPA conductance.\n                With nonlinear function for external current.\n            \"\"\",\n        )\n\n        # For reporting\n        self._instantiated.append(True)\n
"},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.izhikevich_2003_like_nm.Izhikevich2003NoisyAmpaOscillating","title":"Izhikevich2003NoisyAmpaOscillating","text":"

Bases: Neuron

TEMPLATE

Izhikevich (2003)-like neuron model with additional conductance based synapses for AMPA and GABA currents with noise in AMPA conductance. With additional oscillation term.

Parameters:

Name Type Description Default a float

Time constant of the recovery variable u.

0 b float

Sensitivity of the recovery variable u to the membrane potential v.

0 c float

After-spike reset value of the membrane potential v.

0 d float

After-spike change of the recovery variable u.

0 n2 float

Factor of the quadratic equation of the membrane potential v.

0 n1 float

Factor of the quadratic equation of the membrane potential v.

0 n0 float

Factor of the quadratic equation of the membrane potential v.

0 tau_ampa float

Time constant of the AMPA conductance.

1 tau_gaba float

Time constant of the GABA conductance.

1 E_ampa float

Reversal potential of the AMPA conductance.

0 E_gaba float

Reversal potential of the GABA conductance.

0 I_app float

External applied current.

0 increase_noise float

Increase of the Poisson distributed (equivalent to a Poisson distributed spike train as input) noise in the AMPA conductance.

0 rates_noise float

Rate of the Poisson distributed noise in the AMPA conductance.

0 freq float

Frequency of the oscillation term.

0 amp float

Amplitude of the oscillation term.

6 Variables to record
  • osc
  • g_ampa
  • g_gaba
  • v
  • u
  • r
Source code in CompNeuroPy/neuron_models/final_models/izhikevich_2003_like_nm.py
class Izhikevich2003NoisyAmpaOscillating(Neuron):\n    \"\"\"\n    TEMPLATE\n\n    [Izhikevich (2003)](https://doi.org/10.1109/TNN.2003.820440)-like neuron model with\n    additional conductance based synapses for AMPA and GABA currents with noise in AMPA\n    conductance. With additional oscillation term.\n\n    Parameters:\n        a (float, optional):\n            Time constant of the recovery variable u.\n        b (float, optional):\n            Sensitivity of the recovery variable u to the membrane potential v.\n        c (float, optional):\n            After-spike reset value of the membrane potential v.\n        d (float, optional):\n            After-spike change of the recovery variable u.\n        n2 (float, optional):\n            Factor of the quadratic equation of the membrane potential v.\n        n1 (float, optional):\n            Factor of the quadratic equation of the membrane potential v.\n        n0 (float, optional):\n            Factor of the quadratic equation of the membrane potential v.\n        tau_ampa (float, optional):\n            Time constant of the AMPA conductance.\n        tau_gaba (float, optional):\n            Time constant of the GABA conductance.\n        E_ampa (float, optional):\n            Reversal potential of the AMPA conductance.\n        E_gaba (float, optional):\n            Reversal potential of the GABA conductance.\n        I_app (float, optional):\n            External applied current.\n        increase_noise (float, optional):\n            Increase of the Poisson distributed (equivalent to a Poisson distributed\n            spike train as input) noise in the AMPA conductance.\n        rates_noise (float, optional):\n            Rate of the Poisson distributed noise in the AMPA conductance.\n        freq (float, optional):\n            Frequency of the oscillation term.\n        amp (float, optional):\n            Amplitude of the oscillation term.\n\n    Variables to record:\n        - osc\n        - g_ampa\n    
    - g_gaba\n        - v\n        - u\n        - r\n    \"\"\"\n\n    # For reporting\n    _instantiated = []\n\n    def __init__(\n        self,\n        a: float = 0,\n        b: float = 0,\n        c: float = 0,\n        d: float = 0,\n        n2: float = 0,\n        n1: float = 0,\n        n0: float = 0,\n        tau_ampa: float = 1,\n        tau_gaba: float = 1,\n        E_ampa: float = 0,\n        E_gaba: float = 0,\n        I_app: float = 0,\n        increase_noise: float = 0,\n        rates_noise: float = 0,\n        freq: float = 0,\n        amp: float = 6,\n    ):\n        # Create the arguments\n        parameters = f\"\"\"\n            a              = {a} : population\n            b              = {b} : population\n            c              = {c} : population\n            d              = {d} : population\n            n2             = {n2} : population\n            n1             = {n1} : population\n            n0             = {n0} : population\n            tau_ampa       = {tau_ampa} : population\n            tau_gaba       = {tau_gaba} : population\n            E_ampa         = {E_ampa} : population\n            E_gaba         = {E_gaba} : population\n            I_app          = {I_app}\n            increase_noise = {increase_noise} : population\n            rates_noise    = {rates_noise}\n            freq           = {freq}\n            amp            = {amp}\n        \"\"\"\n\n        super().__init__(\n            parameters=parameters,\n            equations=\"\"\"\n                osc        = amp * sin(t * 2 * pi * (freq / 1000))\n                dg_ampa/dt = ite(Uniform(0.0, 1.0) * 1000.0 / dt > rates_noise, -g_ampa/tau_ampa, -g_ampa/tau_ampa + increase_noise/dt)\n                dg_gaba/dt = -g_gaba / tau_gaba\n                dv/dt      = n2 * v * v + n1 * v + n0 - u + I_app - neg(g_ampa*(v - E_ampa)) - pos(g_gaba*(v - E_gaba)) + osc\n                du/dt      = a * (b * v - u)\n            \"\"\",\n            spike=\"\"\"\n           
     v >= 30\n            \"\"\",\n            reset=\"\"\"\n                v = c\n                u = u + d\n            \"\"\",\n            name=\"Izhikevich2003_noisy_AMPA_oscillating\",\n            description=\"\"\"\n                Neuron model from Izhikevich (2003). With additional conductance based\n                synapses for AMPA and GABA currents with noise in AMPA conductance.\n                With additional oscillation term.\n            \"\"\",\n        )\n\n        # For reporting\n        self._instantiated.append(True)\n
"},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.izhikevich_2003_like_nm.Izhikevich2003NoisyBase","title":"Izhikevich2003NoisyBase","text":"

Bases: Neuron

TEMPLATE

Izhikevich (2003)-like neuron model with additional conductance based synapses for AMPA and GABA currents and a noisy baseline current.

Parameters:

Name Type Description Default a float

Time constant of the recovery variable u.

0 b float

Sensitivity of the recovery variable u to the membrane potential v.

0 c float

After-spike reset value of the membrane potential v.

0 d float

After-spike change of the recovery variable u.

0 n2 float

Factor of the quadratic equation of the membrane potential v.

0 n1 float

Factor of the quadratic equation of the membrane potential v.

0 n0 float

Factor of the quadratic equation of the membrane potential v.

0 tau_ampa float

Time constant of the AMPA conductance.

1 tau_gaba float

Time constant of the GABA conductance.

1 E_ampa float

Reversal potential of the AMPA conductance.

0 E_gaba float

Reversal potential of the GABA conductance.

0 I_app float

External applied current.

0 base_mean float

Mean of the baseline current.

0 base_noise float

Standard deviation of the baseline current.

0 rate_base_noise float

Rate of the Poisson distributed noise in the baseline current, i.e. how often the baseline current is changed randomly.

0 Variables to record
  • g_ampa
  • g_gaba
  • offset_base
  • I_base
  • I
  • v
  • u
  • r
Source code in CompNeuroPy/neuron_models/final_models/izhikevich_2003_like_nm.py
class Izhikevich2003NoisyBase(Neuron):\n    \"\"\"\n    TEMPLATE\n\n    [Izhikevich (2003)](https://doi.org/10.1109/TNN.2003.820440)-like neuron model with\n    additional conductance based synapses for AMPA and GABA currents and a noisy baseline\n    current.\n\n    Parameters:\n        a (float, optional):\n            Time constant of the recovery variable u.\n        b (float, optional):\n            Sensitivity of the recovery variable u to the membrane potential v.\n        c (float, optional):\n            After-spike reset value of the membrane potential v.\n        d (float, optional):\n            After-spike change of the recovery variable u.\n        n2 (float, optional):\n            Factor of the quadratic equation of the membrane potential v.\n        n1 (float, optional):\n            Factor of the quadratic equation of the membrane potential v.\n        n0 (float, optional):\n            Factor of the quadratic equation of the membrane potential v.\n        tau_ampa (float, optional):\n            Time constant of the AMPA conductance.\n        tau_gaba (float, optional):\n            Time constant of the GABA conductance.\n        E_ampa (float, optional):\n            Reversal potential of the AMPA conductance.\n        E_gaba (float, optional):\n            Reversal potential of the GABA conductance.\n        I_app (float, optional):\n            External applied current.\n        base_mean (float, optional):\n            Mean of the baseline current.\n        base_noise (float, optional):\n            Standard deviation of the baseline current.\n        rate_base_noise (float, optional):\n            Rate of the Poisson distributed noise in the baseline current, i.e. 
how\n            often the baseline current is changed randomly.\n\n    Variables to record:\n        - g_ampa\n        - g_gaba\n        - offset_base\n        - I_base\n        - I\n        - v\n        - u\n        - r\n    \"\"\"\n\n    # For reporting\n    _instantiated = []\n\n    def __init__(\n        self,\n        a: float = 0,\n        b: float = 0,\n        c: float = 0,\n        d: float = 0,\n        n2: float = 0,\n        n1: float = 0,\n        n0: float = 0,\n        tau_ampa: float = 1,\n        tau_gaba: float = 1,\n        E_ampa: float = 0,\n        E_gaba: float = 0,\n        I_app: float = 0,\n        base_mean: float = 0,\n        base_noise: float = 0,\n        rate_base_noise: float = 0,\n    ):\n        # Create the arguments\n        parameters = f\"\"\"\n            a               = {a} : population\n            b               = {b} : population\n            c               = {c} : population\n            d               = {d} : population\n            n2              = {n2} : population\n            n1              = {n1} : population\n            n0              = {n0} : population\n            tau_ampa        = {tau_ampa} : population\n            tau_gaba        = {tau_gaba} : population\n            E_ampa          = {E_ampa} : population\n            E_gaba          = {E_gaba} : population\n            I_app           = {I_app}\n            base_mean       = {base_mean}\n            base_noise      = {base_noise}\n            rate_base_noise = {rate_base_noise}\n        \"\"\"\n\n        super().__init__(\n            parameters=parameters,\n            equations=\"\"\"\n                dg_ampa/dt  = -g_ampa/tau_ampa\n                dg_gaba/dt  = -g_gaba / tau_gaba\n                offset_base = ite(Uniform(0.0, 1.0) * 1000.0 / dt > rate_base_noise, offset_base, Normal(0, 1) * base_noise)\n                I_base      = base_mean + offset_base\n                I           = I_app - neg(g_ampa*(v - E_ampa)) - pos(g_gaba*(v - 
E_gaba)) + I_base\n                dv/dt       = n2 * v * v + n1 * v + n0 - u + I\n                du/dt       = a * (b * v - u)\n            \"\"\",\n            spike=\"\"\"\n                v >= 30\n            \"\"\",\n            reset=\"\"\"\n                v = c\n                u = u + d\n            \"\"\",\n            name=\"Izhikevich2003_noisy_I\",\n            description=\"\"\"\n                Neuron model from Izhikevich (2003). With additional conductance based\n                synapses for AMPA and GABA currents and a noisy baseline current.\n            \"\"\",\n        )\n\n        # For reporting\n        self._instantiated.append(True)\n
"},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.izhikevich_2003_like_nm.Izhikevich2003NoisyBaseNonlin","title":"Izhikevich2003NoisyBaseNonlin","text":"

Bases: Neuron

TEMPLATE

Izhikevich (2003)-like neuron model with additional conductance based synapses for AMPA and GABA currents and a noisy baseline current. With nonlinear function for external current.

Parameters:

Name Type Description Default a float

Time constant of the recovery variable u.

0 b float

Sensitivity of the recovery variable u to the membrane potential v.

0 c float

After-spike reset value of the membrane potential v.

0 d float

After-spike change of the recovery variable u.

0 n2 float

Factor of the quadratic equation of the membrane potential v.

0 n1 float

Factor of the quadratic equation of the membrane potential v.

0 n0 float

Factor of the quadratic equation of the membrane potential v.

0 tau_ampa float

Time constant of the AMPA conductance.

1 tau_gaba float

Time constant of the GABA conductance.

1 E_ampa float

Reversal potential of the AMPA conductance.

0 E_gaba float

Reversal potential of the GABA conductance.

0 I_app float

External applied current.

0 base_mean float

Mean of the baseline current.

0 base_noise float

Standard deviation of the baseline current.

0 rate_base_noise float

Rate of the Poisson distributed noise in the baseline current, i.e. how often the baseline current is changed randomly.

0 nonlin float

Exponent of the nonlinear function for the external current.

1 Variables to record
  • g_ampa
  • g_gaba
  • offset_base
  • I_base
  • I
  • v
  • u
  • r
Source code in CompNeuroPy/neuron_models/final_models/izhikevich_2003_like_nm.py
class Izhikevich2003NoisyBaseNonlin(Neuron):\n    \"\"\"\n    TEMPLATE\n\n    [Izhikevich (2003)](https://doi.org/10.1109/TNN.2003.820440)-like neuron model with\n    additional conductance based synapses for AMPA and GABA currents and a noisy baseline\n    current. With nonlinear function for external current.\n\n    Parameters:\n        a (float, optional):\n            Time constant of the recovery variable u.\n        b (float, optional):\n            Sensitivity of the recovery variable u to the membrane potential v.\n        c (float, optional):\n            After-spike reset value of the membrane potential v.\n        d (float, optional):\n            After-spike change of the recovery variable u.\n        n2 (float, optional):\n            Factor of the quadratic equation of the membrane potential v.\n        n1 (float, optional):\n            Factor of the quadratic equation of the membrane potential v.\n        n0 (float, optional):\n            Factor of the quadratic equation of the membrane potential v.\n        tau_ampa (float, optional):\n            Time constant of the AMPA conductance.\n        tau_gaba (float, optional):\n            Time constant of the GABA conductance.\n        E_ampa (float, optional):\n            Reversal potential of the AMPA conductance.\n        E_gaba (float, optional):\n            Reversal potential of the GABA conductance.\n        I_app (float, optional):\n            External applied current.\n        base_mean (float, optional):\n            Mean of the baseline current.\n        base_noise (float, optional):\n            Standard deviation of the baseline current.\n        rate_base_noise (float, optional):\n            Rate of the Poisson distributed noise in the baseline current, i.e. 
how\n            often the baseline current is changed randomly.\n        nonlin (float, optional):\n            Exponent of the nonlinear function for the external current.\n\n    Variables to record:\n        - g_ampa\n        - g_gaba\n        - offset_base\n        - I_base\n        - I\n        - v\n        - u\n        - r\n    \"\"\"\n\n    # For reporting\n    _instantiated = []\n\n    def __init__(\n        self,\n        a: float = 0,\n        b: float = 0,\n        c: float = 0,\n        d: float = 0,\n        n2: float = 0,\n        n1: float = 0,\n        n0: float = 0,\n        tau_ampa: float = 1,\n        tau_gaba: float = 1,\n        E_ampa: float = 0,\n        E_gaba: float = 0,\n        I_app: float = 0,\n        base_mean: float = 0,\n        base_noise: float = 0,\n        rate_base_noise: float = 0,\n        nonlin: float = 1,\n    ):\n        # Create the arguments\n        parameters = f\"\"\"\n            a               = {a} : population\n            b               = {b} : population\n            c               = {c} : population\n            d               = {d} : population\n            n2              = {n2} : population\n            n1              = {n1} : population\n            n0              = {n0} : population\n            tau_ampa        = {tau_ampa} : population\n            tau_gaba        = {tau_gaba} : population\n            E_ampa          = {E_ampa} : population\n            E_gaba          = {E_gaba} : population\n            I_app           = {I_app}\n            base_mean       = {base_mean}\n            base_noise      = {base_noise}\n            rate_base_noise = {rate_base_noise}\n            nonlin          = {nonlin} : population\n        \"\"\"\n\n        super().__init__(\n            parameters=parameters,\n            equations=\"\"\"\n                dg_ampa/dt  = -g_ampa/tau_ampa\n                dg_gaba/dt  = -g_gaba / tau_gaba\n                offset_base = ite(Uniform(0.0, 1.0) * 1000.0 / dt > 
rate_base_noise, offset_base, Normal(0, 1) * base_noise)\n                I_base      = base_mean + offset_base\n                I           = I_app - neg(g_ampa*(v - E_ampa)) - pos(g_gaba*(v - E_gaba))\n                dv/dt       = n2 * v * v + n1 * v + n0 - u + f(I,nonlin) + I_base\n                du/dt       = a * (b * v - u)\n            \"\"\",\n            spike=\"\"\"\n                v >= 30\n            \"\"\",\n            reset=\"\"\"\n                v = c\n                u = u + d\n            \"\"\",\n            functions=\"\"\"\n                f(x,y)=((abs(x))**(1/y))/((x+1e-20)/(abs(x)+ 1e-20))\n            \"\"\",\n            name=\"Izhikevich2003_noisy_I_nonlin\",\n            description=\"\"\"\n                Neuron model from Izhikevich (2003). With additional conductance based\n                synapses for AMPA and GABA currents and a noisy baseline current.\n                With nonlinear function for external current.\n            \"\"\",\n        )\n\n        # For reporting\n        self._instantiated.append(True)\n
"},{"location":"built_in/neuron_models/#izhikevich-2007-like-neurons","title":"Izhikevich (2007)-like Neurons","text":""},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.izhikevich_2007_like_nm.Izhikevich2007","title":"Izhikevich2007","text":"

Bases: Neuron

TEMPLATE

Izhikevich (2007)-like neuron model.

Parameters:

Name Type Description Default C float

Membrane capacitance.

100.0 k float

Scaling factor for the quadratic term in the membrane potential.

0.7 v_r float

Resting membrane potential.

-60.0 v_t float

Instantaneous activation threshold potential.

-40.0 a float

Time scale of the recovery variable u.

0.03 b float

Sensitivity of the recovery variable u to the the membrane potential v.

-2.0 c float

After-spike reset value of the membrane potential.

-50.0 d float

After-spike change of the recovery variable u.

100.0 v_peak float

Spike cut-off value for the membrane potential.

35.0 I_app float

External applied input current.

0.0 Variables to record
  • I_v
  • v
  • u
  • r
Source code in CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py
class Izhikevich2007(Neuron):\n    \"\"\"\n    TEMPLATE\n\n    [Izhikevich (2007)](https://isbnsearch.org/isbn/9780262090438)-like neuron model.\n\n    Parameters:\n        C (float, optional):\n            Membrane capacitance.\n        k (float, optional):\n            Scaling factor for the quadratic term in the membrane potential.\n        v_r (float, optional):\n            Resting membrane potential.\n        v_t (float, optional):\n            Instantaneous activation threshold potential.\n        a (float, optional):\n            Time scale of the recovery variable u.\n        b (float, optional):\n            Sensitivity of the recovery variable u to the the membrane potential v.\n        c (float, optional):\n            After-spike reset value of the membrane potential.\n        d (float, optional):\n            After-spike change of the recovery variable u.\n        v_peak (float, optional):\n            Spike cut-off value for the membrane potential.\n        I_app (float, optional):\n            External applied input current.\n\n    Variables to record:\n        - I_v\n        - v\n        - u\n        - r\n    \"\"\"\n\n    # For reporting\n    _instantiated = []\n\n    def __init__(\n        self,\n        C: float = 100.0,\n        k: float = 0.7,\n        v_r: float = -60.0,\n        v_t: float = -40.0,\n        a: float = 0.03,\n        b: float = -2.0,\n        c: float = -50.0,\n        d: float = 100.0,\n        v_peak: float = 35.0,\n        I_app: float = 0.0,\n    ):\n        # Create the arguments\n        parameters = f\"\"\"\n            C      = {C} : population # pF\n            k      = {k} : population # pS * mV**-1\n            v_r    = {v_r} : population # mV\n            v_t    = {v_t} : population # mV\n            a      = {a} : population # ms**-1\n            b      = {b} : population # nS\n            c      = {c} : population # mV\n            d      = {d} : population # pA\n            v_peak = {v_peak} : population # mV\n 
           I_app  = {I_app} # pA\n        \"\"\"\n\n        super().__init__(\n            parameters=parameters,\n            equations=_get_equation_izhikevich_2007(),\n            spike=\"v >= v_peak\",\n            reset=\"\"\"\n                v = c\n                u = u + d\n            \"\"\",\n            name=\"Izhikevich2007\",\n            description=\"Neuron model equations from Izhikevich (2007).\",\n        )\n\n        # For reporting\n        self._instantiated.append(True)\n
"},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.izhikevich_2007_like_nm.Izhikevich2007RecCur","title":"Izhikevich2007RecCur","text":"

Bases: Neuron

TEMPLATE

Izhikevich (2007)-like neuron model with separate currents to record.

Parameters:

Name Type Description Default C float

Membrane capacitance.

100.0 k float

Scaling factor for the quadratic term in the membrane potential.

0.7 v_r float

Resting membrane potential.

-60.0 v_t float

Instantaneous activation threshold potential.

-40.0 a float

Time scale of the recovery variable u.

0.03 b float

Sensitivity of the recovery variable u to the the membrane potential v.

-2.0 c float

After-spike reset value of the membrane potential.

-50.0 d float

After-spike change of the recovery variable u.

100.0 v_peak float

Spike cut-off value for the membrane potential.

35.0 I_app float

External applied input current.

0.0 Variables to record
  • I_v
  • v
  • u
  • I_u
  • I_k
  • I_a
  • r
Source code in CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py
class Izhikevich2007RecCur(Neuron):\n    \"\"\"\n    TEMPLATE\n\n    [Izhikevich (2007)](https://isbnsearch.org/isbn/9780262090438)-like neuron model\n    with separate currents to record.\n\n    Parameters:\n        C (float, optional):\n            Membrane capacitance.\n        k (float, optional):\n            Scaling factor for the quadratic term in the membrane potential.\n        v_r (float, optional):\n            Resting membrane potential.\n        v_t (float, optional):\n            Instantaneous activation threshold potential.\n        a (float, optional):\n            Time scale of the recovery variable u.\n        b (float, optional):\n            Sensitivity of the recovery variable u to the the membrane potential v.\n        c (float, optional):\n            After-spike reset value of the membrane potential.\n        d (float, optional):\n            After-spike change of the recovery variable u.\n        v_peak (float, optional):\n            Spike cut-off value for the membrane potential.\n        I_app (float, optional):\n            External applied input current.\n\n    Variables to record:\n        - I_v\n        - v\n        - u\n        - I_u\n        - I_k\n        - I_a\n        - r\n    \"\"\"\n\n    # For reporting\n    _instantiated = []\n\n    def __init__(\n        self,\n        C: float = 100.0,\n        k: float = 0.7,\n        v_r: float = -60.0,\n        v_t: float = -40.0,\n        a: float = 0.03,\n        b: float = -2.0,\n        c: float = -50.0,\n        d: float = 100.0,\n        v_peak: float = 35.0,\n        I_app: float = 0.0,\n    ):\n        # Create the arguments\n        parameters = f\"\"\"\n            C      = {C} : population # pF\n            k      = {k} : population # pS * mV**-1\n            v_r    = {v_r} : population # mV\n            v_t    = {v_t} : population # mV\n            a      = {a} : population # ms**-1\n            b      = {b} : population # nS\n            c      = {c} : population # mV\n     
       d      = {d} : population # pA\n            v_peak = {v_peak} : population # mV\n            I_app  = {I_app} # pA\n        \"\"\"\n\n        affix = \"\"\"\n            I_u = -u\n            I_k = k*(v - v_r)*(v - v_t)\n            I_a = I_app\n        \"\"\"\n\n        super().__init__(\n            parameters=parameters,\n            equations=_get_equation_izhikevich_2007(affix=affix),\n            spike=\"v >= v_peak\",\n            reset=\"\"\"\n                v = c\n                u = u + d\n            \"\"\",\n            name=\"Izhikevich2007_record_currents\",\n            description=\"\"\"\n                Neuron model equations from Izhikevich (2007) with separate\n                currents to record.\n            \"\"\",\n        )\n\n        # For reporting\n        self._instantiated.append(True)\n
"},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.izhikevich_2007_like_nm.Izhikevich2007VoltageClamp","title":"Izhikevich2007VoltageClamp","text":"

Bases: Neuron

TEMPLATE

Izhikevich (2007)-like neuron model with voltage clamp to record I_inf.

Parameters:

Name Type Description Default C float

Membrane capacitance.

100.0 k float

Scaling factor for the quadratic term in the membrane potential.

0.7 v_r float

Resting membrane potential.

-60.0 v_t float

Instantaneous activation threshold potential.

-40.0 a float

Time scale of the recovery variable u.

0.03 b float

Sensitivity of the recovery variable u to the the membrane potential v.

-2.0 c float

After-spike reset value of the membrane potential.

-50.0 d float

After-spike change of the recovery variable u.

100.0 v_peak float

Spike cut-off value for the membrane potential.

35.0 I_app float

External applied input current.

0.0 Variables to record
  • I_v
  • v
  • u
  • I_inf
  • r
Source code in CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py
class Izhikevich2007VoltageClamp(Neuron):\n    \"\"\"\n    TEMPLATE\n\n    [Izhikevich (2007)](https://isbnsearch.org/isbn/9780262090438)-like neuron model\n    with voltage clamp to record I_inf.\n\n    Parameters:\n        C (float, optional):\n            Membrane capacitance.\n        k (float, optional):\n            Scaling factor for the quadratic term in the membrane potential.\n        v_r (float, optional):\n            Resting membrane potential.\n        v_t (float, optional):\n            Instantaneous activation threshold potential.\n        a (float, optional):\n            Time scale of the recovery variable u.\n        b (float, optional):\n            Sensitivity of the recovery variable u to the the membrane potential v.\n        c (float, optional):\n            After-spike reset value of the membrane potential.\n        d (float, optional):\n            After-spike change of the recovery variable u.\n        v_peak (float, optional):\n            Spike cut-off value for the membrane potential.\n        I_app (float, optional):\n            External applied input current.\n\n    Variables to record:\n        - I_v\n        - v\n        - u\n        - I_inf\n        - r\n    \"\"\"\n\n    # For reporting\n    _instantiated = []\n\n    def __init__(\n        self,\n        C: float = 100.0,\n        k: float = 0.7,\n        v_r: float = -60.0,\n        v_t: float = -40.0,\n        a: float = 0.03,\n        b: float = -2.0,\n        c: float = -50.0,\n        d: float = 100.0,\n        v_peak: float = 35.0,\n        I_app: float = 0.0,\n    ):\n        # Create the arguments\n        parameters = f\"\"\"\n            C      = {C} : population # pF\n            k      = {k} : population # pS * mV**-1\n            v_r    = {v_r} : population # mV\n            v_t    = {v_t} : population # mV\n            a      = {a} : population # ms**-1\n            b      = {b} : population # nS\n            c      = {c} : population # mV\n            d      = {d} 
: population # pA\n            v_peak = {v_peak} : population # mV\n            I_app  = {I_app} # pA\n        \"\"\"\n\n        dv = \"0\"\n        affix = f\"I_inf = {_dv_default}\"\n\n        super().__init__(\n            parameters=parameters,\n            equations=_get_equation_izhikevich_2007(dv=dv, affix=affix),\n            spike=\"v >= v_peak\",\n            reset=\"\"\"\n                v = c\n                u = u + d\n            \"\"\",\n            name=\"Izhikevich2007_voltage_clamp\",\n            description=\"\"\"\n                Neuron model equations from Izhikevich (2007) with voltage clamp\n                to record I_inf.\n            \"\"\",\n        )\n\n        # For reporting\n        self._instantiated.append(True)\n
"},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.izhikevich_2007_like_nm.Izhikevich2007Syn","title":"Izhikevich2007Syn","text":"

Bases: Neuron

TEMPLATE

Izhikevich (2007)-like neuron model with conductance-based synapses.

Parameters:

Name Type Description Default C float

Membrane capacitance.

100.0 k float

Scaling factor for the quadratic term in the membrane potential.

0.7 v_r float

Resting membrane potential.

-60.0 v_t float

Instantaneous activation threshold potential.

-40.0 a float

Time scale of the recovery variable u.

0.03 b float

Sensitivity of the recovery variable u to the the membrane potential v.

-2.0 c float

After-spike reset value of the membrane potential.

-50.0 d float

After-spike change of the recovery variable u.

100.0 v_peak float

Spike cut-off value for the membrane potential.

35.0 I_app float

External applied input current.

0.0 tau_ampa float

Time constant of the AMPA synapse.

10.0 tau_gaba float

Time constant of the GABA synapse.

10.0 E_ampa float

Reversal potential of the AMPA synapse.

0.0 E_gaba float

Reversal potential of the GABA synapse.

-90.0 Variables to record
  • g_ampa
  • g_gaba
  • I_v
  • v
  • u
  • r
Source code in CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py
class Izhikevich2007Syn(Neuron):\n    \"\"\"\n    TEMPLATE\n\n    [Izhikevich (2007)](https://isbnsearch.org/isbn/9780262090438)-like neuron model\n    with conductance-based synapses.\n\n    Parameters:\n        C (float, optional):\n            Membrane capacitance.\n        k (float, optional):\n            Scaling factor for the quadratic term in the membrane potential.\n        v_r (float, optional):\n            Resting membrane potential.\n        v_t (float, optional):\n            Instantaneous activation threshold potential.\n        a (float, optional):\n            Time scale of the recovery variable u.\n        b (float, optional):\n            Sensitivity of the recovery variable u to the the membrane potential v.\n        c (float, optional):\n            After-spike reset value of the membrane potential.\n        d (float, optional):\n            After-spike change of the recovery variable u.\n        v_peak (float, optional):\n            Spike cut-off value for the membrane potential.\n        I_app (float, optional):\n            External applied input current.\n        tau_ampa (float, optional):\n            Time constant of the AMPA synapse.\n        tau_gaba (float, optional):\n            Time constant of the GABA synapse.\n        E_ampa (float, optional):\n            Reversal potential of the AMPA synapse.\n        E_gaba (float, optional):\n            Reversal potential of the GABA synapse.\n\n    Variables to record:\n        - g_ampa\n        - g_gaba\n        - I_v\n        - v\n        - u\n        - r\n    \"\"\"\n\n    # For reporting\n    _instantiated = []\n\n    def __init__(\n        self,\n        C: float = 100.0,\n        k: float = 0.7,\n        v_r: float = -60.0,\n        v_t: float = -40.0,\n        a: float = 0.03,\n        b: float = -2.0,\n        c: float = -50.0,\n        d: float = 100.0,\n        v_peak: float = 35.0,\n        I_app: float = 0.0,\n        tau_ampa: float = 10.0,\n        tau_gaba: float = 10.0,\n 
       E_ampa: float = 0.0,\n        E_gaba: float = -90.0,\n    ):\n        # Create the arguments\n        parameters = f\"\"\"\n            C      = {C} : population # pF\n            k      = {k} : population # pS\n            v_r    = {v_r} : population\n            v_t    = {v_t} : population\n            a      = {a} : population\n            b      = {b} : population\n            c      = {c} : population\n            d      = {d} : population\n            v_peak = {v_peak} : population\n            I_app  = {I_app} # pA\n            tau_ampa = {tau_ampa} : population\n            tau_gaba = {tau_gaba} : population\n            E_ampa   = {E_ampa} : population\n            E_gaba   = {E_gaba} : population\n        \"\"\"\n\n        syn = _syn_default\n        i_v = f\"I_app {_I_syn}\"\n\n        super().__init__(\n            parameters=parameters,\n            equations=_get_equation_izhikevich_2007(syn=syn, i_v=i_v),\n            spike=\"v >= v_peak\",\n            reset=\"\"\"\n                v = c\n                u = u + d\n            \"\"\",\n            name=\"Izhikevich2007_syn\",\n            description=\"\"\"\n                Neuron model equations from Izhikevich (2007) with conductance-based\n                AMPA and GABA synapses/currents.\n            \"\"\",\n        )\n\n        # For reporting\n        self._instantiated.append(True)\n
"},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.izhikevich_2007_like_nm.Izhikevich2007NoisyAmpa","title":"Izhikevich2007NoisyAmpa","text":"

Bases: Neuron

TEMPLATE

Izhikevich (2007)-like neuron model with conductance-based AMPA and GABA synapses with noise in the AMPA conductance.

Parameters:

Name Type Description Default C float

Membrane capacitance.

100.0 k float

Scaling factor for the quadratic term in the membrane potential.

0.7 v_r float

Resting membrane potential.

-60.0 v_t float

Instantaneous activation threshold potential.

-40.0 a float

Time scale of the recovery variable u.

0.03 b float

Sensitivity of the recovery variable u to the the membrane potential v.

-2.0 c float

After-spike reset value of the membrane potential.

-50.0 d float

After-spike change of the recovery variable u.

100.0 v_peak float

Spike cut-off value for the membrane potential.

35.0 I_app float

External applied input current.

0.0 tau_ampa float

Time constant of the AMPA synapse.

10.0 tau_gaba float

Time constant of the GABA synapse.

10.0 E_ampa float

Reversal potential of the AMPA synapse.

0.0 E_gaba float

Reversal potential of the GABA synapse.

-90.0 increase_noise float

Increase of AMPA conductance due to noise (equal to a Poisson distributed spike train as input).

0.0 rates_noise float

Rate of the noise in the AMPA conductance.

0.0 Variables to record
  • g_ampa
  • g_gaba
  • I_v
  • v
  • u
  • r
Source code in CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py
class Izhikevich2007NoisyAmpa(Neuron):\n    \"\"\"\n    TEMPLATE\n\n    [Izhikevich (2007)](https://isbnsearch.org/isbn/9780262090438)-like neuron model\n    with conductance-based AMPA and GABA synapses with noise in the AMPA conductance.\n\n    Parameters:\n        C (float, optional):\n            Membrane capacitance.\n        k (float, optional):\n            Scaling factor for the quadratic term in the membrane potential.\n        v_r (float, optional):\n            Resting membrane potential.\n        v_t (float, optional):\n            Instantaneous activation threshold potential.\n        a (float, optional):\n            Time scale of the recovery variable u.\n        b (float, optional):\n            Sensitivity of the recovery variable u to the the membrane potential v.\n        c (float, optional):\n            After-spike reset value of the membrane potential.\n        d (float, optional):\n            After-spike change of the recovery variable u.\n        v_peak (float, optional):\n            Spike cut-off value for the membrane potential.\n        I_app (float, optional):\n            External applied input current.\n        tau_ampa (float, optional):\n            Time constant of the AMPA synapse.\n        tau_gaba (float, optional):\n            Time constant of the GABA synapse.\n        E_ampa (float, optional):\n            Reversal potential of the AMPA synapse.\n        E_gaba (float, optional):\n            Reversal potential of the GABA synapse.\n        increase_noise (float, optional):\n            Increase of AMPA conductance due to noise (equal to a Poisson distributed\n            spike train as input).\n        rates_noise (float, optional):\n            Rate of the noise in the AMPA conductance.\n\n    Variables to record:\n        - g_ampa\n        - g_gaba\n        - I_v\n        - v\n        - u\n        - r\n    \"\"\"\n\n    # For reporting\n    _instantiated = []\n\n    def __init__(\n        self,\n        C: float = 
100.0,\n        k: float = 0.7,\n        v_r: float = -60.0,\n        v_t: float = -40.0,\n        a: float = 0.03,\n        b: float = -2.0,\n        c: float = -50.0,\n        d: float = 100.0,\n        v_peak: float = 35.0,\n        I_app: float = 0.0,\n        tau_ampa: float = 10.0,\n        tau_gaba: float = 10.0,\n        E_ampa: float = 0.0,\n        E_gaba: float = -90.0,\n        increase_noise: float = 0.0,\n        rates_noise: float = 0.0,\n    ):\n        # Create the arguments\n        parameters = f\"\"\"\n            C              = {C} : population\n            k              = {k} : population\n            v_r            = {v_r} : population\n            v_t            = {v_t} : population\n            a              = {a} : population\n            b              = {b} : population\n            c              = {c} : population\n            d              = {d} : population\n            v_peak         = {v_peak} : population\n            tau_ampa       = {tau_ampa} : population\n            tau_gaba       = {tau_gaba} : population\n            E_ampa         = {E_ampa} : population\n            E_gaba         = {E_gaba} : population\n            I_app          = {I_app} # pA\n            increase_noise = {increase_noise} : population\n            rates_noise    = {rates_noise}\n        \"\"\"\n\n        syn = _syn_noisy\n        i_v = f\"I_app {_I_syn}\"\n\n        super().__init__(\n            parameters=parameters,\n            equations=_get_equation_izhikevich_2007(syn=syn, i_v=i_v),\n            spike=\"v >= v_peak\",\n            reset=\"\"\"\n                v = c\n                u = u + d\n            \"\"\",\n            name=\"Izhikevich2007_noisy_AMPA\",\n            description=\"\"\"\n                Standard neuron model from Izhikevich (2007) with additional\n                conductance based synapses for AMPA and GABA currents with noise\n                in AMPA conductance.\n            \"\"\",\n        )\n\n        # For 
reporting\n        self._instantiated.append(True)\n
"},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.izhikevich_2007_like_nm.Izhikevich2007NoisyBase","title":"Izhikevich2007NoisyBase","text":"

Bases: Neuron

TEMPLATE

Izhikevich (2007)-like neuron model with conductance-based AMPA and GABA synapses with noise in the baseline current.

Parameters:

Name Type Description Default C float

Membrane capacitance.

100.0 k float

Scaling factor for the quadratic term in the membrane potential.

0.7 v_r float

Resting membrane potential.

-60.0 v_t float

Instantaneous activation threshold potential.

-40.0 a float

Time scale of the recovery variable u.

0.03 b float

Sensitivity of the recovery variable u to the membrane potential v.

-2.0 c float

After-spike reset value of the membrane potential.

-50.0 d float

After-spike change of the recovery variable u.

100.0 v_peak float

Spike cut-off value for the membrane potential.

35.0 I_app float

External applied input current.

0.0 tau_ampa float

Time constant of the AMPA synapse.

10.0 tau_gaba float

Time constant of the GABA synapse.

10.0 E_ampa float

Reversal potential of the AMPA synapse.

0.0 E_gaba float

Reversal potential of the GABA synapse.

-90.0 base_mean float

Mean of the baseline current.

0.0 base_noise float

Standard deviation of the baseline current noise.

0.0 rate_base_noise float

Rate of the noise update (Poisson distributed) in the baseline current.

0.0 Variables to record
  • offset_base
  • I_base
  • g_ampa
  • g_gaba
  • I_v
  • v
  • u
  • r
Source code in CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py
class Izhikevich2007NoisyBase(Neuron):\n    \"\"\"\n    TEMPLATE\n\n    [Izhikevich (2007)](https://isbnsearch.org/isbn/9780262090438)-like neuron model\n    with conductance-based AMPA and GABA synapses with noise in the baseline current.\n\n    Parameters:\n        C (float, optional):\n            Membrane capacitance.\n        k (float, optional):\n            Scaling factor for the quadratic term in the membrane potential.\n        v_r (float, optional):\n            Resting membrane potential.\n        v_t (float, optional):\n            Instantaneous activation threshold potential.\n        a (float, optional):\n            Time scale of the recovery variable u.\n        b (float, optional):\n            Sensitivity of the recovery variable u to the the membrane potential v.\n        c (float, optional):\n            After-spike reset value of the membrane potential.\n        d (float, optional):\n            After-spike change of the recovery variable u.\n        v_peak (float, optional):\n            Spike cut-off value for the membrane potential.\n        I_app (float, optional):\n            External applied input current.\n        tau_ampa (float, optional):\n            Time constant of the AMPA synapse.\n        tau_gaba (float, optional):\n            Time constant of the GABA synapse.\n        E_ampa (float, optional):\n            Reversal potential of the AMPA synapse.\n        E_gaba (float, optional):\n            Reversal potential of the GABA synapse.\n        base_mean (float, optional):\n            Mean of the baseline current.\n        base_noise (float, optional):\n            Standard deviation of the baseline current noise.\n        rate_base_noise (float, optional):\n            Rate of the noise update (Poisson distributed) in the baseline current.\n\n    Variables to record:\n        - offset_base\n        - I_base\n        - g_ampa\n        - g_gaba\n        - I_v\n        - v\n        - u\n        - r\n    \"\"\"\n\n    # For 
reporting\n    _instantiated = []\n\n    def __init__(\n        self,\n        C: float = 100.0,\n        k: float = 0.7,\n        v_r: float = -60.0,\n        v_t: float = -40.0,\n        a: float = 0.03,\n        b: float = -2.0,\n        c: float = -50.0,\n        d: float = 100.0,\n        v_peak: float = 35.0,\n        I_app: float = 0.0,\n        tau_ampa: float = 10.0,\n        tau_gaba: float = 10.0,\n        E_ampa: float = 0.0,\n        E_gaba: float = -90.0,\n        base_mean: float = 0.0,\n        base_noise: float = 0.0,\n        rate_base_noise: float = 0.0,\n    ):\n        # Create the arguments\n        parameters = f\"\"\"\n            C              = {C} : population\n            k              = {k} : population\n            v_r            = {v_r} : population\n            v_t            = {v_t} : population\n            a              = {a} : population\n            b              = {b} : population\n            c              = {c} : population\n            d              = {d} : population\n            v_peak         = {v_peak} : population\n            tau_ampa       = {tau_ampa} : population\n            tau_gaba       = {tau_gaba} : population\n            E_ampa         = {E_ampa} : population\n            E_gaba         = {E_gaba} : population\n            I_app          = {I_app} # pA\n            base_mean      = {base_mean}\n            base_noise     = {base_noise}\n            rate_base_noise = {rate_base_noise}\n        \"\"\"\n\n        syn = _syn_default\n        i_v = f\"I_app {_I_syn} + I_base\"\n        prefix = _I_base_noise\n\n        super().__init__(\n            parameters=parameters,\n            equations=_get_equation_izhikevich_2007(syn=syn, i_v=i_v, prefix=prefix),\n            spike=\"v >= v_peak\",\n            reset=\"\"\"\n                v = c\n                u = u + d\n            \"\"\",\n            name=\"Izhikevich2007_noisy_base\",\n            description=\"\"\"\n                Standard neuron model 
from Izhikevich (2007) with additional\n                conductance based synapses for AMPA and GABA currents and noisy\n                baseline current.\n            \"\"\",\n        )\n\n        # For reporting\n        self._instantiated.append(True)\n
"},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.izhikevich_2007_like_nm.Izhikevich2007FsiNoisyAmpa","title":"Izhikevich2007FsiNoisyAmpa","text":"

Bases: Neuron

TEMPLATE

Izhikevich (2007)-like neuron model for fast-spiking neurons, with conductance-based AMPA and GABA synapses with noise in the AMPA conductance.

Parameters:

Name Type Description Default C float

Membrane capacitance.

20.0 k float

Scaling factor for the quadratic term in the membrane potential.

1.0 v_r float

Resting membrane potential.

-55.0 v_t float

Instantaneous activation threshold potential.

-40.0 v_b float

Instantaneous activation threshold potential for the recovery variable u.

-55.0 a float

Time scale of the recovery variable u.

0.1 b float

Sensitivity of the recovery variable u to the membrane potential v.

-2.0 c float

After-spike reset value of the membrane potential.

-50.0 d float

After-spike change of the recovery variable u.

2.0 v_peak float

Spike cut-off value for the membrane potential.

25.0 I_app float

External applied input current.

0.0 tau_ampa float

Time constant of the AMPA synapse.

2.0 tau_gaba float

Time constant of the GABA synapse.

5.0 E_ampa float

Reversal potential of the AMPA synapse.

0.0 E_gaba float

Reversal potential of the GABA synapse.

-80.0 increase_noise float

Increase of AMPA conductance due to noise (equal to a Poisson distributed spike train as input).

0.0 rates_noise float

Rate of the noise in the AMPA conductance.

0.0 Variables to record
  • g_ampa
  • g_gaba
  • I_v
  • v
  • u
  • r
Source code in CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py
class Izhikevich2007FsiNoisyAmpa(Neuron):\n    \"\"\"\n    TEMPLATE\n\n    [Izhikevich (2007)](https://isbnsearch.org/isbn/9780262090438)-like neuron model\n    for fast-spiking neurons, with conductance-based AMPA and GABA synapses with noise\n    in the AMPA conductance.\n\n    Parameters:\n        C (float, optional):\n            Membrane capacitance.\n        k (float, optional):\n            Scaling factor for the quadratic term in the membrane potential.\n        v_r (float, optional):\n            Resting membrane potential.\n        v_t (float, optional):\n            Instantaneous activation threshold potential.\n        v_b (float, optional):\n            Instantaneous activation threshold potential for the recovery variable u.\n        a (float, optional):\n            Time scale of the recovery variable u.\n        b (float, optional):\n            Sensitivity of the recovery variable u to the the membrane potential v.\n        c (float, optional):\n            After-spike reset value of the membrane potential.\n        d (float, optional):\n            After-spike change of the recovery variable u.\n        v_peak (float, optional):\n            Spike cut-off value for the membrane potential.\n        I_app (float, optional):\n            External applied input current.\n        tau_ampa (float, optional):\n            Time constant of the AMPA synapse.\n        tau_gaba (float, optional):\n            Time constant of the GABA synapse.\n        E_ampa (float, optional):\n            Reversal potential of the AMPA synapse.\n        E_gaba (float, optional):\n            Reversal potential of the GABA synapse.\n        increase_noise (float, optional):\n            Increase of AMPA conductance due to noise (equal to a Poisson distributed\n            spike train as input).\n        rates_noise (float, optional):\n            Rate of the noise in the AMPA conductance.\n\n    Variables to record:\n        - g_ampa\n        - g_gaba\n        - I_v\n       
 - v\n        - u\n        - r\n    \"\"\"\n\n    # For reporting\n    _instantiated = []\n\n    def __init__(\n        self,\n        C: float = 20.0,\n        k: float = 1.0,\n        v_r: float = -55.0,\n        v_t: float = -40.0,\n        v_b: float = -55.0,\n        a: float = 0.1,\n        b: float = -2.0,\n        c: float = -50.0,\n        d: float = 2.0,\n        v_peak: float = 25.0,\n        I_app: float = 0.0,\n        tau_ampa: float = 2.0,\n        tau_gaba: float = 5.0,\n        E_ampa: float = 0.0,\n        E_gaba: float = -80.0,\n        increase_noise: float = 0.0,\n        rates_noise: float = 0.0,\n    ):\n        # Create the arguments\n        parameters = f\"\"\"\n            C              = {C} : population\n            k              = {k} : population\n            v_r            = {v_r} : population\n            v_t            = {v_t} : population\n            v_b            = {v_b} : population\n            a              = {a} : population\n            b              = {b} : population\n            c              = {c} : population\n            d              = {d} : population\n            v_peak         = {v_peak} : population\n            tau_ampa       = {tau_ampa} : population\n            tau_gaba       = {tau_gaba} : population\n            E_ampa         = {E_ampa} : population\n            E_gaba         = {E_gaba} : population\n            I_app          = {I_app} # pA\n            increase_noise = {increase_noise} : population\n            rates_noise    = {rates_noise}\n        \"\"\"\n\n        syn = _syn_noisy\n        i_v = f\"I_app {_I_syn}\"\n        du = \"if v<v_b: -a * u else: a * (b * (v - v_b)**3 - u)\"\n\n        super().__init__(\n            parameters=parameters,\n            equations=_get_equation_izhikevich_2007(syn=syn, i_v=i_v, du=du),\n            spike=\"v >= v_peak\",\n            reset=\"\"\"\n                v = c\n                u = u + d\n            \"\"\",\n            
name=\"Izhikevich2007_FSI_noisy_AMPA\",\n            description=\"\"\"\n                Standard neuron model from Izhikevich (2007) with additional\n                conductance based synapses for AMPA and GABA currents with noise\n                in AMPA conductance.\n            \"\"\",\n        )\n\n        # For reporting\n        self._instantiated.append(True)\n
"},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.izhikevich_2007_like_nm.Izhikevich2007CorbitFsiNoisyAmpa","title":"Izhikevich2007CorbitFsiNoisyAmpa","text":"

Bases: Neuron

TEMPLATE

Izhikevich (2007)-like neuron model with conductance-based AMPA and GABA synapses with noise in the AMPA conductance. Additional slow currents were added to fit the striatal FSI neuron model from Corbit et al. (2016). The additional currents should allow the neuron to produce late spiking.

Parameters:

Name Type Description Default C float

Membrane capacitance.

20.0 k float

Scaling factor for the quadratic term in the membrane potential.

1.0 b_n float

Sensitivity of the slow current n to the difference between the slow current s and the recovery variable u.

0.1 a_s float

Time scale of the slow current s.

0.1 a_n float

Time scale of the slow current n.

0.1 v_r float

Resting membrane potential.

-55.0 v_t float

Instantaneous activation threshold potential.

-40.0 a float

Time scale of the recovery variable u.

0.1 b float

Sensitivity of the recovery variable u to the membrane potential v.

-2.0 c float

After-spike reset value of the membrane potential.

-50.0 d float

After-spike change of the recovery variable u.

2.0 v_peak float

Spike cut-off value for the membrane potential.

25.0 nonlin float

Nonlinearity of the input current. (1.0 = linear, 2.0 = square, etc.)

0.1 I_app float

External applied input current.

0.0 tau_ampa float

Time constant of the AMPA synapse.

2.0 tau_gaba float

Time constant of the GABA synapse.

5.0 E_ampa float

Reversal potential of the AMPA synapse.

0.0 E_gaba float

Reversal potential of the GABA synapse.

-80.0 increase_noise float

Increase of AMPA conductance due to noise (equal to a Poisson distributed spike train as input).

0.0 rates_noise float

Rate of the noise in the AMPA conductance.

0.0 Variables to record
  • g_ampa
  • g_gaba
  • I_v
  • v
  • u
  • s
  • n
  • r
Source code in CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py
class Izhikevich2007CorbitFsiNoisyAmpa(Neuron):\n    \"\"\"\n    TEMPLATE\n\n    [Izhikevich (2007)](https://isbnsearch.org/isbn/9780262090438)-like neuron model\n    with conductance-based AMPA and GABA synapses with noise in the AMPA conductance.\n    Additional slow currents were added to fit the striatal FSI neuron model from\n    [Corbit et al. (2016)](https://doi.org/10.1523/JNEUROSCI.0339-16.2016). The\n    additional currents should allow the neuron to produce late spiking.\n\n    Parameters:\n        C (float, optional):\n            Membrane capacitance.\n        k (float, optional):\n            Scaling factor for the quadratic term in the membrane potential.\n        b_n (float, optional):\n            Sensitivity of the slow current n to the difference between the slow current\n            s and the recovery variable u.\n        a_s (float, optional):\n            Time scale of the slow current s.\n        a_n (float, optional):\n            Time scale of the slow current n.\n        v_r (float, optional):\n            Resting membrane potential.\n        v_t (float, optional):\n            Instantaneous activation threshold potential.\n        a (float, optional):\n            Time scale of the recovery variable u.\n        b (float, optional):\n            Sensitivity of the recovery variable u to the the membrane potential v.\n        c (float, optional):\n            After-spike reset value of the membrane potential.\n        d (float, optional):\n            After-spike change of the recovery variable u.\n        v_peak (float, optional):\n            Spike cut-off value for the membrane potential.\n        nonlin (float, optional):\n            Nonlinearity of the input current. 
(1.0 = linear, 2.0 = square, etc.)\n        I_app (float, optional):\n            External applied input current.\n        tau_ampa (float, optional):\n            Time constant of the AMPA synapse.\n        tau_gaba (float, optional):\n            Time constant of the GABA synapse.\n        E_ampa (float, optional):\n            Reversal potential of the AMPA synapse.\n        E_gaba (float, optional):\n            Reversal potential of the GABA synapse.\n        increase_noise (float, optional):\n            Increase of AMPA conductance due to noise (equal to a Poisson distributed\n            spike train as input).\n        rates_noise (float, optional):\n            Rate of the noise in the AMPA conductance.\n\n    Variables to record:\n        - g_ampa\n        - g_gaba\n        - I_v\n        - v\n        - u\n        - s\n        - n\n        - r\n    \"\"\"\n\n    # For reporting\n    _instantiated = []\n\n    def __init__(\n        self,\n        C: float = 20.0,\n        k: float = 1.0,\n        b_n: float = 0.1,\n        a_s: float = 0.1,\n        a_n: float = 0.1,\n        v_r: float = -55.0,\n        v_t: float = -40.0,\n        a: float = 0.1,\n        b: float = -2.0,\n        c: float = -50.0,\n        d: float = 2.0,\n        v_peak: float = 25.0,\n        nonlin: float = 0.1,\n        I_app: float = 0.0,\n        tau_ampa: float = 2.0,\n        tau_gaba: float = 5.0,\n        E_ampa: float = 0.0,\n        E_gaba: float = -80.0,\n        increase_noise: float = 0.0,\n        rates_noise: float = 0.0,\n    ):\n        # Create the arguments\n        parameters = f\"\"\"\n            C              = {C} : population\n            k              = {k} : population\n            b_n            = {b_n} : population\n            a_s            = {a_s} : population\n            a_n            = {a_n} : population\n            v_r            = {v_r} : population\n            v_t            = {v_t} : population\n            a              = {a} : 
population\n            b              = {b} : population\n            c              = {c} : population\n            d              = {d} : population\n            v_peak         = {v_peak} : population\n            nonlin         = {nonlin} : population\n            tau_ampa       = {tau_ampa} : population\n            tau_gaba       = {tau_gaba} : population\n            E_ampa         = {E_ampa} : population\n            E_gaba         = {E_gaba} : population\n            I_app          = {I_app} # pA\n            increase_noise = {increase_noise} : population\n            rates_noise    = {rates_noise}\n        \"\"\"\n\n        syn = _syn_noisy\n        i_v = f\"root_func(I_app {_I_syn}, nonlin) - n\"\n        affix = \"\"\"\n            ds/dt     = a_s*(pos(u)**0.1 - s)\n            dn/dt     = a_n*(b_n*(pos(u)**0.1-s) - n)\n        \"\"\"\n\n        super().__init__(\n            parameters=parameters,\n            equations=_get_equation_izhikevich_2007(syn=syn, i_v=i_v, affix=affix),\n            functions=\"\"\"\n                root_func(x,y)=((abs(x))**(1/y))/((x+1e-20)/(abs(x)+ 1e-20))\n            \"\"\",\n            spike=\"v >= v_peak\",\n            reset=\"\"\"\n                v = c\n                u = u + d\n            \"\"\",\n            name=\"Izhikevich2007_Corbit_FSI_noisy_AMPA\",\n            description=\"\"\"\n                Standard neuron model from Izhikevich (2007) with additional\n                conductance based synapses for AMPA and GABA currents with noise\n                in AMPA conductance. Additional slow currents were added to fit\n                the striatal FSI neuron model from Corbit et al. (2016).\n            \"\"\",\n        )\n\n        # For reporting\n        self._instantiated.append(True)\n
"},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.izhikevich_2007_like_nm.Izhikevich2007CorbitFsiNoisyBase","title":"Izhikevich2007CorbitFsiNoisyBase","text":"

Bases: Neuron

TEMPLATE

Izhikevich (2007)-like neuron model with conductance-based AMPA and GABA synapses with noise in the baseline current. Additional slow currents were added to fit the striatal FSI neuron model from Corbit et al. (2016). The additional currents should allow the neuron to produce late spiking.

Parameters:

Name Type Description Default C float

Membrane capacitance.

20.0 k float

Scaling factor for the quadratic term in the membrane potential.

1.0 b_n float

Sensitivity of the slow current n to the difference between the slow current s and the recovery variable u.

0.1 a_s float

Time scale of the slow current s.

0.1 a_n float

Time scale of the slow current n.

0.1 v_r float

Resting membrane potential.

-55.0 v_t float

Instantaneous activation threshold potential.

-40.0 a float

Time scale of the recovery variable u.

0.1 b float

Sensitivity of the recovery variable u to the membrane potential v.

-2.0 c float

After-spike reset value of the membrane potential.

-50.0 d float

After-spike change of the recovery variable u.

2.0 v_peak float

Spike cut-off value for the membrane potential.

25.0 nonlin float

Nonlinearity of the input current. (1.0 = linear, 2.0 = square, etc.)

0.1 I_app float

External applied input current.

0.0 tau_ampa float

Time constant of the AMPA synapse.

2.0 tau_gaba float

Time constant of the GABA synapse.

5.0 E_ampa float

Reversal potential of the AMPA synapse.

0.0 E_gaba float

Reversal potential of the GABA synapse.

-80.0 base_mean float

Mean of the baseline current.

0.0 base_noise float

Standard deviation of the baseline current noise.

0.0 rate_base_noise float

Rate of the noise update (Poisson distributed) in the baseline current.

0.0 Variables to record
  • offset_base
  • I_base
  • g_ampa
  • g_gaba
  • I_v
  • v
  • u
  • s
  • n
  • r
Source code in CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py
class Izhikevich2007CorbitFsiNoisyBase(Neuron):\n    \"\"\"\n    TEMPLATE\n\n    [Izhikevich (2007)](https://isbnsearch.org/isbn/9780262090438)-like neuron model\n    with conductance-based AMPA and GABA synapses with noise in the baseline current.\n    Additional slow currents were added to fit the striatal FSI neuron model from\n    [Corbit et al. (2016)](https://doi.org/10.1523/JNEUROSCI.0339-16.2016). The\n    additional currents should allow the neuron to produce late spiking.\n\n    Parameters:\n        C (float, optional):\n            Membrane capacitance.\n        k (float, optional):\n            Scaling factor for the quadratic term in the membrane potential.\n        b_n (float, optional):\n            Sensitivity of the slow current n to the difference between the slow current\n            s and the recovery variable u.\n        a_s (float, optional):\n            Time scale of the slow current s.\n        a_n (float, optional):\n            Time scale of the slow current n.\n        v_r (float, optional):\n            Resting membrane potential.\n        v_t (float, optional):\n            Instantaneous activation threshold potential.\n        a (float, optional):\n            Time scale of the recovery variable u.\n        b (float, optional):\n            Sensitivity of the recovery variable u to the the membrane potential v.\n        c (float, optional):\n            After-spike reset value of the membrane potential.\n        d (float, optional):\n            After-spike change of the recovery variable u.\n        v_peak (float, optional):\n            Spike cut-off value for the membrane potential.\n        nonlin (float, optional):\n            Nonlinearity of the input current. 
(1.0 = linear, 2.0 = square, etc.)\n        I_app (float, optional):\n            External applied input current.\n        tau_ampa (float, optional):\n            Time constant of the AMPA synapse.\n        tau_gaba (float, optional):\n            Time constant of the GABA synapse.\n        E_ampa (float, optional):\n            Reversal potential of the AMPA synapse.\n        E_gaba (float, optional):\n            Reversal potential of the GABA synapse.\n        base_mean (float, optional):\n            Mean of the baseline current.\n        base_noise (float, optional):\n            Standard deviation of the baseline current noise.\n        rate_base_noise (float, optional):\n            Rate of the noise update (Poisson distributed) in the baseline current.\n\n    Variables to record:\n        - offset_base\n        - I_base\n        - g_ampa\n        - g_gaba\n        - I_v\n        - v\n        - u\n        - s\n        - n\n        - r\n    \"\"\"\n\n    # For reporting\n    _instantiated = []\n\n    def __init__(\n        self,\n        C: float = 20.0,\n        k: float = 1.0,\n        b_n: float = 0.1,\n        a_s: float = 0.1,\n        a_n: float = 0.1,\n        v_r: float = -55.0,\n        v_t: float = -40.0,\n        a: float = 0.1,\n        b: float = -2.0,\n        c: float = -50.0,\n        d: float = 2.0,\n        v_peak: float = 25.0,\n        nonlin: float = 0.1,\n        I_app: float = 0.0,\n        tau_ampa: float = 2.0,\n        tau_gaba: float = 5.0,\n        E_ampa: float = 0.0,\n        E_gaba: float = -80.0,\n        base_mean: float = 0.0,\n        base_noise: float = 0.0,\n        rate_base_noise: float = 0.0,\n    ):\n        # Create the arguments\n        parameters = f\"\"\"\n            C              = {C} : population\n            k              = {k} : population\n            b_n            = {b_n} : population\n            a_s            = {a_s} : population\n            a_n            = {a_n} : population\n            v_r      
      = {v_r} : population\n            v_t            = {v_t} : population\n            a              = {a} : population\n            b              = {b} : population\n            c              = {c} : population\n            d              = {d} : population\n            v_peak         = {v_peak} : population\n            nonlin         = {nonlin} : population\n            tau_ampa       = {tau_ampa} : population\n            tau_gaba       = {tau_gaba} : population\n            E_ampa         = {E_ampa} : population\n            E_gaba         = {E_gaba} : population\n            I_app          = {I_app} # pA\n            base_mean      = {base_mean}\n            base_noise     = {base_noise}\n            rate_base_noise = {rate_base_noise}\n        \"\"\"\n\n        syn = _syn_default\n        i_v = f\"root_func(I_app {_I_syn}, nonlin) - n + I_base\"\n        prefix = _I_base_noise\n        affix = \"\"\"\n            ds/dt     = a_s*(pos(u)**0.1 - s)\n            dn/dt     = a_n*(b_n*(pos(u)**0.1-s) - n)\n        \"\"\"\n\n        super().__init__(\n            parameters=parameters,\n            equations=_get_equation_izhikevich_2007(\n                syn=syn, i_v=i_v, prefix=prefix, affix=affix\n            ),\n            functions=\"\"\"\n                root_func(x,y)=((abs(x))**(1/y))/((x+1e-20)/(abs(x)+ 1e-20))\n            \"\"\",\n            spike=\"v >= v_peak\",\n            reset=\"\"\"\n                v = c\n                u = u + d\n            \"\"\",\n            name=\"Izhikevich2007_Corbit_FSI_noisy_base\",\n            description=\"\"\"\n                Standard neuron model from Izhikevich (2007) with additional\n                conductance based synapses for AMPA and GABA currents with noise\n                in the baseline current. Additional slow currents were added to fit\n                the striatal FSI neuron model from Corbit et al. 
(2016).\n            \"\"\",\n        )\n\n        # For reporting\n        self._instantiated.append(True)\n
"},{"location":"built_in/neuron_models/#CompNeuroPy.neuron_models.final_models.izhikevich_2007_like_nm.Izhikevich2007NoisyAmpaOscillating","title":"Izhikevich2007NoisyAmpaOscillating","text":"

Bases: Neuron

TEMPLATE

Izhikevich (2007)-like neuron model with conductance-based AMPA and GABA synapses with noise in the AMPA conductance. An additional oscillating current was added to the model.

Parameters:

Name Type Description Default C float

Membrane capacitance.

20.0 k float

Scaling factor for the quadratic term in the membrane potential.

1.0 v_r float

Resting membrane potential.

-55.0 v_t float

Instantaneous activation threshold potential.

-40.0 a float

Time scale of the recovery variable u.

0.1 b float

Sensitivity of the recovery variable u to the the membrane potential v.

-2.0 c float

After-spike reset value of the membrane potential.

-50.0 d float

After-spike change of the recovery variable u.

2.0 v_peak float

Spike cut-off value for the membrane potential.

25.0 I_app float

External applied input current.

0.0 tau_ampa float

Time constant of the AMPA synapse.

2.0 tau_gaba float

Time constant of the GABA synapse.

5.0 E_ampa float

Reversal potential of the AMPA synapse.

0.0 E_gaba float

Reversal potential of the GABA synapse.

-80.0 increase_noise float

Increase of AMPA conductance due to noise (equal to a Poisson distributed spike train as input).

0.0 rates_noise float

Rate of the noise in the AMPA conductance.

0.0 freq float

Frequency of the oscillating current.

0.0 amp float

Amplitude of the oscillating current.

300.0 Variables to record
  • osc
  • g_ampa
  • g_gaba
  • I_v
  • v
  • u
  • r
Source code in CompNeuroPy/neuron_models/final_models/izhikevich_2007_like_nm.py
class Izhikevich2007NoisyAmpaOscillating(Neuron):\n    \"\"\"\n    TEMPLATE\n\n    [Izhikevich (2007)](https://isbnsearch.org/isbn/9780262090438)-like neuron model\n    with conductance-based AMPA and GABA synapses with noise in the AMPA conductance.\n    An additional oscillating current was added to the model.\n\n    Parameters:\n        C (float, optional):\n            Membrane capacitance.\n        k (float, optional):\n            Scaling factor for the quadratic term in the membrane potential.\n        v_r (float, optional):\n            Resting membrane potential.\n        v_t (float, optional):\n            Instantaneous activation threshold potential.\n        a (float, optional):\n            Time scale of the recovery variable u.\n        b (float, optional):\n            Sensitivity of the recovery variable u to the the membrane potential v.\n        c (float, optional):\n            After-spike reset value of the membrane potential.\n        d (float, optional):\n            After-spike change of the recovery variable u.\n        v_peak (float, optional):\n            Spike cut-off value for the membrane potential.\n        I_app (float, optional):\n            External applied input current.\n        tau_ampa (float, optional):\n            Time constant of the AMPA synapse.\n        tau_gaba (float, optional):\n            Time constant of the GABA synapse.\n        E_ampa (float, optional):\n            Reversal potential of the AMPA synapse.\n        E_gaba (float, optional):\n            Reversal potential of the GABA synapse.\n        increase_noise (float, optional):\n            Increase of AMPA conductance due to noise (equal to a Poisson distributed\n            spike train as input).\n        rates_noise (float, optional):\n            Rate of the noise in the AMPA conductance.\n        freq (float, optional):\n            Frequency of the oscillating current.\n        amp (float, optional):\n            Amplitude of the oscillating 
current.\n\n    Variables to record:\n        - osc\n        - g_ampa\n        - g_gaba\n        - I_v\n        - v\n        - u\n        - r\n    \"\"\"\n\n    # For reporting\n    _instantiated = []\n\n    def __init__(\n        self,\n        C: float = 20.0,\n        k: float = 1.0,\n        v_r: float = -55.0,\n        v_t: float = -40.0,\n        a: float = 0.1,\n        b: float = -2.0,\n        c: float = -50.0,\n        d: float = 2.0,\n        v_peak: float = 25.0,\n        I_app: float = 0.0,\n        tau_ampa: float = 2.0,\n        tau_gaba: float = 5.0,\n        E_ampa: float = 0.0,\n        E_gaba: float = -80.0,\n        increase_noise: float = 0.0,\n        rates_noise: float = 0.0,\n        freq: float = 0.0,\n        amp: float = 300.0,\n    ):\n        # Create the arguments\n        parameters = f\"\"\"\n            C              = {C} : population\n            k              = {k} : population\n            v_r            = {v_r} : population\n            v_t            = {v_t} : population\n            a              = {a} : population\n            b              = {b} : population\n            c              = {c} : population\n            d              = {d} : population\n            v_peak         = {v_peak} : population\n            tau_ampa       = {tau_ampa} : population\n            tau_gaba       = {tau_gaba} : population\n            E_ampa         = {E_ampa} : population\n            E_gaba         = {E_gaba} : population\n            I_app          = {I_app} # pA\n            increase_noise = {increase_noise} : population\n            rates_noise    = {rates_noise}\n            freq           = {freq}\n            amp            = {amp}\n        \"\"\"\n\n        syn = _syn_noisy\n        i_v = f\"I_app {_I_syn} + osc\"\n        prefix = \"osc = amp * sin(t * 2 * pi * (freq  /1000))\"\n\n        super().__init__(\n            parameters=parameters,\n            equations=_get_equation_izhikevich_2007(syn=syn, i_v=i_v, 
prefix=prefix),\n            spike=\"v >= v_peak\",\n            reset=\"\"\"\n                v = c\n                u = u + d\n            \"\"\",\n            name=\"Izhikevich2007_noisy_AMPA_oscillating\",\n            description=\"\"\"\n                Standard neuron model from Izhikevich (2007) with additional\n                conductance based synapses for AMPA and GABA currents with noise\n                in AMPA conductance. An additional oscillating current was added\n                to the model.\n            \"\"\",\n        )\n\n        # For reporting\n        self._instantiated.append(True)\n
"},{"location":"built_in/synapse_models/","title":"Synapse Models","text":""},{"location":"built_in/synapse_models/#CompNeuroPy.synapse_models.synapse_models.FactorSynapse","title":"FactorSynapse","text":"

Bases: Synapse

Synapse which scales the transmitted value by a specified factor. Factor is equivalent to the connection weight if weight==1.

Parameters:

Name Type Description Default max_trans float

Maximum value that can be transmitted. Default: None.

None mod_factor float

Factor by which the weight value is multiplied. Default: 0.

0 Source code in CompNeuroPy/synapse_models/synapse_models.py
class FactorSynapse(Synapse):\n    \"\"\"\n    Synapse which scales the transmitted value by a specified factor. Factor is\n    equivalent to the connection weight if weight==1.\n\n    Parameters:\n        max_trans (float, optional):\n            Maximum value that can be transmitted. Default: None.\n        mod_factor (float, optional):\n            Factor by which the weight value is multiplied. Default: 0.\n    \"\"\"\n\n    def __init__(self, max_trans: None | float = None, mod_factor: float = 0):\n        super().__init__(\n            parameters=f\"\"\"\n            {f\"max_trans  = {max_trans}\" if max_trans is not None else \"\"}\n            mod_factor = {mod_factor}\n        \"\"\",\n            equations=\"\",\n            pre_spike=f\"\"\"\n            g_target += w * mod_factor {\": max = max_trans\" if max_trans is not None else \"\"}\n        \"\"\",\n            name=\"factor_synapse\",\n            description=\"\"\"\n            Synapse which scales the transmitted value by a specified factor. Factor is\n            equivalent to the connection weight if weight==1.\n        \"\"\",\n        )\n
"},{"location":"examples/dbs/","title":"DBS Simulator","text":""},{"location":"examples/dbs/#simple-example","title":"Simple example","text":""},{"location":"examples/dbs/#introduction","title":"Introduction","text":"

This example demonstrates how to use the DBSstimulator class to implement DBS in a network. It is shown how to create a DBSstimulator, how to use it and how to update pointers. In this simple example only the depolarization of the stimulated population is demostrated. All other possible DBS mechanisms are demonstrated in the other example dbs_stimulator.py.

"},{"location":"examples/dbs/#code","title":"Code","text":"
from ANNarchy import Population, Izhikevich, compile, simulate\nfrom CompNeuroPy import DBSstimulator\n\nfrom ANNarchy import setup\nfrom CompNeuroPy import CompNeuroMonitors, PlotRecordings\n\nsetup(dt=0.1)\n\n# create populations\npopulation1 = Population(10, neuron=Izhikevich, name=\"my_pop1\")\npopulation2 = Population(10, neuron=Izhikevich, name=\"my_pop2\")\n\n# create DBS stimulator\ndbs = DBSstimulator(\n    stimulated_population=population1,\n    population_proportion=0.5,\n    dbs_depolarization=30,\n    auto_implement=True,\n)\n\n# if you work with names of populations/projections everything will work, but if you\n# want to work with pointers you have to update them after calling the DBSstimulator\npopulation1, population2 = dbs.update_pointers(pointer_list=[population1, population2])\n\n# compile network\ncompile()\n\n# create monitors\nmonitors = CompNeuroMonitors({\"my_pop1\": \"v\", \"my_pop2\": \"v\"})\nmonitors.start()\n\n# run simulation\n# 1000 ms without dbs\nsimulate(1000)\n# 1000 ms with dbs\ndbs.on()\nsimulate(1000)\n# 1000 ms without dbs\ndbs.off()\nsimulate(1000)\n\n# plot recordings\nPlotRecordings(\n    figname=\"dbs_stimulator_simple.png\",\n    recordings=monitors.get_recordings(),\n    recording_times=monitors.get_recording_times(),\n    chunk=0,\n    shape=(2, 1),\n    plan={\n        \"position\": [1, 2],\n        \"compartment\": [\"my_pop1\", \"my_pop2\"],\n        \"variable\": [\"v\", \"v\"],\n        \"format\": [\"matrix\", \"matrix\"],\n    },\n)\n
"},{"location":"examples/dbs/#console-output","title":"Console Output","text":"
$ python dbs_stimulator_simple.py \nANNarchy 4.7 (4.7.3b) on linux (posix).\nCompiling ...  OK \nGenerate fig dbs_stimulator_simple.png... Done\n
"},{"location":"examples/dbs/#complex-example","title":"Complex Example","text":""},{"location":"examples/dbs/#introduction_1","title":"Introduction","text":"

In this example, the DBS stimulator is tested with a simple spiking and rate-coded model. The spiking model is based on the Izhikevich model with conductance-based synapses. The rate-coded model is based on neurons including membrane potential and a resulting firing rate. The DBS stimulator is tested with different stimulation parameters. The resulting activity of the populations is compared to the expected activity (not part of example, included for testing purposes only). The resulting activity of the populations is plotted. The figures are saved in the DBS_spiking_figure and DBS_rate_figure folders. The different DBS conditions are: - no stimulation - orthodromic stimulation of efferents - orthodromic stimulation of afferents - orthodromic stimulation of efferents and afferents - orthodromic stimulation of passing fibres - depolarization of the stimulated population - antidromic stimulation of efferents - antidromic stimulation of afferents - antidromic stimulation of efferents and afferents - antidromic stimulation of passing fibres - antidromic stimulation of passing fibres with lower strength - full dbs stimulation - full dbs stimulation without axon spikes (only effective for spiking model) - full dbs stimulation without axon_rate_amp (only effective for rate-coded model)

Warning

For rate-coded models, antidromic stimulation of projections is not available.

"},{"location":"examples/dbs/#code_1","title":"Code","text":"
from ANNarchy import (\n    Neuron,\n    Population,\n    setup,\n    simulate,\n    Projection,\n    get_population,\n    get_projection,\n    DefaultRateCodedSynapse,\n    DefaultSpikingSynapse,\n    dt,\n    Constant,\n)\nfrom CompNeuroPy import (\n    CompNeuroMonitors,\n    PlotRecordings,\n    CompNeuroModel,\n    cnp_clear,\n    DBSstimulator,\n)\nfrom CompNeuroPy.monitors import RecordingTimes\nimport numpy as np\n\n### setup ANNarchy\nsetup(dt=0.1, seed=12345)\n\n\n### create dbs test model\nclass dbs_test_model_class:\n    \"\"\"\n    Class to create dbs test model.\n\n    The used neuron models have the following constraints:\n        The neuron model has to contain the following parameters:\n        - base_mean: mean of the base current\n        - base_noise: standard deviation of the base current noise\n        Spiking neuron models have to contain conductance based synapses using the\n        following conductance variables:\n        - g_ampa: excitatory synapse\n        - g_gaba: inhibitory synapse\n        Rate neuron models have to contain the following input variables:\n        - sum(ampa): excitatory input\n        - sum(gaba): inhibitory input\n        For DBS rate-coded models have to contain a membrane potential variable mp\n        and spiking models have to be Izhihkevich models.\n\n    Model structure:\n    -------------------------\n            POP1       POP2\n            |          |\n            o          v\n    DBS--->POP3------oPOP4\n                .----.\n                |    |\n            POP5   '-->POP6\n\n    -o = inhibitory synapse\n    -> = excitatory synapse\n    .-> = passing fibre excitatory synapse\n\n    Attributes:\n        model (CompNeuroModel):\n            dbs test model\n    \"\"\"\n\n    def __init__(self, mode) -> None:\n        \"\"\"\n        Initialize dbs test model\n\n        Args:\n            mode (str):\n                Mode of the dbs test model, either \"spiking\" or \"rate-coded\"\n        \"\"\"\n     
   ### constants should still be available after DBSstimulator recreates the model\n        ### test this by creating this constant\n        Constant(\"my_important_const\", 0.0)\n\n        ### check if model to create is spiking or rate-coded\n        if mode == \"spiking\":\n            self.model = CompNeuroModel(\n                model_creation_function=self.create_model,\n                model_kwargs={\n                    \"neuron_model\": self.get_neuron_model_spiking(),\n                    \"base_current_list\": [40, 100, 200, 50, 40, 40],\n                    \"base_current_noise\": 40,\n                },\n                name=\"dbs_test_spiking\",\n                description=\"Simple spiking model to test dbs\",\n                do_compile=False,\n            )\n        elif mode == \"rate-coded\":\n            self.model = CompNeuroModel(\n                model_creation_function=self.create_model,\n                model_kwargs={\n                    \"neuron_model\": self.get_neuron_model_rate_coded(),\n                    \"base_current_list\": [0.35, 0.7, 1.1, 0.85, 0.35, 0.35],\n                    \"base_current_noise\": 0.01,\n                    \"weight_list\": [0.3, 0.4, 0.3, 0.1],\n                    \"prob_list\": [0.5, 0.7, 0.7, 0.5],\n                },\n                name=\"dbs_test_rate-coded\",\n                description=\"Simple rate-coded model to test dbs\",\n                do_compile=False,\n            )\n        else:\n            raise ValueError(\"Neuron model not recognized\")\n\n    def create_model(\n        self,\n        neuron_model: Neuron,\n        pop_size: int = 10,\n        base_current_list: list = [0, 0, 0, 0, 0, 0],\n        base_current_noise: float = 0.0,\n        prob_list: list = [0.5, 0.5, 0.5, 0.5],\n        weight_list: list = [1.0, 1.0, 1.0, 1.0],\n    ):\n        \"\"\"\n        Create dbs test model\n\n        Args:\n            neuron_model (Neuron):\n                Neuron model to use for the dbs 
test model\n            pop_size (int, optional):\n                Number of neurons in each population. Default: 10\n            base_current_list (list, optional):\n                List of base currents for the four populations.\n                Default: [0, 0, 0, 0, 0, 0]\n            base_current_noise (float, optional):\n                Standard deviation of the base current noise. Default: 0\n            prob_list (list, optional):\n                List of connection probabilities for the inhibitory and excitatory path.\n                Default: [0.5, 0.5, 0.5, 0.5]\n            weight_list (list, optional):\n                List of connection weights for the inhibitory and excitatory path.\n                Default: [0.1, 0.1, 0.1, 0.1]\n        \"\"\"\n        ### create populations\n        pop1 = Population(pop_size, neuron_model, name=f\"pop1_{neuron_model.name}\")\n        pop2 = Population(pop_size, neuron_model, name=f\"pop2_{neuron_model.name}\")\n        pop3 = Population(pop_size, neuron_model, name=f\"pop3_{neuron_model.name}\")\n        pop4 = Population(pop_size, neuron_model, name=f\"pop4_{neuron_model.name}\")\n        pop5 = Population(pop_size, neuron_model, name=f\"pop5_{neuron_model.name}\")\n        pop6 = Population(pop_size, neuron_model, name=f\"pop6_{neuron_model.name}\")\n\n        ### create projections of inhhibitory path\n        proj_1_3 = Projection(\n            pre=pop1,\n            post=pop3,\n            target=\"gaba\",\n            name=f\"proj_1_3_{neuron_model.name}\",\n            synapse=self.get_synapse(neuron_model.name),\n        )\n        proj_1_3.connect_fixed_probability(\n            probability=prob_list[0],\n            weights=weight_list[0],\n        )\n        proj_3_4 = Projection(\n            pre=pop3,\n            post=pop4,\n            target=\"gaba\",\n            name=f\"proj_3_4_{neuron_model.name}\",\n            synapse=self.get_synapse(neuron_model.name),\n        )\n        
proj_3_4.connect_fixed_probability(\n            probability=prob_list[1],\n            weights=weight_list[1],\n        )\n        ### create projections of excitatory path\n        proj_2_4 = Projection(\n            pre=pop2,\n            post=pop4,\n            target=\"ampa\",\n            name=f\"proj_2_4_{neuron_model.name}\",\n            synapse=self.get_synapse(neuron_model.name),\n        )\n        proj_2_4.connect_fixed_probability(\n            probability=prob_list[2],\n            weights=weight_list[2],\n        )\n        ### create projection of passing fibres\n        proj_5_6 = Projection(\n            pre=pop5,\n            post=pop6,\n            target=\"ampa\",\n            name=f\"proj_5_6_{neuron_model.name}\",\n            synapse=self.get_synapse(neuron_model.name),\n        )\n        proj_5_6.connect_fixed_probability(\n            probability=prob_list[3],\n            weights=weight_list[3],\n        )\n\n        ### set baseline activity parameters\n        pop1.base_mean = base_current_list[0]\n        pop2.base_mean = base_current_list[1]\n        pop3.base_mean = base_current_list[2]\n        pop4.base_mean = base_current_list[3]\n        pop5.base_mean = base_current_list[4]\n        pop6.base_mean = base_current_list[5]\n        pop1.base_noise = base_current_noise\n        pop2.base_noise = base_current_noise\n        pop3.base_noise = base_current_noise\n        pop4.base_noise = base_current_noise\n        pop5.base_noise = base_current_noise\n        pop6.base_noise = base_current_noise\n\n    def get_neuron_model_spiking(self):\n        \"\"\"\n        Get neuron model with spiking dynamics\n\n        Returns\n            neuron_model (Neuron):\n                Neuron model with spiking dynamics\n        \"\"\"\n        neuron_model = Neuron(\n            parameters=\"\"\"\n                C      = 100     : population # pF\n                k      = 0.7     : population # pS * mV**-1\n                v_r    = -60     : 
population # mV\n                v_t    = -40     : population # mV\n                a      = 0.03     : population # ms**-1\n                b      = -2     : population # nS\n                c      = -50     : population # mV\n                d      = 100     : population # pA\n                v_peak = 35     : population # mV\n                I_app  = 0     # pA\n                tau_ampa = 10  : population # ms\n                tau_gaba = 10  : population # ms\n                E_ampa   = 0   : population # mV\n                E_gaba   = -90 : population # mV\n                base_mean       = 0 # pA\n                base_noise      = 0 # pA\n                rate_base_noise = 100 # Hz\n            \"\"\",\n            equations=\"\"\"\n                ### noisy base input\n                offset_base = ite(Uniform(0.0, 1.0) * 1000.0 / dt > rate_base_noise, offset_base, Normal(0., 1.) * base_noise)\n                I_base      = base_mean + offset_base + my_important_const\n                ### input conductances\n                dg_ampa/dt = -g_ampa/tau_ampa\n                dg_gaba/dt = -g_gaba/tau_gaba\n                ### input currents\n                I = I_app - g_ampa*neg(v - E_ampa) - g_gaba*pos(v - E_gaba) + I_base\n                ### membrane potential and recovery variable\n                C * dv/dt  = k*(v - v_r)*(v - v_t) - u + I\n                du/dt      = a*(b*(v - v_r) - u)\n            \"\"\",\n            spike=\"v >= v_peak\",\n            reset=\"\"\"\n                v = c\n                u = u + d\n            \"\"\",\n            name=\"spiking\",\n            description=\"\"\"\n                Simple neuron model equations from Izhikevich (2007) using regular-spiking parameters\n                with conductance-based AMPA and GABA synapses/currents.\n            \"\"\",\n        )\n        return neuron_model\n\n    def get_neuron_model_rate_coded(self):\n        \"\"\"\n        Get neuron model with rate-coded dynamics\n\n        
Returns:\n            neuron_model (Neuron):\n                Neuron model with rate-coded dynamics\n        \"\"\"\n        neuron_model = Neuron(\n            parameters=\"\"\"\n                tau = 10.0 : population\n                sigma = 0.6 : population\n                I_0 = 0.2 : population\n                I_app = 0.\n                base_mean       = 0\n                base_noise      = 0\n                rate_base_noise = 100 # Hz\n                # = (sigma*I_0 + I_0)/(sigma - sigma*I_0) : population\n                c = (0.6*0.2 + 0.2)/(0.6 - 0.6*0.2) : population\n            \"\"\",\n            equations=\"\"\"\n                ### noisy base input\n                offset_base = ite(Uniform(0.0, 1.0) * 1000.0 / dt > rate_base_noise, offset_base, Normal(0., 1.) * base_noise)\n                I_base      = base_mean + offset_base + my_important_const\n                ### input currents\n                I = sum(ampa) - sum(gaba) + I_base + I_app\n                ### membrane potential\n                tau * dmp/dt = -mp + I\n                mp_r = mp: min=-0.99*sigma\n                ### activation function\n                r = activation(mp_r,sigma,c) : max=1., min=0.\n            \"\"\",\n            name=\"rate-coded\",\n            functions=\"\"\"\n                activation(x,sigma,c) = ((sigma*x + x)/(sigma + x)) * (1 + c) - c\n            \"\"\",\n            description=\"Rate-coded neuron with excitatory (ampa) and inhibitory (gaba) inputs plus baseline and noise.\",\n        )\n        return neuron_model\n\n    def get_synapse(self, mode):\n        \"\"\"\n        Create a synapse.\n\n        Args:\n            mode (str):\n                Mode of the dbs test model, either \"spiking\" or \"rate-coded\"\n\n        Returns:\n            synapse (DefaultRateCodedSynapse or DefaultSpikingSynapse):\n                Synapse object\n        \"\"\"\n        if mode == \"rate-coded\":\n            return DefaultRateCodedSynapse()\n        elif 
mode == \"spiking\":\n            return DefaultSpikingSynapse()\n        else:\n            raise ValueError(\"Neuron model not recognized\")\n\n\ndef do_simulation(\n    mon: CompNeuroMonitors,\n    dbs: DBSstimulator,\n    dbs_val_list: list[list],\n    dbs_key_list: list[str],\n):\n    \"\"\"\n    Do the simulation\n\n    Args:\n        mon (CompNeuroMonitors):\n            CompNeuroMonitors object\n        dbs (DBSstimulator):\n            DBS stimulator object\n        dbs_val_list (list[list]):\n            List of lists with DBS stimulation values used by the dbs.on() function\n        dbs_key_list (list[str]):\n            List of DBS stimulation keys used by the dbs.on() function\n\n    Returns:\n        recordings (list):\n            List of recordings from the monitors\n        recording_times (RecordingTimes):\n            Recording times object\n    \"\"\"\n    ### run initial ramp up simulation\n    simulate(2000.0)\n\n    ### start monitors\n    mon.start()\n\n    ### loop over trials\n    for trial in range(len(dbs_val_list)):\n        ### 1000 ms with DBS off\n        simulate(1000.0)\n        ### 500 ms with DBS on\n        dbs.on(\n            **{\n                dbs_key_list[i]: dbs_val_list[trial][i]\n                for i in range(len(dbs_key_list))\n            }\n        )\n        simulate(500.0)\n        ### 1000 ms with DBS off\n        dbs.off()\n        simulate(1000.0)\n        mon.reset(model=False)\n\n    ### get data from monitors\n    recordings = mon.get_recordings()\n    recording_times = mon.get_recording_times()\n\n    return recordings, recording_times\n\n\ndef check_dbs_effects_spiking(\n    dbs_val_list: list[list],\n    recordings: list,\n    model: CompNeuroModel,\n    recording_times: RecordingTimes,\n):\n    \"\"\"\n    Check if the dbs effects are as expecteds.\n\n    Args:\n        dbs_val_list (list[list]):\n            List of lists with DBS stimulation values used by the dbs.on() function\n        recordings 
(list):\n            List of recordings from the monitors\n        model (CompNeuroModel):\n            Model used for the simulation\n        recording_times (RecordingTimes):\n            Recording times object\n    \"\"\"\n    ### effects_on_activity_list contains the expected effects of dbs on the activity of the populations for each trial\n    ### 0 means no effect, 1 means increase, -1 means decrease\n    effects_on_activity = [\n        [0, 0, 0, 0, 0, 0],\n        [0, 0, 0, -1, 0, 0],\n        [0, 0, -1, 1, 0, 0],\n        [0, 0, -1, -1, 0, 0],\n        [0, 0, 0, 0, 0, 1],\n        [0, 0, -1, 1, 0, 0],\n        [0, 0, -1, 1, 0, 0],\n        [-1, 0, 0, 0, 0, 0],\n        [-1, 0, -1, 1, 0, 0],\n        [0, 0, 0, 0, -1, 0],\n        [0, 0, 0, 0, 0, 0],\n        [-1, 0, -1, -1, -1, 1],\n        [0, 0, -1, 1, 0, 0],\n        [-1, 0, -1, -1, -1, 1],\n    ]\n    ### check if the expected effects are present in the data\n    effect_list = []\n    high_effect_list = []\n    low_effect_list = []\n    for trial_idx, trial in enumerate(range(len(dbs_val_list))):\n        effect_list.append([])\n        for pop_name_idx, pop_name in enumerate(model.populations):\n            v_arr = recordings[trial][f\"{pop_name};v\"]\n            ### mean over neurons\n            v_arr = np.mean(v_arr, axis=1)\n            ### mean of first period\n            v_mean_1 = np.mean(\n                v_arr[\n                    recording_times.idx_lims(chunk=trial)[0]\n                    + int(round(500 / dt())) : recording_times.idx_lims(chunk=trial)[0]\n                    + int(round(1000 / dt()))\n                ]\n            )\n            v_std_1 = np.std(\n                v_arr[\n                    recording_times.idx_lims(chunk=trial)[0]\n                    + int(round(500 / dt())) : recording_times.idx_lims(chunk=trial)[0]\n                    + int(round(1000 / dt()))\n                ]\n            )\n            ### mean of second period\n            v_mean_2 = 
np.mean(\n                v_arr[\n                    recording_times.idx_lims(chunk=trial)[0]\n                    + int(round(1000 / dt())) : recording_times.idx_lims(chunk=trial)[0]\n                    + int(round(1500 / dt()))\n                ]\n            )\n            ### mean of third period\n            v_mean_3 = np.mean(\n                v_arr[\n                    recording_times.idx_lims(chunk=trial)[0]\n                    + int(round(1500 / dt())) : recording_times.idx_lims(chunk=trial)[0]\n                    + int(round(2000 / dt()))\n                ]\n            )\n            v_std_3 = np.std(\n                v_arr[\n                    recording_times.idx_lims(chunk=trial)[0]\n                    + int(round(1500 / dt())) : recording_times.idx_lims(chunk=trial)[0]\n                    + int(round(2000 / dt()))\n                ]\n            )\n            ### get meand depending on dbs\n            mean_on = v_mean_2\n            mean_off = (v_mean_1 + v_mean_3) / 2\n            std_off = (v_std_1 + v_std_3) / 2\n            ### calculate effect\n            effect = (mean_on - mean_off) / std_off\n            if effect > 1:\n                high_effect_list.append(abs(effect))\n                effect = 1\n            elif effect < -1:\n                high_effect_list.append(abs(effect))\n                effect = -1\n            else:\n                low_effect_list.append(abs(effect))\n                effect = 0\n\n            effect_list[trial_idx].append(effect)\n\n    assert (\n        np.array(effects_on_activity).astype(int) == np.array(effect_list).astype(int)\n    ).all(), \"Effects on activity not as expected for spiking model\"\n\n\ndef check_dbs_effects_rate_coded(\n    dbs_val_list: list[list],\n    recordings: list,\n    model: CompNeuroModel,\n    recording_times: RecordingTimes,\n):\n    \"\"\"\n    Check if the dbs effects are as expected.\n\n    Args:\n        dbs_val_list (list[list]):\n            List of lists with 
DBS stimulation values used by the dbs.on() function\n        recordings (list):\n            List of recordings from the monitors\n        model (CompNeuroModel):\n            Model used for the simulation\n        recording_times (RecordingTimes):\n            Recording times object\n    \"\"\"\n    ### effects_on_activity_list contains the expected effects of dbs on the activity of the populations for each trial\n    ### 0 means no effect, 1 means increase, -1 means decrease\n    effects_on_activity = [\n        [0, 0, 0, 0, 0, 0],\n        [0, 0, 0, -1, 0, 0],\n        [0, 0, -1, 1, 0, 0],\n        [0, 0, -1, -1, 0, 0],\n        [0, 0, 0, 0, 0, 1],\n        [0, 0, -1, 1, 0, 0],\n        [0, 0, 0, 0, 0, 0],\n        [0, 0, 0, 0, 0, 0],\n        [0, 0, 0, 0, 0, 0],\n        [0, 0, 0, 0, 0, 0],\n        [0, 0, 0, 0, 0, 0],\n        [0, 0, -1, -1, 0, 1],\n        [0, 0, -1, -1, 0, 1],\n        [0, 0, -1, 1, 0, 0],\n    ]\n    ### check if the expected effects are present in the data\n    effect_list = []\n    high_effect_list = []\n    low_effect_list = []\n    for trial_idx, trial in enumerate(range(len(dbs_val_list))):\n        effect_list.append([])\n        for pop_name_idx, pop_name in enumerate(model.populations):\n            v_arr = recordings[trial][f\"{pop_name};r\"]\n            ### mean over neurons\n            v_arr = np.mean(v_arr, axis=1)\n            ### mean of first period\n            v_mean_1 = np.mean(\n                v_arr[\n                    recording_times.idx_lims(chunk=trial)[0]\n                    + int(round(500 / dt())) : recording_times.idx_lims(chunk=trial)[0]\n                    + int(round(1000 / dt()))\n                ]\n            )\n            v_std_1 = np.std(\n                v_arr[\n                    recording_times.idx_lims(chunk=trial)[0]\n                    + int(round(500 / dt())) : recording_times.idx_lims(chunk=trial)[0]\n                    + int(round(1000 / dt()))\n                ]\n            )\n        
    ### mean of second period\n            v_mean_2 = np.mean(\n                v_arr[\n                    recording_times.idx_lims(chunk=trial)[0]\n                    + int(round(1000 / dt())) : recording_times.idx_lims(chunk=trial)[0]\n                    + int(round(1500 / dt()))\n                ]\n            )\n            ### mean of third period\n            v_mean_3 = np.mean(\n                v_arr[\n                    recording_times.idx_lims(chunk=trial)[0]\n                    + int(round(1500 / dt())) : recording_times.idx_lims(chunk=trial)[0]\n                    + int(round(2000 / dt()))\n                ]\n            )\n            v_std_3 = np.std(\n                v_arr[\n                    recording_times.idx_lims(chunk=trial)[0]\n                    + int(round(1500 / dt())) : recording_times.idx_lims(chunk=trial)[0]\n                    + int(round(2000 / dt()))\n                ]\n            )\n            ### get meand depending on dbs\n            mean_on = v_mean_2\n            mean_off = (v_mean_1 + v_mean_3) / 2\n            std_off = (v_std_1 + v_std_3) / 2\n            ### calculate effect\n            effect = (mean_on - mean_off) / std_off\n            if effect > 2.5:\n                high_effect_list.append(abs(effect))\n                effect = 1\n            elif effect < -2.5:\n                high_effect_list.append(abs(effect))\n                effect = -1\n            else:\n                low_effect_list.append(abs(effect))\n                effect = 0\n\n            effect_list[trial_idx].append(effect)\n    assert (\n        np.array(effects_on_activity).astype(int) == np.array(effect_list).astype(int)\n    ).all(), \"Effects on activity not as expected for rate-coded model\"\n\n\ndef plot_spiking(\n    dbs_val_list: list[list],\n    recordings: list,\n    recording_times: RecordingTimes,\n    model: CompNeuroModel,\n    plotting: bool,\n):\n    \"\"\"\n    Plot spiking data.\n\n    Args:\n        dbs_val_list 
(list[list]):\n            List of lists with DBS stimulation values used by the dbs.on() function\n        recordings (list):\n            List of recordings from the monitors\n        recording_times (RecordingTimes):\n            Recording times object\n        model (CompNeuroModel):\n            Model used for the simulation\n        plotting (bool):\n            If True, plots are created\n    \"\"\"\n    if not plotting:\n        return\n\n    ### plot data\n    for trial in range(len(dbs_val_list)):\n        PlotRecordings(\n            figname=f\"DBS_spiking_figure/membrane_trial_{trial}.png\",\n            recordings=recordings,\n            recording_times=recording_times,\n            chunk=trial,\n            shape=(3, 2),\n            plan={\n                \"position\": np.arange(len(model.populations), dtype=int) + 1,\n                \"compartment\": model.populations,\n                \"variable\": [\"v\"] * len(model.populations),\n                \"format\": [\"matrix\"] * len(model.populations),\n            },\n            time_lim=(\n                recording_times.time_lims(chunk=trial)[0] + 500,\n                recording_times.time_lims(chunk=trial)[1] - 500,\n            ),\n        )\n        PlotRecordings(\n            figname=f\"DBS_spiking_figure/axon_spikes_{trial}.png\",\n            recordings=recordings,\n            recording_times=recording_times,\n            chunk=trial,\n            shape=(3, 2),\n            plan={\n                \"position\": np.arange(len(model.populations), dtype=int) + 1,\n                \"compartment\": model.populations,\n                \"variable\": [\"axon_spike\"] * len(model.populations),\n                \"format\": [\"raster\"] * len(model.populations),\n            },\n            time_lim=(\n                recording_times.time_lims(chunk=trial)[0] + 1000,\n                recording_times.time_lims(chunk=trial)[0] + 1030,\n            ),\n        )\n\n\ndef plot_rate_coded(\n    
dbs_val_list: list[list],\n    recordings: list,\n    recording_times: RecordingTimes,\n    model: CompNeuroModel,\n    plotting: bool,\n):\n    \"\"\"\n    Plot rate-coded data.\n\n    Args:\n        dbs_val_list (list[list]):\n            List of lists with DBS stimulation values used by the dbs.on() function\n        recordings (list):\n            List of recordings from the monitors\n        recording_times (RecordingTimes):\n            Recording times object\n        model (CompNeuroModel):\n            Model used for the simulation\n        plotting (bool):\n            If True, plots are created\n    \"\"\"\n    if not plotting:\n        return\n\n    ### plot data\n    for trial in range(len(dbs_val_list)):\n        PlotRecordings(\n            figname=f\"DBS_rate_figure/activity_trial_{trial}.png\",\n            recordings=recordings,\n            recording_times=recording_times,\n            chunk=trial,\n            shape=(3, 2),\n            plan={\n                \"position\": np.arange(len(model.populations), dtype=int) + 1,\n                \"compartment\": model.populations,\n                \"variable\": [\"r\"] * len(model.populations),\n                \"format\": [\"matrix\"] * len(model.populations),\n            },\n            time_lim=(\n                recording_times.time_lims(chunk=trial)[0] + 500,\n                recording_times.time_lims(chunk=trial)[1] - 500,\n            ),\n        )\n\n\ndef main(plotting: bool = False):\n    \"\"\"\n    Main function\n\n    Args:\n        plotting (bool, optional):\n            If True, plots are created. Default: False\n    \"\"\"\n    ### define simulations\n    ### i.e. 
the parameters for the dbs stimulator on function\n    ### do simulate calls repeatedly dbs.on() and dbs.off() with different parameters\n    ### specified in dbs_val_list\n    dbs_key_list = [\n        \"population_proportion\",\n        \"dbs_depolarization\",\n        \"orthodromic\",\n        \"antidromic\",\n        \"efferents\",\n        \"afferents\",\n        \"passing_fibres\",\n        \"passing_fibres_strength\",\n        \"axon_spikes_per_pulse\",\n        \"axon_rate_amp\",\n    ]\n    dbs_val_list = [\n        # 0 - nothing\n        [None, 0, False, False, False, False, False, 0.2, 1, 1],\n        # 1 - orthodromic efferents\n        [None, 0, True, False, True, False, False, 0.2, 1, 1],\n        # 2 - orthodromic afferents\n        [None, 0, True, False, False, True, False, 0.2, 1, 1],\n        # 3 - orthodromic efferents and afferents\n        [None, 0, True, False, True, True, False, 0.2, 1, 1],\n        # 4 - orthodromic passing fibres\n        [None, 0, True, False, False, False, True, 0.2, 1, 1],\n        # 5 - depolarization\n        [None, 100, False, False, False, False, False, 0.2, 1, 1],\n        # 6 - antidromic efferents\n        [None, 0, False, True, True, False, False, 0.2, 1, 1],\n        # 7 - antidromic afferents\n        [None, 0, False, True, False, True, False, 0.2, 1, 1],\n        # 8 - antidromic efferents and afferents\n        [None, 0, False, True, True, True, False, 0.2, 1, 1],\n        # 9 - antidromic passing fibres\n        [None, 0, False, True, False, False, True, 0.2, 1, 1],\n        # 10 - antidromic passing fibres lower strength\n        [None, 0, False, True, False, False, True, 0.01, 1, 1],\n        # 11 - all\n        [None, 100, True, True, True, True, True, 0.2, 1, 1],\n        # 12 - all without axon spikes, should not affect rate-coded model\n        [None, 100, True, True, True, True, True, 0.2, 0, 1],\n        # 13 - all without axon_rate_amp, should not affect spiking model\n        [None, 100, True, 
True, True, True, True, 0.2, 1, 0],\n    ]\n\n    spiking_model = True\n    rate_coded_model = True\n\n    if spiking_model:\n        ### create the spiking network\n        model = dbs_test_model_class(\"spiking\").model\n        dbs = DBSstimulator(\n            stimulated_population=get_population(\"pop3_spiking\"),\n            passing_fibres_list=[get_projection(\"proj_5_6_spiking\")],\n            passing_fibres_strength=0.2,\n            auto_implement=True,\n            model=model,\n        )\n        model = dbs.model\n\n        ### compile model\n        model.compile(compile_folder_name=\"DBS_test_spiking\")\n\n        ### create monitors\n        mon_dict = {}\n        for pop_name in model.populations:\n            mon_dict[pop_name] = [\"v\", \"spike\", \"axon_spike\"]\n        mon = CompNeuroMonitors(mon_dict)\n\n        ### run simulation and get data from monitors\n        recordings, recording_times = do_simulation(\n            mon, dbs, dbs_val_list, dbs_key_list\n        )\n\n        ### plot data\n        plot_spiking(\n            dbs_val_list=dbs_val_list,\n            recordings=recordings,\n            recording_times=recording_times,\n            model=model,\n            plotting=plotting,\n        )\n\n        ### check dbs effects\n        check_dbs_effects_spiking(\n            dbs_val_list,\n            recordings,\n            model,\n            recording_times,\n        )\n\n    if rate_coded_model:\n        ### create the rate-coded network\n        cnp_clear()\n        model = dbs_test_model_class(\"rate-coded\").model\n        dbs = DBSstimulator(\n            stimulated_population=get_population(\"pop3_rate-coded\"),\n            passing_fibres_list=[get_projection(\"proj_5_6_rate-coded\")],\n            passing_fibres_strength=0.2,\n            model=model,\n            auto_implement=True,\n        )\n        model = dbs.model\n\n        ### compile model\n        
model.compile(compile_folder_name=\"DBS_test_rate_coded\")\n\n        ### create monitors\n        mon_dict = {}\n        for pop_name in model.populations:\n            mon_dict[pop_name] = [\"r\"]\n        mon = CompNeuroMonitors(mon_dict)\n\n        ### run simulation and get data from monitors\n        recordings, recording_times = do_simulation(\n            mon, dbs, dbs_val_list, dbs_key_list\n        )\n\n        ### plot data\n        plot_rate_coded(\n            dbs_val_list=dbs_val_list,\n            recordings=recordings,\n            recording_times=recording_times,\n            model=model,\n            plotting=plotting,\n        )\n\n        ### check dbs effects\n        check_dbs_effects_rate_coded(\n            dbs_val_list,\n            recordings,\n            model,\n            recording_times,\n        )\n    return 1\n\n\nif __name__ == \"__main__\":\n    main(plotting=True)\n
"},{"location":"examples/dbs/#console-output_1","title":"Console Output","text":"
$ python dbs_stimulator.py \nANNarchy 4.7 (4.7.3b) on linux (posix).\n\nWARNING during compile of model dbs_test_spiking_dbs: There are initialized models which are not created, thus not compiled! models:\ndbs_test_spiking\n\nCompiling ...  OK \nGenerate fig DBS_spiking_figure/membrane_trial_0.png... Done\n\nGenerate fig DBS_spiking_figure/axon_spikes_0.png... \n  WARNING PlotRecordings: pop1_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop2_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop3_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop4_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop5_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop6_spiking does not contain any spikes in the given time interval.\nDone\n\nGenerate fig DBS_spiking_figure/membrane_trial_1.png... Done\n\nGenerate fig DBS_spiking_figure/axon_spikes_1.png... \n  WARNING PlotRecordings: pop1_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop2_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop4_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop5_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop6_spiking does not contain any spikes in the given time interval.\nDone\n\nGenerate fig DBS_spiking_figure/membrane_trial_2.png... Done\n\nGenerate fig DBS_spiking_figure/axon_spikes_2.png... 
\n  WARNING PlotRecordings: pop2_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop3_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop4_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop5_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop6_spiking does not contain any spikes in the given time interval.\nDone\n\nGenerate fig DBS_spiking_figure/membrane_trial_3.png... Done\n\nGenerate fig DBS_spiking_figure/axon_spikes_3.png... \n  WARNING PlotRecordings: pop2_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop4_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop5_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop6_spiking does not contain any spikes in the given time interval.\nDone\n\nGenerate fig DBS_spiking_figure/membrane_trial_4.png... Done\n\nGenerate fig DBS_spiking_figure/axon_spikes_4.png... \n  WARNING PlotRecordings: pop1_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop2_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop3_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop4_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop6_spiking does not contain any spikes in the given time interval.\nDone\n\nGenerate fig DBS_spiking_figure/membrane_trial_5.png... Done\n\nGenerate fig DBS_spiking_figure/axon_spikes_5.png... 
\n  WARNING PlotRecordings: pop1_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop2_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop3_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop4_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop5_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop6_spiking does not contain any spikes in the given time interval.\nDone\n\nGenerate fig DBS_spiking_figure/membrane_trial_6.png... Done\n\nGenerate fig DBS_spiking_figure/axon_spikes_6.png... \n  WARNING PlotRecordings: pop1_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop2_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop4_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop5_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop6_spiking does not contain any spikes in the given time interval.\nDone\n\nGenerate fig DBS_spiking_figure/membrane_trial_7.png... Done\n\nGenerate fig DBS_spiking_figure/axon_spikes_7.png... \n  WARNING PlotRecordings: pop2_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop3_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop4_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop5_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop6_spiking does not contain any spikes in the given time interval.\nDone\n\nGenerate fig DBS_spiking_figure/membrane_trial_8.png... Done\n\nGenerate fig DBS_spiking_figure/axon_spikes_8.png... 
\n  WARNING PlotRecordings: pop2_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop4_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop5_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop6_spiking does not contain any spikes in the given time interval.\nDone\n\nGenerate fig DBS_spiking_figure/membrane_trial_9.png... Done\n\nGenerate fig DBS_spiking_figure/axon_spikes_9.png... \n  WARNING PlotRecordings: pop1_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop2_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop3_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop4_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop6_spiking does not contain any spikes in the given time interval.\nDone\n\nGenerate fig DBS_spiking_figure/membrane_trial_10.png... Done\n\nGenerate fig DBS_spiking_figure/axon_spikes_10.png... \n  WARNING PlotRecordings: pop1_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop2_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop3_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop4_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop6_spiking does not contain any spikes in the given time interval.\nDone\n\nGenerate fig DBS_spiking_figure/membrane_trial_11.png... Done\n\nGenerate fig DBS_spiking_figure/axon_spikes_11.png... 
\n  WARNING PlotRecordings: pop2_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop4_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop6_spiking does not contain any spikes in the given time interval.\nDone\n\nGenerate fig DBS_spiking_figure/membrane_trial_12.png... Done\n\nGenerate fig DBS_spiking_figure/axon_spikes_12.png... \n  WARNING PlotRecordings: pop1_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop2_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop3_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop4_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop5_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop6_spiking does not contain any spikes in the given time interval.\nDone\n\nGenerate fig DBS_spiking_figure/membrane_trial_13.png... Done\n\nGenerate fig DBS_spiking_figure/axon_spikes_13.png... \n  WARNING PlotRecordings: pop2_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop4_spiking does not contain any spikes in the given time interval.\n\n  WARNING PlotRecordings: pop6_spiking does not contain any spikes in the given time interval.\nDone\n\n\nWARNING during compile of model dbs_test_rate-coded_dbs: There are initialized models which are not created, thus not compiled! models:\ndbs_test_spiking\ndbs_test_spiking_dbs\ndbs_test_rate-coded\n\nCompiling ...  OK \nGenerate fig DBS_rate_figure/activity_trial_0.png... Done\n\nGenerate fig DBS_rate_figure/activity_trial_1.png... Done\n\nGenerate fig DBS_rate_figure/activity_trial_2.png... Done\n\nGenerate fig DBS_rate_figure/activity_trial_3.png... Done\n\nGenerate fig DBS_rate_figure/activity_trial_4.png... 
Done\n\nGenerate fig DBS_rate_figure/activity_trial_5.png... Done\n\nGenerate fig DBS_rate_figure/activity_trial_6.png... Done\n\nGenerate fig DBS_rate_figure/activity_trial_7.png... Done\n\nGenerate fig DBS_rate_figure/activity_trial_8.png... Done\n\nGenerate fig DBS_rate_figure/activity_trial_9.png... Done\n\nGenerate fig DBS_rate_figure/activity_trial_10.png... Done\n\nGenerate fig DBS_rate_figure/activity_trial_11.png... Done\n\nGenerate fig DBS_rate_figure/activity_trial_12.png... Done\n\nGenerate fig DBS_rate_figure/activity_trial_13.png... Done\n
"},{"location":"examples/experiment/","title":"Define Experiments","text":""},{"location":"examples/experiment/#introduction","title":"Introduction","text":"

This example demonstrates how to use the CompNeuroExp class to combine simulations, model and recordings in an experiment. It is shown how to define an experiment, how to run it and how to get the results.

"},{"location":"examples/experiment/#code","title":"Code","text":"
from CompNeuroPy import (\n    CompNeuroExp,\n    CompNeuroSim,\n    CompNeuroMonitors,\n    CompNeuroModel,\n    current_step,\n    current_ramp,\n    PlotRecordings,\n)\nfrom CompNeuroPy.full_models import HHmodelBischop\nfrom ANNarchy import dt, setup, get_population\n\n\n### combine both simulations and recordings in an experiment\nclass MyExp(CompNeuroExp):\n    \"\"\"\n    Define an experiment by inheriting from CompNeuroExp.\n\n    CompNeuroExp provides the attributes:\n\n        monitors (CompNeuroMonitors):\n            a CompNeuroMonitors object to do recordings, define during init otherwise\n            None\n        data (dict):\n            a dictionary for storing any optional data\n\n    and the functions:\n        reset():\n            resets the model and monitors\n        results():\n            returns a results object\n    \"\"\"\n\n    def __init__(\n        self,\n        model: CompNeuroModel,\n        sim_step: CompNeuroSim,\n        sim_ramp: CompNeuroSim,\n        monitors: CompNeuroMonitors,\n    ):\n        \"\"\"\n        Initialize the experiment and additionally store the model and simulations.\n\n        Args:\n            model (CompNeuroModel):\n                a CompNeuroModel object\n            sim_step (CompNeuroSim):\n                a CompNeuroSim object for the step simulation\n            sim_ramp (CompNeuroSim):\n                a CompNeuroSim object for the ramp simulation\n            monitors (CompNeuroMonitors):\n                a CompNeuroMonitors object\n        \"\"\"\n        self.model = model\n        self.sim_step = sim_step\n        self.sim_ramp = sim_ramp\n        super().__init__(monitors)\n\n    def run(self, E_L: float = -68.0):\n        \"\"\"\n        Do the simulations and recordings.\n\n        To use the CompNeuroExp class, you need to define a run function which\n        does the simulations and recordings. 
The run function should return the\n        results object which can be obtained by calling self.results().\n\n        Args:\n            E_L (float, optional):\n                leak reversal potential of the population, which is set at the beginning\n                of the experiment run. Default: -68 mV\n\n        Returns:\n            results (CompNeuroExp._ResultsCl):\n                results object with attributes:\n                    recordings (list):\n                        list of recordings\n                    recording_times (recording_times_cl):\n                        recording times object\n                    mon_dict (dict):\n                        dict of recorded variables of the monitors\n                    data (dict):\n                        dict with optional data stored during the experiment\n        \"\"\"\n        ### call reset at the beginning of the experiment to ensure that the model\n        ### is in the same state at the beginning of each experiment run\n        self.reset()\n\n        ### also always start the monitors, they are stopped automatically at the end\n        self.monitors.start()\n\n        ### set the leak reversal potential of the population, be aware that this\n        ### will be undone by the reset function if you don't set the parameters\n        ### argument to False\n        get_population(self.model.populations[0]).E_L = E_L\n\n        ### SIMULATION START\n        sim_step.run()\n        ### if you want to reset the model, you should use the objects reset()\n        ### it's the same as the ANNarchy reset + it resets the CompNeuroMonitors\n        ### creating a new chunk, optionally not changing the parameters\n        self.reset(parameters=False)\n        sim_ramp.run()\n        ### SIMULATION END\n\n        ### optional: store anything you want in the data dict, for example information\n        ### about the simulations\n        self.data[\"sim\"] = [sim_step.simulation_info(), 
sim_ramp.simulation_info()]\n        self.data[\"population_name\"] = self.model.populations[0]\n        self.data[\"time_step\"] = dt()\n\n        ### return results using self.results()\n        return self.results()\n\n\nif __name__ == \"__main__\":\n    ### create and compile a model\n    setup(dt=0.01)\n    model = HHmodelBischop()\n\n    ### define recordings before experiment\n    monitors = CompNeuroMonitors({model.populations[0]: [\"v\"]})\n\n    ### define some simulations e.g. using CompNeuroSim\n    sim_step = CompNeuroSim(\n        simulation_function=current_step,\n        simulation_kwargs={\n            \"pop\": model.populations[0],\n            \"t1\": 500,\n            \"t2\": 500,\n            \"a1\": 0,\n            \"a2\": 50,\n        },\n    )\n    sim_ramp = CompNeuroSim(\n        simulation_function=current_ramp,\n        simulation_kwargs={\n            \"pop\": model.populations[0],\n            \"a0\": 0,\n            \"a1\": 100,\n            \"dur\": 1000,\n            \"n\": 50,\n        },\n    )\n\n    ### init and run the experiment\n    my_exp = MyExp(monitors=monitors, model=model, sim_step=sim_step, sim_ramp=sim_ramp)\n\n    ### one use case is to run an experiment multiple times e.g. 
with different\n    ### parameters\n    results_run1 = my_exp.run()\n    results_run2 = my_exp.run(E_L=-90.0)\n\n    ### plot of the membrane potential from the first and second chunk using results\n    ### experiment run 1\n    PlotRecordings(\n        figname=\"example_experiment_sim_step.png\",\n        recordings=results_run1.recordings,\n        recording_times=results_run1.recording_times,\n        chunk=0,\n        shape=(1, 1),\n        plan={\n            \"position\": [1],\n            \"compartment\": [results_run1.data[\"population_name\"]],\n            \"variable\": [\"v\"],\n            \"format\": [\"line\"],\n        },\n    )\n    PlotRecordings(\n        figname=\"example_experiment_sim_ramp.png\",\n        recordings=results_run1.recordings,\n        recording_times=results_run1.recording_times,\n        chunk=1,\n        shape=(1, 1),\n        plan={\n            \"position\": [1],\n            \"compartment\": [results_run1.data[\"population_name\"]],\n            \"variable\": [\"v\"],\n            \"format\": [\"line\"],\n        },\n    )\n    ### experiment run 2\n    PlotRecordings(\n        figname=\"example_experiment2_sim_step.png\",\n        recordings=results_run2.recordings,\n        recording_times=results_run2.recording_times,\n        chunk=0,\n        shape=(1, 1),\n        plan={\n            \"position\": [1],\n            \"compartment\": [results_run2.data[\"population_name\"]],\n            \"variable\": [\"v\"],\n            \"format\": [\"line\"],\n        },\n    )\n    PlotRecordings(\n        figname=\"example_experiment2_sim_ramp.png\",\n        recordings=results_run2.recordings,\n        recording_times=results_run2.recording_times,\n        chunk=1,\n        shape=(1, 1),\n        plan={\n            \"position\": [1],\n            \"compartment\": [results_run2.data[\"population_name\"]],\n            \"variable\": [\"v\"],\n            \"format\": [\"line\"],\n        },\n    )\n\n    ### print data and mon_dict 
from results\n    print(\"\\nrun1:\")\n    print(\"    data:\")\n    for key, value in results_run1.data.items():\n        print(f\"        {key}:\", value)\n    print(\"    mon_dict:\")\n    for key, value in results_run1.mon_dict.items():\n        print(f\"        {key}:\", value)\n    print(\"\\nrun2:\")\n    print(\"    data:\")\n    for key, value in results_run2.data.items():\n        print(f\"        {key}:\", value)\n    print(\"    mon_dict:\")\n    for key, value in results_run2.mon_dict.items():\n        print(f\"        {key}:\", value)\n
"},{"location":"examples/experiment/#console-output","title":"Console Output","text":"
$ python experiment.py \nANNarchy 4.7 (4.7.3b) on linux (posix).\nCompiling ...  OK \nGenerate fig example_experiment_sim_step.png... Done\n\nGenerate fig example_experiment_sim_ramp.png... Done\n\nGenerate fig example_experiment2_sim_step.png... Done\n\nGenerate fig example_experiment2_sim_ramp.png... Done\n\n\nrun1:\n    data:\n        sim: [<CompNeuroPy.generate_simulation.SimInfo object at 0x7f4798dfb700>, <CompNeuroPy.generate_simulation.SimInfo object at 0x7f4798dfad40>]\n        population_name: HH_Bischop\n        time_step: 0.01\n    mon_dict:\n        HH_Bischop: ['v']\n\nrun2:\n    data:\n        sim: [<CompNeuroPy.generate_simulation.SimInfo object at 0x7f4798dfb700>, <CompNeuroPy.generate_simulation.SimInfo object at 0x7f4798dfad40>]\n        population_name: HH_Bischop\n        time_step: 0.01\n    mon_dict:\n        HH_Bischop: ['v']\n
"},{"location":"examples/generate_models/","title":"Generate Models","text":""},{"location":"examples/generate_models/#introduction","title":"Introduction","text":"

This example demonstrates how to use the CompNeuroModel class to create and compile models. It is shown how to define a model creation function, how to initialize, create, compile a model and how to get information about the model.

The model \"my_model\" is imported in other examples run_and_monitor_simulations.py.

"},{"location":"examples/generate_models/#code","title":"Code","text":"
from ANNarchy import Population\nfrom CompNeuroPy import CompNeuroModel\nfrom CompNeuroPy.neuron_models import PoissonNeuron\nfrom tabulate import tabulate\n\n\n### define model_creation_function\ndef two_poisson(params, a):\n    \"\"\"\n    Generates two Poisson neuron populations.\n\n    Args:\n        params (dict):\n            Dictionary containing some paramters for the model with following keys:\n                's1'/'s2' : sizes of pop1/pop2\n                'n1'/'n2' : names of pop1/pop2\n        a (int):\n            Unused parameter for demonstration purposes only.\n    \"\"\"\n    ### create two populations\n    Population(params[\"s1\"], neuron=PoissonNeuron, name=params[\"n1\"])\n    Population(params[\"s2\"], neuron=PoissonNeuron, name=params[\"n2\"])\n    ### print unused parameter\n    print(f\"created model, other parameters: {a}\")\n\n\n### Let's initialize a first model\n### define the parameters argument of the model creation function\nparams = {\"s1\": 3, \"s2\": 3, \"n1\": \"first_poisson\", \"n2\": \"second_poisson\"}\n\n### use CompNeuroModel to initialize the model, not create or compile it yet\nmy_model = CompNeuroModel(\n    model_creation_function=two_poisson,\n    model_kwargs={\n        \"params\": params,\n        \"a\": 1,\n    },\n    name=\"my_model\",\n    description=\"my simple Poisson neuron model\",\n    do_create=False,\n    do_compile=False,\n    compile_folder_name=\"annarchy_my_model\",\n)\n\n### this initialized the first model\n### we could now create and compile it, but we will do this inside main\n### it could also be imported in other scripts and then created/compiled there\n\n\ndef main():\n    ### initialize a second model\n    ### this time directly create it, but not compile it yet, models can only be created\n    ### if not compiled yet\n    params = {\"s1\": 1, \"s2\": 1, \"n1\": \"pop1\", \"n2\": \"pop2\"}\n    my_model2 = CompNeuroModel(\n        model_creation_function=two_poisson,\n        
model_kwargs={\"params\": params, \"a\": 2},\n        do_compile=False,\n    )\n\n    ### now create also first model, and compile everything (automatically since we did\n    ### not set do_compile=False)\n    my_model.create()\n\n    ### print some name, description, populations and projections of the models in\n    ### tabular form\n    models_data = [\n        [\n            my_model.name,\n            my_model.description,\n            my_model.populations,\n            my_model.projections,\n        ],\n        [\n            my_model2.name,\n            my_model2.description,\n            my_model2.populations,\n            my_model2.projections,\n        ],\n    ]\n    headers = [\"Model\", \"Description\", \"Populations\", \"Projections\"]\n    print(tabulate(models_data, headers, tablefmt=\"grid\"))\n\n    return 1\n\n\nif __name__ == \"__main__\":\n    main()\n
"},{"location":"examples/generate_models/#console-output","title":"Console Output","text":"
$ python create_model.py \nANNarchy 4.7 (4.7.3b) on linux (posix).\ncreated model, other parameters: 2\ncreated model, other parameters: 1\nCompiling ...  OK \n+----------+--------------------------------+-------------------------------------+---------------+\n| Model    | Description                    | Populations                         | Projections   |\n+==========+================================+=====================================+===============+\n| my_model | my simple Poisson neuron model | ['first_poisson', 'second_poisson'] | []            |\n+----------+--------------------------------+-------------------------------------+---------------+\n| model1   |                                | ['pop1', 'pop2']                    | []            |\n+----------+--------------------------------+-------------------------------------+---------------+\n
"},{"location":"examples/monitor_recordings/","title":"Monitor Recordings","text":""},{"location":"examples/monitor_recordings/#introduction","title":"Introduction","text":"

This example demonstrates how to use the CompNeuroMonitors class to record variables. It is shown how to start/pause monitors, how to split recordings into chunks and optionally reset the model and how to get recordings during and after simulation.

"},{"location":"examples/monitor_recordings/#code","title":"Code","text":"
from ANNarchy import Population, setup, simulate, compile\nfrom CompNeuroPy import (\n    CompNeuroMonitors,\n    PlotRecordings,\n)\nfrom CompNeuroPy.neuron_models import Izhikevich2007\n\n\ndef main():\n    ### setup ANNarchy timestep and create results folder\n    setup(dt=0.1)\n\n    ### first we create two populations, each consist of 1 neuron\n    Population(1, neuron=Izhikevich2007(I_app=0), name=\"my_pop1\")\n    Population(1, neuron=Izhikevich2007(I_app=52), name=\"my_pop2\")\n\n    ### compile\n    compile()\n\n    ### after compilation we can define the monitors using the monitor_dictionary\n    ### and the CompNeuroMonitors class\n    ### for my_pop1 we use a recording period of 2 ms\n    ### for my_pop2 we do not give a recording preiod, therefore record every timestep\n    monitor_dictionary = {\"my_pop1;2\": [\"v\", \"spike\"], \"my_pop2\": [\"v\"]}\n    mon = CompNeuroMonitors(monitor_dictionary)\n\n    ### In this part we demonstrate starting/pausing all monitors\n    ### simulate for 100 ms [0, 100]\n    simulate(100)\n\n    ### start all monitors and simulate for 100 ms [100, 200]\n    mon.start()\n    simulate(100)\n\n    ### pause all monitors and simulate for 100 ms [200, 300]\n    mon.pause()\n    simulate(100)\n\n    ### In this part we demonstrate starting single monitors\n    ### start only monitor for my_pop1 and simulate for 100 ms [300, 400]\n    mon.start(compartment_list=[\"my_pop1\"])\n    simulate(100)\n\n    ### start all monitors and simulate for 100 ms [400, 500]\n    mon.start()\n    simulate(100)\n\n    ### In this part we demonstrate pausing single monitors\n    ### pause monitor for my_pop1 and simulate for 100 ms [500, 600]\n    mon.pause(compartment_list=[\"my_pop1\"])\n    simulate(100)\n\n    ### start all monitors and simulate for 100 ms [600, 700]\n    mon.start()\n    simulate(100)\n\n    ### In this part we demonstrate chunking recordings by reset\n    ### reset WITHOUT model, creating new chunk --> first chunk [0, 
700]\n    ### also in this chunk do not record the first 100 ms\n    ### WITHOUT model --> time continues at 700 ms [700, 800]\n    mon.reset(model=False)\n    mon.pause()\n    simulate(100)\n\n    ### start all monitors and simulate for 700 ms [800, 1500]\n    mon.start()\n    simulate(700)\n\n    ### reset WITH model, creating new chunk --> second chunk [700, 1500]\n    ### in third chunk time is reset to 0 ms\n    ### also in this chunk do not record the first 100 ms [0, 100]\n    mon.reset(model=True)\n    mon.pause()\n    simulate(100)\n\n    ### start all monitors and simulate for 700 ms [100, 800]\n    mon.start()\n    simulate(700)\n\n    ### Next we demonstrate getting recordings DURING SIMULATION by using\n    ### get_recordings_and_clear\n    ### this also resets the monitors back to their initialized state, i.e. there are no\n    ### recordings and they are not started yet\n    ### recordings1 consists of 3 chunks, third chunk [0, 800]\n    recordings1, recording_times1 = mon.get_recordings_and_clear()\n\n    ### Now continue simulation, creating NEW RECORDINGS, monitors are not started yet\n    ### model was not reset, so time continues at 800 ms\n    ### simulate for 100 ms [800, 900]\n    simulate(100)\n\n    ### start all monitors and simulate for 100 ms [900, 1000]\n    mon.start()\n    simulate(100)\n\n    ### reset monitors and model, creating new chunk --> first chunk [800, 1000]\n    ### simulate for 100 ms [0, 100]\n    mon.reset(model=True)\n    simulate(100)\n\n    ### get recordings using get_recordings_and_clear\n    ### this time directly start recording again\n    ### recordings2 consists of 2 chunks, second chunk [0, 100]\n    recordings2, recording_times2 = mon.get_recordings_and_clear()\n\n    ### Now continue simulation, creating NEW RECORDINGS\n    ### directly start monitors and reset model so time is reset to 0 ms\n    ### simulate for 100 ms [0, 100]\n    mon.start()\n    mon.reset(model=True)\n    simulate(100)\n\n    ### get 
recordings the normal way (simultions are finished)\n    ### recordings3 consists of 1 chunk [0, 100]\n    recordings3 = mon.get_recordings()\n    recording_times3 = mon.get_recording_times()\n\n    ### print the idx and time lims of the recordings and the sizes of the recorded\n    ### arrays\n    print(\"#################### ALL RECORDINGS INFO ####################\")\n    recordings_list = [recordings1, recordings2, recordings3]\n    for all_times_idx, all_times in enumerate(\n        [recording_times1.all(), recording_times2.all(), recording_times3.all()]\n    ):\n        print(f\"recordings{all_times_idx+1}\")\n        for chunk in range(len(all_times)):\n            print(f\"\\tchunk: {chunk}\")\n            for pop_name in [\"my_pop1\", \"my_pop2\"]:\n                print(f\"\\t\\tpop_name: {pop_name}\")\n                print(\n                    f\"\\t\\trecording_array_size: {recordings_list[all_times_idx][chunk][f'{pop_name};v'].shape}\"\n                )\n                for time_point in [\"start\", \"stop\"]:\n                    print(f\"\\t\\t\\ttime_point: {time_point}\")\n                    for unit in [\"ms\", \"idx\"]:\n                        print(f\"\\t\\t\\t\\tunit: {unit}\")\n                        for period in range(\n                            len(all_times[chunk][pop_name][time_point][unit])\n                        ):\n                            print(\n                                f\"\\t\\t\\t\\t\\tperiod {period}: {all_times[chunk][pop_name][time_point][unit][period]}\"\n                            )\n    print(\"#############################################################\")\n\n    ### plot recordings 1 consisting of 3 chunks\n    for chunk in range(len(recordings1)):\n        ### using plot_recordings which plots the recordings of one chunk\n        PlotRecordings(\n            figname=f\"monitor_recordings_1_chunk{chunk}.png\",\n            recordings=recordings1,\n            recording_times=recording_times1,\n         
   shape=(2, 2),\n            plan={\n                \"position\": [1, 2, 3],\n                \"compartment\": [\"my_pop1\", \"my_pop2\", \"my_pop1\"],\n                \"variable\": [\"v\", \"v\", \"spike\"],\n                \"format\": [\"line\", \"line\", \"raster\"],\n            },\n            chunk=chunk,\n        )\n\n    ### plot recordings 2 consisting of 2 chunks\n    for chunk in range(len(recordings2)):\n        ### using plot_recordings which plots the recordings of one chunk\n        PlotRecordings(\n            figname=f\"monitor_recordings_2_chunk{chunk}.png\",\n            recordings=recordings2,\n            recording_times=recording_times2,\n            shape=(2, 2),\n            plan={\n                \"position\": [1, 2, 3],\n                \"compartment\": [\"my_pop1\", \"my_pop2\", \"my_pop1\"],\n                \"variable\": [\"v\", \"v\", \"spike\"],\n                \"format\": [\"line\", \"line\", \"raster\"],\n            },\n            chunk=chunk,\n        )\n\n    ### plot recordings 3 consisting of 1 chunk\n    for chunk in range(len(recordings3)):\n        ### using plot_recordings which plots the recordings of one chunk\n        PlotRecordings(\n            figname=f\"monitor_recordings_3_chunk{chunk}.png\",\n            recordings=recordings3,\n            recording_times=recording_times3,\n            shape=(2, 2),\n            plan={\n                \"position\": [1, 2, 3],\n                \"compartment\": [\"my_pop1\", \"my_pop2\", \"my_pop1\"],\n                \"variable\": [\"v\", \"v\", \"spike\"],\n                \"format\": [\"line\", \"line\", \"raster\"],\n            },\n            chunk=chunk,\n        )\n\n    return 1\n\n\nif __name__ == \"__main__\":\n    main()\n
"},{"location":"examples/monitor_recordings/#conosole-output","title":"Conosole Output","text":"
$ python monitor_recordings.py \nANNarchy 4.7 (4.7.3b) on linux (posix).\nCompiling ...  OK \n#################### ALL RECORDINGS INFO ####################\nrecordings1\n    chunk: 0\n        pop_name: my_pop1\n        recording_array_size: (200, 1)\n            time_point: start\n                unit: ms\n                    period 0: 100.0\n                    period 1: 300.0\n                    period 2: 600.0\n                unit: idx\n                    period 0: 0\n                    period 1: 50\n                    period 2: 150\n            time_point: stop\n                unit: ms\n                    period 0: 198.0\n                    period 1: 498.0\n                    period 2: 698.0\n                unit: idx\n                    period 0: 49\n                    period 1: 149\n                    period 2: 199\n        pop_name: my_pop2\n        recording_array_size: (4000, 1)\n            time_point: start\n                unit: ms\n                    period 0: 100.0\n                    period 1: 400.0\n                unit: idx\n                    period 0: 0\n                    period 1: 1000\n            time_point: stop\n                unit: ms\n                    period 0: 199.9\n                    period 1: 699.9\n                unit: idx\n                    period 0: 999\n                    period 1: 3999\n    chunk: 1\n        pop_name: my_pop1\n        recording_array_size: (350, 1)\n            time_point: start\n                unit: ms\n                    period 0: 800.0\n                unit: idx\n                    period 0: 0\n            time_point: stop\n                unit: ms\n                    period 0: 1498.0\n                unit: idx\n                    period 0: 349\n        pop_name: my_pop2\n        recording_array_size: (7000, 1)\n            time_point: start\n                unit: ms\n                    period 0: 800.0\n                unit: idx\n                    period 0: 0\n            
time_point: stop\n                unit: ms\n                    period 0: 1499.9\n                unit: idx\n                    period 0: 6999\n    chunk: 2\n        pop_name: my_pop1\n        recording_array_size: (350, 1)\n            time_point: start\n                unit: ms\n                    period 0: 100.0\n                unit: idx\n                    period 0: 0\n            time_point: stop\n                unit: ms\n                    period 0: 798.0\n                unit: idx\n                    period 0: 349\n        pop_name: my_pop2\n        recording_array_size: (7000, 1)\n            time_point: start\n                unit: ms\n                    period 0: 100.0\n                unit: idx\n                    period 0: 0\n            time_point: stop\n                unit: ms\n                    period 0: 799.9\n                unit: idx\n                    period 0: 6999\nrecordings2\n    chunk: 0\n        pop_name: my_pop1\n        recording_array_size: (50, 1)\n            time_point: start\n                unit: ms\n                    period 0: 900.0\n                unit: idx\n                    period 0: 0\n            time_point: stop\n                unit: ms\n                    period 0: 998.0\n                unit: idx\n                    period 0: 49\n        pop_name: my_pop2\n        recording_array_size: (1000, 1)\n            time_point: start\n                unit: ms\n                    period 0: 900.0\n                unit: idx\n                    period 0: 0\n            time_point: stop\n                unit: ms\n                    period 0: 999.9\n                unit: idx\n                    period 0: 999\n    chunk: 1\n        pop_name: my_pop1\n        recording_array_size: (50, 1)\n            time_point: start\n                unit: ms\n                    period 0: 0.0\n                unit: idx\n                    period 0: 0\n            time_point: stop\n                unit: ms\n                    
period 0: 98.0\n                unit: idx\n                    period 0: 49\n        pop_name: my_pop2\n        recording_array_size: (1000, 1)\n            time_point: start\n                unit: ms\n                    period 0: 0.0\n                unit: idx\n                    period 0: 0\n            time_point: stop\n                unit: ms\n                    period 0: 99.9\n                unit: idx\n                    period 0: 999\nrecordings3\n    chunk: 0\n        pop_name: my_pop1\n        recording_array_size: (50, 1)\n            time_point: start\n                unit: ms\n                    period 0: 0.0\n                unit: idx\n                    period 0: 0\n            time_point: stop\n                unit: ms\n                    period 0: 98.0\n                unit: idx\n                    period 0: 49\n        pop_name: my_pop2\n        recording_array_size: (1000, 1)\n            time_point: start\n                unit: ms\n                    period 0: 0.0\n                unit: idx\n                    period 0: 0\n            time_point: stop\n                unit: ms\n                    period 0: 99.9\n                unit: idx\n                    period 0: 999\n#############################################################\nGenerate fig monitor_recordings_1_chunk0.png... \n  WARNING PlotRecordings: my_pop1 does not contain any spikes in the given time interval.\nDone\n\nGenerate fig monitor_recordings_1_chunk1.png... \n  WARNING PlotRecordings: my_pop1 does not contain any spikes in the given time interval.\nDone\n\nGenerate fig monitor_recordings_1_chunk2.png... \n  WARNING PlotRecordings: my_pop1 does not contain any spikes in the given time interval.\nDone\n\nGenerate fig monitor_recordings_2_chunk0.png... \n  WARNING PlotRecordings: my_pop1 does not contain any spikes in the given time interval.\nDone\n\nGenerate fig monitor_recordings_2_chunk1.png... Done\n\nGenerate fig monitor_recordings_3_chunk0.png... Done\n
"},{"location":"examples/opt_neuron/","title":"Optimize a neuron model","text":""},{"location":"examples/opt_neuron/#optimize-neuron-model-from-data","title":"Optimize neuron model from data","text":""},{"location":"examples/opt_neuron/#introduction","title":"Introduction","text":"

This example demonstrates how to use the OptNeuron class to fit an ANNarchy neuron model to some experimental data.

"},{"location":"examples/opt_neuron/#code","title":"Code","text":"
from CompNeuroPy import CompNeuroExp, CompNeuroSim, current_step, rmse\nfrom CompNeuroPy.opt_neuron import OptNeuron\nimport numpy as np\nfrom ANNarchy import Neuron, dt\n\n\n### in this example we want to fit an ANNarchy neuron model to some data (which ca be\n### somehow obtained by simulating the neuron and recording variables) for this example,\n### we have the following simple neuron model\nmy_neuron = Neuron(\n    parameters=\"\"\"\n        I_app = 0\n        a = 0 : population\n        b = 0 : population\n    \"\"\",\n    equations=\"\"\"\n        r = a*I_app + b\n    \"\"\",\n)\n\n\n### Now we need some \"experimental data\" which will be provided to the OptNeuron class\n### with the argument results_soll.\ndef get_experimental_data():\n    \"\"\"\n    Return experimental data.\n\n    Assume we have two recordings of the rate r of a single neuron from two different\n    current step experiments. Both have length = 1000 ms and after 500 ms the current is\n    changed, thus also the rate.\n\n    Returns:\n        return_dict (dict):\n            Dictionary with keys \"results_soll\" and \"time_step\" and values the\n            experimental data and the time step in ms with which the date was obtained,\n            respectively.\n    \"\"\"\n    r_arr = np.empty((2, 1000))\n    ### first recording\n    r_arr[0, :500] = 2\n    r_arr[0, 500:] = 6\n    ### second recording\n    r_arr[1, :500] = 2\n    r_arr[1, 500:] = 10\n    ### time step in ms\n    time_step = 1\n\n    return_dict = {\"results_soll\": r_arr, \"time_step\": time_step}\n    return return_dict\n\n\n### We know how our experimental data was obtained. 
This is what we have to define as an\n### CompNeuroExp for the OptNeuron class.\nclass my_exp(CompNeuroExp):\n    \"\"\"\n    Define an experiment by inheriting from CompNeuroExp.\n\n    CompNeuroExp provides the attributes:\n\n        monitors (CompNeuroMonitors):\n            a CompNeuroMonitors object to do recordings, define during init otherwise\n            None\n        data (dict):\n            a dictionary for storing any optional data\n\n    and the functions:\n        reset():\n            resets the model and monitors\n        results():\n            returns a results object\n    \"\"\"\n\n    def run(self, population_name):\n        \"\"\"\n        Do the simulations and recordings.\n\n        To use the CompNeuroExp class, you need to define a run function which\n        does the simulations and recordings. The run function should return the\n        results object which can be obtained by calling self.results().\n\n        For using the CompNeuroExp for OptNeuron, the run function should have\n        one argument which is the name of the population which is automatically created\n        by OptNeuron, containing a single neuron of the model which should be optimized.\n\n        Args:\n            population_name (str):\n                name of the population which contains a single neuron, this will be\n                automatically provided by OptNeuron\n\n        Returns:\n            results (CompNeuroExp._ResultsCl):\n                results object with attributes:\n                    recordings (list):\n                        list of recordings\n                    recording_times (recording_times_cl):\n                        recording times object\n                    mon_dict (dict):\n                        dict of recorded variables of the monitors\n                    data (dict):\n                        dict with optional data stored during the experiment\n        \"\"\"\n        ### For OptNeuron you have to reset the model and 
monitors at the beginning of\n        ### the run function! Do not reset the parameters, otherwise the optimization\n        ### will not work!\n        self.reset(parameters=False)\n\n        ### you have to start monitors within the run function, otherwise nothing will\n        ### be recorded\n        self.monitors.start()\n\n        ### do simulations and recordings using the provided CompNeuroMonitors object\n        ### (recording the varables specified during the initialization of OptNeuron\n        ### class) and e.g. the CompNeuroSim class\n        sim_step = CompNeuroSim(\n            simulation_function=current_step,\n            simulation_kwargs={\n                \"pop\": population_name,\n                \"t1\": 500,\n                \"t2\": 500,\n                \"a1\": 0,\n                \"a2\": 5,\n            },\n            kwargs_warning=False,\n            name=\"test\",\n            monitor_object=self.monitors,\n        )\n\n        ### run the simulation, remember setting parameters=False in the reset function!\n        sim_step.run()\n        self.reset(parameters=False)\n        sim_step.run({\"a2\": 10})\n\n        ### optional: store anything you want in the data dict. For example infomration\n        ### about the simulations. This is not used for the optimization but can be\n        ### retrieved after the optimization is finished\n        self.data[\"sim\"] = sim_step.simulation_info()\n        self.data[\"population_name\"] = population_name\n        self.data[\"time_step\"] = dt()\n\n        ### return results, use the object's self.results()\n        return self.results()\n\n\n### Next, the OptNeuron class needs a function to calculate the loss.\ndef get_loss(results_ist: CompNeuroExp._ResultsCl, results_soll):\n    \"\"\"\n    Function which has to have the arguments results_ist and results_soll and should\n    calculate and return the loss. 
This structure is needed for the OptNeuron class.\n\n    Args:\n        results_ist (object):\n            the results object returned by the run function of experiment (see above)\n        results_soll (any):\n            the target data directly provided to OptNeuron during initialization\n\n    Returns:\n        loss (float or list of floats):\n            the loss\n    \"\"\"\n    ### get the recordings and other important things for calculating the loss from\n    ### results_ist, we do not use all available information here, but you could\n    rec_ist = results_ist.recordings\n    pop_ist = results_ist.data[\"population_name\"]\n    neuron = 0\n\n    ### get the data for calculating the loss from the results_soll\n    r_target_0 = results_soll[0]\n    r_target_1 = results_soll[1]\n\n    ### get the data for calculating the loss from the recordings of the\n    ### optimized neuron model\n    r_ist_0 = rec_ist[0][f\"{pop_ist};r\"][:, neuron]\n    r_ist_1 = rec_ist[1][f\"{pop_ist};r\"][:, neuron]\n\n    ### calculate the loss, e.g. 
the root mean squared error\n    rmse1 = rmse(r_target_0, r_ist_0)\n    rmse2 = rmse(r_target_1, r_ist_1)\n\n    ### return the loss, one can return a singel value or a list of values which will\n    ### be summed during the optimization\n    return [rmse1, rmse2]\n\n\n### now we need to define which variables should be optimized and between which bounds\nvariables_bounds = {\"a\": [-10, 10], \"b\": [-10, 10]}\n\n\ndef main():\n    ### get experimental data\n    experimental_data = get_experimental_data()\n\n    ### intitialize optimization\n    opt = OptNeuron(\n        experiment=my_exp,\n        get_loss_function=get_loss,\n        variables_bounds=variables_bounds,\n        neuron_model=my_neuron,\n        results_soll=experimental_data[\"results_soll\"],\n        time_step=experimental_data[\"time_step\"],\n        compile_folder_name=\"annarchy_opt_neuron_example_from_data\",\n        method=\"hyperopt\",\n        record=[\"r\"],\n    )\n\n    ### run the optimization, define how often the experiment should be repeated\n    fit = opt.run(max_evals=1000, results_file_name=\"best_from_data\")\n\n    ### print optimized parameters, we should get around a=0.8 and b=2\n    print(\"a\", fit[\"a\"])\n    print(\"b\", fit[\"b\"])\n    print(list(fit.keys()))\n\n\nif __name__ == \"__main__\":\n    main()\n
"},{"location":"examples/opt_neuron/#console-output","title":"Console Output","text":"
$ python run_opt_neuron_from_data.py \nANNarchy 4.7 (4.7.3b) on linux (posix).\nOptNeuron: Initialize OptNeuron... do not create anything with ANNarchy before!\nOptNeuron: WARNING: attributes ['I_app', 'r'] are not used/initialized.\nchecking neuron_models, experiment, get_loss...Done\n\n100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1000/1000 [00:45<00:00, 21.99trial/s, best loss: 0.31922683758789056]\na 0.7609542202637395\nb 2.171783070482363\n['a', 'b', 'loss', 'all_loss', 'std', 'results', 'results_soll']\n
"},{"location":"examples/opt_neuron/#optimize-neuron-model-from-other-neuron-model","title":"Optimize neuron model from other neuron model","text":""},{"location":"examples/opt_neuron/#introduction_1","title":"Introduction","text":"

This example demonstrates how to use the OptNeuron class to fit an ANNarchy neuron model to the dynamics of another ANNarchy neuron model in a specific experiment.

The experiment and variable_bounds used are imported from the other example run_opt_neuron_from_data.py.

"},{"location":"examples/opt_neuron/#code_1","title":"Code","text":"
from CompNeuroPy import CompNeuroExp, rmse\nfrom CompNeuroPy.opt_neuron import OptNeuron\nfrom ANNarchy import Neuron\n\n### import the experiment and variables_bounds\nfrom run_opt_neuron_from_data import my_exp, variables_bounds\nfrom run_opt_neuron_from_data import my_neuron as simple_neuron\n\n\n### for this example we want to fit a simple neuron model to replicate the dynamics of a\n### more complex neuron model, the simple model is imported from the other example\n### 'run_opt_neuron_from_data.py' and the complex model is defined here\ncomplex_neuron = Neuron(\n    parameters=\"\"\"\n        I_app = 0\n        f = 6.0542364610842572e-002 : population\n        e = 3.7144041714209490e+000 : population\n        d = -4.9446336126026436e-001 : population\n        c = 9.0909599124334911e-002 : population\n        b = -4.4497411506061648e-003 : population\n        a = -6.2239117460540167e-005 : population\n    \"\"\",\n    equations=\"\"\"\n        r = a*I_app**5 + b*I_app**4 + c*I_app**3 + d*I_app**2 + e*I_app**1 + f\n    \"\"\",\n)\n\n\n### Next, the OptNeuron class needs a function to calculate the loss.\ndef get_loss(\n    results_ist: CompNeuroExp._ResultsCl, results_soll: CompNeuroExp._ResultsCl\n):\n    \"\"\"\n    Function which has to have the arguments results_ist and results_soll and should\n    calculate and return the loss. 
This structure is needed for the OptNeuron class.\n\n    Args:\n        results_ist (object):\n            the results object returned by the run function of experiment (see above),\n            conducting the experiment with the optimized neuron model\n        results_soll (any):\n            the results object returned by the run function of experiment (see above),\n            conducting the experiment with the target neuron model\n\n    Returns:\n        loss (float or list of floats):\n            the loss\n    \"\"\"\n\n    ### get the recordings and other important things from the results_ist (results\n    ### generated during the optimization using the defrined CompNeuroExp from above)\n    rec_ist = results_ist.recordings\n    pop_ist = results_ist.data[\"population_name\"]\n    rec_soll = results_soll.recordings\n    pop_soll = results_soll.data[\"population_name\"]\n    neuron = 0\n\n    ### get the data for calculating the loss from the recordings of the\n    ### target neuron model\n    v_soll_0 = rec_soll[0][pop_soll + \";r\"][:, neuron]\n    v_soll_1 = rec_soll[1][pop_soll + \";r\"][:, neuron]\n\n    ### get the data for calculating the loss from the recordings of the\n    ### optimized neuron model\n    v_ist_0 = rec_ist[0][pop_ist + \";r\"][:, neuron]\n    v_ist_1 = rec_ist[1][pop_ist + \";r\"][:, neuron]\n\n    ### calculate the loss, e.g. 
the root mean squared error\n    rmse1 = rmse(v_soll_0, v_ist_0)\n    rmse2 = rmse(v_soll_1, v_ist_1)\n\n    ### return the loss, one can return a singel value or a list of values which will\n    ### be summed during the optimization\n    return [rmse1, rmse2]\n\n\ndef main():\n    ### define optimization\n    opt = OptNeuron(\n        experiment=my_exp,\n        get_loss_function=get_loss,\n        variables_bounds=variables_bounds,\n        neuron_model=simple_neuron,\n        target_neuron_model=complex_neuron,\n        time_step=1,\n        compile_folder_name=\"annarchy_opt_neuron_example_from_neuron\",\n        method=\"hyperopt\",\n        record=[\"r\"],\n    )\n\n    ### run the optimization, define how often the experiment should be repeated\n    fit = opt.run(max_evals=1000, results_file_name=\"best_from_neuron\")\n\n    ### print optimized parameters, we should get around a=2.8 and b=0.28\n    print(\"a\", fit[\"a\"])\n    print(\"b\", fit[\"b\"])\n    print(list(fit.keys()))\n\n\nif __name__ == \"__main__\":\n    main()\n
"},{"location":"examples/opt_neuron/#console-output_1","title":"Console Output","text":"
$ python run_opt_neuron_from_neuron.py \nANNarchy 4.7 (4.7.3b) on linux (posix).\nOptNeuron: Initialize OptNeuron... do not create anything with ANNarchy before!\nOptNeuron: WARNING: attributes ['I_app', 'r'] are not used/initialized.\nchecking neuron_models, experiment, get_loss...Done\n\n100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1000/1000 [00:47<00:00, 21.10trial/s, best loss: 0.5607444520201438]\na 2.8009641859311354\nb 0.22697565003968234\n['a', 'b', 'loss', 'all_loss', 'std', 'results', 'results_soll']\n
"},{"location":"examples/plot_recordings/","title":"Plot Recordings","text":""},{"location":"examples/plot_recordings/#introduction","title":"Introduction","text":"

This example demonstrates how to plot recordings (from CompNeuroMonitors) using the PlotRecordings class. The different plotting formats for spiking and non-spiking data (populations and projections) are demonstrated.

This example loads data generated with other example run_and_monitor_simulations.py.

"},{"location":"examples/plot_recordings/#code","title":"Code","text":"
from CompNeuroPy import load_variables, PlotRecordings\n\n\ndef main():\n    ### load data generated with other example \"run_and_monitor_simulations.py\"\n    loaded_dict = load_variables(\n        name_list=[\"recordings\", \"recording_times\", \"increase_rates_pop_info\"],\n        path=\"run_and_monitor_simulations/\",\n    )\n\n    ### define what should be plotted in which subplot, here 14 subplots are defined to\n    ### demonstrate the different plotting formats for spiking and non-spiking variables\n    plan_dict = {\n        \"position\": [1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14],\n        \"compartment\": [\n            \"first_poisson\",\n            \"first_poisson\",\n            \"first_poisson\",\n            \"first_poisson\",\n            \"first_poisson\",\n            \"second_poisson\",\n            \"second_poisson\",\n            \"second_poisson\",\n            \"second_poisson\",\n            \"ampa_proj\",\n            \"ampa_proj\",\n            \"ampa_proj\",\n            \"ampa_proj\",\n        ],\n        \"variable\": [\n            \"spike\",\n            \"spike\",\n            \"spike\",\n            \"spike\",\n            \"spike\",\n            \"p\",\n            \"p\",\n            \"p\",\n            \"p\",\n            \"w\",\n            \"w\",\n            \"w\",\n            \"w\",\n        ],\n        \"format\": [\n            \"raster\",\n            \"mean\",\n            \"hybrid\",\n            \"interspike\",\n            \"cv\",\n            \"line\",\n            \"line_mean\",\n            \"matrix\",\n            \"matrix_mean\",\n            \"line\",\n            \"line_mean\",\n            \"matrix\",\n            \"matrix_mean\",\n        ],\n    }\n\n    ### plot first chunk\n    PlotRecordings(\n        figname=\"run_and_monitor_simulations/my_two_poissons_chunk_0.png\",\n        recordings=loaded_dict[\"recordings\"],\n        recording_times=loaded_dict[\"recording_times\"],\n        shape=(3, 5),\n   
     plan=plan_dict,\n    )\n    ### plot second chunk\n    PlotRecordings(\n        figname=\"run_and_monitor_simulations/my_two_poissons_chunk_1.png\",\n        recordings=loaded_dict[\"recordings\"],\n        recording_times=loaded_dict[\"recording_times\"],\n        shape=(3, 5),\n        plan=plan_dict,\n        chunk=1,\n    )\n\n    return 1\n\n\nif __name__ == \"__main__\":\n    main()\n
"},{"location":"examples/plot_recordings/#console-output","title":"Console Output","text":"
$ python plot_recordings.py \nANNarchy 4.7 (4.7.3b) on linux (posix).\nGenerate fig run_and_monitor_simulations/my_two_poissons_chunk_0.png... Done\n\nGenerate fig run_and_monitor_simulations/my_two_poissons_chunk_1.png... Done\n
"},{"location":"examples/run_and_monitor_simulations/","title":"Generate Simulations","text":""},{"location":"examples/run_and_monitor_simulations/#introduction","title":"Introduction","text":"

This example demonstrates how to use the CompNeuroSim class to define simulations. It is shown how to define the simulation functions, requirements and how to use the simulation information object.

This example imports the \"my_model\" from other example create_model.py and saves recorded data used in other example plot_recordings.py.

"},{"location":"examples/run_and_monitor_simulations/#code","title":"Code","text":"
import numpy as np\nfrom CompNeuroPy import (\n    CompNeuroMonitors,\n    CompNeuroSim,\n    ReqPopHasAttr,\n    save_variables,\n    CompNeuroModel,\n)\nfrom ANNarchy import (\n    simulate,\n    get_population,\n    Population,\n    Neuron,\n    Projection,\n    Synapse,\n    Uniform,\n)\nfrom CompNeuroPy.examples.create_model import my_model\n\n\n### CompNeuroSim is a class to define simulations\n### It requires a simulation function, which we will define here:\ndef set_rates(pop_name: str, rates: float = 0.0, duration: float = 0.0):\n    \"\"\"\n    Sets the rates variable of a population given by pop_name and simulates duration ms.\n\n    Args:\n        pop_name (str):\n            name of the population\n        rates (float, optional):\n            rates variable of the population\n        duration (float, optional):\n            duration of the simulation in ms\n    \"\"\"\n    ### set rates and simulate\n    get_population(pop_name).rates = rates\n    simulate(duration)\n\n\n### Also create a second more complex simulation function\ndef increase_rates(\n    pop_name: str | list[str],\n    rate_step: float = 0.0,\n    time_step: float = 0.0,\n    nr_steps: int = 0,\n):\n    \"\"\"\n    Increase rates variable of population(s).\n\n    Args:\n        pop_name (str or list of str):\n            name of population(s)\n        rate_step (float, optional):\n            increase of rate with each step, initial step = current rates of pop\n        time_step (float, optional):\n            duration of each step in ms\n        nr_steps (int, optional):\n            number of steps\n    \"\"\"\n\n    ### convert single pop into list\n    pop_name_list = pop_name\n    if not (isinstance(pop_name_list, list)):\n        pop_name_list = [pop_name_list]\n\n    ### define initial value for rates for each pop (assume all neurons have same rates)\n    start_rate_arr = np.array(\n        [get_population(pop_name).rates[0] for pop_name in pop_name_list]\n    )\n\n    ### 
simulate all steps\n    for step in range(nr_steps):\n        ### calculate rates for each pop\n        rates_arr = step * rate_step + start_rate_arr\n        ### set rates variable of all populations\n        for pop_idx, pop_name in enumerate(pop_name_list):\n            set_rates(\n                pop_name, rates=rates_arr[pop_idx], duration=0\n            )  # use already defined simulation set_rates\n        ### then simulate step\n        set_rates(pop_name_list[0], rates=rates_arr[0], duration=time_step)\n\n    ### simulation_functions can return some information which may be helpful later\n    ### the simulation arguments do not need to be returned, since they are accessible\n    ### through the CompNeuroSim object anyway (see below)\n    return {\"duration\": time_step * nr_steps, \"d_rates\": rate_step * nr_steps}\n\n\n### see below why we need this function\ndef extend_model(my_model: CompNeuroModel):\n    \"\"\"\n    Create a simple projections and a projection with decaying weights.\n\n    Args:\n        my_model (CompNeuroModel):\n            model to which the projection should be added\n    \"\"\"\n\n    ### create a simple population for later use\n    Population(1, neuron=Neuron(equations=\"r=0\"), name=\"simple_pop\")\n\n    ### create a projection with decaying weights to demonstrate recording of projection\n    proj = Projection(\n        pre=my_model.populations[0],\n        post=my_model.populations[1],\n        target=\"ampa\",\n        synapse=Synapse(parameters=\"tau=500\", equations=\"dw/dt=-w/tau\"),\n        name=\"ampa_proj\",\n    )\n    proj.connect_all_to_all(weights=Uniform(1.0, 2.0))\n\n\ndef main():\n    ### create and compile the model from other example \"create_model.py\"\n    my_model.create(do_compile=False)\n\n    ### extend the model to demonstrate the functionality of CompNeuroSim requirements\n    ### (see below) and the recording of projections (recorded data will be used in\n    ### other example 
\"plot_recordings.py\")\n    extend_model(my_model)\n    my_model.compile()\n\n    ### Define Monitors, recording p and spike from both model populations with periods\n    ### of 10 ms and 15 ms and the weights of the ampa projection with period of 10 ms\n    monitor_dictionary = {\n        f\"{my_model.populations[0]};10\": [\"p\", \"spike\"],\n        f\"{my_model.populations[1]};15\": [\"p\", \"spike\"],\n        \"ampa_proj;10\": [\"w\"],\n    }\n    mon = CompNeuroMonitors(monitor_dictionary)\n\n    ### Now use CompNeuroSim to define a simulation. Use the previously defined\n    ### simulation functions and define their arguments as kwargs dictionary. Give the\n    ### simulation a name and description and you can also define requirements for the\n    ### simulation. Here, for example, we require that the populations contain the\n    ### attribute 'rates'. One can define multiple requirements in a list of\n    ### dictionaries. The arguments of the requirements can be inherited from the\n    ### simulation kwargs by using the syntax 'simulation_kwargs.<kwarg_name>'.\n    ### The monitor object is also given to the simulation, so that the simulation\n    ### runs can be automatically associated with the monitor recording chunks.\n    increase_rates_pop = CompNeuroSim(\n        simulation_function=increase_rates,\n        simulation_kwargs={\n            \"pop_name\": my_model.populations[0],\n            \"rate_step\": 10,\n            \"time_step\": 100,\n            \"nr_steps\": 15,\n        },\n        name=\"increase_rates_pop\",\n        description=\"increase rates variable of pop\",\n        requirements=[\n            {\"req\": ReqPopHasAttr, \"pop\": \"simulation_kwargs.pop_name\", \"attr\": \"rates\"}\n        ],\n        monitor_object=mon,\n    )\n\n    ### Now let's use this simulation\n    ### Simulate 500 ms without recordings and then run the simulation\n    simulate(500)\n    mon.start()\n    increase_rates_pop.run()\n\n    ### resetting 
monitors and model, creating new recording chunk\n    mon.reset()\n\n    ### again simulate 700 ms without recording\n    ### then run the simulation with different simulation kwargs (for all populations)\n    mon.pause()\n    simulate(700)\n    mon.start()\n    increase_rates_pop.run({\"pop_name\": my_model.populations})\n    simulate(500)\n\n    ### now again change the pop_name kwarg but use the simple_pop population without\n    ### the required attribute 'rates'\n    ### this will raise an error\n    try:\n        increase_rates_pop.run({\"pop_name\": \"simple_pop\"})\n    except Exception as e:\n        print(\"\\n###############################################\")\n        print(\n            \"Running simulation with population not containing attribute 'rates' causes the following error:\"\n        )\n        print(e)\n        print(\"###############################################\\n\")\n\n    ### get recordings and recording times from the CompNeuroMonitors object\n    recordings = mon.get_recordings()\n    recording_times = mon.get_recording_times()\n\n    ### get the simulation information object from the CompNeuroSim object\n    increase_rates_pop_info = increase_rates_pop.simulation_info()\n\n    ### save the recordings, recording times and simulation information\n    save_variables(\n        variable_list=[recordings, recording_times, increase_rates_pop_info],\n        name_list=[\"recordings\", \"recording_times\", \"increase_rates_pop_info\"],\n        path=\"run_and_monitor_simulations\",\n    )\n\n    ### print the information contained in the simulation information object\n    print(\"\\nA simulation object contains:\")\n    print(\"name\\n\", increase_rates_pop_info.name)\n    print(\"\\ndescription\\n\", increase_rates_pop_info.description)\n    print(\"\\nstart (for each run)\\n\", increase_rates_pop_info.start)\n    print(\"\\nend (for each run)\\n\", increase_rates_pop_info.end)\n    print(\"\\ninfo (for each run)\\n\", 
increase_rates_pop_info.info)\n    print(\"\\nkwargs (for each run)\\n\", increase_rates_pop_info.kwargs)\n    print(\"\\nmonitor chunk (for each run)\\n\", increase_rates_pop_info.monitor_chunk)\n\n    return 1\n\n\nif __name__ == \"__main__\":\n    main()\n
"},{"location":"examples/run_and_monitor_simulations/#console-output","title":"Console Output","text":"
$ python run_and_monitor_simulations.py \nANNarchy 4.7 (4.7.3b) on linux (posix).\ncreated model, other parameters: 1\nCompiling ...  OK \n\n###############################################\nRunning simulation with population not containing attribute 'rates' causes the following error:\nPopulation simple_pop does not contain attribute rates!\n\n###############################################\n\n\nA simulation object contains:\nname\n increase_rates_pop\n\ndescription\n increase rates variable of pop\n\nstart (for each run)\n [500.0, 700.0]\n\nend (for each run)\n [2000.0, 2200.0]\n\ninfo (for each run)\n [{'duration': 1500, 'd_rates': 150}, {'duration': 1500, 'd_rates': 150}]\n\nkwargs (for each run)\n [{'pop_name': 'first_poisson', 'rate_step': 10, 'time_step': 100, 'nr_steps': 15}, {'pop_name': ['first_poisson', 'second_poisson'], 'rate_step': 10, 'time_step': 100, 'nr_steps': 15}]\n\nmonitor chunk (for each run)\n [0, 1]\n
"},{"location":"main/dbs_stimulator/","title":"DBS Stimulator","text":""},{"location":"main/dbs_stimulator/#CompNeuroPy.dbs.DBSstimulator","title":"CompNeuroPy.dbs.DBSstimulator","text":"

Class for stimulating a population with DBS.

Warning

If you use auto_implement, pointers to the populations and projections of the model are not valid anymore (new populations and projections are created)! Use a CompNeuroPy model working with names of populations and projections anyway (recommended) or use the update_pointers method.

Examples:

from ANNarchy import Population, Izhikevich, compile, simulate, setup\nfrom CompNeuroPy import DBSstimulator\n\n# setup ANNarchy\nsetup(dt=0.1)\n\n# create populations\npopulation1 = Population(10, neuron=Izhikevich, name=\"my_pop1\")\npopulation2 = Population(10, neuron=Izhikevich, name=\"my_pop2\")\n>>>\n# create DBS stimulator\ndbs = DBSstimulator(\n    stimulated_population=population1,\n    population_proportion=0.5,\n    dbs_depolarization=30,\n    auto_implement=True,\n)\n\n# update pointers to correct populations\npopulation1, population2 = dbs.update_pointers(\n    pointer_list=[population1, population2]\n)\n\n# compile network\ncompile()\n\n# run simulation\n# 1000 ms without dbs\nsimulate(1000)\n# 1000 ms with dbs\ndbs.on()\nsimulate(1000)\n# 1000 ms without dbs\ndbs.off()\nsimulate(1000)\n
Source code in CompNeuroPy/dbs.py
class DBSstimulator:\n    \"\"\"\n    Class for stimulating a population with DBS.\n\n    !!! warning\n        If you use auto_implement, pointers to the populations and projections of\n        the model are not valid anymore (new populations and projections are\n        created)! Use a CompNeuroPy model working with names of populations and\n        projections anyway (recommended) or use the update_pointers method.\n\n    Examples:\n        ```python\n        from ANNarchy import Population, Izhikevich, compile, simulate, setup\n        from CompNeuroPy import DBSstimulator\n\n        # setup ANNarchy\n        setup(dt=0.1)\n\n        # create populations\n        population1 = Population(10, neuron=Izhikevich, name=\"my_pop1\")\n        population2 = Population(10, neuron=Izhikevich, name=\"my_pop2\")\n        >>>\n        # create DBS stimulator\n        dbs = DBSstimulator(\n            stimulated_population=population1,\n            population_proportion=0.5,\n            dbs_depolarization=30,\n            auto_implement=True,\n        )\n\n        # update pointers to correct populations\n        population1, population2 = dbs.update_pointers(\n            pointer_list=[population1, population2]\n        )\n\n        # compile network\n        compile()\n\n        # run simulation\n        # 1000 ms without dbs\n        simulate(1000)\n        # 1000 ms with dbs\n        dbs.on()\n        simulate(1000)\n        # 1000 ms without dbs\n        dbs.off()\n        simulate(1000)\n        ```\n    \"\"\"\n\n    @check_types()\n    def __init__(\n        self,\n        stimulated_population: Population,\n        population_proportion: float = 1.0,\n        excluded_populations_list: list[Population] = [],\n        dbs_depolarization: float = 0.0,\n        orthodromic: bool = False,\n        antidromic: bool = False,\n        efferents: bool = False,\n        afferents: bool = False,\n        passing_fibres: bool = False,\n        passing_fibres_list: 
list[Projection] = [],\n        passing_fibres_strength: float | list[float] = 1.0,\n        sum_branches: bool = True,\n        dbs_pulse_frequency_Hz: float = 130.0,\n        dbs_pulse_width_us: float = 300.0,\n        axon_spikes_per_pulse: float = 1.0,\n        axon_rate_amp: float | dict[Population | str, float] = 1.0,\n        seed: int | None = None,\n        auto_implement: bool = False,\n        model: generate_model | None = None,\n    ) -> None:\n        \"\"\"\n        Initialize DBS stimulator.\n\n        !!! warning\n            Do this before compiling the model!\n\n        Args:\n            stimulated_population (Population):\n                Population which is stimulated by DBS\n            population_proportion (float, optional):\n                Proportion of the stimulated population which is affected by DBS,\n                neurons are distributed randomly. Default: 1.0.\n            excluded_populations_list (list, optional):\n                List of populations which are excluded from DBS effects, they are not\n                affected and their axons do not generate axon spikes. Default: [].\n            dbs_depolarization (float, optional):\n                Depolarization effect of the DBS pulse to local soma. Default: 0.0.\n            orthodromic (bool, optional):\n                If True, DBS causes axonal spikes which are forwarded orthodromically.\n                Default: False.\n            antidromic (bool, optional):\n                If True, DBS causes axonal spikes which are forwarded antidromically,\n                only available in spiking networks. Default: False.\n            efferents (bool, optional):\n                If True, DBS affects the efferents of the stimulated population\n                (orthodromic and/or antidromic have to be True too). 
Default: False.\n            afferents (bool, optional):\n                If True, DBS affects the afferents of the stimulated population\n                (orthodromic and/or antidromic have to be True too). Default: False.\n            passing_fibres (bool, optional):\n                If True, DBS affects the passing fibres of the stimulated region defined\n                in passing_fibres_list (orthodromic and/or antidromic have to be True\n                too). Default: False.\n            passing_fibres_list (list of Projections, optional):\n                List of projections which pass the DBS stimulated region and therefore\n                are activated by DBS. Default: [], also set passing_fibres True!\n            passing_fibres_strength (float or list of float, optional):\n                Single value or list of float values between 0 and 1 defining how strong\n                the passing fibres are activated by DBS (0: not activated, 1: fully\n                activated like the projections in the DBS stimulated region).\n                Default: 1.\n            sum_branches (bool, optional):\n                If True, the antidromic_prob of a presynaptic population (defining how\n                many axon spikes affect the pop antidromically) of passing fibres is\n                the sum of the passing_fibres_strengths of the single axon branches.\n                Default: True.\n            dbs_pulse_frequency_Hz (float, optional):\n                Frequency of the DBS pulse. Default: 130 Hz.\n            dbs_pulse_width_us (float, optional):\n                Width of the DBS pulse. Default: 300 us.\n            axon_spikes_per_pulse (float, optional):\n                Number of average axon spikes per DBS pulse. Default: 1.\n            axon_rate_amp (float or dict of float, optional):\n                Similar to prob_axon_spike in spiking model. Which rate is forwarded on\n                axons caused by DBS. 
You can specify this for each population\n                individually by using a dictionary (keys = Population instances)\n                axon_rate_amp = {pop: 1.5} --> the efferent axons of pop forward a rate\n                of 1.5 during DBS (all other affected projections forward the default\n                value)\n                You can specify the default value by using the key \"default\", e.g.\n                {pop: 1.5, \"default\": 1.0} -> efferent axons of all populations except\n                pop forward a rate of 1.0 during DBS. Default: 1.0.\n            seed (int, optional):\n                Seed for the random distribution of affected neurons based on\n                population_proportion. Default: None.\n            auto_implement (bool, optional):\n                If True, automatically implement DBS mechanisms to the model. Only\n                supported for Izhikevich spiking models and rate-coded models.\n                Default: False.\n                TODO test what happens with mixed models\n            model (generate_model, optional):\n                CompNeuroPy model which is used to automatically implement DBS\n                mechanisms, should not be compiled!. 
Default: None, i.e., use all\n                populations and projections of the current magic model\n        \"\"\"\n\n        if auto_implement:\n            ### recreate model with DBS mechanisms\n            ### give all variables containing Populations and Projections\n            ### and also recreate them during recreating the model\n            ### variables are:\n            ### - stimulated_population\n            ### - excluded_populations_list\n            ### - passing_fibres_list\n            ### - axon_rate_amp\n            if not isinstance(model, type(None)):\n                ### CompNeuroPy model given\n                ### recreate model with DBS mechanisms\n                create_dbs_model_obj = _CreateDBSmodelcnp(\n                    model,\n                    stimulated_population,\n                    excluded_populations_list,\n                    passing_fibres_list,\n                    axon_rate_amp,\n                )\n                ### get the new CompNeuroPy model\n                model = create_dbs_model_obj.model\n            else:\n                ### no CompNeuroPy model given --> use all populations and projections of the current magic model\n                ### recreate model with DBS mechanisms\n                create_dbs_model_obj = _CreateDBSmodel(\n                    stimulated_population,\n                    excluded_populations_list,\n                    passing_fibres_list,\n                    axon_rate_amp,\n                )\n            ### get the new variables containing Populations and Projections\n            stimulated_population = create_dbs_model_obj.stimulated_population\n            excluded_populations_list = create_dbs_model_obj.excluded_populations_list\n            passing_fibres_list = create_dbs_model_obj.passing_fibres_list\n            axon_rate_amp = create_dbs_model_obj.axon_rate_amp\n\n        ### set parameters\n        self.stimulated_population = stimulated_population\n        
self.population_proportion = population_proportion\n        self.excluded_populations_list = excluded_populations_list\n        self.dbs_depolarization = dbs_depolarization\n        self.orthodromic = orthodromic\n        self.antidromic = antidromic\n        self.efferents = efferents\n        self.afferents = afferents\n        self.passing_fibres = passing_fibres\n        self.passing_fibres_list = passing_fibres_list\n        self.passing_fibres_strength = passing_fibres_strength\n        self.sum_branches = sum_branches\n        self.dbs_pulse_width_us = dbs_pulse_width_us\n        self.axon_spikes_per_pulse = axon_spikes_per_pulse\n        self.axon_rate_amp = axon_rate_amp\n        self.seed = seed\n        self.model = model\n\n        ### ANNarchy constants for DBS\n        self._set_constants(dbs_pulse_frequency_Hz)\n\n        ### randomly select affected neurons i.e. create dbs_on_array\n        self.dbs_on_array = self._create_dbs_on_array(population_proportion, seed)\n\n    def _create_dbs_on_array(self, population_proportion: float, seed: int):\n        \"\"\"\n        Create an array with the shape of the stimulated population with ones and zeros\n        randomly distributed with the specified population_proportion.\n\n        Args:\n            population_proportion (float):\n                Proportion of the stimulated population which is affected by DBS,\n                neurons are distributed randomly\n            seed (int):\n                Seed for the random distribution of affected neurons based on\n                population_proportion\n\n        Returns:\n            dbs_on_array (np.array):\n                Array with the shape of the stimulated population with ones and zeros\n                randomly distributed with the specified population_proportion\n        \"\"\"\n        ### create random number generator\n        rng = np.random.default_rng(seed)\n        ### create an array of zeros with the shape of the population, then 
flatten it\n        dbs_on_array = np.zeros(self.stimulated_population.geometry).flatten()\n        ### get the number of affected neurons based on the population_proportion\n        number_of_affected_neurons = population_proportion * dbs_on_array.size\n        ### randomly ceil or floor the number of affected neurons\n        number_of_affected_neurons = int(\n            rng.choice(\n                [\n                    np.ceil(number_of_affected_neurons),\n                    np.floor(number_of_affected_neurons),\n                ]\n            )\n        )\n        ### insert ones to the dbs_on_array\n        dbs_on_array[:number_of_affected_neurons] = 1\n        ### shuffle array\n        rng.shuffle(dbs_on_array)\n        ### reshape array to the shape of the population\n        dbs_on_array = dbs_on_array.reshape(self.stimulated_population.geometry)\n        ### return array\n        return dbs_on_array\n\n    def _set_constants(self, dbs_pulse_frequency_Hz: float):\n        \"\"\"\n        Set constants for DBS.\n\n        Args:\n            dbs_pulse_frequency_Hz (float):\n                Frequency of the DBS pulse in Hz\n        \"\"\"\n        # pulse frequency:\n        Constant(\"dbs_pulse_frequency_Hz\", dbs_pulse_frequency_Hz)\n        # pulse width:\n        # Neumant et al.. 2023: 60us but Meier et al. 2022: 300us... 
60us = 0.06ms is very small for ANNarchy simulations\n        Constant(\"dbs_pulse_width_us\", self.dbs_pulse_width_us)\n\n        ### add global function for DBS pulse\n        add_function(\n            \"pulse(time_ms) = ite(modulo(time_ms*1000, 1000000./dbs_pulse_frequency_Hz) < dbs_pulse_width_us, 1., 0.)\"\n        )\n\n    def _axon_spikes_per_pulse_to_prob(self, axon_spikes_per_pulse: float):\n        \"\"\"\n        Convert number of axon spikes per pulse to probability of axon spikes per\n        timestep during DBS pulse\n\n        Args:\n            axon_spikes_per_pulse (float):\n                Number of average axon spikes per DBS pulse\n\n        Returns:\n            prob_axon_spike_time_step (float):\n                Probability of axon spikes per timestep during DBS pulse\n        \"\"\"\n        return np.clip(\n            (axon_spikes_per_pulse * 1000 * dt() / self.dbs_pulse_width_us), 0, 1\n        )\n\n    def _set_depolarization(self, dbs_depolarization: float | None = None):\n        \"\"\"\n        Set depolarization of population.\n\n        Args:\n            dbs_depolarization (float, optional):\n                Depolarization effect of the DBS pulse to local soma. 
Default: None,\n                i.e., use value from initialization\n        \"\"\"\n        ### either use given depolarization or use default value\n        if isinstance(dbs_depolarization, type(None)):\n            dbs_depolarization = self.dbs_depolarization\n\n        ### set depolarization of population\n        for pop in populations():\n            if pop == self.stimulated_population:\n                pop.dbs_depolarization = dbs_depolarization\n            else:\n                pop.dbs_depolarization = 0\n\n    def _set_axon_spikes(\n        self,\n        orthodromic: bool | None = None,\n        antidromic: bool | None = None,\n        efferents: bool | None = None,\n        afferents: bool | None = None,\n        passing_fibres: bool | None = None,\n        passing_fibres_strength: float | list[float] | None = None,\n        sum_branches: bool | None = None,\n        axon_spikes_per_pulse: float | None = None,\n        axon_rate_amp: float | dict[Population | str, float] | None = None,\n    ):\n        \"\"\"\n        Set axon spikes forwarding orthodromic\n\n        Args:\n            orthodromic (bool, optional):\n                If True, DBS causes axonal spikes which are forwarded orthodromically,\n                Default: None, i.e., use value from initialization\n            antidromic (bool, optional):\n                If True, DBS causes axonal spikes which are forwarded antidromically,\n                only available in spiking networks. Default: None, i.e., use value from\n                initialization\n            efferents (bool, optional):\n                If True, DBS affects the efferents of the stimulated population\n                (orthodromic and/or antidromic have to be True too). 
Default: None,\n                i.e., use value from initialization\n            afferents (bool, optional):\n                If True, DBS affects the afferents of the stimulated population\n                (orthodromic and/or antidromic have to be True too). Default: None,\n                i.e., use value from initialization\n            passing_fibres (bool, optional):\n                If True, DBS affects the passing fibres of the stimulated region defined\n                in passing_fibres_list (orthodromic and/or antidromic have to be True\n                too). Default: None, i.e., use value from initialization\n            passing_fibres_strength (float | list[float], optional):\n                Single value or list of float values between 0 and 1 defining how strong\n                the passing fibres are activated by DBS (0: not activated, 1: fully\n                activated like the projections in the DBS stimulated region).\n                Default: None, i.e., use value from initialization\n            sum_branches (bool, optional):\n                If True, the antidromic_prob of a presynaptic population (defining how\n                many axon spikes affect the pop antidromically) of passing fibres is\n                the sum of the passing_fibres_strengths of the single axon branches.\n                Default: None, i.e., use value from initialization\n            axon_spikes_per_pulse (float, optional):\n                Number of average axon spikes per DBS pulse. Default: None, i.e., use\n                value from initialization\n            axon_rate_amp (float | dict[Population | str, float], optional):\n                Similar to prob_axon_spike in spiking model. Which rate is forwarded on\n                axons caused by DBS. 
You can specify this for each population\n                individually by using a dictionary (keys = Population instances)\n                axon_rate_amp = {pop: 1.5} --> the efferent axons of pop forward a rate\n                of 1.5 during DBS (all other affected projections forward the default\n                value)\n                You can specify the default value by using the key \"default\", e.g.\n                {pop: 1.5, \"default\": 1.0} -> efferent axons of all populations except\n                pop forward a rate of 1.0 during DBS. Default: None, i.e., use value\n                from initialization\n        \"\"\"\n\n        ### either use given orthodromic or use default value\n        if isinstance(orthodromic, type(None)):\n            orthodromic = self.orthodromic\n        ### either use given antidromic or use default value\n        if isinstance(antidromic, type(None)):\n            antidromic = self.antidromic\n        ### either use given efferents or use default value\n        if isinstance(efferents, type(None)):\n            efferents = self.efferents\n        ### either use given afferents or use default value\n        if isinstance(afferents, type(None)):\n            afferents = self.afferents\n        ### either use given afferents or use default value\n        if isinstance(passing_fibres, type(None)):\n            passing_fibres = self.passing_fibres\n        ### either use given passing_fibres_strength or use default value\n        if isinstance(passing_fibres_strength, type(None)):\n            passing_fibres_strength = self.passing_fibres_strength\n        ### either use given sum_branches or use default value\n        if isinstance(sum_branches, type(None)):\n            sum_branches = self.sum_branches\n        ### either use given axon_spikes_per_pulse or use default value\n        if isinstance(axon_spikes_per_pulse, type(None)):\n            axon_spikes_per_pulse = self.axon_spikes_per_pulse\n        ### either use given 
axon_rate_amp or use default value\n        if isinstance(axon_rate_amp, type(None)):\n            axon_rate_amp = self.axon_rate_amp\n\n        ### check if passing_fibres_strength is a list\n        if not isinstance(passing_fibres_strength, list):\n            passing_fibres_strength = [passing_fibres_strength] * len(\n                self.passing_fibres_list\n            )\n        ### check if axon_rate_amp is a dict or float\n        if isinstance(axon_rate_amp, dict):\n            ### check if default key is missing\n            if \"default\" not in axon_rate_amp.keys():\n                ### add the key \"default\" with the value 1.0 to the dict\n                axon_rate_amp[\"default\"] = 1.0\n        else:\n            ### create dict with default value\n            axon_rate_amp = {\"default\": axon_rate_amp}\n\n        ### deactivate DBS axon transmission\n        self._deactivate_axon_DBS()\n\n        ### activate orthodromic transmission for all projections\n        if orthodromic:\n            self._set_orthodromic(\n                efferents,\n                afferents,\n                passing_fibres,\n                passing_fibres_strength,\n                axon_spikes_per_pulse,\n                axon_rate_amp,\n            )\n\n        ### activate antidromic transmission for all populations\n        if antidromic:\n            self._set_antidromic(\n                efferents,\n                afferents,\n                passing_fibres,\n                passing_fibres_strength,\n                sum_branches,\n                axon_spikes_per_pulse,\n            )\n\n    def _deactivate_axon_DBS(self):\n        \"\"\"\n        Deactivate axon spikes forwarding for both orthodromic and antidromic.\n        \"\"\"\n        for pop in populations():\n            ### deactivate axon spike genearation for all populations\n            pop.prob_axon_spike = 0\n            pop.axon_rate_amp = 0\n            ### deactivate antidromic transmission for all 
populations\n            pop.antidromic = 0\n            pop.antidromic_prob = 0\n\n        ### deactivate orthodromic transmission for all projections\n        for proj in projections():\n            proj.axon_transmission = 0\n            proj.p_axon_spike_trans = 0\n\n    def _set_orthodromic(\n        self,\n        efferents: bool,\n        afferents: bool,\n        passing_fibres: bool,\n        passing_fibres_strength: list[float],\n        axon_spikes_per_pulse: float,\n        axon_rate_amp: dict[Population | str, float],\n    ):\n        \"\"\"\n        Set orthodromic axon spikes forwarding.\n\n        Args:\n            efferents (bool):\n                If True, DBS affects the efferents of the stimulated population\n                (orthodromic and/or antidromic have to be True too)\n            afferents (bool):\n                If True, DBS affects the afferents of the stimulated population\n                (orthodromic and/or antidromic have to be True too)\n            passing_fibres (bool):\n                If True, DBS affects the passing fibres of the stimulated population\n                (orthodromic and/or antidromic have to be True too and there have to\n                be passing fibres in the passing_fibres_list)\n            passing_fibres_strength (list[float]):\n                List of float values between 0 and 1 defining how strong the passing\n                fibres are activated by DBS (0: not activated, 1: fully activated\n                like projections in DBS stimulated region)\n            axon_spikes_per_pulse (float):\n                Number of average axon spikes per DBS pulse\n            axon_rate_amp (dict[Population | str, float]):\n                Similar to prob_axon_spike in spiking model. Which rate is forwarded\n                on axons caused by DBS. 
The dictionary has to contain the key\n                \"default\" with the default value for all projections and can contain\n                keys for each population with a different value for the axon_rate of\n                the efferent axons of this population.\n        \"\"\"\n        if efferents:\n            ### activate all efferent projections\n            projection_list = projections(pre=self.stimulated_population)\n            for proj in projection_list:\n                ### skip excluded populations\n                if proj.post in self.excluded_populations_list:\n                    continue\n                ### activate axon transmission\n                proj.axon_transmission = 1\n                proj.p_axon_spike_trans = 1\n                ### set prob_axon_spike for spiking model\n                proj.pre.prob_axon_spike = self._axon_spikes_per_pulse_to_prob(\n                    axon_spikes_per_pulse\n                )\n                ### set axon_rate_amp for rate-coded model\n                if proj.pre in axon_rate_amp.keys():\n                    ### axon_rate_amp is specified for this population\n                    proj.pre.axon_rate_amp = axon_rate_amp[proj.pre]\n                else:\n                    ### axon_rate_amp is not specified for this population, use default value\n                    proj.pre.axon_rate_amp = axon_rate_amp[\"default\"]\n\n        if afferents:\n            ### activate all afferent projections\n            projection_list = projections(post=self.stimulated_population)\n            for proj in projection_list:\n                ### skip excluded populations\n                if proj.pre in self.excluded_populations_list:\n                    continue\n                ### activate axon transmission\n                proj.axon_transmission = 1\n                proj.p_axon_spike_trans = 1\n                ### set prob_axon_spike for spiking model\n                proj.pre.prob_axon_spike = 
self._axon_spikes_per_pulse_to_prob(\n                    axon_spikes_per_pulse\n                )\n                ### set axon_rate_amp for rate-coded model\n                if proj.pre in axon_rate_amp.keys():\n                    ### axon_rate_amp is specified for this population\n                    proj.pre.axon_rate_amp = axon_rate_amp[proj.pre]\n                else:\n                    ### axon_rate_amp is not specified for this population, use default value\n                    proj.pre.axon_rate_amp = axon_rate_amp[\"default\"]\n\n        if passing_fibres:\n            ### activate all passing projections\n            for proj_idx, proj in enumerate(self.passing_fibres_list):\n                proj.axon_transmission = 1\n                proj.p_axon_spike_trans = passing_fibres_strength[proj_idx]\n                ### set prob_axon_spike for spiking model\n                proj.pre.prob_axon_spike = self._axon_spikes_per_pulse_to_prob(\n                    axon_spikes_per_pulse\n                )\n                ### set axon_rate_amp for rate-coded model\n                if proj.pre in axon_rate_amp.keys():\n                    ### axon_rate_amp is specified for this population\n                    proj.pre.axon_rate_amp = axon_rate_amp[proj.pre]\n                else:\n                    ### axon_rate_amp is not specified for this population, use default value\n                    proj.pre.axon_rate_amp = axon_rate_amp[\"default\"]\n\n    def _set_antidromic(\n        self,\n        efferents: bool,\n        afferents: bool,\n        passing_fibres: bool,\n        passing_fibres_strength: list[float],\n        sum_branches: bool,\n        axon_spikes_per_pulse: float,\n    ):\n        \"\"\"\n        Set antidromic axon spikes forwarding.\n\n        Args:\n            efferents (bool):\n                If True, DBS affects the efferents of the stimulated population\n                (orthodromic and/or antidromic have to be True too)\n            
afferents (bool):\n                If True, DBS affects the afferents of the stimulated population\n                (orthodromic and/or antidromic have to be True too)\n            passing_fibres (bool):\n                If True, DBS affects the passing fibres of the stimulated population\n                (orthodromic and/or antidromic have to be True too and there have to\n                be passing fibres in the passing_fibres_list)\n            passing_fibres_strength (list[float]):\n                List of float values between 0 and 1 defining how strong the passing\n                fibres are activated by DBS (0: not activated, 1: fully activated\n                like projections in DBS stimulated region)\n            sum_branches (bool):\n                If True, the antidromic_prob of a presynaptic population (defining how\n                many axon spikes affect the pop antidromically) of passing fibres is\n                the sum of the passing_fibres_strengths of the single axon branches.\n            axon_spikes_per_pulse (float):\n                Number of average axon spikes per DBS pulse\n        \"\"\"\n\n        if efferents:\n            ### activate all efferent projections, i.e. antodromic activation of stimulated population\n            pop = self.stimulated_population\n            pop.antidromic = 1\n            pop.antidromic_prob = 1\n            pop.prob_axon_spike = self._axon_spikes_per_pulse_to_prob(\n                axon_spikes_per_pulse\n            )\n        if afferents:\n            ### activate all afferent projections, i.e. 
all presynaptic populations of stimulated population\n            ### get presynaptic projections\n            projection_list = projections(post=self.stimulated_population)\n            ### get presynaptic populations from projections\n            presyn_pop_list = []\n            presyn_pop_name_list = []\n            for proj in projection_list:\n                ### skip excluded populations\n                if proj.pre in self.excluded_populations_list:\n                    continue\n                ### check if presynaptic population is already in list\n                if proj.pre.name not in presyn_pop_name_list:\n                    presyn_pop_name_list.append(proj.pre.name)\n                    presyn_pop_list.append(proj.pre)\n            ### set antidromic for all presynaptic populations\n            for pop in presyn_pop_list:\n                pop.antidromic = 1\n                pop.antidromic_prob = np.mean(self.stimulated_population.dbs_on)\n                pop.prob_axon_spike = self._axon_spikes_per_pulse_to_prob(\n                    axon_spikes_per_pulse\n                )\n        if passing_fibres:\n            ### get presynaptic populations from passing fibres projections\n            presyn_pop_list = []\n            presyn_pop_name_list = []\n            for proj in self.passing_fibres_list:\n                ### check if presynaptic population is already in list\n                if proj.pre.name not in presyn_pop_name_list:\n                    presyn_pop_name_list.append(proj.pre.name)\n                    presyn_pop_list.append(proj.pre)\n            ### get antidomic_prob for each presynatic population with the passing_fibres_strength\n            antidromic_prob_list = [0] * len(presyn_pop_list)\n            for pop_idx, pop in enumerate(presyn_pop_list):\n                ### get all passing fibres and their strength of a presynaptic pop\n                passing_fibres_strength_of_pop_list = []\n                for proj_idx, proj in 
enumerate(self.passing_fibres_list):\n                    if proj.pre.name == pop.name:\n                        passing_fibres_strength_of_pop_list.append(\n                            passing_fibres_strength[proj_idx]\n                        )\n                ### check if the probs of the single axon branches should be summed up\n                ### if for example a presynaptic pop contributes to two passing fibres, the axons of the presynaptic pop split up into two branches\n                ### thus, if these two branches are both stimulated, they both forward APs antidromic\n                ### thus, sum up the antidromic_prob of the single branches to obtain the antidromic spikes which affect the presynaptic pop\n                ### if sum_branches is False, then this would represent that the stimulation at the axon is before it splits up into multiple branches and there should not be different passing_fibres_strengths for the same presynaptic pop\n                if sum_branches:\n                    antidromic_prob_list[pop_idx] = sum(\n                        passing_fibres_strength_of_pop_list\n                    )\n                else:\n                    if len(set(passing_fibres_strength_of_pop_list)) != 1:\n                        ### list contains different values\n                        raise ValueError(\n                            \"Different passing fibres strengths for the same presynaptic population detected. 
This is not possible if sum_branches is False.\"\n                        )\n                    ### all values are the same, thus take the first one\n                    antidromic_prob_list[pop_idx] = passing_fibres_strength_of_pop_list[\n                        0\n                    ]\n\n                ### TODO\n                ### if summing axon branches leads to a prob > 1, then\n                ### the prob should be set to 1\n                ### the axon spike generation in this pop should be increased\n                ### and all axon spike transmissions from this pop should be decreased by the same factor\n                ### this is not implemented yet... maybe in the future\n                if antidromic_prob_list[pop_idx] > 1:\n                    raise ValueError(\n                        \"Summing the passing fibres strengths of a presynaptic population leads to a antidromic spike probability > 1. This is not possible yet.\"\n                    )\n\n            ### set antidromic for all presynaptic populations\n            for pop_idx, pop in enumerate(presyn_pop_list):\n                pop.antidromic = 1\n                pop.antidromic_prob = antidromic_prob_list[pop_idx]\n                pop.prob_axon_spike = self._axon_spikes_per_pulse_to_prob(\n                    axon_spikes_per_pulse\n                )\n\n    @check_types()\n    def on(\n        self,\n        population_proportion: float | None = None,\n        dbs_depolarization: float | None = None,\n        orthodromic: bool | None = None,\n        antidromic: bool | None = None,\n        efferents: bool | None = None,\n        afferents: bool | None = None,\n        passing_fibres: bool | None = None,\n        passing_fibres_strength: float | list[float] | None = None,\n        sum_branches: bool | None = None,\n        axon_spikes_per_pulse: float | None = None,\n        axon_rate_amp: float | dict[Population | str, float] | None = None,\n        seed: int | None = None,\n    ):\n     
   \"\"\"\n        Activate DBS.\n\n        Args:\n            population_proportion (float, optional):\n                Proportion of the stimulated population which is affected by DBS,\n                neurons are distributed randomly. Default: None, i.e., use value from\n                initialization\n            dbs_depolarization (float, optional):\n                Depolarization effect of the DBS pulse to local soma. Default: None,\n                i.e., use value from initialization\n            orthodromic (bool, optional):\n                If True, DBS causes axonal spikes which are forwarded orthodromically.\n                Default: None, i.e., use value from initialization\n            antidromic (bool, optional):\n                If True, DBS causes axonal spikes which are forwarded antidromically,\n                only available in spiking networks. Default: None, i.e., use value from\n                initialization\n            efferents (bool, optional):\n                If True, DBS affects the efferents of the stimulated population\n                (orthodromic and/or antidromic have to be True too). Default: None,\n                i.e., use value from initialization\n            afferents (bool, optional):\n                If True, DBS affects the afferents of the stimulated population\n                (orthodromic and/or antidromic have to be True too). Default: None,\n                i.e., use value from initialization\n            passing_fibres (bool, optional):\n                If True, DBS affects the passing fibres of the stimulated region defined\n                in passing_fibres_list (orthodromic and/or antidromic have to be True\n                too). 
Default: None, i.e., use value from initialization\n            passing_fibres_strength (float | list[float], optional):\n                Single value or list of float values between 0 and 1 defining how strong\n                the passing fibres are activated by DBS (0: not activated, 1: fully\n                activated like the projections in the DBS stimulated region).\n                Default: None, i.e., use value from initialization\n            sum_branches (bool, optional):\n                If True, the antidromic_prob of a presynaptic population (defining how\n                many axon spikes affect the pop antidromically) of passing fibres is\n                the sum of the passing_fibres_strengths of the single axon branches.\n                Default: None, i.e., use value from initialization\n            axon_spikes_per_pulse (float, optional):\n                Number of average axon spikes per DBS pulse. Default: None, i.e., use\n                value from initialization\n            axon_rate_amp (float | dict[Population | str, float], optional):\n                Similar to prob_axon_spike in spiking model. Which rate is forwarded on\n                axons caused by DBS. You can specify this for each population\n                individually by using a dictionary (keys = Population instances)\n                axon_rate_amp = {pop: 1.5} --> the efferent axons of pop forward a rate\n                of 1.5 during DBS (all other affected projections forward the default\n                value). You can specify the default value by using the key \"default\",\n                e.g. {pop: 1.5, \"default\": 1.0} -> efferent axons of all populations\n                except pop forward a rate of 1.0 during DBS. Default: None, i.e., use\n                value from initialization\n            seed (int, optional):\n                Seed for the random number generator. 
Default: None, i.e., use value\n                from initialization\n        \"\"\"\n\n        ### set DBS on for all populations\n        ### also sets the proportion of affected neurons, call this before set_depolarization and set_axon_spikes!\n        self._set_dbs_on(population_proportion, seed)\n\n        ### set depolarization of population\n        self._set_depolarization(dbs_depolarization)\n\n        ### set axon spikes forwarding\n        self._set_axon_spikes(\n            orthodromic,\n            antidromic,\n            efferents,\n            afferents,\n            passing_fibres,\n            passing_fibres_strength,\n            sum_branches,\n            axon_spikes_per_pulse,\n            axon_rate_amp,\n        )\n\n    def _set_dbs_on(self, population_proportion: float | None, seed: int | None):\n        \"\"\"\n        Set DBS on for all populations, for the stimulated population only the specified\n        proportion is affected by DBS.\n\n        Args:\n            population_proportion (float, optional):\n                Proportion of the stimulated population which is affected by DBS,\n                neurons are distributed randomly. Default: None, i.e., use value from\n                initialization\n            seed (int, optional):\n                Seed for the random number generator. 
Default: None, i.e., use value\n                from initialization\n        \"\"\"\n        ### set parameters for the creation of the DBS on array\n        ### either use given population_proportion or use default value\n        if isinstance(population_proportion, type(None)):\n            population_proportion = self.population_proportion\n        ### either use given seed or use default value\n        if isinstance(seed, type(None)):\n            seed = self.seed\n\n        ### if seed and population_propotion are the same as in the initialization, use the same dbs_on_array\n        if seed == self.seed and population_proportion == self.population_proportion:\n            ### use the same dbs_on_array as in the initialization\n            dbs_on_array = self.dbs_on_array\n        else:\n            ### create new dbs_on_array\n            dbs_on_array = self._create_dbs_on_array(population_proportion, seed)\n\n        ### set DBS on for all populations\n        for pop in populations():\n            ### of the stimulated population only the specified proportion is affected by DBS\n            if pop == self.stimulated_population:\n                pop.dbs_on = dbs_on_array\n            else:\n                pop.dbs_on = 1\n\n    def off(self):\n        \"\"\"\n        Deactivate DBS.\n        \"\"\"\n        ### set DBS off for all populations\n        for pop in populations():\n            pop.dbs_on = 0\n            pop.prob_axon_spike = 0\n            pop.axon_rate_amp = 0\n\n        ### deactivate DBS axon transmission\n        self._deactivate_axon_DBS()\n\n    def update_pointers(self, pointer_list):\n        \"\"\"\n        Update pointers to populations and projections after recreating the model.\n\n        Args:\n            pointer_list (list):\n                List of pointers to populations and projections\n\n        Returns:\n            pointer_list_new (list):\n                List of pointers to populations and projections of the new model\n    
    \"\"\"\n        ### update pointers\n        pointer_list_new: list[Population | Projection] = []\n        for pointer in pointer_list:\n            compartment_name = pointer.name\n            if isinstance(pointer, Population):\n                pointer_list_new.append(get_population(compartment_name))\n            elif isinstance(pointer, Projection):\n                pointer_list_new.append(get_projection(compartment_name))\n            else:\n                raise TypeError(\n                    f\"Pointer {pointer} is neither a Population nor a Projection\"\n                )\n        return pointer_list_new\n
"},{"location":"main/dbs_stimulator/#CompNeuroPy.dbs.DBSstimulator.__init__","title":"__init__(stimulated_population, population_proportion=1.0, excluded_populations_list=[], dbs_depolarization=0.0, orthodromic=False, antidromic=False, efferents=False, afferents=False, passing_fibres=False, passing_fibres_list=[], passing_fibres_strength=1.0, sum_branches=True, dbs_pulse_frequency_Hz=130.0, dbs_pulse_width_us=300.0, axon_spikes_per_pulse=1.0, axon_rate_amp=1.0, seed=None, auto_implement=False, model=None)","text":"

Initialize DBS stimulator.

Warning

Do this before compiling the model!

Parameters:

Name Type Description Default stimulated_population Population

Population which is stimulated by DBS

required population_proportion float

Proportion of the stimulated population which is affected by DBS, neurons are distributed randomly. Default: 1.0.

1.0 excluded_populations_list list

List of populations which are excluded from DBS effects, they are not affected and their axons do not generate axon spikes. Default: [].

[] dbs_depolarization float

Depolarization effect of the DBS pulse to local soma. Default: 0.0.

0.0 orthodromic bool

If True, DBS causes axonal spikes which are forwarded orthodromically. Default: False.

False antidromic bool

If True, DBS causes axonal spikes which are forwarded antidromically, only available in spiking networks. Default: False.

False efferents bool

If True, DBS affects the efferents of the stimulated population (orthodromic and/or antidromic have to be True too). Default: False.

False afferents bool

If True, DBS affects the afferents of the stimulated population (orthodromic and/or antidromic have to be True too). Default: False.

False passing_fibres bool

If True, DBS affects the passing fibres of the stimulated region defined in passing_fibres_list (orthodromic and/or antidromic have to be True too). Default: False.

False passing_fibres_list list of Projections

List of projections which pass the DBS stimulated region and therefore are activated by DBS. Default: [], also set passing_fibres True!

[] passing_fibres_strength float or list of float

Single value or list of float values between 0 and 1 defining how strong the passing fibres are activated by DBS (0: not activated, 1: fully activated like the projections in the DBS stimulated region). Default: 1.

1.0 sum_branches bool

If True, the antidromic_prob of a presynaptic population (defining how many axon spikes affect the pop antidromically) of passing fibres is the sum of the passing_fibres_strengths of the single axon branches. Default: True.

True dbs_pulse_frequency_Hz float

Frequency of the DBS pulse. Default: 130 Hz.

130.0 dbs_pulse_width_us float

Width of the DBS pulse. Default: 300 us.

300.0 axon_spikes_per_pulse float

Number of average axon spikes per DBS pulse. Default: 1.

1.0 axon_rate_amp float or dict of float

Similar to prob_axon_spike in spiking model. Which rate is forwarded on axons caused by DBS. You can specify this for each population individually by using a dictionary (keys = Population instances) axon_rate_amp = {pop: 1.5} --> the efferent axons of pop forward a rate of 1.5 during DBS (all other affected projections forward the default value) You can specify the default value by using the key \"default\", e.g. {pop: 1.5, \"default\": 1.0} -> efferent axons of all populations except pop forward a rate of 1.0 during DBS. Default: 1.0.

1.0 seed int

Seed for the random distribution of affected neurons based on population_proportion. Default: None.

None auto_implement bool

If True, automatically implement DBS mechanisms to the model. Only supported for Izhikevich spiking models and rate-coded models. Default: False. TODO test what happens with mixed models

False model generate_model

CompNeuroPy model which is used to automatically implement DBS mechanisms, should not be compiled!. Default: None, i.e., use all populations and projections of the current magic model

None Source code in CompNeuroPy/dbs.py
@check_types()\ndef __init__(\n    self,\n    stimulated_population: Population,\n    population_proportion: float = 1.0,\n    excluded_populations_list: list[Population] = [],\n    dbs_depolarization: float = 0.0,\n    orthodromic: bool = False,\n    antidromic: bool = False,\n    efferents: bool = False,\n    afferents: bool = False,\n    passing_fibres: bool = False,\n    passing_fibres_list: list[Projection] = [],\n    passing_fibres_strength: float | list[float] = 1.0,\n    sum_branches: bool = True,\n    dbs_pulse_frequency_Hz: float = 130.0,\n    dbs_pulse_width_us: float = 300.0,\n    axon_spikes_per_pulse: float = 1.0,\n    axon_rate_amp: float | dict[Population | str, float] = 1.0,\n    seed: int | None = None,\n    auto_implement: bool = False,\n    model: generate_model | None = None,\n) -> None:\n    \"\"\"\n    Initialize DBS stimulator.\n\n    !!! warning\n        Do this before compiling the model!\n\n    Args:\n        stimulated_population (Population):\n            Population which is stimulated by DBS\n        population_proportion (float, optional):\n            Proportion of the stimulated population which is affected by DBS,\n            neurons are distributed randomly. Default: 1.0.\n        excluded_populations_list (list, optional):\n            List of populations which are excluded from DBS effects, they are not\n            affected and their axons do not generate axon spikes. Default: [].\n        dbs_depolarization (float, optional):\n            Depolarization effect of the DBS pulse to local soma. Default: 0.0.\n        orthodromic (bool, optional):\n            If True, DBS causes axonal spikes which are forwarded orthodromically.\n            Default: False.\n        antidromic (bool, optional):\n            If True, DBS causes axonal spikes which are forwarded antidromically,\n            only available in spiking networks. 
Default: False.\n        efferents (bool, optional):\n            If True, DBS affects the efferents of the stimulated population\n            (orthodromic and/or antidromic have to be True too). Default: False.\n        afferents (bool, optional):\n            If True, DBS affects the afferents of the stimulated population\n            (orthodromic and/or antidromic have to be True too). Default: False.\n        passing_fibres (bool, optional):\n            If True, DBS affects the passing fibres of the stimulated region defined\n            in passing_fibres_list (orthodromic and/or antidromic have to be True\n            too). Default: False.\n        passing_fibres_list (list of Projections, optional):\n            List of projections which pass the DBS stimulated region and therefore\n            are activated by DBS. Default: [], also set passing_fibres True!\n        passing_fibres_strength (float or list of float, optional):\n            Single value or list of float values between 0 and 1 defining how strong\n            the passing fibres are activated by DBS (0: not activated, 1: fully\n            activated like the projections in the DBS stimulated region).\n            Default: 1.\n        sum_branches (bool, optional):\n            If True, the antidromic_prob of a presynaptic population (defining how\n            many axon spikes affect the pop antidromically) of passing fibres is\n            the sum of the passing_fibres_strengths of the single axon branches.\n            Default: True.\n        dbs_pulse_frequency_Hz (float, optional):\n            Frequency of the DBS pulse. Default: 130 Hz.\n        dbs_pulse_width_us (float, optional):\n            Width of the DBS pulse. Default: 300 us.\n        axon_spikes_per_pulse (float, optional):\n            Number of average axon spikes per DBS pulse. Default: 1.\n        axon_rate_amp (float or dict of float, optional):\n            Similar to prob_axon_spike in spiking model. 
Which rate is forwarded on\n            axons caused by DBS. You can specify this for each population\n            individually by using a dictionary (keys = Population instances)\n            axon_rate_amp = {pop: 1.5} --> the efferent axons of pop forward a rate\n            of 1.5 during DBS (all other affected projections forward the default\n            value)\n            You can specify the default value by using the key \"default\", e.g.\n            {pop: 1.5, \"default\": 1.0} -> efferent axons of all populations except\n            pop forward a rate of 1.0 during DBS. Default: 1.0.\n        seed (int, optional):\n            Seed for the random distribution of affected neurons based on\n            population_proportion. Default: None.\n        auto_implement (bool, optional):\n            If True, automatically implement DBS mechanisms to the model. Only\n            supported for Izhikevich spiking models and rate-coded models.\n            Default: False.\n            TODO test what happens with mixed models\n        model (generate_model, optional):\n            CompNeuroPy model which is used to automatically implement DBS\n            mechanisms, should not be compiled!. 
Default: None, i.e., use all\n            populations and projections of the current magic model\n    \"\"\"\n\n    if auto_implement:\n        ### recreate model with DBS mechanisms\n        ### give all variables containing Populations and Projections\n        ### and also recreate them during recreating the model\n        ### variables are:\n        ### - stimulated_population\n        ### - excluded_populations_list\n        ### - passing_fibres_list\n        ### - axon_rate_amp\n        if not isinstance(model, type(None)):\n            ### CompNeuroPy model given\n            ### recreate model with DBS mechanisms\n            create_dbs_model_obj = _CreateDBSmodelcnp(\n                model,\n                stimulated_population,\n                excluded_populations_list,\n                passing_fibres_list,\n                axon_rate_amp,\n            )\n            ### get the new CompNeuroPy model\n            model = create_dbs_model_obj.model\n        else:\n            ### no CompNeuroPy model given --> use all populations and projections of the current magic model\n            ### recreate model with DBS mechanisms\n            create_dbs_model_obj = _CreateDBSmodel(\n                stimulated_population,\n                excluded_populations_list,\n                passing_fibres_list,\n                axon_rate_amp,\n            )\n        ### get the new variables containing Populations and Projections\n        stimulated_population = create_dbs_model_obj.stimulated_population\n        excluded_populations_list = create_dbs_model_obj.excluded_populations_list\n        passing_fibres_list = create_dbs_model_obj.passing_fibres_list\n        axon_rate_amp = create_dbs_model_obj.axon_rate_amp\n\n    ### set parameters\n    self.stimulated_population = stimulated_population\n    self.population_proportion = population_proportion\n    self.excluded_populations_list = excluded_populations_list\n    self.dbs_depolarization = dbs_depolarization\n    
self.orthodromic = orthodromic\n    self.antidromic = antidromic\n    self.efferents = efferents\n    self.afferents = afferents\n    self.passing_fibres = passing_fibres\n    self.passing_fibres_list = passing_fibres_list\n    self.passing_fibres_strength = passing_fibres_strength\n    self.sum_branches = sum_branches\n    self.dbs_pulse_width_us = dbs_pulse_width_us\n    self.axon_spikes_per_pulse = axon_spikes_per_pulse\n    self.axon_rate_amp = axon_rate_amp\n    self.seed = seed\n    self.model = model\n\n    ### ANNarchy constants for DBS\n    self._set_constants(dbs_pulse_frequency_Hz)\n\n    ### randomly select affected neurons i.e. create dbs_on_array\n    self.dbs_on_array = self._create_dbs_on_array(population_proportion, seed)\n
"},{"location":"main/dbs_stimulator/#CompNeuroPy.dbs.DBSstimulator.on","title":"on(population_proportion=None, dbs_depolarization=None, orthodromic=None, antidromic=None, efferents=None, afferents=None, passing_fibres=None, passing_fibres_strength=None, sum_branches=None, axon_spikes_per_pulse=None, axon_rate_amp=None, seed=None)","text":"

Activate DBS.

Parameters:

Name Type Description Default population_proportion float

Proportion of the stimulated population which is affected by DBS, neurons are distributed randomly. Default: None, i.e., use value from initialization

None dbs_depolarization float

Depolarization effect of the DBS pulse to local soma. Default: None, i.e., use value from initialization

None orthodromic bool

If True, DBS causes axonal spikes which are forwarded orthodromically. Default: None, i.e., use value from initialization

None antidromic bool

If True, DBS causes axonal spikes which are forwarded antidromically, only available in spiking networks. Default: None, i.e., use value from initialization

None efferents bool

If True, DBS affects the efferents of the stimulated population (orthodromic and/or antidromic have to be True too). Default: None, i.e., use value from initialization

None afferents bool

If True, DBS affects the afferents of the stimulated population (orthodromic and/or antidromic have to be True too). Default: None, i.e., use value from initialization

None passing_fibres bool

If True, DBS affects the passing fibres of the stimulated region defined in passing_fibres_list (orthodromic and/or antidromic have to be True too). Default: None, i.e., use value from initialization

None passing_fibres_strength float | list[float]

Single value or list of float values between 0 and 1 defining how strong the passing fibres are activated by DBS (0: not activated, 1: fully activated like the projections in the DBS stimulated region). Default: None, i.e., use value from initialization

None sum_branches bool

If True, the antidromic_prob of a presynaptic population (defining how many axon spikes affect the pop antidromically) of passing fibres is the sum of the passing_fibres_strengths of the single axon branches. Default: None, i.e., use value from initialization

None axon_spikes_per_pulse float

Number of average axon spikes per DBS pulse. Default: None, i.e., use value from initialization

None axon_rate_amp float | dict[Population | str, float]

Similar to prob_axon_spike in spiking model. Which rate is forwarded on axons caused by DBS. You can specify this for each population individually by using a dictionary (keys = Population instances) axon_rate_amp = {pop: 1.5} --> the efferent axons of pop forward a rate of 1.5 during DBS (all other affected projections forward the default value). You can specify the default value by using the key \"default\", e.g. {pop: 1.5, \"default\": 1.0} -> efferent axons of all populations except pop forward a rate of 1.0 during DBS. Default: None, i.e., use value from initialization

None seed int

Seed for the random number generator. Default: None, i.e., use value from initialization

None Source code in CompNeuroPy/dbs.py
@check_types()\ndef on(\n    self,\n    population_proportion: float | None = None,\n    dbs_depolarization: float | None = None,\n    orthodromic: bool | None = None,\n    antidromic: bool | None = None,\n    efferents: bool | None = None,\n    afferents: bool | None = None,\n    passing_fibres: bool | None = None,\n    passing_fibres_strength: float | list[float] | None = None,\n    sum_branches: bool | None = None,\n    axon_spikes_per_pulse: float | None = None,\n    axon_rate_amp: float | dict[Population | str, float] | None = None,\n    seed: int | None = None,\n):\n    \"\"\"\n    Activate DBS.\n\n    Args:\n        population_proportion (float, optional):\n            Proportion of the stimulated population which is affected by DBS,\n            neurons are distributed randomly. Default: None, i.e., use value from\n            initialization\n        dbs_depolarization (float, optional):\n            Depolarization effect of the DBS pulse to local soma. Default: None,\n            i.e., use value from initialization\n        orthodromic (bool, optional):\n            If True, DBS causes axonal spikes which are forwarded orthodromically.\n            Default: None, i.e., use value from initialization\n        antidromic (bool, optional):\n            If True, DBS causes axonal spikes which are forwarded antidromically,\n            only available in spiking networks. Default: None, i.e., use value from\n            initialization\n        efferents (bool, optional):\n            If True, DBS affects the efferents of the stimulated population\n            (orthodromic and/or antidromic have to be True too). Default: None,\n            i.e., use value from initialization\n        afferents (bool, optional):\n            If True, DBS affects the afferents of the stimulated population\n            (orthodromic and/or antidromic have to be True too). 
Default: None,\n            i.e., use value from initialization\n        passing_fibres (bool, optional):\n            If True, DBS affects the passing fibres of the stimulated region defined\n            in passing_fibres_list (orthodromic and/or antidromic have to be True\n            too). Default: None, i.e., use value from initialization\n        passing_fibres_strength (float | list[float], optional):\n            Single value or list of float values between 0 and 1 defining how strong\n            the passing fibres are activated by DBS (0: not activated, 1: fully\n            activated like the projections in the DBS stimulated region).\n            Default: None, i.e., use value from initialization\n        sum_branches (bool, optional):\n            If True, the antidromic_prob of a presynaptic population (defining how\n            many axon spikes affect the pop antidromically) of passing fibres is\n            the sum of the passing_fibres_strengths of the single axon branches.\n            Default: None, i.e., use value from initialization\n        axon_spikes_per_pulse (float, optional):\n            Number of average axon spikes per DBS pulse. Default: None, i.e., use\n            value from initialization\n        axon_rate_amp (float | dict[Population | str, float], optional):\n            Similar to prob_axon_spike in spiking model. Which rate is forwarded on\n            axons caused by DBS. You can specify this for each population\n            individually by using a dictionary (keys = Population instances)\n            axon_rate_amp = {pop: 1.5} --> the efferent axons of pop forward a rate\n            of 1.5 during DBS (all other affected projections forward the default\n            value). You can specify the default value by using the key \"default\",\n            e.g. {pop: 1.5, \"default\": 1.0} -> efferent axons of all populations\n            except pop forward a rate of 1.0 during DBS. 
Default: None, i.e., use\n            value from initialization\n        seed (int, optional):\n            Seed for the random number generator. Default: None, i.e., use value\n            from initialization\n    \"\"\"\n\n    ### set DBS on for all populations\n    ### also sets the proportion of affected neurons, call this before set_depolarization and set_axon_spikes!\n    self._set_dbs_on(population_proportion, seed)\n\n    ### set depolarization of population\n    self._set_depolarization(dbs_depolarization)\n\n    ### set axon spikes forwarding\n    self._set_axon_spikes(\n        orthodromic,\n        antidromic,\n        efferents,\n        afferents,\n        passing_fibres,\n        passing_fibres_strength,\n        sum_branches,\n        axon_spikes_per_pulse,\n        axon_rate_amp,\n    )\n
"},{"location":"main/dbs_stimulator/#CompNeuroPy.dbs.DBSstimulator.off","title":"off()","text":"

Deactivate DBS.

Source code in CompNeuroPy/dbs.py
def off(self):\n    \"\"\"\n    Deactivate DBS.\n    \"\"\"\n    ### set DBS off for all populations\n    for pop in populations():\n        pop.dbs_on = 0\n        pop.prob_axon_spike = 0\n        pop.axon_rate_amp = 0\n\n    ### deactivate DBS axon transmission\n    self._deactivate_axon_DBS()\n
"},{"location":"main/dbs_stimulator/#CompNeuroPy.dbs.DBSstimulator.update_pointers","title":"update_pointers(pointer_list)","text":"

Update pointers to populations and projections after recreating the model.

Parameters:

Name Type Description Default pointer_list list

List of pointers to populations and projections

required

Returns:

Name Type Description pointer_list_new list

List of pointers to populations and projections of the new model

Source code in CompNeuroPy/dbs.py
def update_pointers(self, pointer_list):\n    \"\"\"\n    Update pointers to populations and projections after recreating the model.\n\n    Args:\n        pointer_list (list):\n            List of pointers to populations and projections\n\n    Returns:\n        pointer_list_new (list):\n            List of pointers to populations and projections of the new model\n    \"\"\"\n    ### update pointers\n    pointer_list_new: list[Population | Projection] = []\n    for pointer in pointer_list:\n        compartment_name = pointer.name\n        if isinstance(pointer, Population):\n            pointer_list_new.append(get_population(compartment_name))\n        elif isinstance(pointer, Projection):\n            pointer_list_new.append(get_projection(compartment_name))\n        else:\n            raise TypeError(\n                f\"Pointer {pointer} is neither a Population nor a Projection\"\n            )\n    return pointer_list_new\n
"},{"location":"main/define_experiment/","title":"Define Experiments","text":""},{"location":"main/define_experiment/#CompNeuroPy.experiment.CompNeuroExp","title":"CompNeuroPy.experiment.CompNeuroExp","text":"

Experiment combining simulations and recordings.

Use this class as a parent class for your experiment. You have to additionally implement a run function which runs the simulations and controlls the recordings. The run function should return the results of the experiment by calling the results function of the CompNeuroExp class.

Attributes:

Name Type Description monitors CompNeuroMonitors

CompNeuroMonitors object for recordings

data dict

dict for storing optional data

Examples:

from CompNeuroPy import CompNeuroExp\nfrom ANNarchy import simulate\n\nclass MyExperiment(CompNeuroExp):\n    def run(self):\n        # run simulations and control recordings\n        self.monitors.start()\n        simulate(1000)\n        self.reset()\n        simulate(1000)\n        # store optional data\n        self.data[\"duration\"] = 2000\n        # return results\n        return self.results()\n
Source code in CompNeuroPy/experiment.py
class CompNeuroExp:\n    \"\"\"\n    Experiment combining simulations and recordings.\n\n    Use this class as a parent class for your experiment. You have to additionally\n    implement a run function which runs the simulations and controlls the recordings.\n    The run function should return the results of the experiment by calling the results\n    function of the CompNeuroExp class.\n\n    Attributes:\n        monitors (CompNeuroMonitors):\n            CompNeuroMonitors object for recordings\n        data (dict):\n            dict for storing optional data\n\n    Examples:\n        ```python\n        from CompNeuroPy import CompNeuroExp\n        from ANNarchy import simulate\n\n        class MyExperiment(CompNeuroExp):\n            def run(self):\n                # run simulations and control recordings\n                self.monitors.start()\n                simulate(1000)\n                self.reset()\n                simulate(1000)\n                # store optional data\n                self.data[\"duration\"] = 2000\n                # return results\n                return self.results()\n        ```\n    \"\"\"\n\n    def __init__(\n        self,\n        monitors: CompNeuroMonitors | None = None,\n    ):\n        \"\"\"\n        Initialize the experiment.\n\n        Args:\n            monitors (CompNeuroMonitors):\n                CompNeuroMonitors object for recordings\n        \"\"\"\n        self.recordings = {}  # save dict for monitor recordings\n        self.monitors = monitors\n        self.data = {}  # dict for optional data\n\n    def reset(\n        self,\n        populations=True,\n        projections=False,\n        synapses=False,\n        model=True,\n        parameters=True,\n    ):\n        \"\"\"\n        Reset the ANNarchy model and monitors and the CompNeuroMonitors used for the\n        experiment.\n\n        !!! 
warning\n            If you want the network to have the same state at the beginning of each\n            experiment run, you should call this function at the beginning of the run\n            function of the CompNeuroExp class! If you only want to have the same time\n            for the network at the beginning of each experiment run, set populations,\n            projections, and synapses to False.\n\n        Args:\n            populations (bool, optional):\n                reset populations. Defaults to True.\n            projections (bool, optional):\n                reset projections. Defaults to False.\n            synapses (bool, optional):\n                reset synapses. Defaults to False.\n            model (bool, optional):\n                If False, do ignore the arguments populations, projections, and\n                synapses (the network state doesn't change) and only reset the\n                CompNeuroMonitors Default: True.\n            parameters (bool, optional):\n                If False, do not reset the parameters of the model. Default: True.\n        \"\"\"\n        reset_kwargs = {}\n        reset_kwargs[\"populations\"] = populations\n        reset_kwargs[\"projections\"] = projections\n        reset_kwargs[\"synapses\"] = synapses\n        reset_kwargs[\"monitors\"] = True\n\n        ### reset CompNeuroMonitors and ANNarchy model\n        if self.monitors is not None:\n            self.monitors.reset(model=model, parameters=parameters, **reset_kwargs)\n        elif model is True:\n            if parameters is False:\n                ### if parameters=False, get parameters before reset and set them after\n                ### reset\n                parameters = mf._get_all_parameters()\n            reset(**reset_kwargs)\n            if parameters is False:\n                ### if parameters=False, set parameters after reset\n                mf._set_all_parameters(parameters)\n\n    def results(self):\n        \"\"\"\n        !!! 
warning\n            Call this function at the end of the run function of the CompNeuroExp class!\n\n        !!! warning\n            Calling this function resets the CompNeuroMonitors. For example, if you\n            simulate two recording chunks in the run function and you run the experiment\n            twice, you will get two recording chunks for each experiment run (not two\n            for the first and four for the second run). But ANNarchy is not resetted\n            automatically! So the network time and state (activity etc.) at the\n            beginning of the second run is the same as at the end of the first run. To\n            prevent this use the reset function of the CompNeuroExp class.\n\n        Returns:\n            results_obj (CompNeuroExp._ResultsCl):\n                Object with attributes:\n                    recordings (list):\n                        list of recordings\n                    recording_times (recording_times_cl):\n                        recording times object\n                    mon_dict (dict):\n                        dict of recorded variables of the monitors\n                    data (dict):\n                        dict with optional data stored during the experiment\n        \"\"\"\n        obj = self._ResultsCl()\n        if self.monitors is not None:\n            (\n                obj.recordings,\n                obj.recording_times,\n            ) = self.monitors.get_recordings_and_clear()\n            obj.mon_dict = self.monitors.mon_dict\n        else:\n            obj.recordings = []\n            obj.recording_times = None\n            obj.mon_dict = {}\n        obj.data = self.data\n\n        return obj\n\n    class _ResultsCl:\n        \"\"\"\n        Class for storing the results of the experiment.\n\n        Attributes:\n            recordings (list):\n                list of recordings\n            recording_times (recording_times_cl):\n                recording times object\n            mon_dict 
(dict):\n                dict of recorded variables of the monitors\n            data (dict):\n                dict with optional data stored during the experiment\n        \"\"\"\n\n        def __init__(self) -> None:\n            self.recordings: list\n            self.recording_times: RecordingTimes\n            self.mon_dict: dict\n            self.data: dict\n\n    def run(self) -> _ResultsCl:\n        \"\"\"\n        !!! warning\n            This function has to be implemented by the user!\n        \"\"\"\n        raise NotImplementedError(\n            \"\"\"\n                You have to implement a run function which runs the simulations and\n                controlls the recordings. The run function should return the results of\n                the experiment by calling the results function of the CompNeuroExp class.\n            \"\"\"\n        )\n
"},{"location":"main/define_experiment/#CompNeuroPy.experiment.CompNeuroExp.__init__","title":"__init__(monitors=None)","text":"

Initialize the experiment.

Parameters:

Name Type Description Default monitors CompNeuroMonitors

CompNeuroMonitors object for recordings

None Source code in CompNeuroPy/experiment.py
def __init__(\n    self,\n    monitors: CompNeuroMonitors | None = None,\n):\n    \"\"\"\n    Initialize the experiment.\n\n    Args:\n        monitors (CompNeuroMonitors):\n            CompNeuroMonitors object for recordings\n    \"\"\"\n    self.recordings = {}  # save dict for monitor recordings\n    self.monitors = monitors\n    self.data = {}  # dict for optional data\n
"},{"location":"main/define_experiment/#CompNeuroPy.experiment.CompNeuroExp.reset","title":"reset(populations=True, projections=False, synapses=False, model=True, parameters=True)","text":"

Reset the ANNarchy model and monitors and the CompNeuroMonitors used for the experiment.

Warning

If you want the network to have the same state at the beginning of each experiment run, you should call this function at the beginning of the run function of the CompNeuroExp class! If you only want to have the same time for the network at the beginning of each experiment run, set populations, projections, and synapses to False.

Parameters:

Name Type Description Default populations bool

reset populations. Defaults to True.

True projections bool

reset projections. Defaults to False.

False synapses bool

reset synapses. Defaults to False.

False model bool

If False, ignore the arguments populations, projections, and synapses (the network state doesn't change) and only reset the CompNeuroMonitors. Default: True.

True parameters bool

If False, do not reset the parameters of the model. Default: True.

True Source code in CompNeuroPy/experiment.py
def reset(\n    self,\n    populations=True,\n    projections=False,\n    synapses=False,\n    model=True,\n    parameters=True,\n):\n    \"\"\"\n    Reset the ANNarchy model and monitors and the CompNeuroMonitors used for the\n    experiment.\n\n    !!! warning\n        If you want the network to have the same state at the beginning of each\n        experiment run, you should call this function at the beginning of the run\n        function of the CompNeuroExp class! If you only want to have the same time\n        for the network at the beginning of each experiment run, set populations,\n        projections, and synapses to False.\n\n    Args:\n        populations (bool, optional):\n            reset populations. Defaults to True.\n        projections (bool, optional):\n            reset projections. Defaults to False.\n        synapses (bool, optional):\n            reset synapses. Defaults to False.\n        model (bool, optional):\n            If False, do ignore the arguments populations, projections, and\n            synapses (the network state doesn't change) and only reset the\n            CompNeuroMonitors Default: True.\n        parameters (bool, optional):\n            If False, do not reset the parameters of the model. 
Default: True.\n    \"\"\"\n    reset_kwargs = {}\n    reset_kwargs[\"populations\"] = populations\n    reset_kwargs[\"projections\"] = projections\n    reset_kwargs[\"synapses\"] = synapses\n    reset_kwargs[\"monitors\"] = True\n\n    ### reset CompNeuroMonitors and ANNarchy model\n    if self.monitors is not None:\n        self.monitors.reset(model=model, parameters=parameters, **reset_kwargs)\n    elif model is True:\n        if parameters is False:\n            ### if parameters=False, get parameters before reset and set them after\n            ### reset\n            parameters = mf._get_all_parameters()\n        reset(**reset_kwargs)\n        if parameters is False:\n            ### if parameters=False, set parameters after reset\n            mf._set_all_parameters(parameters)\n
"},{"location":"main/define_experiment/#CompNeuroPy.experiment.CompNeuroExp.results","title":"results()","text":"

Warning

Call this function at the end of the run function of the CompNeuroExp class!

Warning

Calling this function resets the CompNeuroMonitors. For example, if you simulate two recording chunks in the run function and you run the experiment twice, you will get two recording chunks for each experiment run (not two for the first and four for the second run). But ANNarchy is not reset automatically! So the network time and state (activity etc.) at the beginning of the second run is the same as at the end of the first run. To prevent this, use the reset function of the CompNeuroExp class.

Returns:

Name Type Description results_obj _ResultsCl

Object with attributes: recordings (list): list of recordings recording_times (recording_times_cl): recording times object mon_dict (dict): dict of recorded variables of the monitors data (dict): dict with optional data stored during the experiment

Source code in CompNeuroPy/experiment.py
def results(self):\n    \"\"\"\n    !!! warning\n        Call this function at the end of the run function of the CompNeuroExp class!\n\n    !!! warning\n        Calling this function resets the CompNeuroMonitors. For example, if you\n        simulate two recording chunks in the run function and you run the experiment\n        twice, you will get two recording chunks for each experiment run (not two\n        for the first and four for the second run). But ANNarchy is not resetted\n        automatically! So the network time and state (activity etc.) at the\n        beginning of the second run is the same as at the end of the first run. To\n        prevent this use the reset function of the CompNeuroExp class.\n\n    Returns:\n        results_obj (CompNeuroExp._ResultsCl):\n            Object with attributes:\n                recordings (list):\n                    list of recordings\n                recording_times (recording_times_cl):\n                    recording times object\n                mon_dict (dict):\n                    dict of recorded variables of the monitors\n                data (dict):\n                    dict with optional data stored during the experiment\n    \"\"\"\n    obj = self._ResultsCl()\n    if self.monitors is not None:\n        (\n            obj.recordings,\n            obj.recording_times,\n        ) = self.monitors.get_recordings_and_clear()\n        obj.mon_dict = self.monitors.mon_dict\n    else:\n        obj.recordings = []\n        obj.recording_times = None\n        obj.mon_dict = {}\n    obj.data = self.data\n\n    return obj\n
"},{"location":"main/define_experiment/#CompNeuroPy.experiment.CompNeuroExp.run","title":"run()","text":"

Warning

This function has to be implemented by the user!

Source code in CompNeuroPy/experiment.py
def run(self) -> _ResultsCl:\n    \"\"\"\n    !!! warning\n        This function has to be implemented by the user!\n    \"\"\"\n    raise NotImplementedError(\n        \"\"\"\n            You have to implement a run function which runs the simulations and\n            controlls the recordings. The run function should return the results of\n            the experiment by calling the results function of the CompNeuroExp class.\n        \"\"\"\n    )\n
"},{"location":"main/generate_models/","title":"Generate Models","text":""},{"location":"main/generate_models/#introduction","title":"Introduction","text":"

One can create a CompNeuroPy-model using the CompNeuroModel class. The CompNeuroModel class takes as one argument the model_creation_function. In this function a classical ANNarchy model is created (populations, projections). The CompNeuroModel class only adds a framework to the model. For a CompNeuroPy-model it is necessary to define unique names for all populations and projections. Models are created in three steps:

  1. model initialization: the initialization of the CompNeuroModel object initializes the framework of the model without creating the ANNarchy objects (populations, projections)
  2. model creation: create the ANNarchy objects (populations, projections), i.e., run the model_creation function
  3. model compilation: compile all created models
"},{"location":"main/generate_models/#example","title":"Example","text":"
from CompNeuroPy import CompNeuroModel\nmy_model = CompNeuroModel(model_creation_function=create_model,  ### the most important part, this function creates the model (populations, projections)\n                          model_kwargs={'a':1, 'b':2},           ### define the two arguments a and b of function create_model\n                          name='my_model',                       ### you can give the model a name\n                          description='my simple example model', ### you can give the model a description\n                          do_create=True,                        ### create the model directly\n                          do_compile=True,                       ### let the model (and all models created before) compile directly\n                          compile_folder_name='my_model')        ### name of the saved compilation folder\n

The following function could be the corresponding model_creation_function:

from ANNarchy import Population, Izhikevich\ndef create_model(a, b):\n    pop = Population(geometry=a, neuron=Izhikevich, name='Izh_pop_a') ### first population, size a\n    pop.b = 0                                                         ### some parameter adjustment\n    Population(geometry=b, neuron=Izhikevich, name='Izh_pop_b')       ### second population, size b\n

Here, two populations are created (both use built-in Izhikevich neuron model of ANNarchy). The function does not require a return value. It is important that all populations and projections have unique names.

A more detailed example is available in the Examples.

"},{"location":"main/generate_models/#CompNeuroPy.generate_model.CompNeuroModel","title":"CompNeuroPy.generate_model.CompNeuroModel","text":"

Class for creating and compiling a model.

Attributes:

Name Type Description name str

name of the model

description str

description of the model

model_creation_function function

function which creates the model

compile_folder_name str

name of the folder in which the model is compiled

model_kwargs dict

keyword arguments for model_creation_function

populations list

list of names of all populations of the model

projections list

list of names of all projections of the model

created bool

True if the model is created

compiled bool

True if the model is compiled

attribute_df pandas dataframe

dataframe containing all attributes of the model compartments

Source code in CompNeuroPy/generate_model.py
class CompNeuroModel:\n    \"\"\"\n    Class for creating and compiling a model.\n\n    Attributes:\n        name (str):\n            name of the model\n        description (str):\n            description of the model\n        model_creation_function (function):\n            function which creates the model\n        compile_folder_name (str):\n            name of the folder in which the model is compiled\n        model_kwargs (dict):\n            keyword arguments for model_creation_function\n        populations (list):\n            list of names of all populations of the model\n        projections (list):\n            list of names of all projections of the model\n        created (bool):\n            True if the model is created\n        compiled (bool):\n            True if the model is compiled\n        attribute_df (pandas dataframe):\n            dataframe containing all attributes of the model compartments\n    \"\"\"\n\n    _initialized_models = {}\n    _compiled_models = {}\n    _compiled_models_updated = False\n\n    @check_types()\n    def __init__(\n        self,\n        model_creation_function: Callable,\n        model_kwargs: dict | None = None,\n        name: str = \"model\",\n        description: str = \"\",\n        do_create: bool = True,\n        do_compile: bool = True,\n        compile_folder_name: str = \"annarchy\",\n    ):\n        \"\"\"\n        Initializes the CompNeuroModel class.\n\n        Args:\n            model_creation_function (function):\n                Function which creates the model.\n            model_kwargs (dict):\n                Keyword arguments for model_creation_function. Default: None.\n            name (str):\n                Name of the model. Default: \"model\".\n            description (str):\n                Description of the model. Default: \"\".\n            do_create (bool):\n                If True the model is created directly. 
Default: True.\n            do_compile (bool):\n                If True the model is compiled directly. Default: True.\n            compile_folder_name (str):\n                Name of the folder in which the model is compiled. Default: \"annarchy\".\n        \"\"\"\n        self.name = name\n        if name == \"model\":\n            self.name = name + str(self._nr_models())\n        self.description = description\n        self.model_creation_function = model_creation_function\n        self.compile_folder_name = compile_folder_name\n        self.model_kwargs = model_kwargs\n        self.populations = []\n        self.projections = []\n        self.created = False\n        self.compiled = False\n        self._attribute_df = None\n        self._attribute_df_compiled = False\n        if do_create:\n            self.create(do_compile=do_compile, compile_folder_name=compile_folder_name)\n\n    @property\n    def compiled(self):\n        \"\"\"\n        True if the model is compiled.\n        \"\"\"\n        ### check if ANNarchy was compiled and _compiled_models is not updated yet\n        if mf.annarchy_compiled() and not self._compiled_models_updated:\n            self._update_compiled_models()\n        return self._compiled_models[self.name]\n\n    @compiled.setter\n    def compiled(self, value):\n        \"\"\"\n        Setter for compiled property.\n        \"\"\"\n        self._compiled_models[self.name] = value\n\n    @property\n    def created(self):\n        \"\"\"\n        True if the model is created.\n        \"\"\"\n        return self._initialized_models[self.name]\n\n    @created.setter\n    def created(self, value):\n        \"\"\"\n        Setter for created property.\n        \"\"\"\n        self._initialized_models[self.name] = value\n\n    @property\n    def attribute_df(self):\n        \"\"\"\n        Dataframe containing all attributes of the model compartments.\n        \"\"\"\n        ### check if ANNarchy was compiled and _attribute_df is not 
updated yet\n        if mf.annarchy_compiled() and not self._attribute_df_compiled:\n            self._update_attribute_df_weights()\n        return self._attribute_df\n\n    def _update_compiled_models(self):\n        \"\"\"\n        Updates _compiled_models to True for all models.\n        \"\"\"\n        ### update _compiled_models\n        for key in self._compiled_models.keys():\n            self._compiled_models[key] = True\n        self._compiled_models_updated = True\n\n    def _update_attribute_df_weights(self):\n        \"\"\"\n        Updates _attribute_df for the weights of all projections.\n        \"\"\"\n        for proj_name in self.projections:\n            values = get_projection(proj_name).w\n            self._update_attribute_df(\n                compartment=proj_name, parameter_name=\"w\", parameter_value=values\n            )\n        self._attribute_df_compiled = True\n\n    def compile(self, compile_folder_name=None):\n        \"\"\"\n        Compiles a created model.\n\n        Args:\n            compile_folder_name (str, optional):\n                Name of the folder in which the model is compiled. Default: value from\n                initialization.\n        \"\"\"\n        ### check if this model is created\n        if self.created:\n            if compile_folder_name == None:\n                compile_folder_name = self.compile_folder_name\n\n            ### check if other models were initialized but not created --> warn that they are not compiled\n            not_created_model_list = self._check_if_models_created()\n            if len(not_created_model_list) > 0:\n                print(\n                    \"\\nWARNING during compile of model \"\n                    + self.name\n                    + \": There are initialized models which are not created, thus not compiled! 
models:\\n\"\n                    + \"\\n\".join(not_created_model_list)\n                    + \"\\n\"\n                )\n            mf.compile_in_folder(compile_folder_name)\n            self.compiled = True\n\n            ### update attribute_df to compiled state, since weights are only available\n            ### after compilation\n            self._update_attribute_df_weights()\n        else:\n            print(\"\\n\")\n            assert False, (\n                \"ERROR during compile of model \"\n                + self.name\n                + \": Only compile the model after it has been created!\"\n            )\n\n    def create(self, do_compile=True, compile_folder_name=None):\n        \"\"\"\n        Creates a model and optionally compiles it directly.\n\n        Args:\n            do_compile (bool, optional):\n                If True the model is compiled directly. Default: True.\n            compile_folder_name (str, optional):\n                Name of the folder in which the model is compiled. 
Default: value from\n                initialization.\n        \"\"\"\n        if self.created:\n            print(\"model\", self.name, \"already created!\")\n        else:\n            initial_existing_model = mf.get_full_model()\n            ### create model populations and projections\n            if self.model_kwargs != None:\n                self.model_creation_function(**self.model_kwargs)\n            else:\n                self.model_creation_function()\n            self.created = True\n\n            ### check which populations and projections have been added\n            post_existing_model = mf.get_full_model()\n            ### save only added not all projections/populations\n            for initial_pop in initial_existing_model[\"populations\"]:\n                post_existing_model[\"populations\"].remove(initial_pop)\n            for initial_proj in initial_existing_model[\"projections\"]:\n                post_existing_model[\"projections\"].remove(initial_proj)\n            self.populations = post_existing_model[\"populations\"]\n            self.projections = post_existing_model[\"projections\"]\n\n            ### check if names of populations and projections are unique\n            self._check_double_compartments()\n\n            ### create parameter dictionary\n            self._attribute_df = self._get_attribute_df()\n\n            if do_compile:\n                self.compile(compile_folder_name)\n\n    def _check_if_models_created(self):\n        \"\"\"\n        Checks which CompNeuroPy models are created\n\n        Returns:\n            not_created_model_list (list):\n                list of names of all initialized CompNeuroPy models which are not\n                created yet\n        \"\"\"\n        not_created_model_list = []\n        for key in self._initialized_models.keys():\n            if self._initialized_models[key] == False:\n                not_created_model_list.append(key)\n\n        return not_created_model_list\n\n    def 
_nr_models(self):\n        \"\"\"\n        Returns:\n            nr_models (int):\n                The current number of initialized (not considering \"created\")\n                CompNeuroPy models\n        \"\"\"\n        return len(list(self._initialized_models.keys()))\n\n    def set_param(self, compartment, parameter_name, parameter_value):\n        \"\"\"\n        Sets the specified parameter of the specified compartment.\n\n        Args:\n            compartment (str):\n                name of model compartment\n            parameter_name (str):\n                name of parameter of the compartment\n            parameter_value (number or array-like with shape of compartment geometry):\n                the value or values of the parameter\n\n        Raises:\n            AssertionError: if model is not created\n            AssertionError: if compartment is neither a population nor a projection of\n                the model\n        \"\"\"\n        ### catch if model is not created\n        assert (\n            self.created == True\n        ), f\"ERROR set_param: model {self.name} has to be created before setting parameters!\"\n\n        ### check if compartment is in populations or projections\n        comp_in_pop = compartment in self.populations\n        comp_in_proj = compartment in self.projections\n\n        if comp_in_pop:\n            comp_obj = get_population(compartment)\n        elif comp_in_proj:\n            comp_obj = get_projection(compartment)\n        else:\n            assert (\n                comp_in_pop or comp_in_proj\n            ), f\"ERROR set_param: setting parameter {parameter_name} of compartment {compartment}. 
The compartment is neither a population nor a projection of the model {self.name}!\"\n\n        ### set the parameter value\n        setattr(comp_obj, parameter_name, parameter_value)\n\n        ### update the model attribute_df\n        self._update_attribute_df(compartment, parameter_name, parameter_value)\n\n    def _update_attribute_df(self, compartment, parameter_name, parameter_value):\n        \"\"\"\n        updates the attribute df for a specific paramter\n\n        Args:\n            compartment (str):\n                name of model compartment\n            parameter_name (str):\n                name of parameter of the compartment\n            parameter_value (number or array-like with shape of compartment geometry):\n                the value or values of the parameter\n        \"\"\"\n        paramter_mask = (\n            (self._attribute_df[\"compartment_name\"] == compartment).astype(int)\n            * (self._attribute_df[\"attribute_name\"] == parameter_name).astype(int)\n        ).astype(bool)\n        parameter_idx = np.arange(paramter_mask.size).astype(int)[paramter_mask][0]\n        min_val = af.get_minimum(parameter_value)\n        max_val = af.get_maximum(parameter_value)\n        if min_val != max_val:\n            self._attribute_df.at[parameter_idx, \"value\"] = f\"[{min_val}, {max_val}]\"\n        else:\n            self._attribute_df.at[parameter_idx, \"value\"] = str(min_val)\n        self._attribute_df.at[parameter_idx, \"definition\"] = \"modified\"\n\n    def _check_double_compartments(self):\n        \"\"\"\n        Goes over all compartments of the model and checks if compartment is only a\n        population or a projection and not both.\n\n        Raises:\n            AssertionError: if model is not created\n            AssertionError: if compartment is both a population and a projection\n        \"\"\"\n        ### cach if model is not created, only if created populations and projections are available\n        assert (\n        
    self.created == True\n        ), f\"ERROR model {self.name}: model has to be created before checking for double compartments!\"\n        ### only have to go over populations and check if they are also projections (go over projections not neccessary)\n        pop_in_projections_list = []\n        pop_in_projections = False\n        for pop_name in self.populations:\n            if pop_name in self.projections:\n                pop_in_projections_list.append(pop_name)\n                pop_in_projections = True\n\n        assert (\n            pop_in_projections == False\n        ), f\"ERROR model {self.name}: One or multiple compartments are both population and projection ({pop_in_projections_list}). Rename them!\"\n\n    def _get_attribute_df(self):\n        \"\"\"\n        Creates a dataframe containing the attributes of all model compartments.\n\n        Returns:\n            attribute_df (pandas dataframe):\n                dataframe containing all attributes of the model compartments\n\n        Raises:\n            AssertionError: if model is not created\n        \"\"\"\n        ### cach if model is not created, only if created populations and projections are available\n        assert (\n            self.created == True\n        ), f\"ERROR model {self.name}: model has to be created before creating paramteer dictionary!\"\n\n        ### create empty paramteter dict\n        attribute_dict = {\n            \"compartment_type\": [],\n            \"compartment_name\": [],\n            \"attribute_name\": [],\n            \"value\": [],\n            \"definition\": [],\n        }\n\n        ### fill paramter dict with population attributes\n        for pop in self.populations:\n            for attribute in vars(get_population(pop))[\"attributes\"]:\n                ### store min and max of attribute\n                ### create numpy array with getattr to use numpy min max function\n                values = np.array(\n                    
[getattr(get_population(pop), attribute)]\n                    + [getattr(get_population(pop), attribute)]\n                )\n                attribute_dict[\"compartment_type\"].append(\"population\")\n                attribute_dict[\"compartment_name\"].append(pop)\n                attribute_dict[\"attribute_name\"].append(attribute)\n                if values.min() != values.max():\n                    attribute_dict[\"value\"].append(f\"[{values.min()}, {values.max()}]\")\n                else:\n                    attribute_dict[\"value\"].append(str(values.min()))\n                attribute_dict[\"definition\"].append(\"init\")\n\n        ### fill paramter dict with projection attributes\n        for proj in self.projections:\n            for attribute in vars(get_projection(proj))[\"attributes\"]:\n                ### store min and max of attribute\n                ### create numpy array with getattr to use numpy min max function\n                values = np.array(\n                    [getattr(get_projection(proj), attribute)]\n                    + [getattr(get_projection(proj), attribute)]\n                )\n                attribute_dict[\"compartment_type\"].append(\"projection\")\n                attribute_dict[\"compartment_name\"].append(proj)\n                attribute_dict[\"attribute_name\"].append(attribute)\n                if values.min() != values.max():\n                    attribute_dict[\"value\"].append(f\"[{values.min()}, {values.max()}]\")\n                else:\n                    attribute_dict[\"value\"].append(values.min())\n                attribute_dict[\"definition\"].append(\"init\")\n\n        ### return dataframe\n        return pd.DataFrame(attribute_dict)\n
"},{"location":"main/generate_models/#CompNeuroPy.generate_model.CompNeuroModel.compiled","title":"compiled property writable","text":"

True if the model is compiled.

"},{"location":"main/generate_models/#CompNeuroPy.generate_model.CompNeuroModel.created","title":"created property writable","text":"

True if the model is created.

"},{"location":"main/generate_models/#CompNeuroPy.generate_model.CompNeuroModel.attribute_df","title":"attribute_df property","text":"

Dataframe containing all attributes of the model compartments.

"},{"location":"main/generate_models/#CompNeuroPy.generate_model.CompNeuroModel.__init__","title":"__init__(model_creation_function, model_kwargs=None, name='model', description='', do_create=True, do_compile=True, compile_folder_name='annarchy')","text":"

Initializes the CompNeuroModel class.

Parameters:

Name Type Description Default model_creation_function function

Function which creates the model.

required model_kwargs dict

Keyword arguments for model_creation_function. Default: None.

None name str

Name of the model. Default: \"model\".

'model' description str

Description of the model. Default: \"\".

'' do_create bool

If True the model is created directly. Default: True.

True do_compile bool

If True the model is compiled directly. Default: True.

True compile_folder_name str

Name of the folder in which the model is compiled. Default: \"annarchy\".

'annarchy' Source code in CompNeuroPy/generate_model.py
@check_types()\ndef __init__(\n    self,\n    model_creation_function: Callable,\n    model_kwargs: dict | None = None,\n    name: str = \"model\",\n    description: str = \"\",\n    do_create: bool = True,\n    do_compile: bool = True,\n    compile_folder_name: str = \"annarchy\",\n):\n    \"\"\"\n    Initializes the CompNeuroModel class.\n\n    Args:\n        model_creation_function (function):\n            Function which creates the model.\n        model_kwargs (dict):\n            Keyword arguments for model_creation_function. Default: None.\n        name (str):\n            Name of the model. Default: \"model\".\n        description (str):\n            Description of the model. Default: \"\".\n        do_create (bool):\n            If True the model is created directly. Default: True.\n        do_compile (bool):\n            If True the model is compiled directly. Default: True.\n        compile_folder_name (str):\n            Name of the folder in which the model is compiled. Default: \"annarchy\".\n    \"\"\"\n    self.name = name\n    if name == \"model\":\n        self.name = name + str(self._nr_models())\n    self.description = description\n    self.model_creation_function = model_creation_function\n    self.compile_folder_name = compile_folder_name\n    self.model_kwargs = model_kwargs\n    self.populations = []\n    self.projections = []\n    self.created = False\n    self.compiled = False\n    self._attribute_df = None\n    self._attribute_df_compiled = False\n    if do_create:\n        self.create(do_compile=do_compile, compile_folder_name=compile_folder_name)\n
"},{"location":"main/generate_models/#CompNeuroPy.generate_model.CompNeuroModel.compile","title":"compile(compile_folder_name=None)","text":"

Compiles a created model.

Parameters:

Name Type Description Default compile_folder_name str

Name of the folder in which the model is compiled. Default: value from initialization.

None Source code in CompNeuroPy/generate_model.py
def compile(self, compile_folder_name=None):\n    \"\"\"\n    Compiles a created model.\n\n    Args:\n        compile_folder_name (str, optional):\n            Name of the folder in which the model is compiled. Default: value from\n            initialization.\n    \"\"\"\n    ### check if this model is created\n    if self.created:\n        if compile_folder_name == None:\n            compile_folder_name = self.compile_folder_name\n\n        ### check if other models were initialized but not created --> warn that they are not compiled\n        not_created_model_list = self._check_if_models_created()\n        if len(not_created_model_list) > 0:\n            print(\n                \"\\nWARNING during compile of model \"\n                + self.name\n                + \": There are initialized models which are not created, thus not compiled! models:\\n\"\n                + \"\\n\".join(not_created_model_list)\n                + \"\\n\"\n            )\n        mf.compile_in_folder(compile_folder_name)\n        self.compiled = True\n\n        ### update attribute_df to compiled state, since weights are only available\n        ### after compilation\n        self._update_attribute_df_weights()\n    else:\n        print(\"\\n\")\n        assert False, (\n            \"ERROR during compile of model \"\n            + self.name\n            + \": Only compile the model after it has been created!\"\n        )\n
"},{"location":"main/generate_models/#CompNeuroPy.generate_model.CompNeuroModel.create","title":"create(do_compile=True, compile_folder_name=None)","text":"

Creates a model and optionally compiles it directly.

Parameters:

Name Type Description Default do_compile bool

If True the model is compiled directly. Default: True.

True compile_folder_name str

Name of the folder in which the model is compiled. Default: value from initialization.

None Source code in CompNeuroPy/generate_model.py
def create(self, do_compile=True, compile_folder_name=None):\n    \"\"\"\n    Creates a model and optionally compiles it directly.\n\n    Args:\n        do_compile (bool, optional):\n            If True the model is compiled directly. Default: True.\n        compile_folder_name (str, optional):\n            Name of the folder in which the model is compiled. Default: value from\n            initialization.\n    \"\"\"\n    if self.created:\n        print(\"model\", self.name, \"already created!\")\n    else:\n        initial_existing_model = mf.get_full_model()\n        ### create model populations and projections\n        if self.model_kwargs != None:\n            self.model_creation_function(**self.model_kwargs)\n        else:\n            self.model_creation_function()\n        self.created = True\n\n        ### check which populations and projections have been added\n        post_existing_model = mf.get_full_model()\n        ### save only added not all projections/populations\n        for initial_pop in initial_existing_model[\"populations\"]:\n            post_existing_model[\"populations\"].remove(initial_pop)\n        for initial_proj in initial_existing_model[\"projections\"]:\n            post_existing_model[\"projections\"].remove(initial_proj)\n        self.populations = post_existing_model[\"populations\"]\n        self.projections = post_existing_model[\"projections\"]\n\n        ### check if names of populations and projections are unique\n        self._check_double_compartments()\n\n        ### create parameter dictionary\n        self._attribute_df = self._get_attribute_df()\n\n        if do_compile:\n            self.compile(compile_folder_name)\n
"},{"location":"main/generate_models/#CompNeuroPy.generate_model.CompNeuroModel.set_param","title":"set_param(compartment, parameter_name, parameter_value)","text":"

Sets the specified parameter of the specified compartment.

Parameters:

Name Type Description Default compartment str

name of model compartment

required parameter_name str

name of parameter of the compartment

required parameter_value number or array-like with shape of compartment geometry

the value or values of the parameter

required

Raises:

Type Description AssertionError

if model is not created

AssertionError

if compartment is neither a population nor a projection of the model

Source code in CompNeuroPy/generate_model.py
def set_param(self, compartment, parameter_name, parameter_value):\n    \"\"\"\n    Sets the specified parameter of the specified compartment.\n\n    Args:\n        compartment (str):\n            name of model compartment\n        parameter_name (str):\n            name of parameter of the compartment\n        parameter_value (number or array-like with shape of compartment geometry):\n            the value or values of the parameter\n\n    Raises:\n        AssertionError: if model is not created\n        AssertionError: if compartment is neither a population nor a projection of\n            the model\n    \"\"\"\n    ### catch if model is not created\n    assert (\n        self.created == True\n    ), f\"ERROR set_param: model {self.name} has to be created before setting parameters!\"\n\n    ### check if compartment is in populations or projections\n    comp_in_pop = compartment in self.populations\n    comp_in_proj = compartment in self.projections\n\n    if comp_in_pop:\n        comp_obj = get_population(compartment)\n    elif comp_in_proj:\n        comp_obj = get_projection(compartment)\n    else:\n        assert (\n            comp_in_pop or comp_in_proj\n        ), f\"ERROR set_param: setting parameter {parameter_name} of compartment {compartment}. The compartment is neither a population nor a projection of the model {self.name}!\"\n\n    ### set the parameter value\n    setattr(comp_obj, parameter_name, parameter_value)\n\n    ### update the model attribute_df\n    self._update_attribute_df(compartment, parameter_name, parameter_value)\n
"},{"location":"main/generate_simulations/","title":"Generate Simulations","text":""},{"location":"main/generate_simulations/#introduction","title":"Introduction","text":"

A CompNeuroPy-simulation can be created using the CompNeuroSim class. Similar to the CompNeuroModel class, a function must be defined that contains the actual simulation (the simulation_function) and the CompNeuroSim object adds a clear framework. A CompNeuroSim is first initialized and can then be run multiple times.

"},{"location":"main/generate_simulations/#example","title":"Example:","text":"
from CompNeuroPy import CompNeuroSim\nmy_simulation = CompNeuroSim(simulation_function=some_simulation,           ### the most important part, this function defines the simulation\n                            simulation_kwargs={'pop':pop1, 'duration':100}, ### define the two arguments pop and duration of simulation_function\n                            name='my_simulation',                           ### you can give the simulation a name\n                            description='my simple example simulation',     ### you can give the simulation a description\n                            requirements=[req],                             ### a list of requirements for the simulation (here only a single requirement)\n                            kwargs_warning=True,                            ### should a warning be printed if simulation kwargs change in future runs\n                            monitor_object = mon)                           ### the Monitors object which is used to record variables                   \n

A possible simulation_function could be:

def some_simulation(pop, duration=1):\n    get_population(pop).a = 5  ### adjust paramter a of pop\n    get_population(pop).b = 5  ### adjust paramter b of pop\n    simulate(duration)         ### simulate the duration in ms\n\n    ### return some info\n    ### will later be accessible for each run\n    return {'paramter a': a, 'paramter b': b, 'a_x_duration': a*duration} \n

And a corresponding requirement could be:

from CompNeuroPy import ReqPopHasAttr\nreq = {'req':ReqPopHasAttr, 'pop':pop1, 'attr':['a', 'b']}\n
Here, one checks if the population pop1 contains the attributes a and b. The ReqPopHasAttr is a built-in requirements-class of CompNeuroPy (see below).

A more detailed example is available in the Examples.

"},{"location":"main/generate_simulations/#simulation-information","title":"Simulation information","text":"

The function simulation_info() returns a SimInfo object which contains usefull information about the simulation runs (see below). The SimInfo object also provides usefull analysis functions associated with specific simulation functions. Currently it provides the get_current_arr() which returns arrays containing the input current for each time step of the built-in simulation functions current_step(), current_stim(), and current_ramp().

"},{"location":"main/generate_simulations/#simulation-functions","title":"Simulation functions","text":"

Just define a classic ANNarchy simulation in a function. Within the functions, the ANNarchy functions get_population() and get_projection() can be used to access the populations and projections using the population and projection names provided by a CompNeuroModel. The return value of the simulation function can later be retrieved from the SimInfo object (the info attribute) in a list containing the return value for each run of the simulation.

"},{"location":"main/generate_simulations/#example_1","title":"Example:","text":"
from ANNarchy import simulate, get_population\n\ndef current_step(pop, t1=500, t2=500, a1=0, a2=100):\n    \"\"\"\n        stimulates a given population in two periods with two input currents\n\n        pop: population name of population, which should be stimulated with input current\n             neuron model of population has to contain \"I_app\" as input current in pA\n        t1/t2: times in ms before/after current step\n        a1/a2: current amplitudes before/after current step in pA\n    \"\"\"\n\n    ### save prev input current\n    I_prev = get_population(pop).I_app\n\n    ### first/pre current step simulation\n    get_population(pop).I_app = a1\n    simulate(t1)\n\n    ### second/post current step simulation\n    get_population(pop).I_app = a2\n    simulate(t2)\n\n    ### reset input current to previous value\n    get_population(pop).I_app = I_prev\n\n    ### return some additional information which could be usefull\n    return {'duration':t1+t2}\n
"},{"location":"main/generate_simulations/#requirements","title":"Requirements","text":"

In order to perform simulations with models, the models must almost always fulfill certain requirements. For example, if the input current of a population is to be set, this population (or the neuron model) must of course have the corresponding variable. Such preconditions can be tested in advance with the simulation_requirements classes. They only need to contain a function run() to test the requirements (if the requirements are not met, an error is raised). In CompNeuroPy predefined simulation_requirements classes are available (CompNeuroPy.simulation_requirements; currently only ReqPopHasAttr). In the CompNeuroSim class, the requirements are passed as arguments in a list (see above). Each requirement (list entry) must be defined as a dictionary with keys req (the requirement class) and the arguments of the requirement class (e.g., pop and attr for the ReqPopHasAttr).

Here two requirements are defined (both ReqPopHasAttr). All populations of my_model should contain the attribute (variable or parameter) 'I' and all populations of my_other_model should contain the attribute 'v':

req1 = {'req':ReqPopHasAttr, 'pop':my_model.populations, 'attr':'I'}\nreq2 = {'req':ReqPopHasAttr, 'pop':my_other_model.populations, 'attr':'v'}\nmy_two_model_simulation = CompNeuroSim(..., requirements=[req1, req2])\n

As described above, new simulation_kwargs can be passed to the run() function of a CompNeuroSim object. Thus, one could initially pass a particular model as simulation_kwargs and for a later run pass a different model. If the requirements are defined as shown above, it is not tested again whether the new model (e.g. my_third_model) also fulfills the requirements (because the requirements were defined for my_model and my_other_model). To work around this, an argument for a simulation_requirements class can also be linked to a simulation_kwargs entry. Thus, if new simulation_kwargs are used, the simulation_requirements arguments adapt as well. This can be done using a string with the syntax \"simulation_kwargs.<kwarg_name>.<optional_attribute_of_kwarg>\", as shown in this example:

req1 = {'req':ReqPopHasAttr, 'pop':\"simulation_kwargs.model1.populations\", 'attr':'I'}\nreq2 = {'req':ReqPopHasAttr, 'pop':\"simulation_kwargs.model2.populations\", 'attr':'v'}\nmy_two_model_simulation = CompNeuroSim(simulation_kwargs={'model1':my_model, 'model2':my_other_model, 'parameter':5},\n                                        ...,\n                                        requirements=[req1, req2])\n...\nmy_two_model_simulation.run({'model1':my_third_model})\n

Due to the string \"simulation_kwargs.model1.populations\" the pop argument of req1 is now linked to model1 (defined in the simulation_kwargs). Thus, in the run where a different model (my_third_model) is used for model1, req1 is automatically tested for the new model1.

"},{"location":"main/generate_simulations/#CompNeuroPy.generate_simulation.CompNeuroSim","title":"CompNeuroPy.generate_simulation.CompNeuroSim","text":"

Class for generating a CompNeuroPy simulation.

Source code in CompNeuroPy/generate_simulation.py
class CompNeuroSim:\n    \"\"\"\n    Class for generating a CompNeuroPy simulation.\n    \"\"\"\n\n    _initialized_simulations = []\n\n    def __init__(\n        self,\n        simulation_function: Callable,\n        simulation_kwargs: dict | None = None,\n        name: str = \"simulation\",\n        description: str = \"\",\n        requirements: list | None = None,\n        kwargs_warning: bool = False,\n        monitor_object: CompNeuroMonitors | None = None,\n    ):\n        \"\"\"\n        Args:\n            simulation_function (function):\n                Function which runs the simulation.\n            simulation_kwargs (dict, optional):\n                Dictionary of arguments for the simulation_function. Default: None.\n            name (str, optional):\n                Name of the simulation. Default: \"simulation\".\n            description (str, optional):\n                Description of the simulation. Default: \"\".\n            requirements (list, optional):\n                List of requirements for the simulation. It's a list of dictionaries\n                which contain the requirement class itself (key: \"req\") and the\n                corresponding arguments (keys are the names of the arguments). The\n                arguments can be inherited from the simulation kwargs by using the\n                syntax 'simulation_kwargs.<kwarg_name>'. Default: None.\n            kwargs_warning (bool, optional):\n                If True, a warning is printed if the simulation_kwargs are changed\n                during the simulation. Default: False.\n            monitor_object (CompNeuroMonitors object, optional):\n                CompNeuroMonitors object to automatically track the recording chunk for each\n                simulation run. 
Default: None.\n        \"\"\"\n        # set simulation function\n        self.name = name\n        if name == \"simulation\":\n            self.name = name + str(self._nr_simulations())\n        self._initialized_simulations.append(self.name)\n        self.description = description\n        self.simulation_function = simulation_function\n        self.simulation_kwargs = simulation_kwargs\n        if requirements is None:\n            self.requirements = []\n        else:\n            self.requirements = requirements\n        self.start = []\n        self.end = []\n        self.info = []\n        self.kwargs = []\n        if kwargs_warning:\n            self._warned = False\n        else:\n            self._warned = True\n        self.monitor_object = monitor_object\n        if monitor_object is not None:\n            self.monitor_chunk = []\n        else:\n            self.monitor_chunk = None\n\n        ### test initial requirements\n        self._test_req(simulation_kwargs=simulation_kwargs)\n\n    def run(self, simulation_kwargs: dict | None = None):\n        \"\"\"\n        Runs the simulation function. With each run extend start, end list containing\n        start and end time of the corresponding run and the info list containing the\n        return value of the simulation function.\n\n        Args:\n            simulation_kwargs (dict, optional):\n                Temporary simulation kwargs which override the initialized simulation\n                kwargs. 
Default: None, i.e., use values from initialization.\n        \"\"\"\n\n        ### define the current simulation kwargs\n        if simulation_kwargs is not None:\n            if self.simulation_kwargs is not None:\n                ### not replace initialized kwargs completely but only the kwargs which are given\n                tmp_kwargs = self.simulation_kwargs.copy()\n                for key, val in simulation_kwargs.items():\n                    tmp_kwargs[key] = val\n            else:\n                ### there are no initial kwargs --> only use the kwargs which are given\n                tmp_kwargs = simulation_kwargs\n            if not (self._warned) and len(self.requirements) > 0:\n                print(\n                    \"\\nWARNING! run\",\n                    self.name,\n                    \"changed simulation kwargs, initial requirements may no longer be fulfilled!\\n\",\n                )\n                self._warned = True\n        else:\n            tmp_kwargs = self.simulation_kwargs\n\n        ### before each run, test requirements\n        self._test_req(simulation_kwargs=tmp_kwargs)\n\n        ### and append current simulation kwargs to the kwargs variable\n        self.kwargs.append(tmp_kwargs)\n\n        ### and append the current chunk of the monitors object to the chunk variable\n        if self.monitor_object is not None:\n            self.monitor_chunk.append(self.monitor_object.current_chunk())\n\n        ### run the simulation, store start and end simulation time\n        self.start.append(get_time())\n        if tmp_kwargs is not None:\n            self.info.append(self.simulation_function(**tmp_kwargs))\n        else:\n            self.info.append(self.simulation_function())\n        self.end.append(get_time())\n\n    def _nr_simulations(self):\n        \"\"\"\n        Returns the current number of initialized CompNeuroPy simulations.\n        \"\"\"\n        return len(self._initialized_simulations)\n\n    def _test_req(self, 
simulation_kwargs=None):\n        \"\"\"\n        Tests the initialized requirements with the current simulation_kwargs.\n        \"\"\"\n\n        if simulation_kwargs is None:  # --> use the initial simulation_kwargs\n            simulation_kwargs = self.simulation_kwargs\n\n        for req in self.requirements:\n            ### check if requirement_kwargs are given besides the requirement itself\n            if len(list(req.keys())) > 1:\n                ### remove the requirement itself from the kwargs\n                req_kwargs = ef.remove_key(req, \"req\")\n                ### check if req_kwargs reference to simulation_kwargs, if yes, use the\n                ### current simulation kwargs instead of the intial ones\n                for key, val in req_kwargs.items():\n                    if isinstance(val, str):\n                        val_split = val.split(\".\")\n                        ### check if val is a reference to simulation_kwargs\n                        if val_split[0] == \"simulation_kwargs\":\n                            if len(val_split) == 1:\n                                ### val is only simulation_kwargs\n                                req_kwargs = simulation_kwargs\n                            elif len(val_split) == 2:\n                                ### val is simulation_kwargs.something\n                                req_kwargs[key] = simulation_kwargs[val_split[1]]\n                            else:\n                                ### val is simulation_kwargs.something.something... e.g. 
key='pops' and val= 'simulation_kwargs.model.populations'\n                                req_kwargs[key] = eval(\n                                    'simulation_kwargs[\"'\n                                    + val_split[1]\n                                    + '\"].'\n                                    + \".\".join(val_split[2:])\n                                )\n                ### run the requirement using the current req_kwargs\n                req[\"req\"](**req_kwargs).run()\n\n            else:\n                ### a requirement is given without kwargs --> just run it\n                req[\"req\"]().run()\n\n    def get_current_arr(self, dt, flat=False):\n        \"\"\"\n        Method exclusively for current_step simulation functions. Gets the current array\n        (input current value for each time step) of all runs.\n\n        !!! warning\n            This method will be removed soon. Use the get_current_arr method of the\n            SimInfo class instead.\n\n        Args:\n            dt (float):\n                Time step size of the simulation.\n            flat (bool, optional):\n                If True, returns a flattened array. Assumes that all runs are run\n                consecutively without brakes. Default: False, i.e., returns a list of\n                arrays.\n\n        Returns:\n            current_arr (list of arrays):\n                List of arrays containing the current values for each time step of each\n                run. 
If flat=True, returns a flattened array.\n        \"\"\"\n        assert (\n            self.simulation_function.__name__ == \"current_step\"\n        ), 'ERROR get_current_arr: Simulation has to be \"current_step\"!'\n        ### TODO: remove because deprecated\n        print(\n            \"WARNING get_current_arr function will only be available in SimInfo soon.\"\n        )\n        current_arr = []\n        for run in range(len(self.kwargs)):\n            t1 = self.kwargs[run][\"t1\"]\n            t2 = self.kwargs[run][\"t2\"]\n            a1 = self.kwargs[run][\"a1\"]\n            a2 = self.kwargs[run][\"a2\"]\n\n            if t1 > 0 and t2 > 0:\n                current_arr.append(\n                    np.concatenate(\n                        [\n                            np.ones(int(round(t1 / dt))) * a1,\n                            np.ones(int(round(t2 / dt))) * a2,\n                        ]\n                    )\n                )\n            elif t2 > 0:\n                current_arr.append(np.ones(int(round(t2 / dt))) * a2)\n            else:\n                current_arr.append(np.ones(int(round(t1 / dt))) * a1)\n\n        if flat:\n            return np.concatenate(current_arr)\n        else:\n            return current_arr\n\n    def simulation_info(self):\n        \"\"\"\n        Returns a SimInfo object containing the simulation information.\n\n        Returns:\n            simulation_info_obj (SimInfo):\n                Simulation information object.\n        \"\"\"\n\n        simulation_info_obj = SimInfo(\n            self.name,\n            self.description,\n            self.simulation_function.__name__,\n            self.start,\n            self.end,\n            self.info,\n            self.kwargs,\n            self.monitor_chunk,\n        )\n\n        return simulation_info_obj\n
"},{"location":"main/generate_simulations/#CompNeuroPy.generate_simulation.CompNeuroSim.__init__","title":"__init__(simulation_function, simulation_kwargs=None, name='simulation', description='', requirements=None, kwargs_warning=False, monitor_object=None)","text":"

Parameters:

Name Type Description Default simulation_function function

Function which runs the simulation.

required simulation_kwargs dict

Dictionary of arguments for the simulation_function. Default: None.

None name str

Name of the simulation. Default: \"simulation\".

'simulation' description str

Description of the simulation. Default: \"\".

'' requirements list

List of requirements for the simulation. It's a list of dictionaries which contain the requirement class itself (key: \"req\") and the corresponding arguments (keys are the names of the arguments). The arguments can be inherited from the simulation kwargs by using the syntax 'simulation_kwargs.'. Default: None. None kwargs_warning bool

If True, a warning is printed if the simulation_kwargs are changed during the simulation. Default: False.

False monitor_object CompNeuroMonitors object

CompNeuroMonitors object to automatically track the recording chunk for each simulation run. Default: None.

None Source code in CompNeuroPy/generate_simulation.py
def __init__(\n    self,\n    simulation_function: Callable,\n    simulation_kwargs: dict | None = None,\n    name: str = \"simulation\",\n    description: str = \"\",\n    requirements: list | None = None,\n    kwargs_warning: bool = False,\n    monitor_object: CompNeuroMonitors | None = None,\n):\n    \"\"\"\n    Args:\n        simulation_function (function):\n            Function which runs the simulation.\n        simulation_kwargs (dict, optional):\n            Dictionary of arguments for the simulation_function. Default: None.\n        name (str, optional):\n            Name of the simulation. Default: \"simulation\".\n        description (str, optional):\n            Description of the simulation. Default: \"\".\n        requirements (list, optional):\n            List of requirements for the simulation. It's a list of dictionaries\n            which contain the requirement class itself (key: \"req\") and the\n            corresponding arguments (keys are the names of the arguments). The\n            arguments can be inherited from the simulation kwargs by using the\n            syntax 'simulation_kwargs.<kwarg_name>'. Default: None.\n        kwargs_warning (bool, optional):\n            If True, a warning is printed if the simulation_kwargs are changed\n            during the simulation. Default: False.\n        monitor_object (CompNeuroMonitors object, optional):\n            CompNeuroMonitors object to automatically track the recording chunk for each\n            simulation run. 
Default: None.\n    \"\"\"\n    # set simulation function\n    self.name = name\n    if name == \"simulation\":\n        self.name = name + str(self._nr_simulations())\n    self._initialized_simulations.append(self.name)\n    self.description = description\n    self.simulation_function = simulation_function\n    self.simulation_kwargs = simulation_kwargs\n    if requirements is None:\n        self.requirements = []\n    else:\n        self.requirements = requirements\n    self.start = []\n    self.end = []\n    self.info = []\n    self.kwargs = []\n    if kwargs_warning:\n        self._warned = False\n    else:\n        self._warned = True\n    self.monitor_object = monitor_object\n    if monitor_object is not None:\n        self.monitor_chunk = []\n    else:\n        self.monitor_chunk = None\n\n    ### test initial requirements\n    self._test_req(simulation_kwargs=simulation_kwargs)\n
"},{"location":"main/generate_simulations/#CompNeuroPy.generate_simulation.CompNeuroSim.run","title":"run(simulation_kwargs=None)","text":"

Runs the simulation function. With each run extend start, end list containing start and end time of the corresponding run and the info list containing the return value of the simulation function.

Parameters:

Name Type Description Default simulation_kwargs dict

Temporary simulation kwargs which override the initialized simulation kwargs. Default: None, i.e., use values from initialization.

None Source code in CompNeuroPy/generate_simulation.py
def run(self, simulation_kwargs: dict | None = None):\n    \"\"\"\n    Runs the simulation function. With each run extend start, end list containing\n    start and end time of the corresponding run and the info list containing the\n    return value of the simulation function.\n\n    Args:\n        simulation_kwargs (dict, optional):\n            Temporary simulation kwargs which override the initialized simulation\n            kwargs. Default: None, i.e., use values from initialization.\n    \"\"\"\n\n    ### define the current simulation kwargs\n    if simulation_kwargs is not None:\n        if self.simulation_kwargs is not None:\n            ### not replace initialized kwargs completely but only the kwargs which are given\n            tmp_kwargs = self.simulation_kwargs.copy()\n            for key, val in simulation_kwargs.items():\n                tmp_kwargs[key] = val\n        else:\n            ### there are no initial kwargs --> only use the kwargs which are given\n            tmp_kwargs = simulation_kwargs\n        if not (self._warned) and len(self.requirements) > 0:\n            print(\n                \"\\nWARNING! 
run\",\n                self.name,\n                \"changed simulation kwargs, initial requirements may no longer be fulfilled!\\n\",\n            )\n            self._warned = True\n    else:\n        tmp_kwargs = self.simulation_kwargs\n\n    ### before each run, test requirements\n    self._test_req(simulation_kwargs=tmp_kwargs)\n\n    ### and append current simulation kwargs to the kwargs variable\n    self.kwargs.append(tmp_kwargs)\n\n    ### and append the current chunk of the monitors object to the chunk variable\n    if self.monitor_object is not None:\n        self.monitor_chunk.append(self.monitor_object.current_chunk())\n\n    ### run the simulation, store start and end simulation time\n    self.start.append(get_time())\n    if tmp_kwargs is not None:\n        self.info.append(self.simulation_function(**tmp_kwargs))\n    else:\n        self.info.append(self.simulation_function())\n    self.end.append(get_time())\n
"},{"location":"main/generate_simulations/#CompNeuroPy.generate_simulation.CompNeuroSim.get_current_arr","title":"get_current_arr(dt, flat=False)","text":"

Method exclusively for current_step simulation functions. Gets the current array (input current value for each time step) of all runs.

Warning

This method will be removed soon. Use the get_current_arr method of the SimInfo class instead.

Parameters:

Name Type Description Default dt float

Time step size of the simulation.

required flat bool

If True, returns a flattened array. Assumes that all runs are run consecutively without breaks. Default: False, i.e., returns a list of arrays.

False

Returns:

Name Type Description current_arr list of arrays

List of arrays containing the current values for each time step of each run. If flat=True, returns a flattened array.

Source code in CompNeuroPy/generate_simulation.py
def get_current_arr(self, dt, flat=False):\n    \"\"\"\n    Method exclusively for current_step simulation functions. Gets the current array\n    (input current value for each time step) of all runs.\n\n    !!! warning\n        This method will be removed soon. Use the get_current_arr method of the\n        SimInfo class instead.\n\n    Args:\n        dt (float):\n            Time step size of the simulation.\n        flat (bool, optional):\n            If True, returns a flattened array. Assumes that all runs are run\n            consecutively without brakes. Default: False, i.e., returns a list of\n            arrays.\n\n    Returns:\n        current_arr (list of arrays):\n            List of arrays containing the current values for each time step of each\n            run. If flat=True, returns a flattened array.\n    \"\"\"\n    assert (\n        self.simulation_function.__name__ == \"current_step\"\n    ), 'ERROR get_current_arr: Simulation has to be \"current_step\"!'\n    ### TODO: remove because deprecated\n    print(\n        \"WARNING get_current_arr function will only be available in SimInfo soon.\"\n    )\n    current_arr = []\n    for run in range(len(self.kwargs)):\n        t1 = self.kwargs[run][\"t1\"]\n        t2 = self.kwargs[run][\"t2\"]\n        a1 = self.kwargs[run][\"a1\"]\n        a2 = self.kwargs[run][\"a2\"]\n\n        if t1 > 0 and t2 > 0:\n            current_arr.append(\n                np.concatenate(\n                    [\n                        np.ones(int(round(t1 / dt))) * a1,\n                        np.ones(int(round(t2 / dt))) * a2,\n                    ]\n                )\n            )\n        elif t2 > 0:\n            current_arr.append(np.ones(int(round(t2 / dt))) * a2)\n        else:\n            current_arr.append(np.ones(int(round(t1 / dt))) * a1)\n\n    if flat:\n        return np.concatenate(current_arr)\n    else:\n        return current_arr\n
"},{"location":"main/generate_simulations/#CompNeuroPy.generate_simulation.CompNeuroSim.simulation_info","title":"simulation_info()","text":"

Returns a SimInfo object containing the simulation information.

Returns:

Name Type Description simulation_info_obj SimInfo

Simulation information object.

Source code in CompNeuroPy/generate_simulation.py
def simulation_info(self):\n    \"\"\"\n    Returns a SimInfo object containing the simulation information.\n\n    Returns:\n        simulation_info_obj (SimInfo):\n            Simulation information object.\n    \"\"\"\n\n    simulation_info_obj = SimInfo(\n        self.name,\n        self.description,\n        self.simulation_function.__name__,\n        self.start,\n        self.end,\n        self.info,\n        self.kwargs,\n        self.monitor_chunk,\n    )\n\n    return simulation_info_obj\n
"},{"location":"main/generate_simulations/#CompNeuroPy.generate_simulation.SimInfo","title":"CompNeuroPy.generate_simulation.SimInfo","text":"

Class for storing the simulation information.

Attributes:

Name Type Description name str

Name of the simulation.

description str

Description of the simulation.

simulation_function str

Name of the simulation function.

start list

List of start times of the simulation runs.

end list

List of end times of the simulation runs.

info list

List of return values of the simulation function of each simulation run.

kwargs list

List of simulation kwargs of the simulation function of each simulation run.

monitor_chunk list

List of recording chunks of the used CompNeuroMonitors object of each simulation run.

Source code in CompNeuroPy/generate_simulation.py
class SimInfo:\n    \"\"\"\n    Class for storing the simulation information.\n\n    Attributes:\n        name (str):\n            Name of the simulation.\n        description (str):\n            Description of the simulation.\n        simulation_function (str):\n            Name of the simulation function.\n        start (list):\n            List of start times of the simulation runs.\n        end (list):\n            List of end times of the simulation runs.\n        info (list):\n            List of return values of the simulation function of each simulation run.\n        kwargs (list):\n            List of simulation kwargs of the simulation function of each simulation run.\n        monitor_chunk (list):\n            List of recording chunks of the used CompNeuroMonitors object of each simulation run.\n    \"\"\"\n\n    def __init__(\n        self,\n        name,\n        description,\n        simulation_function,\n        start,\n        end,\n        info,\n        kwargs,\n        monitor_chunk,\n    ):\n        \"\"\"\n        Initialization of the simulation information object.\n\n        Args:\n            name (str):\n                Name of the simulation.\n            description (str):\n                Description of the simulation.\n            simulation_function (str):\n                Name of the simulation function.\n            start (list):\n                List of start times of the simulation runs.\n            end (list):\n                List of end times of the simulation runs.\n            info (list):\n                List of return values of the simulation function of each simulation run.\n            kwargs (list):\n                List of simulation kwargs of the simulation function of each simulation\n                run.\n            monitor_chunk (list):\n                List of recording chunks of the used CompNeuroMonitors object of each simulation\n                run.\n        \"\"\"\n        self.name = name\n        
self.description = description\n        self.simulation_function = simulation_function\n        self.start = start\n        self.end = end\n        self.info = info\n        self.kwargs = kwargs\n        self.monitor_chunk = monitor_chunk\n\n    def get_current_arr(self, dt, flat=False):\n        \"\"\"\n        Method exclusively for the following simulation functions (built-in\n        CompNeuroPy):\n            - current_step\n            - current_stim\n            - current_ramp\n        Gets the current array (input current value for each time step) of all runs.\n\n        Args:\n            dt (float):\n                Time step size of the simulation.\n            flat (bool, optional):\n                If True, returns a flattened array. Assumes that all runs are run\n                consecutively without brakes. Default: False, i.e., returns a list of\n                arrays.\n\n        Returns:\n            current_arr (list of arrays):\n                List of arrays containing the current values for each time step of each\n                run. 
If flat=True, returns a flattened array.\n        \"\"\"\n        assert (\n            self.simulation_function == \"current_step\"\n            or self.simulation_function == \"current_stim\"\n            or self.simulation_function == \"current_ramp\"\n        ), 'ERROR get_current_arr: Simulation has to be \"current_step\", \"current_stim\" or \"current_ramp\"!'\n\n        if self.simulation_function == \"current_step\":\n            current_arr = []\n            for run in range(len(self.kwargs)):\n                t1 = self.kwargs[run][\"t1\"]\n                t2 = self.kwargs[run][\"t2\"]\n                a1 = self.kwargs[run][\"a1\"]\n                a2 = self.kwargs[run][\"a2\"]\n\n                if t1 > 0 and t2 > 0:\n                    current_arr.append(\n                        np.concatenate(\n                            [\n                                np.ones(int(round(t1 / dt))) * a1,\n                                np.ones(int(round(t2 / dt))) * a2,\n                            ]\n                        )\n                    )\n                elif t2 > 0:\n                    current_arr.append(np.ones(int(round(t2 / dt))) * a2)\n                else:\n                    current_arr.append(np.ones(int(round(t1 / dt))) * a1)\n\n            if flat:\n                return np.concatenate(current_arr)\n            else:\n                return current_arr\n\n        elif self.simulation_function == \"current_stim\":\n            current_arr = []\n            for run in range(len(self.kwargs)):\n                t = self.kwargs[run][\"t\"]\n                a = self.kwargs[run][\"a\"]\n\n                if t > 0:\n                    current_arr.append(np.ones(int(round(t / dt))) * a)\n\n            if flat:\n                return np.concatenate(current_arr)\n            else:\n                return current_arr\n\n        elif self.simulation_function == \"current_ramp\":\n            current_arr = []\n            for run in 
range(len(self.kwargs)):\n                amp = self.kwargs[run][\"a0\"]\n                current_arr_ramp = []\n                for stim_idx in range(self.kwargs[run][\"n\"]):\n                    t = self.info[run][\"dur_stim\"]\n                    a = amp\n                    current_arr_ramp.append(np.ones(int(round(t / dt))) * a)\n                    amp = amp + self.info[run][\"da\"]\n                current_arr.append(list(np.concatenate(current_arr_ramp)))\n\n            if flat:\n                return np.concatenate(current_arr)\n            else:\n                return current_arr\n
"},{"location":"main/generate_simulations/#CompNeuroPy.generate_simulation.SimInfo.__init__","title":"__init__(name, description, simulation_function, start, end, info, kwargs, monitor_chunk)","text":"

Initialization of the simulation information object.

Parameters:

Name Type Description Default name str

Name of the simulation.

required description str

Description of the simulation.

required simulation_function str

Name of the simulation function.

required start list

List of start times of the simulation runs.

required end list

List of end times of the simulation runs.

required info list

List of return values of the simulation function of each simulation run.

required kwargs list

List of simulation kwargs of the simulation function of each simulation run.

required monitor_chunk list

List of recording chunks of the used CompNeuroMonitors object of each simulation run.

required Source code in CompNeuroPy/generate_simulation.py
def __init__(\n    self,\n    name,\n    description,\n    simulation_function,\n    start,\n    end,\n    info,\n    kwargs,\n    monitor_chunk,\n):\n    \"\"\"\n    Initialization of the simulation information object.\n\n    Args:\n        name (str):\n            Name of the simulation.\n        description (str):\n            Description of the simulation.\n        simulation_function (str):\n            Name of the simulation function.\n        start (list):\n            List of start times of the simulation runs.\n        end (list):\n            List of end times of the simulation runs.\n        info (list):\n            List of return values of the simulation function of each simulation run.\n        kwargs (list):\n            List of simulation kwargs of the simulation function of each simulation\n            run.\n        monitor_chunk (list):\n            List of recording chunks of the used CompNeuroMonitors object of each simulation\n            run.\n    \"\"\"\n    self.name = name\n    self.description = description\n    self.simulation_function = simulation_function\n    self.start = start\n    self.end = end\n    self.info = info\n    self.kwargs = kwargs\n    self.monitor_chunk = monitor_chunk\n
"},{"location":"main/generate_simulations/#CompNeuroPy.generate_simulation.SimInfo.get_current_arr","title":"get_current_arr(dt, flat=False)","text":"

Method exclusively for the following simulation functions (built-in CompNeuroPy): - current_step - current_stim - current_ramp Gets the current array (input current value for each time step) of all runs.

Parameters:

Name Type Description Default dt float

Time step size of the simulation.

required flat bool

If True, returns a flattened array. Assumes that all runs are run consecutively without breaks. Default: False, i.e., returns a list of arrays.

False

Returns:

Name Type Description current_arr list of arrays

List of arrays containing the current values for each time step of each run. If flat=True, returns a flattened array.

Source code in CompNeuroPy/generate_simulation.py
def get_current_arr(self, dt, flat=False):\n    \"\"\"\n    Method exclusively for the following simulation functions (built-in\n    CompNeuroPy):\n        - current_step\n        - current_stim\n        - current_ramp\n    Gets the current array (input current value for each time step) of all runs.\n\n    Args:\n        dt (float):\n            Time step size of the simulation.\n        flat (bool, optional):\n            If True, returns a flattened array. Assumes that all runs are run\n            consecutively without brakes. Default: False, i.e., returns a list of\n            arrays.\n\n    Returns:\n        current_arr (list of arrays):\n            List of arrays containing the current values for each time step of each\n            run. If flat=True, returns a flattened array.\n    \"\"\"\n    assert (\n        self.simulation_function == \"current_step\"\n        or self.simulation_function == \"current_stim\"\n        or self.simulation_function == \"current_ramp\"\n    ), 'ERROR get_current_arr: Simulation has to be \"current_step\", \"current_stim\" or \"current_ramp\"!'\n\n    if self.simulation_function == \"current_step\":\n        current_arr = []\n        for run in range(len(self.kwargs)):\n            t1 = self.kwargs[run][\"t1\"]\n            t2 = self.kwargs[run][\"t2\"]\n            a1 = self.kwargs[run][\"a1\"]\n            a2 = self.kwargs[run][\"a2\"]\n\n            if t1 > 0 and t2 > 0:\n                current_arr.append(\n                    np.concatenate(\n                        [\n                            np.ones(int(round(t1 / dt))) * a1,\n                            np.ones(int(round(t2 / dt))) * a2,\n                        ]\n                    )\n                )\n            elif t2 > 0:\n                current_arr.append(np.ones(int(round(t2 / dt))) * a2)\n            else:\n                current_arr.append(np.ones(int(round(t1 / dt))) * a1)\n\n        if flat:\n            return np.concatenate(current_arr)\n        
else:\n            return current_arr\n\n    elif self.simulation_function == \"current_stim\":\n        current_arr = []\n        for run in range(len(self.kwargs)):\n            t = self.kwargs[run][\"t\"]\n            a = self.kwargs[run][\"a\"]\n\n            if t > 0:\n                current_arr.append(np.ones(int(round(t / dt))) * a)\n\n        if flat:\n            return np.concatenate(current_arr)\n        else:\n            return current_arr\n\n    elif self.simulation_function == \"current_ramp\":\n        current_arr = []\n        for run in range(len(self.kwargs)):\n            amp = self.kwargs[run][\"a0\"]\n            current_arr_ramp = []\n            for stim_idx in range(self.kwargs[run][\"n\"]):\n                t = self.info[run][\"dur_stim\"]\n                a = amp\n                current_arr_ramp.append(np.ones(int(round(t / dt))) * a)\n                amp = amp + self.info[run][\"da\"]\n            current_arr.append(list(np.concatenate(current_arr_ramp)))\n\n        if flat:\n            return np.concatenate(current_arr)\n        else:\n            return current_arr\n
"},{"location":"main/model_configurator/","title":"Model Configurator","text":"

Working on it.

"},{"location":"main/monitors_recordings/","title":"Monitors / Recordings","text":""},{"location":"main/monitors_recordings/#create-monitors","title":"Create Monitors","text":"

CompNeuroPy provides a CompNeuroMonitors class that can be used to easily create and control multiple ANNarchy monitors at once. To create a CompNeuroMonitors object, all that is needed is a monitors_dictionary that defines which variables should be recorded for each model component. All populations and projections have to have unique names to work with CompNeuroMonitors. The keys of the monitor_dictionary are the names of the model components (in example below \"my_pop1\" and \"my_pop2\"). The key can also include a recording period (the time between two recordings, given after a \";\"), e.g. record the variables of my_pop1 only every 10 ms would look like this: 'pop;my_pop1;10':['v', 'spike']. The default period is the time step of the simulation for populations and 1000 times the timestep for projections. The values of the monitor_dictionary are lists of all the variables that should be recorded from the corresponding components. The names of components (populations, projections) could be provided by a CompNeuroModel.

"},{"location":"main/monitors_recordings/#example","title":"Example:","text":"

Here the variables v and spike should be recorded of the population with the name \"my_pop1\" and the variable v should be recorded from the population with the name \"my_pop2\":

from CompNeuroPy import CompNeuroMonitors\nmonitor_dictionary = {'my_pop1':['v', 'spike'], 'my_pop2':['v']}\nmon = CompNeuroMonitors(monitor_dictionary)\n

A full example is available in the Examples.

"},{"location":"main/monitors_recordings/#chunks-and-periods","title":"Chunks and periods","text":"

In CompNeuroPy, recordings are divided into so-called chunks and periods. Chunks are simulation sections that are separated by monitor resets (optionally also reset the model). A chunk can consist of several periods. A period represents the time span between the start and pause of a monitor recording. To divide a simulation into chunks and periods, a CompNeuroMonitors object provides the three functions start(), pause() and reset().

At the beginning of a simulation, the monitors do not start automatically which is why the start() function must be called at least once. The start() function can also be used to resume paused recordings. With the function pause() recordings are paused. The function reset() starts a new chunk for the recordings (the end of a chunk is also always the end of a period, i.e. the last period of the corresponding chunk). After calling reset() the monitors remain in their current mode (active or paused). By default reset() also resets the model to the compile status (time = 0) by calling the ANNarchy reset() function and has the same arguments. If the argument model is set to False, the ANNarchy reset() function is not called and only a new chunk is created.

"},{"location":"main/monitors_recordings/#example_1","title":"Example:","text":"
### first chunk, one period\nsimulate(100) # 100 ms not recorded\nmon.start()   # start all monitors\nsimulate(100) # 100 ms recorded\n\n### second chunk, two periods\nmon.reset()   # model reset, beginning of new chunk\nsimulate(100) # 100 ms recorded (monitors were active before reset --> still active)\nmon.pause()   # pause all monitors\nsimulate(100) # 100 ms not recorded\nmon.start()   # start all monitors\nsimulate(100) # 100 ms recorded\n
"},{"location":"main/monitors_recordings/#get-recordings","title":"Get recordings","text":"

The recordings can be obtained from the CompNeuroMonitors object using the get_recordings() function. This returns a list of dictionaries (one for each chunk). The dictionaries contain the recorded data defined with the monitor_dictionary at the CompNeuroMonitors initialization. In the recordings dictionaries the keys have the following structure: \"<component_name>;variable\"; the corresponding dictionary values are the recordings of the respective variable. The dictionaries always contain the time step of the simulation (key = \"dt\"), the periods (time between recorded values) for each component (key = \"<component_name>;period\") and the attributes of each component (key = \"<component_name>;parameter_dict\").

"},{"location":"main/monitors_recordings/#example_2","title":"Example:","text":"
recordings = mon.get_recordings()\ny1 = recordings[0]['my_pop1;v'] ### variable v of my_pop1 from 1st chunk\ny2 = recordings[1]['my_pop1;v'] ### variable v of my_pop1 from 2nd chunk\n
"},{"location":"main/monitors_recordings/#get-recording-times","title":"Get recording times","text":"

In addition to the recordings themselves, recording times can also be obtained from the CompNeuroMonitors object, which is very useful for later analyses. With the function get_recording_times() of the CompNeuroMonitors object a RecordingTimes object can be obtained. From the RecordingTimes object one can get time limits (in ms) and corresponding indices for the recordings.

"},{"location":"main/monitors_recordings/#example_3","title":"Example:","text":"
recording_times = mon.get_recording_times()\nstart_time = recording_times.time_lims(chunk=1, period=1)[0] ### 200 ms\nstart_idx  = recording_times.idx_lims(chunk=1, period=1)[0]  ### 1000, if dt == 0.1\nend_time   = recording_times.time_lims(chunk=1, period=1)[1] ### 300 ms\nend_idx    = recording_times.idx_lims(chunk=1, period=1)[1]  ### 2000\n

You can combine the recordings of both chunks of the example simulation shown above into a single time array and a single value array using the RecordingTimes object's combine_chunks function:

time_arr, value_arr = recording_times.combine_chunks(recordings, 'my_pop1;v', 'consecutive')\n

"},{"location":"main/monitors_recordings/#plot-recordings","title":"Plot recordings","text":"

To get a quick overview of the recordings, CompNeuroPy provides the PlotRecordings class.

"},{"location":"main/monitors_recordings/#CompNeuroPy.monitors.CompNeuroMonitors","title":"CompNeuroPy.monitors.CompNeuroMonitors","text":"

Class to bring together ANNarchy monitors into one object.

Source code in CompNeuroPy/monitors.py
class CompNeuroMonitors:\n    \"\"\"\n    Class to bring together ANNarchy monitors into one object.\n    \"\"\"\n\n    def __init__(self, mon_dict={}):\n        \"\"\"\n        Initialize CompNeuroMonitors object by creating ANNarchy monitors.\n\n        Args:\n            mon_dict (dict):\n                dict with key=\"compartment_name;period\" where period is optional and\n                val=list with variables to record.\n        \"\"\"\n        self.mon = self._add_monitors(mon_dict)\n        self.mon_dict = mon_dict\n        self._init_internals(init_call=True)\n\n    def _init_internals(self, init_call=False):\n        \"\"\"\n        Initialize the following internal variables:\n            - timings (dict):\n                dict with key=\"pop_name\" for populations and \"proj_name\" for projections\n                for each recorded population and projection and\n                val={\"currently_paused\": True, \"start\": [], \"stop\": []}\n            - recordings (list):\n                list with recordings of all chunks. Set to empty list.\n            - recording_times (list):\n                list with recording times of all chunks. Set to empty list.\n            - already_got_recordings (bool):\n                True if recordings were already requested, False otherwise. Set to\n                False.\n            - already_got_recording_times (bool):\n                True if recording_times were already requested, False otherwise. Set to\n                False.\n            - get_recordings_reset_call (bool):\n                True if get_recordings() and get_recording_times() are called within\n                reset(), False otherwise. Set to False.\n\n        Args:\n            init_call (bool, optional):\n                True if called from __init__(), False otherwise. 
Default: False.\n        \"\"\"\n        if init_call is False:\n            #### pause all ANNarchy monitors because currently paused will be set to False\n            self.pause()\n\n        ### initialize timings\n        timings = {}\n        for key, val in self.mon_dict.items():\n            _, compartment, _ = self._unpack_mon_dict_keys(key)\n            timings[compartment] = {\"currently_paused\": True, \"start\": [], \"stop\": []}\n        self.timings = timings\n\n        ### initialize recordings and recording_times etc.\n        self.recordings = []\n        self.recording_times = []\n        self.already_got_recordings = False\n        self.already_got_recording_times = False\n        self.get_recordings_reset_call = False\n\n    @check_types()\n    def start(self, compartment_list: list | None = None):\n        \"\"\"\n        Start or resume recording of all recorded compartments in compartment_list.\n\n        Args:\n            compartment_list (list, optional):\n                List with compartment names to start or resume recording. Default: None,\n                i.e., all compartments of initialized mon_dict are started or resumed.\n        \"\"\"\n        if compartment_list == None:\n            mon_dict_key_list = list(self.mon_dict.keys())\n            compartment_list = [\n                self._unpack_mon_dict_keys(key)[1] for key in mon_dict_key_list\n            ]\n\n        self.timings = self._start_monitors(compartment_list, self.mon, self.timings)\n\n    @check_types()\n    def pause(self, compartment_list: list | None = None):\n        \"\"\"\n        Pause recording of all recorded compartments in compartment_list.\n\n        Args:\n            compartment_list (list, optional):\n                List with compartment names to pause recording. 
Default: None,\n                i.e., all compartments of initialized mon_dict are paused.\n        \"\"\"\n        if compartment_list == None:\n            mon_dict_key_list = list(self.mon_dict.keys())\n            compartment_list = [\n                self._unpack_mon_dict_keys(key)[1] for key in mon_dict_key_list\n            ]\n\n        self.timings = self._pause_monitors(compartment_list, self.mon, self.timings)\n\n    def reset(\n        self,\n        populations=True,\n        projections=False,\n        synapses=False,\n        monitors=True,\n        model=True,\n        parameters=True,\n        net_id=0,\n    ):\n        \"\"\"\n        Create a new recording chunk by getting recordings and recording times of the\n        current chunk and optionally resetting the model. Recordings are automatically\n        resumed in the new chunk if they are not paused.\n\n        Args:\n            populations (bool, optional):\n                If True, reset populations. Default: True.\n            projections (bool, optional):\n                If True, reset projections. Default: False.\n            synapses (bool, optional):\n                If True, reset synapses. Default: False.\n            monitors (bool, optional):\n                If True, reset ANNarchy monitors. Default: True.\n            model (bool, optional):\n                If True, reset model. Default: True.\n            parameters (bool, optional):\n                If True, reset the parameters of popilations and projections. Default:\n                True.\n            net_id (int, optional):\n                Id of the network to reset. 
Default: 0.\n        \"\"\"\n        ### TODO rename this function to new_chunk() or something like that and let\n        ### recordings and recording times be returned\n        self.get_recordings_reset_call = True\n        self.get_recordings()\n        self.get_recording_times()\n        self.get_recordings_reset_call = False\n        self.already_got_recordings = (\n            False  # after reset one can still update recordings\n        )\n        self.already_got_recording_times = (\n            False  # after reset one can still update recording_times\n        )\n\n        ### reset timings, after reset, add a zero to start if the monitor is still\n        ### running (this is not resetted by reset())\n        ### if the model was not resetted --> do add current time instead of zero\n        for key in self.timings.keys():\n            self.timings[key][\"start\"] = []\n            self.timings[key][\"stop\"] = []\n            if self.timings[key][\"currently_paused\"] == False:\n                if model:\n                    self.timings[key][\"start\"].append(0)\n                else:\n                    self.timings[key][\"start\"].append(\n                        np.round(get_time(), af.get_number_of_decimals(dt()))\n                    )\n\n        ### reset model\n        if model:\n            if parameters is False:\n                ### if parameters=False, get parameters before reset and set them after\n                ### reset\n                parameters_dict = mf._get_all_parameters()\n            reset(populations, projections, synapses, monitors, net_id=net_id)\n            if parameters is False:\n                ### if parameters=False, set parameters after reset\n                mf._set_all_parameters(parameters_dict)\n\n    def current_chunk(self):\n        \"\"\"\n        Get the index of the current chunk.\n\n        Returns:\n            current_chunk_idx (int):\n                Index of the current chunk. 
If no recordings are currently active,\n                returns None.\n        \"\"\"\n        ### if recordings are currently active --> return chunk in which these recordings will be saved\n        ### check if there are currently active recordings\n        active_recordings = False\n        for key, val in self.mon_dict.items():\n            _, compartment, _ = self._unpack_mon_dict_keys(key)\n            if not (self.timings[compartment][\"currently_paused\"]):\n                ### tere are currently active recordings\n                active_recordings = True\n\n        if active_recordings:\n            current_chunk_idx = len(self.recordings)\n            return current_chunk_idx\n        else:\n            ### if currently no recordings are active return None\n            return None\n\n    def get_recordings(self) -> list[dict]:\n        \"\"\"\n        Get recordings of all recorded compartments.\n\n        Returns:\n            recordings (list):\n                List with recordings of all chunks.\n        \"\"\"\n        ### only if recordings in current chunk and get_recodings was not already called add current chunk to recordings\n        if (\n            self._any_recordings_in_current_chunk()\n            and self.already_got_recordings is False\n        ):\n            ### update recordings\n            self.recordings.append(self._get_monitors(self.mon_dict, self.mon))\n            ### upade already_got_recordings --> it will not update recordings again\n            self.already_got_recordings = True\n\n            if not (self.get_recordings_reset_call):\n                if len(self.recordings) == 0:\n                    print(\n                        \"WARNING get_recordings: no recordings available, empty list returned. 
Maybe forgot start()?\"\n                    )\n            return self.recordings\n        else:\n            if not (self.get_recordings_reset_call):\n                if len(self.recordings) == 0:\n                    print(\n                        \"WARNING get_recordings: no recordings available, empty list returned. Maybe forgot start()?\"\n                    )\n            return self.recordings\n\n    def get_recording_times(self):\n        \"\"\"\n        Get recording times of all recorded compartments.\n\n        Returns:\n            recording_times (recording_times_cl):\n                Object with recording times of all chunks.\n        \"\"\"\n\n        temp_timings = self._get_temp_timings()\n\n        ### only append temp_timings of current chunk if there are recordings in current chunk at all and if get_recordings was not already called (double call would add the same chunk again)\n        if (\n            self._any_recordings_in_current_chunk()\n            and self.already_got_recording_times is False\n        ):\n            self.recording_times.append(temp_timings)\n\n        ### upade already_got_recording_times --> it will not update recording_times again\n        self.already_got_recording_times = True\n\n        ### generate a object from recording_times and return this instead of the dict\n        recording_times_ob = RecordingTimes(self.recording_times)\n\n        if not (self.get_recordings_reset_call):\n            if len(self.recording_times) == 0:\n                print(\n                    \"WARNING get_recording_times: no recordings available, empty list returned. 
Maybe forgot start()?\"\n                )\n        return recording_times_ob\n\n    def get_recordings_and_clear(self):\n        \"\"\"\n        The default get_recordings method should be called at the end of the simulation.\n        The get_recordings_and_clear method allows to get several times recordings with\n        the same monitor object and to simulate between the calls. Sets the internal\n        variables back to their initial state. Usefull if you repeat a simulation +\n        recording several times and you do not want to always create new chunks.\n\n        !!! warning\n            If you want to continue recording after calling this method, you have to\n            call start() again.\n\n        Returns:\n            recordings (list):\n                List with recordings of all chunks.\n            recording_times (recording_times_cl):\n                Object with recording times of all chunks.\n        \"\"\"\n        ret0 = self.get_recordings()\n        ret1 = self.get_recording_times()\n        self._init_internals()\n        ret = (ret0, ret1)\n        return ret\n\n    def _correct_start_stop(self, start_time_arr, stop_time_arr, period):\n        \"\"\"\n        Corrects the start and stop times of recordings to the actual start and stop\n        times of recorded values.\n\n        Args:\n            start_time_arr (np.array):\n                Array with start times of recordings, obtained with get_time() function\n                of ANNarchy.\n            stop_time_arr (np.array):\n                Array with stop times of recordings, obtained with get_time() function\n                of ANNarchy.\n            period (float):\n                Time difference between recording values specified by the user.\n\n        Returns:\n            actual_start_time (np.array):\n                Array with actual start times of recorded values.\n            actual_stop_time (np.array):\n                Array with actual stop times of recorded 
values.\n            nr_rec_vals (np.array):\n                Array with number of recorded values between start and stop.\n        \"\"\"\n        # actual_period = int(period / dt()) * dt()\n        actual_start_time = np.ceil(start_time_arr / period) * period\n\n        actual_stop_time = np.ceil(stop_time_arr / period - 1) * period\n\n        nr_rec_vals = 1 + (actual_stop_time - actual_start_time) / period\n\n        return (actual_start_time, actual_stop_time, nr_rec_vals)\n\n    def _get_temp_timings(self):\n        \"\"\"\n        Generates a timings dictionary with time lims and idx lims for each compartment.\n        Calculates the idx lims of the recordings based on the time lims.\n\n        Returns:\n            temp_timings (dict):\n                Dict with time lims and idx lims for each compartment.\n        \"\"\"\n        temp_timings = {}\n        for key in self.mon_dict.keys():\n            _, compartment, period = self._unpack_mon_dict_keys(key)\n            if len(self.timings[compartment][\"start\"]) > len(\n                self.timings[compartment][\"stop\"]\n            ):\n                ### was started/resumed but never stoped after --> use current time for stop time\n                self.timings[compartment][\"stop\"].append(get_time())\n            ### calculate the idx of the recorded arrays which correspond to the timings and remove 'currently_paused'\n            ### get for each start-stop pair the corrected start stop timings (when teh values were actually recorded, depends on period and timestep)\n            ### and also get the number of recorded values for start-stop pair\n            start_time_arr = np.array(self.timings[compartment][\"start\"])\n            stop_time_arr = np.array(self.timings[compartment][\"stop\"])\n            (\n                start_time_arr,\n                stop_time_arr,\n                nr_rec_vals_arr,\n            ) = self._correct_start_stop(start_time_arr, stop_time_arr, period)\n\n           
 ### with the number of recorded values -> get start and end idx for each start-stop pair\n            start_idx = [\n                np.sum(nr_rec_vals_arr[0:i]).astype(int)\n                for i in range(nr_rec_vals_arr.size)\n            ]\n            stop_idx = [\n                np.sum(nr_rec_vals_arr[0 : i + 1]).astype(int) - 1\n                for i in range(nr_rec_vals_arr.size)\n            ]\n\n            ### return start-stop pair info in timings format\n            temp_timings[compartment] = {\n                \"start\": {\n                    \"ms\": np.round(\n                        start_time_arr, af.get_number_of_decimals(dt())\n                    ).tolist(),\n                    \"idx\": start_idx,\n                },\n                \"stop\": {\n                    \"ms\": np.round(\n                        stop_time_arr, af.get_number_of_decimals(dt())\n                    ).tolist(),\n                    \"idx\": stop_idx,\n                },\n            }\n        return temp_timings\n\n    def _any_recordings_in_current_chunk(self):\n        \"\"\"\n        Check if there are any recordings in the current chunk.\n\n        Returns:\n            any_recordings (bool):\n                True if there are any recordings in the current chunk, False otherwise.\n        \"\"\"\n        temp_timings = self._get_temp_timings()\n\n        ### generate a temp object of temp timings to check if there were recordings at all\n        recording_times_ob_temp = RecordingTimes([temp_timings])\n        return recording_times_ob_temp._any_recordings(chunk=0)\n\n    def _add_monitors(self, mon_dict: dict):\n        \"\"\"\n        Generate monitors defined by mon_dict.\n\n        Args:\n            mon_dict (dict):\n                dict with key=\"compartment_name;period\" where period is optional and\n                val=list with variables to record.\n\n        Returns:\n            mon (dict):\n                dict with key=\"pop_name\" for populations 
and key=\"proj_name\" for\n                projections and val=ANNarchy monitor object.\n        \"\"\"\n        mon = {}\n        for key, val in mon_dict.items():\n            compartmentType, compartment, period = self._unpack_mon_dict_keys(\n                key, warning=True\n            )\n            ### check if compartment is pop\n            if compartmentType == \"pop\":\n                mon[compartment] = Monitor(\n                    get_population(compartment), val, start=False, period=period\n                )\n            ### check if compartment is proj\n            if compartmentType == \"proj\":\n                mon[compartment] = Monitor(\n                    get_projection(compartment), val, start=False, period=period\n                )\n        return mon\n\n    def _start_monitors(self, compartment_list, mon, timings=None):\n        \"\"\"\n        Starts or resumes monitores defined by compartment_list.\n\n        Args:\n            compartment_list (list):\n                List with compartment names to start or resume recording.\n            mon (dict):\n                Dict with key=\"pop_name\" for populations and key=\"proj_name\" for\n                projections and val=ANNarchy monitor object.\n            timings (dict, optional):\n                timings variable of the CompNeuroMonitors object. 
Default: None.\n\n        Returns:\n            timings (dict):\n                timings variable of the CompNeuroMonitors object.\n        \"\"\"\n        ### for each compartment generate started variable (because compartments can ocure multiple times if multiple variables of them are recorded --> do not start same monitor multiple times)\n        started = {}\n        for compartment_name in compartment_list:\n            started[compartment_name] = False\n\n        if timings == None:\n            ### information about pauses not available, just start\n            for compartment_name in compartment_list:\n                if started[compartment_name] == False:\n                    mon[compartment_name].start()\n                    print(\"start\", compartment_name)\n                    started[compartment_name] = True\n            return None\n        else:\n            ### information about pauses available, start if not paused, resume if paused\n            for compartment_name in compartment_list:\n                if started[compartment_name] == False:\n                    if timings[compartment_name][\"currently_paused\"]:\n                        if len(timings[compartment_name][\"start\"]) > 0:\n                            ### resume\n                            mon[compartment_name].resume()\n                        else:\n                            ### initial start\n                            mon[compartment_name].start()\n                    started[compartment_name] = True\n                    ### update currently_paused\n                    timings[compartment_name][\"currently_paused\"] = False\n                    ### never make start longer than stop+1!... 
this can be caused if start is called multiple times without pause in between\n                    if len(timings[compartment_name][\"start\"]) <= len(\n                        timings[compartment_name][\"stop\"]\n                    ):\n                        timings[compartment_name][\"start\"].append(get_time())\n            return timings\n\n    def _pause_monitors(self, compartment_list, mon, timings=None):\n        \"\"\"\n        Pause monitores defined by compartment_list.\n\n        Args:\n            compartment_list (list):\n                List with compartment names to pause recording.\n            mon (dict):\n                Dict with key=\"pop_name\" for populations and key=\"proj_name\" for\n                projections and val=ANNarchy monitor object.\n            timings (dict, optional):\n                timings variable of the CompNeuroMonitors object. Default: None.\n\n        Returns:\n            timings (dict):\n                timings variable of the CompNeuroMonitors object.\n        \"\"\"\n        ### for each compartment generate paused variable (because compartments can ocure multiple times if multiple variables of them are recorded --> do not pause same monitor multiple times)\n        paused = {}\n        for compartment_name in compartment_list:\n            paused[compartment_name] = False\n\n        for compartment_name in compartment_list:\n            if paused[compartment_name] == False:\n                mon[compartment_name].pause()\n                paused[compartment_name] = True\n\n        if timings != None:\n            ### information about pauses is available, update it\n            for key, val in paused.items():\n                timings[key][\"currently_paused\"] = True\n                ### never make pause longer than start, this can be caused if pause is called multiple times without start in between\n                if len(timings[key][\"stop\"]) < len(timings[key][\"start\"]):\n                    
timings[key][\"stop\"].append(get_time())\n                ### if pause is directly called after start --> start == stop --> remove these entries, this is no actual period\n                if (\n                    len(timings[key][\"stop\"]) == len(timings[key][\"start\"])\n                    and timings[key][\"stop\"][-1] == timings[key][\"start\"][-1]\n                ):\n                    timings[key][\"stop\"] = timings[key][\"stop\"][:-1]\n                    timings[key][\"start\"] = timings[key][\"start\"][:-1]\n            return timings\n        else:\n            return None\n\n    def _get_monitors(self, mon_dict, mon):\n        \"\"\"\n        Get recorded values from ANNarchy monitors defined by mon_dict.\n\n        Args:\n            mon_dict (dict):\n                dict with key=\"compartment_name;period\" where period is optional and\n                val=list with variables to record.\n            mon (dict):\n                Dict with key=\"pop_name\" for populations and key=\"proj_name\" for\n                projections and val=ANNarchy monitor object.\n\n        Returns:\n            recordings (dict):\n                Dict with key=\"compartment_name;variable\" and val=list with recorded\n                values.\n        \"\"\"\n        recordings = {}\n        for key, val in mon_dict.items():\n            compartment_type, compartment, period = self._unpack_mon_dict_keys(key)\n            recordings[f\"{compartment};period\"] = period\n            if compartment_type == \"pop\":\n                pop = get_population(compartment)\n                parameter_dict = {\n                    param_name: getattr(pop, param_name)\n                    for param_name in pop.parameters\n                }\n                recordings[f\"{compartment};parameter_dict\"] = parameter_dict\n            if compartment_type == \"proj\":\n                proj = get_projection(compartment)\n                parameter_dict = {\n                    param_name: 
getattr(proj, param_name)\n                    for param_name in proj.parameters\n                }\n                recordings[f\"{compartment};parameters\"] = parameter_dict\n            for val_val in val:\n                temp = mon[compartment].get(val_val)\n                recordings[f\"{compartment};{val_val}\"] = temp\n        recordings[\"dt\"] = dt()\n        return recordings\n\n    def _unpack_mon_dict_keys(self, s: str, warning: bool = False):\n        \"\"\"\n        Unpacks a string of the form \"compartment_name;period\" or\n        \"compartment_name\" into its components. If period is not provided\n        it is set to dt() for populations and dt()*1000 for projections.\n\n        Args:\n            s (str):\n                String to be unpacked\n            warning (bool, optional):\n                If True, print warning if period is not provided for projections.\n\n        Returns:\n            compartment_type (str):\n                Compartment type\n            compartment_name (str):\n                Compartment name\n            period (float):\n                Period of the compartment\n        \"\"\"\n        ### split string\n        splitted_s = s.split(\";\")\n\n        ### get name\n        compartment_name = splitted_s[0]\n\n        ### get type\n        pop_list = [pop.name for pop in populations()]\n        proj_list = [proj.name for proj in projections()]\n        if compartment_name in pop_list and compartment_name in proj_list:\n            ### raise error because name is in both lists\n            print(\n                \"ERROR CompNeuroMonitors._unpack_mon_dict_keys(): compartment_name is both populaiton and projection\"\n            )\n            quit()\n        elif compartment_name in pop_list:\n            compartment_type = \"pop\"\n        elif compartment_name in proj_list:\n            compartment_type = \"proj\"\n\n        ### get period\n        if len(splitted_s) == 2:\n            period = 
float(splitted_s[1])\n        else:\n            period = {\"pop\": dt(), \"proj\": dt() * 1000}[compartment_type]\n            ### print warning for compartment_type proj\n            if compartment_type == \"proj\" and warning:\n                print(\n                    f\"WARNING CompNeuroMonitors: no period provided for projection {compartment_name}, period set to {period} ms\"\n                )\n        period = round(period / dt()) * dt()\n\n        return compartment_type, compartment_name, period\n
"},{"location":"main/monitors_recordings/#CompNeuroPy.monitors.CompNeuroMonitors.__init__","title":"__init__(mon_dict={})","text":"

Initialize CompNeuroMonitors object by creating ANNarchy monitors.

Parameters:

Name Type Description Default mon_dict dict

dict with key=\"compartment_name;period\" where period is optional and val=list with variables to record.

{} Source code in CompNeuroPy/monitors.py
def __init__(self, mon_dict={}):\n    \"\"\"\n    Initialize CompNeuroMonitors object by creating ANNarchy monitors.\n\n    Args:\n        mon_dict (dict):\n            dict with key=\"compartment_name;period\" where period is optional and\n            val=list with variables to record.\n    \"\"\"\n    self.mon = self._add_monitors(mon_dict)\n    self.mon_dict = mon_dict\n    self._init_internals(init_call=True)\n
"},{"location":"main/monitors_recordings/#CompNeuroPy.monitors.CompNeuroMonitors.start","title":"start(compartment_list=None)","text":"

Start or resume recording of all recorded compartments in compartment_list.

Parameters:

Name Type Description Default compartment_list list

List with compartment names to start or resume recording. Default: None, i.e., all compartments of initialized mon_dict are started or resumed.

None Source code in CompNeuroPy/monitors.py
@check_types()\ndef start(self, compartment_list: list | None = None):\n    \"\"\"\n    Start or resume recording of all recorded compartments in compartment_list.\n\n    Args:\n        compartment_list (list, optional):\n            List with compartment names to start or resume recording. Default: None,\n            i.e., all compartments of initialized mon_dict are started or resumed.\n    \"\"\"\n    if compartment_list == None:\n        mon_dict_key_list = list(self.mon_dict.keys())\n        compartment_list = [\n            self._unpack_mon_dict_keys(key)[1] for key in mon_dict_key_list\n        ]\n\n    self.timings = self._start_monitors(compartment_list, self.mon, self.timings)\n
"},{"location":"main/monitors_recordings/#CompNeuroPy.monitors.CompNeuroMonitors.pause","title":"pause(compartment_list=None)","text":"

Pause recording of all recorded compartments in compartment_list.

Parameters:

Name Type Description Default compartment_list list

List with compartment names to pause recording. Default: None, i.e., all compartments of initialized mon_dict are paused.

None Source code in CompNeuroPy/monitors.py
@check_types()\ndef pause(self, compartment_list: list | None = None):\n    \"\"\"\n    Pause recording of all recorded compartments in compartment_list.\n\n    Args:\n        compartment_list (list, optional):\n            List with compartment names to pause recording. Default: None,\n            i.e., all compartments of initialized mon_dict are paused.\n    \"\"\"\n    if compartment_list == None:\n        mon_dict_key_list = list(self.mon_dict.keys())\n        compartment_list = [\n            self._unpack_mon_dict_keys(key)[1] for key in mon_dict_key_list\n        ]\n\n    self.timings = self._pause_monitors(compartment_list, self.mon, self.timings)\n
"},{"location":"main/monitors_recordings/#CompNeuroPy.monitors.CompNeuroMonitors.reset","title":"reset(populations=True, projections=False, synapses=False, monitors=True, model=True, parameters=True, net_id=0)","text":"

Create a new recording chunk by getting recordings and recording times of the current chunk and optionally resetting the model. Recordings are automatically resumed in the new chunk if they are not paused.

Parameters:

Name Type Description Default populations bool

If True, reset populations. Default: True.

True projections bool

If True, reset projections. Default: False.

False synapses bool

If True, reset synapses. Default: False.

False monitors bool

If True, reset ANNarchy monitors. Default: True.

True model bool

If True, reset model. Default: True.

True parameters bool

If True, reset the parameters of populations and projections. Default: True.

True net_id int

Id of the network to reset. Default: 0.

0 Source code in CompNeuroPy/monitors.py
def reset(\n    self,\n    populations=True,\n    projections=False,\n    synapses=False,\n    monitors=True,\n    model=True,\n    parameters=True,\n    net_id=0,\n):\n    \"\"\"\n    Create a new recording chunk by getting recordings and recording times of the\n    current chunk and optionally resetting the model. Recordings are automatically\n    resumed in the new chunk if they are not paused.\n\n    Args:\n        populations (bool, optional):\n            If True, reset populations. Default: True.\n        projections (bool, optional):\n            If True, reset projections. Default: False.\n        synapses (bool, optional):\n            If True, reset synapses. Default: False.\n        monitors (bool, optional):\n            If True, reset ANNarchy monitors. Default: True.\n        model (bool, optional):\n            If True, reset model. Default: True.\n        parameters (bool, optional):\n            If True, reset the parameters of popilations and projections. Default:\n            True.\n        net_id (int, optional):\n            Id of the network to reset. 
Default: 0.\n    \"\"\"\n    ### TODO rename this function to new_chunk() or something like that and let\n    ### recordings and recording times be returned\n    self.get_recordings_reset_call = True\n    self.get_recordings()\n    self.get_recording_times()\n    self.get_recordings_reset_call = False\n    self.already_got_recordings = (\n        False  # after reset one can still update recordings\n    )\n    self.already_got_recording_times = (\n        False  # after reset one can still update recording_times\n    )\n\n    ### reset timings, after reset, add a zero to start if the monitor is still\n    ### running (this is not resetted by reset())\n    ### if the model was not resetted --> do add current time instead of zero\n    for key in self.timings.keys():\n        self.timings[key][\"start\"] = []\n        self.timings[key][\"stop\"] = []\n        if self.timings[key][\"currently_paused\"] == False:\n            if model:\n                self.timings[key][\"start\"].append(0)\n            else:\n                self.timings[key][\"start\"].append(\n                    np.round(get_time(), af.get_number_of_decimals(dt()))\n                )\n\n    ### reset model\n    if model:\n        if parameters is False:\n            ### if parameters=False, get parameters before reset and set them after\n            ### reset\n            parameters_dict = mf._get_all_parameters()\n        reset(populations, projections, synapses, monitors, net_id=net_id)\n        if parameters is False:\n            ### if parameters=False, set parameters after reset\n            mf._set_all_parameters(parameters_dict)\n
"},{"location":"main/monitors_recordings/#CompNeuroPy.monitors.CompNeuroMonitors.current_chunk","title":"current_chunk()","text":"

Get the index of the current chunk.

Returns:

Name Type Description current_chunk_idx int

Index of the current chunk. If no recordings are currently active, returns None.

Source code in CompNeuroPy/monitors.py
def current_chunk(self):\n    \"\"\"\n    Get the index of the current chunk.\n\n    Returns:\n        current_chunk_idx (int):\n            Index of the current chunk. If no recordings are currently active,\n            returns None.\n    \"\"\"\n    ### if recordings are currently active --> return chunk in which these recordings will be saved\n    ### check if there are currently active recordings\n    active_recordings = False\n    for key, val in self.mon_dict.items():\n        _, compartment, _ = self._unpack_mon_dict_keys(key)\n        if not (self.timings[compartment][\"currently_paused\"]):\n            ### tere are currently active recordings\n            active_recordings = True\n\n    if active_recordings:\n        current_chunk_idx = len(self.recordings)\n        return current_chunk_idx\n    else:\n        ### if currently no recordings are active return None\n        return None\n
"},{"location":"main/monitors_recordings/#CompNeuroPy.monitors.CompNeuroMonitors.get_recordings","title":"get_recordings()","text":"

Get recordings of all recorded compartments.

Returns:

Name Type Description recordings list

List with recordings of all chunks.

Source code in CompNeuroPy/monitors.py
def get_recordings(self) -> list[dict]:\n    \"\"\"\n    Get recordings of all recorded compartments.\n\n    Returns:\n        recordings (list):\n            List with recordings of all chunks.\n    \"\"\"\n    ### only if recordings in current chunk and get_recodings was not already called add current chunk to recordings\n    if (\n        self._any_recordings_in_current_chunk()\n        and self.already_got_recordings is False\n    ):\n        ### update recordings\n        self.recordings.append(self._get_monitors(self.mon_dict, self.mon))\n        ### upade already_got_recordings --> it will not update recordings again\n        self.already_got_recordings = True\n\n        if not (self.get_recordings_reset_call):\n            if len(self.recordings) == 0:\n                print(\n                    \"WARNING get_recordings: no recordings available, empty list returned. Maybe forgot start()?\"\n                )\n        return self.recordings\n    else:\n        if not (self.get_recordings_reset_call):\n            if len(self.recordings) == 0:\n                print(\n                    \"WARNING get_recordings: no recordings available, empty list returned. Maybe forgot start()?\"\n                )\n        return self.recordings\n
"},{"location":"main/monitors_recordings/#CompNeuroPy.monitors.CompNeuroMonitors.get_recording_times","title":"get_recording_times()","text":"

Get recording times of all recorded compartments.

Returns:

Name Type Description recording_times recording_times_cl

Object with recording times of all chunks.

Source code in CompNeuroPy/monitors.py
def get_recording_times(self):\n    \"\"\"\n    Get recording times of all recorded compartments.\n\n    Returns:\n        recording_times (recording_times_cl):\n            Object with recording times of all chunks.\n    \"\"\"\n\n    temp_timings = self._get_temp_timings()\n\n    ### only append temp_timings of current chunk if there are recordings in current chunk at all and if get_recordings was not already called (double call would add the same chunk again)\n    if (\n        self._any_recordings_in_current_chunk()\n        and self.already_got_recording_times is False\n    ):\n        self.recording_times.append(temp_timings)\n\n    ### upade already_got_recording_times --> it will not update recording_times again\n    self.already_got_recording_times = True\n\n    ### generate a object from recording_times and return this instead of the dict\n    recording_times_ob = RecordingTimes(self.recording_times)\n\n    if not (self.get_recordings_reset_call):\n        if len(self.recording_times) == 0:\n            print(\n                \"WARNING get_recording_times: no recordings available, empty list returned. Maybe forgot start()?\"\n            )\n    return recording_times_ob\n
"},{"location":"main/monitors_recordings/#CompNeuroPy.monitors.CompNeuroMonitors.get_recordings_and_clear","title":"get_recordings_and_clear()","text":"

The default get_recordings method should be called at the end of the simulation. The get_recordings_and_clear method allows to get several times recordings with the same monitor object and to simulate between the calls. Sets the internal variables back to their initial state. Usefull if you repeat a simulation + recording several times and you do not want to always create new chunks.

Warning

If you want to continue recording after calling this method, you have to call start() again.

Returns:

Name Type Description recordings list

List with recordings of all chunks.

recording_times recording_times_cl

Object with recording times of all chunks.

Source code in CompNeuroPy/monitors.py
def get_recordings_and_clear(self):\n    \"\"\"\n    The default get_recordings method should be called at the end of the simulation.\n    The get_recordings_and_clear method allows to get several times recordings with\n    the same monitor object and to simulate between the calls. Sets the internal\n    variables back to their initial state. Usefull if you repeat a simulation +\n    recording several times and you do not want to always create new chunks.\n\n    !!! warning\n        If you want to continue recording after calling this method, you have to\n        call start() again.\n\n    Returns:\n        recordings (list):\n            List with recordings of all chunks.\n        recording_times (recording_times_cl):\n            Object with recording times of all chunks.\n    \"\"\"\n    ret0 = self.get_recordings()\n    ret1 = self.get_recording_times()\n    self._init_internals()\n    ret = (ret0, ret1)\n    return ret\n
"},{"location":"main/monitors_recordings/#CompNeuroPy.monitors.RecordingTimes","title":"CompNeuroPy.monitors.RecordingTimes","text":"Source code in CompNeuroPy/monitors.py
class RecordingTimes:\n    def __init__(self, recording_times_list):\n        \"\"\"\n        Initialize RecordingTimes object.\n\n        Args:\n            recording_times_list (list):\n                List with recording times of all chunks.\n        \"\"\"\n        self.recording_times_list = recording_times_list\n\n    def time_lims(\n        self,\n        chunk: int | None = None,\n        compartment: str | None = None,\n        period: int | None = None,\n    ):\n        \"\"\"\n        Get the time limits recordings of of a specified chunk/model compartment in ms.\n\n        chunk (int, optional):\n            Index of the chunk. Default: None, i.e., first chunk.\n        compartment (str, optional):\n            Name of the compartment. Default: None, i.e., first model compartment from\n            monitor.\n        period (int, optional):\n            Index of the period. Default: None, i.e., all periods.\n\n        Returns:\n            lims (tuple):\n                Tuple with start and stop time of the specified chunk/model compartment.\n        \"\"\"\n        assert (\n            len(self.recording_times_list) > 0\n        ), \"ERROR time_lims(): No recordings/recording_times available.\"\n        return self._lims(\"ms\", chunk, compartment, period)\n\n    def idx_lims(\n        self,\n        chunk: int | None = None,\n        compartment: str | None = None,\n        period: int | None = None,\n    ):\n        \"\"\"\n        Get the index limits of recordings of a specified chunk/model compartment.\n\n        chunk (int, optional):\n            Index of the chunk. Default: None, i.e., first chunk.\n        compartment (str, optional):\n            Name of the compartment. Default: None, i.e., first model compartment from\n            monitor.\n        period (int, optional):\n            Index of the period. 
Default: None, i.e., all periods.\n\n        Returns:\n            lims (tuple):\n                Tuple with start and stop index of the specified chunk/model\n                compartment.\n        \"\"\"\n        assert (\n            len(self.recording_times_list) > 0\n        ), \"ERROR idx_lims(): No recordings/recording_times available.\"\n        return self._lims(\"idx\", chunk, compartment, period)\n\n    def all(self):\n        \"\"\"\n        Get the recording times of all chunks, compartments, periods in ms and index.\n\n        Returns:\n            recording_times_list (list):\n                List with recording times of all chunks.\n        \"\"\"\n        return self.recording_times_list\n\n    def nr_periods(self, chunk=None, compartment=None):\n        \"\"\"\n        Get the number of recording periods (start-pause) of a specified chunk/model\n        compartment.\n\n        Args:\n            chunk (int, optional):\n                Index of the chunk. Default: None, i.e., first chunk.\n            compartment (str, optional):\n                Name of the compartment. 
Default: None, i.e., first model compartment\n                from monitor.\n\n        Returns:\n            nr_periods (int):\n                Number of recording periods (start-pause) of a specified chunk/model\n                compartment.\n        \"\"\"\n        chunk = self._check_chunk(chunk)\n        compartment = self.__check_compartment__(compartment, chunk)\n        return self._get_nr_periods(chunk, compartment)\n\n    def combine_chunks(\n        self, recordings: list, recording_data_str: str, mode=\"sequential\"\n    ):\n        \"\"\"\n        Combines the data of all chunks of recordings, only possible if no pauses in\n        between.\n\n        Args:\n            recordings (list):\n                List with recordings of all chunks.\n            recording_data_str (str):\n                String specifying the compartment name and the variable to combine.\n                Format: \"compartment_name;variable_name\"\n            mode (str, optional):\n                How should the time array be generated. Can be \"sequential\" or\n                \"consecutive\". 
Default: \"sequential\".\n                - \"sequential\": each chunk starts at zero e.g.: [0,100] + [0,250] -->\n                    [0, 1, ..., 100, 0, 1, ..., 250]\n                - \"consecutive\": each chunk starts at the last stop time of the previous\n                    chunk e.g.: [0,100] + [0,250] --> [0, 1, ..., 100, 101, 102, ..., 350]\n\n        Returns:\n            time_arr (np.array):\n                Array with time values in ms.\n            data_arr (np.array):\n                Array with the recorded variable.\n        \"\"\"\n        assert (\n            len(self.recording_times_list) > 0\n        ), \"ERROR combine_chunks(): No recordings/recording_times available.\"\n\n        compartment = recording_data_str.split(\";\")[0]\n        period_time = recordings[0][f\"{compartment};period\"]\n        time_step = recordings[0][\"dt\"]\n        nr_chunks = self._get_nr_chunks()\n        data_list = []\n        time_list = []\n        pre_chunk_start_time = 0\n\n        for chunk in range(nr_chunks):\n            ### append data list with data of all periods of this chunk\n            data_list.append(recordings[chunk][recording_data_str])\n\n            ### nr of periods in this chunk\n            nr_periods = self._get_nr_periods(chunk, compartment)\n\n            ### start time of chunk depends on mode\n            if mode == \"sequential\":\n                chunk_start_time = 0\n            elif mode == \"consecutive\":\n                if chunk == 0:\n                    chunk_start_time = 0\n                else:\n                    last_stop_time = self.recording_times_list[chunk - 1][compartment][\n                        \"stop\"\n                    ][\"ms\"][-1]\n                    chunk_start_time = (\n                        pre_chunk_start_time + last_stop_time + period_time\n                    )\n                    pre_chunk_start_time = chunk_start_time\n            else:\n                print(\"ERROR 
recording_times.combine_data, Wrong mode.\")\n                quit()\n\n            ### append the time list with all times of the periods\n            for period in range(nr_periods):\n                start_time = (\n                    self.time_lims(chunk=chunk, compartment=compartment, period=period)[\n                        0\n                    ]\n                    + chunk_start_time\n                )\n                end_time = (\n                    self.time_lims(chunk=chunk, compartment=compartment, period=period)[\n                        1\n                    ]\n                    + chunk_start_time\n                )\n                start_time = round(start_time, af.get_number_of_decimals(time_step))\n                end_time = round(end_time, af.get_number_of_decimals(time_step))\n                times = np.arange(start_time, end_time + period_time, period_time)\n                time_list.append(times)\n\n        ### flatten the two lists\n        data_arr = np.concatenate(data_list, 0)\n        time_arr = np.concatenate(time_list, 0)\n\n        ### check if there are gaps in the time array\n        ### fill them with the corersponding times and\n        ### the data array with nan values\n        time_arr, data_arr = af.time_data_add_nan(\n            time_arr,\n            data_arr,\n            fill_time_step=period_time,\n        )\n\n        return time_arr, data_arr\n\n    def _lims(self, string, chunk=None, compartment=None, period=None):\n        \"\"\"\n        Get the limits of recordings of a specified chunk/model compartment.\n\n        Args:\n            string (str):\n                String specifying the type of limits to return. Can be \"ms\" for time\n                limits in ms or \"idx\" for index limits.\n            chunk (int, optional):\n                Index of the chunk. Default: None, i.e., first chunk.\n            compartment (str, optional):\n                Name of the compartment. 
Default: None, i.e., first model compartment\n                from monitor.\n            period (int, optional):\n                Index of the period. Default: None, i.e., all periods.\n\n        Returns:\n            lims (tuple):\n                Tuple with start and stop time/index of the specified chunk/model\n                compartment.\n        \"\"\"\n\n        chunk = self._check_chunk(chunk)\n        compartment = self.__check_compartment__(compartment, chunk)\n        period_0, period_1 = self._check_period(period, chunk, compartment)\n        lims = (\n            self.recording_times_list[chunk][compartment][\"start\"][string][period_0],\n            self.recording_times_list[chunk][compartment][\"stop\"][string][period_1],\n        )\n        return lims\n\n    def __check_compartment__(self, compartment, chunk):\n        if compartment == None:\n            ### by default just use the first compartment\n            compartment = list(self.recording_times_list[chunk].keys())[0]\n        elif compartment in list(self.recording_times_list[chunk].keys()):\n            compartment = compartment\n        else:\n            print(\n                'ERROR recording_times, given compartment \"'\n                + str(compartment)\n                + '\" not available'\n            )\n            quit()\n\n        return compartment\n\n    def _check_period(self, period, chunk, compartment):\n        \"\"\"\n        Check if period is given.\n\n        Args:\n            period (int, optional):\n                Index of the period. Default: None, i.e., all periods.\n            chunk (int):\n                Index of the chunk.\n            compartment (str):\n                Name of the compartment.\n\n        Returns:\n            period_0 (int):\n                Index of the first period.\n            period_1 (int):\n                Index of the last period. 
If perios is given, period_0 == period_1.\n        \"\"\"\n        if period == None:\n            ### by default use all periods\n            period_0 = 0\n            period_1 = (\n                len(self.recording_times_list[chunk][compartment][\"start\"][\"idx\"]) - 1\n            )\n        elif period < len(\n            self.recording_times_list[chunk][compartment][\"start\"][\"idx\"]\n        ):\n            period_0 = period\n            period_1 = period\n        else:\n            print(\"ERROR recording_times, given period not available\")\n            quit()\n\n        return period_0, period_1\n\n    def _check_chunk(self, chunk):\n        \"\"\"\n        Check if chunk is given.\n\n        Args:\n            chunk (int, optional):\n                Index of the chunk. Default: None, i.e., first chunk.\n\n        Returns:\n            chunk (int):\n                Index of the chunk.\n        \"\"\"\n        if chunk is None:\n            ### by default use first chunk\n            chunk = 0\n        elif chunk < self._get_nr_chunks():\n            chunk = chunk\n        else:\n            print(\"ERROR recording_times, given chunk not available\")\n            quit()\n\n        return chunk\n\n    def _get_nr_chunks(self):\n        \"\"\"\n        Get the number of chunks of the recordings.\n\n        Returns:\n            nr_chunks (int):\n                Number of chunks.\n        \"\"\"\n        return len(self.recording_times_list)\n\n    def _get_nr_periods(self, chunk, compartment):\n        \"\"\"\n        Get the number of recording periods (start-pause) of a specified chunk/model\n        compartment.\n\n        Args:\n            chunk (int):\n                Index of the chunk.\n            compartment (str):\n                Name of the compartment.\n\n        Returns:\n            nr_periods (int):\n                Number of recording periods (start-pause) of a specified chunk/model\n                compartment.\n        \"\"\"\n        
return len(self.recording_times_list[chunk][compartment][\"start\"][\"idx\"])\n\n    def _any_recordings(self, chunk):\n        \"\"\"\n        Check all periods and compartments if there are any recordings.\n\n        Args:\n            chunk (int):\n                Index of the chunk.\n\n        Returns:\n            found_recordings (bool):\n                True if there are any recordings in the chunk, False otherwise.\n        \"\"\"\n        compartment_list = list(self.recording_times_list[chunk].keys())\n        found_recordings = False\n        for compartment in compartment_list:\n            nr_periods_of_compartment = len(\n                self.recording_times_list[chunk][compartment][\"start\"][\"idx\"]\n            )\n\n            for period_idx in range(nr_periods_of_compartment):\n                idx_lims = self.idx_lims(\n                    chunk=chunk, compartment=compartment, period=period_idx\n                )\n                if np.diff(idx_lims)[0] > 0:\n                    found_recordings = True\n\n        return found_recordings\n
"},{"location":"main/monitors_recordings/#CompNeuroPy.monitors.RecordingTimes.__init__","title":"__init__(recording_times_list)","text":"

Initialize RecordingTimes object.

Parameters:

Name Type Description Default recording_times_list list

List with recording times of all chunks.

required Source code in CompNeuroPy/monitors.py
def __init__(self, recording_times_list):\n    \"\"\"\n    Initialize RecordingTimes object.\n\n    Args:\n        recording_times_list (list):\n            List with recording times of all chunks.\n    \"\"\"\n    self.recording_times_list = recording_times_list\n
"},{"location":"main/monitors_recordings/#CompNeuroPy.monitors.RecordingTimes.time_lims","title":"time_lims(chunk=None, compartment=None, period=None)","text":"

Get the time limits recordings of of a specified chunk/model compartment in ms.

chunk (int, optional): Index of the chunk. Default: None, i.e., first chunk. compartment (str, optional): Name of the compartment. Default: None, i.e., first model compartment from monitor. period (int, optional): Index of the period. Default: None, i.e., all periods.

Returns:

Name Type Description lims tuple

Tuple with start and stop time of the specified chunk/model compartment.

Source code in CompNeuroPy/monitors.py
def time_lims(\n    self,\n    chunk: int | None = None,\n    compartment: str | None = None,\n    period: int | None = None,\n):\n    \"\"\"\n    Get the time limits recordings of of a specified chunk/model compartment in ms.\n\n    chunk (int, optional):\n        Index of the chunk. Default: None, i.e., first chunk.\n    compartment (str, optional):\n        Name of the compartment. Default: None, i.e., first model compartment from\n        monitor.\n    period (int, optional):\n        Index of the period. Default: None, i.e., all periods.\n\n    Returns:\n        lims (tuple):\n            Tuple with start and stop time of the specified chunk/model compartment.\n    \"\"\"\n    assert (\n        len(self.recording_times_list) > 0\n    ), \"ERROR time_lims(): No recordings/recording_times available.\"\n    return self._lims(\"ms\", chunk, compartment, period)\n
"},{"location":"main/monitors_recordings/#CompNeuroPy.monitors.RecordingTimes.idx_lims","title":"idx_lims(chunk=None, compartment=None, period=None)","text":"

Get the index limits of recordings of a specified chunk/model compartment.

chunk (int, optional): Index of the chunk. Default: None, i.e., first chunk. compartment (str, optional): Name of the compartment. Default: None, i.e., first model compartment from monitor. period (int, optional): Index of the period. Default: None, i.e., all periods.

Returns:

Name Type Description lims tuple

Tuple with start and stop index of the specified chunk/model compartment.

Source code in CompNeuroPy/monitors.py
def idx_lims(\n    self,\n    chunk: int | None = None,\n    compartment: str | None = None,\n    period: int | None = None,\n):\n    \"\"\"\n    Get the index limits of recordings of a specified chunk/model compartment.\n\n    chunk (int, optional):\n        Index of the chunk. Default: None, i.e., first chunk.\n    compartment (str, optional):\n        Name of the compartment. Default: None, i.e., first model compartment from\n        monitor.\n    period (int, optional):\n        Index of the period. Default: None, i.e., all periods.\n\n    Returns:\n        lims (tuple):\n            Tuple with start and stop index of the specified chunk/model\n            compartment.\n    \"\"\"\n    assert (\n        len(self.recording_times_list) > 0\n    ), \"ERROR idx_lims(): No recordings/recording_times available.\"\n    return self._lims(\"idx\", chunk, compartment, period)\n
"},{"location":"main/monitors_recordings/#CompNeuroPy.monitors.RecordingTimes.all","title":"all()","text":"

Get the recording times of all chunks, compartments, periods in ms and index.

Returns:

Name Type Description recording_times_list list

List with recording times of all chunks.

Source code in CompNeuroPy/monitors.py
def all(self):\n    \"\"\"\n    Get the recording times of all chunks, compartments, periods in ms and index.\n\n    Returns:\n        recording_times_list (list):\n            List with recording times of all chunks.\n    \"\"\"\n    return self.recording_times_list\n
"},{"location":"main/monitors_recordings/#CompNeuroPy.monitors.RecordingTimes.nr_periods","title":"nr_periods(chunk=None, compartment=None)","text":"

Get the number of recording periods (start-pause) of a specified chunk/model compartment.

Parameters:

Name Type Description Default chunk int

Index of the chunk. Default: None, i.e., first chunk.

None compartment str

Name of the compartment. Default: None, i.e., first model compartment from monitor.

None

Returns:

Name Type Description nr_periods int

Number of recording periods (start-pause) of a specified chunk/model compartment.

Source code in CompNeuroPy/monitors.py
def nr_periods(self, chunk=None, compartment=None):\n    \"\"\"\n    Get the number of recording periods (start-pause) of a specified chunk/model\n    compartment.\n\n    Args:\n        chunk (int, optional):\n            Index of the chunk. Default: None, i.e., first chunk.\n        compartment (str, optional):\n            Name of the compartment. Default: None, i.e., first model compartment\n            from monitor.\n\n    Returns:\n        nr_periods (int):\n            Number of recording periods (start-pause) of a specified chunk/model\n            compartment.\n    \"\"\"\n    chunk = self._check_chunk(chunk)\n    compartment = self.__check_compartment__(compartment, chunk)\n    return self._get_nr_periods(chunk, compartment)\n
"},{"location":"main/monitors_recordings/#CompNeuroPy.monitors.RecordingTimes.combine_chunks","title":"combine_chunks(recordings, recording_data_str, mode='sequential')","text":"

Combines the data of all chunks of recordings, only possible if no pauses in between.

Parameters:

Name Type Description Default recordings list

List with recordings of all chunks.

required recording_data_str str

String specifying the compartment name and the variable to combine. Format: \"compartment_name;variable_name\"

required mode str

How should the time array be generated. Can be \"sequential\" or \"consecutive\". Default: \"sequential\". - \"sequential\": each chunk starts at zero e.g.: [0,100] + [0,250] --> [0, 1, ..., 100, 0, 1, ..., 250] - \"consecutive\": each chunk starts at the last stop time of the previous chunk e.g.: [0,100] + [0,250] --> [0, 1, ..., 100, 101, 102, ..., 350]

'sequential'

Returns:

Name Type Description time_arr array

Array with time values in ms.

data_arr array

Array with the recorded variable.

Source code in CompNeuroPy/monitors.py
def combine_chunks(\n    self, recordings: list, recording_data_str: str, mode=\"sequential\"\n):\n    \"\"\"\n    Combines the data of all chunks of recordings, only possible if no pauses in\n    between.\n\n    Args:\n        recordings (list):\n            List with recordings of all chunks.\n        recording_data_str (str):\n            String specifying the compartment name and the variable to combine.\n            Format: \"compartment_name;variable_name\"\n        mode (str, optional):\n            How should the time array be generated. Can be \"sequential\" or\n            \"consecutive\". Default: \"sequential\".\n            - \"sequential\": each chunk starts at zero e.g.: [0,100] + [0,250] -->\n                [0, 1, ..., 100, 0, 1, ..., 250]\n            - \"consecutive\": each chunk starts at the last stop time of the previous\n                chunk e.g.: [0,100] + [0,250] --> [0, 1, ..., 100, 101, 102, ..., 350]\n\n    Returns:\n        time_arr (np.array):\n            Array with time values in ms.\n        data_arr (np.array):\n            Array with the recorded variable.\n    \"\"\"\n    assert (\n        len(self.recording_times_list) > 0\n    ), \"ERROR combine_chunks(): No recordings/recording_times available.\"\n\n    compartment = recording_data_str.split(\";\")[0]\n    period_time = recordings[0][f\"{compartment};period\"]\n    time_step = recordings[0][\"dt\"]\n    nr_chunks = self._get_nr_chunks()\n    data_list = []\n    time_list = []\n    pre_chunk_start_time = 0\n\n    for chunk in range(nr_chunks):\n        ### append data list with data of all periods of this chunk\n        data_list.append(recordings[chunk][recording_data_str])\n\n        ### nr of periods in this chunk\n        nr_periods = self._get_nr_periods(chunk, compartment)\n\n        ### start time of chunk depends on mode\n        if mode == \"sequential\":\n            chunk_start_time = 0\n        elif mode == \"consecutive\":\n            if chunk == 0:\n             
   chunk_start_time = 0\n            else:\n                last_stop_time = self.recording_times_list[chunk - 1][compartment][\n                    \"stop\"\n                ][\"ms\"][-1]\n                chunk_start_time = (\n                    pre_chunk_start_time + last_stop_time + period_time\n                )\n                pre_chunk_start_time = chunk_start_time\n        else:\n            print(\"ERROR recording_times.combine_data, Wrong mode.\")\n            quit()\n\n        ### append the time list with all times of the periods\n        for period in range(nr_periods):\n            start_time = (\n                self.time_lims(chunk=chunk, compartment=compartment, period=period)[\n                    0\n                ]\n                + chunk_start_time\n            )\n            end_time = (\n                self.time_lims(chunk=chunk, compartment=compartment, period=period)[\n                    1\n                ]\n                + chunk_start_time\n            )\n            start_time = round(start_time, af.get_number_of_decimals(time_step))\n            end_time = round(end_time, af.get_number_of_decimals(time_step))\n            times = np.arange(start_time, end_time + period_time, period_time)\n            time_list.append(times)\n\n    ### flatten the two lists\n    data_arr = np.concatenate(data_list, 0)\n    time_arr = np.concatenate(time_list, 0)\n\n    ### check if there are gaps in the time array\n    ### fill them with the corersponding times and\n    ### the data array with nan values\n    time_arr, data_arr = af.time_data_add_nan(\n        time_arr,\n        data_arr,\n        fill_time_step=period_time,\n    )\n\n    return time_arr, data_arr\n
"},{"location":"main/optimize_neuron/","title":"Optimize a neuron model","text":""},{"location":"main/optimize_neuron/#introduction","title":"Introduction","text":"

CompNeuroPy provides the OptNeuron class which can be used to define your optimization of an ANNarchy neuron model (tuning the parameters). You can either optimize your neuron model to some data or try to reproduce the dynamics of a different neuron model (for example to reduce a more complex model). In both cases, you have to define the experiment which generates the data of interest with your neuron model.

Warning

OptNeuron has to be imported from \"CompNeuroPy.opt_neuron\" and you have to install torch, sbi and hyperopt (e.g. pip install torch sbi hyperopt)

Used optimization methods:

  • hyperopt

    Bergstra, J., Yamins, D., Cox, D. D. (2013) Making a Science of Model Search: Hyperparameter Optimization in Hundreds of Dimensions for Vision Architectures. Proc. of the 30th International Conference on Machine Learning (ICML 2013), June 2013, pp. I-115 to I-23.

  • sbi

    Tejero-Cantero et al., (2020). sbi: A toolkit for simulation-based inference. Journal of Open Source Software, 5(52), 2505, https://doi.org/10.21105/joss.02505

"},{"location":"main/optimize_neuron/#example","title":"Example:","text":"
opt = OptNeuron(\n    experiment=my_exp,\n    get_loss_function=get_loss,\n    variables_bounds=variables_bounds,\n    results_soll=experimental_data[\"results_soll\"],\n    time_step=experimental_data[\"time_step\"],\n    compile_folder_name=\"annarchy_opt_neuron_example\",\n    neuron_model=my_neuron,\n    method=\"hyperopt\",\n    record=[\"r\"],\n)\n

A full example is available in the Examples.

"},{"location":"main/optimize_neuron/#run-the-optimization","title":"Run the optimization","text":"

To run the optimization simply call the run() function of the OptNeuron object.

"},{"location":"main/optimize_neuron/#define-the-experiment","title":"Define the experiment","text":"

You have to define a CompNeuroExp object containing a run() function. In the run() function simulations and recordings are performed.

Warning

While defining the CompNeuroExp run() function for the optimization with OptNeuron you must observe the following rules:

  • the run() function has to take a single argument (besides self) which contains the name of the population consisting of a single neuron of the optimized neuron model (you can use this to access the population)
  • call self.reset(parameters=False) at the beginning of the run function, thus the neuron will be in its compile state (except the parameters) at the beginning of each simulation run
  • always set parameters=False while calling the self.reset() function (otherwise the parameter optimization will not work)
  • besides the optimized parameters and the loss, the results of the experiment (using the optimized parameters) will be available after the optimization, you can store any additional data in the self.data attribute
"},{"location":"main/optimize_neuron/#example_1","title":"Example:","text":"
class my_exp(CompNeuroExp):\n    \"\"\"\n    Define an experiment by inheriting from CompNeuroExp.\n\n    CompNeuroExp provides the attributes:\n\n        monitors (CompNeuroMonitors):\n            a CompNeuroMonitors object to do recordings, define during init otherwise\n            None\n        data (dict):\n            a dictionary for storing any optional data\n\n    and the functions:\n        reset():\n            resets the model and monitors\n        results():\n            returns a results object\n    \"\"\"\n\n    def run(self, population_name):\n        \"\"\"\n        Do the simulations and recordings.\n\n        To use the CompNeuroExp class, you need to define a run function which\n        does the simulations and recordings. The run function should return the\n        results object which can be obtained by calling self.results().\n\n        For using the CompNeuroExp for OptNeuron, the run function should have\n        one argument which is the name of the population which is automatically created\n        by OptNeuron, containing a single neuron of the model which should be optimized.\n\n        Args:\n            population_name (str):\n                name of the population which contains a single neuron, this will be\n                automatically provided by opt_neuron\n\n        Returns:\n            results (CompNeuroExp._ResultsCl):\n                results object with attributes:\n                    recordings (list):\n                        list of recordings\n                    recording_times (recording_times_cl):\n                        recording times object\n                    mon_dict (dict):\n                        dict of recorded variables of the monitors\n                    data (dict):\n                        dict with optional data stored during the experiment\n        \"\"\"\n        ### For OptNeuron you have to reset the model and monitors at the beginning of\n        ### the run function! 
Do not reset the parameters, otherwise the optimization\n        ### will not work!\n        self.reset(parameters=False)\n\n        ### you have to start monitors within the run function, otherwise nothing will\n        ### be recorded\n        self.monitors.start()\n\n        ### run the simulation, remember setting parameters=False in the reset function!\n        ...\n        simulate(100)\n        self.reset(parameters=False)\n        ...\n\n        ### optional: store anything you want in the data dict. For example infomration\n        ### about the simulations. This is not used for the optimization but can be\n        ### retrieved after the optimization is finished\n        self.data[\"sim\"] = sim_step.simulation_info()\n        self.data[\"population_name\"] = population_name\n        self.data[\"time_step\"] = dt()\n\n        ### return results, use the object's self.results()\n        return self.results()\n
"},{"location":"main/optimize_neuron/#the-get_loss_function","title":"The get_loss_function","text":"

The get_loss_function must have two arguments. When this function is called during optimization, the first argument is always the results object returned by the experiment, i.e. the results of the neuron you want to optimize. The second argument depends on whether you have specified results_soll, i.e. data to be reproduced by the neuron_model, or whether you have specified a target_neuron_model whose results are to be reproduced by the neuron_model. Thus, the second argument is either results_soll provided to the OptNeuron class during initialization or another results object (returned by the CompNeuroExp run function), generated with the target_neuron_model.

"},{"location":"main/optimize_neuron/#example_2","title":"Example:","text":"

In this example we assume, that results_soll was provided during initialization of the OptNeuron class (no target_neuron_model used).

def get_loss(results_ist: CompNeuroExp._ResultsCl, results_soll):\n    \"\"\"\n    Function which has to have the arguments results_ist and results_soll and should\n    calculates and return the loss. This structure is needed for the OptNeuron class.\n\n    Args:\n        results_ist (object):\n            the results object returned by the run function of experiment (see above)\n        results_soll (any):\n            the target data directly provided to OptNeuron during initialization\n\n    Returns:\n        loss (float or list of floats):\n            the loss\n    \"\"\"\n    ### get the recordings and other important things for calculating the loss from\n    ### results_ist, we do not use all available information here, but you could\n    rec_ist = results_ist.recordings\n    pop_ist = results_ist.data[\"population_name\"]\n    neuron = 0\n\n    ### get the data for calculating the loss from the results_soll\n    r_target_0 = results_soll[0]\n    r_target_1 = results_soll[1]\n\n    ### get the data for calculating the loss from the recordings\n    r_ist_0 = rec_ist[0][f\"{pop_ist};r\"][:, neuron]\n    r_ist_1 = rec_ist[1][f\"{pop_ist};r\"][:, neuron]\n\n    ### calculate the loss, e.g. the root mean squared error\n    rmse1 = rmse(r_target_0, r_ist_0)\n    rmse2 = rmse(r_target_1, r_ist_1)\n\n    ### return the loss, one can return a singel value or a list of values which will\n    ### be summed during the optimization\n    return [rmse1, rmse2]\n

"},{"location":"main/optimize_neuron/#CompNeuroPy.opt_neuron.OptNeuron","title":"CompNeuroPy.opt_neuron.OptNeuron","text":"

This class is used to optimize neuron models with ANNarchy.

Source code in CompNeuroPy/opt_neuron.py
class OptNeuron:\n    \"\"\"\n    This class is used to optimize neuron models with ANNarchy.\n    \"\"\"\n\n    opt_created = []\n\n    @check_types()\n    def __init__(\n        self,\n        experiment: Type[CompNeuroExp],\n        get_loss_function: Callable[[Any, Any], float | list[float]],\n        variables_bounds: dict[str, float | list[float]],\n        neuron_model: Neuron,\n        results_soll: Any | None = None,\n        target_neuron_model: Neuron | None = None,\n        time_step: float = 1.0,\n        compile_folder_name: str = \"annarchy_OptNeuron\",\n        num_rep_loss: int = 1,\n        method: str = \"hyperopt\",\n        prior=None,\n        fv_space: list = None,\n        record: list[str] = [],\n    ):\n        \"\"\"\n        This prepares the optimization. To run the optimization call the run function.\n\n        Args:\n            experiment (CompNeuroExp class):\n                CompNeuroExp class containing a 'run' function which defines the\n                simulations and recordings\n\n            get_loss_function (function):\n                function which takes results_ist and results_soll as arguments and\n                calculates/returns the loss\n\n            variables_bounds (dict):\n                Dictionary with parameter names (keys) and their bounds (values). If\n                single values are given as values, the parameter is constant, i.e., not\n                optimized. If a list is given as value, the parameter is optimized and\n                the list contains the lower and upper bound of the parameter (order is\n                not important).\n\n            neuron_model (ANNarchy Neuron):\n                The neuron model whose parameters should be optimized.\n\n            results_soll (Any, optional):\n                Some variable which contains the target data and can be used by the\n                get_loss_function (second argument of get_loss_function)\n                !!! 
warning\n                    Either provide results_soll or a target_neuron_model not both!\n                Default: None.\n\n            target_neuron_model (ANNarchy Neuron, optional):\n                The neuron model which produces the target data by running the\n                experiment.\n                !!! warning\n                    Either provide results_soll or a target_neuron_model not both!\n                Default: None.\n\n            time_step (float, optional):\n                The time step for the simulation in ms. Default: 1.\n\n            compile_folder_name (string, optional):\n                The name of the annarchy compilation folder within annarchy_folders/.\n                Default: 'annarchy_OptNeuron'.\n\n            num_rep_loss (int, optional):\n                Only interesting for noisy simulations/models. How often should the\n                simulaiton be run to calculate the loss (the defined number of losses\n                is obtained and averaged). Default: 1.\n\n            method (str, optional):\n                Either 'sbi' or 'hyperopt'. If 'sbi' is used, the optimization is\n                performed with sbi. If 'hyperopt' is used, the optimization is\n                performed with hyperopt. Default: 'hyperopt'.\n\n            prior (distribution, optional):\n                The prior distribution used by sbi. Default: None, i.e., uniform\n                distributions between the variable bounds are assumed.\n\n            fv_space (list, optional):\n                The search space for hyperopt. Default: None, i.e., uniform\n                distributions between the variable bounds are assumed.\n\n            record (list, optional):\n                List of strings which define what variables of the tuned neuron should\n                be recorded. Default: [].\n        \"\"\"\n\n        if len(self.opt_created) > 0:\n            print(\n                \"OptNeuron: Error: Already another OptNeuron created. 
Only create one per python session!\"\n            )\n            quit()\n        else:\n            print(\n                \"OptNeuron: Initialize OptNeuron... do not create anything with ANNarchy before!\"\n            )\n\n            ### set object variables\n            self.opt_created.append(1)\n            self.record = record\n            self.results_soll = results_soll\n            self.variables_bounds = variables_bounds\n            self.fitting_variables_name_list = self._get_fitting_variables_name_list()\n            self.method = method\n            if method == \"hyperopt\":\n                if fv_space is None:\n                    self.fv_space = self._get_hyperopt_space()\n                else:\n                    self.fv_space = fv_space\n            self.const_params = self._get_const_params()\n            self.num_rep_loss = num_rep_loss\n            self.neuron_model = neuron_model\n            if method == \"sbi\":\n                self.prior = self._get_prior(prior)\n            self.target_neuron = target_neuron_model\n            self.compile_folder_name = compile_folder_name\n            self.__get_loss__ = get_loss_function\n\n            ### check target_neuron/results_soll\n            self._check_target()\n            ### check neuron models\n            self._check_neuron_models()\n\n            ### setup ANNarchy\n            setup(dt=time_step)\n\n            ### create and compile model\n            ### if neuron models and target neuron model --> create both models then\n            ### test, then clear and create only model for neuron model\n            model, target_model, monitors = self._generate_models()\n\n            self.pop = model.populations[0]\n            if target_model is not None:\n                self.pop_target = target_model.populations[0]\n            else:\n                self.pop_target = None\n            ### create experiment with current monitors\n            self.experiment = 
experiment(monitors=monitors)\n\n            ### check variables of model\n            self._test_variables()\n\n            ### check neuron models, experiment, get_loss\n            ### if results_soll is None -_> generate results_soll\n            self._check_get_loss_function()\n\n            ### after checking neuron models, experiment, get_loss\n            ### if two models exist --> clear ANNarchy and create/compile again only\n            ### standard model, thus recreate also monitors and experiment\n            clear()\n            model, _, monitors = self._generate_models()\n            self.monitors = monitors\n            self.experiment = experiment(monitors=monitors)\n\n    def _generate_models(self):\n        \"\"\"\n        Generates the tuned model and the target_model (only if results_soll is None).\n\n        Returns:\n            model (CompNeuroModel):\n                The model which is used for the optimization.\n\n            target_model (CompNeuroModel):\n                The model which is used to generate the target data. If results_soll is\n                provided, target_model is None.\n\n            monitors (CompNeuroMonitors):\n                The monitors which are used to record the data. 
If no variables are\n                recorded, monitors is None.\n        \"\"\"\n        with ef.suppress_stdout():\n            model = None\n            target_model = None\n            monitors = None\n            if self.results_soll is None:\n                ### create two models\n                model = CompNeuroModel(\n                    model_creation_function=self._raw_neuron,\n                    model_kwargs={\"neuron\": self.neuron_model, \"name\": \"model_neuron\"},\n                    name=\"standard_model\",\n                    do_create=True,\n                    do_compile=False,\n                    compile_folder_name=self.compile_folder_name,\n                )\n\n                target_model = CompNeuroModel(\n                    model_creation_function=self._raw_neuron,\n                    model_kwargs={\n                        \"neuron\": self.target_neuron,\n                        \"name\": \"target_model_neuron\",\n                    },\n                    name=\"target_model\",\n                    do_create=True,\n                    do_compile=True,\n                    compile_folder_name=self.compile_folder_name,\n                )\n\n                ### create monitors\n                if len(self.record) > 0:\n                    monitors = CompNeuroMonitors(\n                        {\n                            pop_name: self.record\n                            for pop_name in [\n                                model.populations[0],\n                                target_model.populations[0],\n                            ]\n                        }\n                    )\n\n            else:\n                ### create one model\n                model = CompNeuroModel(\n                    model_creation_function=self._raw_neuron,\n                    model_kwargs={\"neuron\": self.neuron_model, \"name\": \"model_neuron\"},\n                    name=\"single_model\",\n                    do_create=True,\n                
    do_compile=True,\n                    compile_folder_name=self.compile_folder_name,\n                )\n                ### create monitors\n                if len(self.record) > 0:\n                    monitors = CompNeuroMonitors({model.populations[0]: self.record})\n\n        return model, target_model, monitors\n\n    def _check_neuron_models(self):\n        \"\"\"\n        Checks if the neuron models are ANNarchy neuron models.\n        \"\"\"\n        if not (isinstance(self.neuron_model, type(Neuron()))) or (\n            self.target_neuron is not None\n            and not (isinstance(self.target_neuron, type(Neuron())))\n        ):\n            print(\n                \"OptNeuron: Error: neuron_model and/or target_neuron_model have to be ANNarchy neuron models\"\n            )\n            quit()\n\n    def _check_target(self):\n        \"\"\"\n        Check if either results_soll or target_neuron are provided and not both.\n        \"\"\"\n        if self.target_neuron is None and self.results_soll is None:\n            print(\n                \"OptNeuron: Error: Either provide results_soll or target_neuron_model\"\n            )\n            quit()\n        elif self.target_neuron is not None and self.results_soll is not None:\n            print(\n                \"OptNeuron: Error: Either provide results_soll or target_neuron_model, not both\"\n            )\n            quit()\n\n    def _get_prior(self, prior):\n        \"\"\"\n        Get the prior distribution used by sbi. If no prior is given, uniform\n        distributions between the variable bounds are assumed. If a prior is given,\n        this prior is used.\n\n        Args:\n            prior (distribution, optional):\n                The prior distribution used by sbi. 
Default: None, i.e., uniform\n                distributions between the variable bounds are assumed.\n\n        Returns:\n            prior (distribution):\n                The prior distribution used by sbi.\n        \"\"\"\n        if prior is None:\n            prior_min = []\n            prior_max = []\n            for _, param_bounds in self.variables_bounds.items():\n                if isinstance(param_bounds, list):\n                    prior_min.append(param_bounds[0])\n                    prior_max.append(param_bounds[1])\n\n            return utils.BoxUniform(\n                low=torch.as_tensor(prior_min), high=torch.as_tensor(prior_max)\n            )\n        else:\n            return prior\n\n    def _get_fitting_variables_name_list(self):\n        \"\"\"\n        Returns a list with the names of the fitting variables.\n\n        Returns:\n            fitting_variables_name_list (list):\n                list with names of fitting variables\n        \"\"\"\n        name_list = []\n        for param_name, param_bounds in self.variables_bounds.items():\n            if isinstance(param_bounds, list):\n                name_list.append(param_name)\n        return name_list\n\n    def _get_hyperopt_space(self):\n        \"\"\"\n        Generates the hyperopt variable space from the fitting variable bounds. 
The\n        variable space is a uniform distribution between the bounds.\n\n        Returns:\n            fitting_variables_space (list):\n                list with hyperopt variables\n        \"\"\"\n        fitting_variables_space = []\n        for param_name, param_bounds in self.variables_bounds.items():\n            if isinstance(param_bounds, list):\n                fitting_variables_space.append(\n                    hp.uniform(param_name, min(param_bounds), max(param_bounds))\n                )\n        return fitting_variables_space\n\n    def _get_const_params(self):\n        \"\"\"\n        Returns:\n            const_params (dict):\n                Dictionary with constant variables. The keys are the parameter names\n                and the values are the parameter values.\n        \"\"\"\n        const_params = {}\n        for param_name, param_bounds in self.variables_bounds.items():\n            if not (isinstance(param_bounds, list)):\n                const_params[param_name] = param_bounds\n        return const_params\n\n    def _check_get_loss_function(self):\n        \"\"\"\n        Checks if the get_loss_function is compatible to the experiment and the neuron\n        model(s). To test, the experiment is run once with the tuned neuron model\n        (generating results_ist) and once with the target neuron model (if provided,\n        generating results_soll). 
Then, the get_loss_function is called with the\n        results_ist and results_soll.\n        \"\"\"\n        print(\"checking neuron_models, experiment, get_loss...\", end=\"\")\n\n        fitparams = []\n        for bounds in self.variables_bounds.values():\n            if isinstance(bounds, list):\n                fitparams.append(bounds[0])\n\n        if self.results_soll is not None:\n            ### only generate results_ist with standard neuron model\n            results_ist = self._run_simulator_with_results(fitparams)[\"results\"]\n        else:\n            ### run simulator with both populations (standard neuron model and target\n            ### neuron model) and generatate results_ist and results_soll\n            results_ist = self._run_simulator_with_results(fitparams)[\"results\"]\n            self.results_soll = self._run_simulator_with_results(\n                fitparams, pop=self.pop_target\n            )[\"results\"]\n\n        try:\n            self.__get_loss__(results_ist, self.results_soll)\n        except:\n            print(\n                \"\\nThe get_loss_function, experiment and neuron model(s) are not compatible:\\n\"\n            )\n            traceback.print_exc()\n            quit()\n        print(\"Done\\n\")\n\n    def _raw_neuron(self, neuron, name):\n        \"\"\"\n        Generates a population with one neuron of the given neuron model.\n\n        Args:\n            neuron (ANNarchy Neuron):\n                The neuron model.\n\n            name (str):\n                The name of the population.\n        \"\"\"\n        Population(1, neuron=neuron, name=name)\n\n    def _test_variables(self):\n        \"\"\"\n        Check if the tuned neuron model contains all parameters which are defined in\n        variables_bounds or even more.\n        \"\"\"\n        ### collect all names\n        all_vars_names = np.concatenate(\n            [\n                np.array(list(self.const_params.keys())),\n                
np.array(self.fitting_variables_name_list),\n            ]\n        ).tolist()\n        ### check if pop has these parameters\n        pop_parameter_names = get_population(self.pop).attributes.copy()\n        for name in pop_parameter_names.copy():\n            if name in all_vars_names:\n                all_vars_names.remove(name)\n                pop_parameter_names.remove(name)\n        if len(pop_parameter_names) > 0:\n            print(\n                \"OptNeuron: WARNING: attributes\",\n                pop_parameter_names,\n                \"are not used/initialized.\",\n            )\n        if len(all_vars_names) > 0:\n            print(\n                \"OptNeuron: WARNING: The neuron_model does not contain parameters\",\n                all_vars_names,\n                \"!\",\n            )\n\n    def _run_simulator(self, fitparams):\n        \"\"\"\n        Runs the function simulator with the multiprocessing manager (if function is\n        called multiple times this saves memory, otherwise same as calling simulator\n        directly).\n\n        Args:\n            fitparams (list):\n                list with values for fitting parameters\n\n        Returns:\n            return_dict (dict):\n                dictionary needed for optimization with hyperopt, containing the loss,\n                the loss variance (in case of noisy models with multiple runs per loss\n                calculation), and the status (STATUS_OK for hyperopt).\n        \"\"\"\n\n        ### initialize manager and generate m_list = dictionary to save data\n        manager = multiprocessing.Manager()\n        m_list = manager.dict()\n\n        ### in case of noisy models, here optionally run multiple simulations, to mean the loss\n        lossAr = np.zeros(self.num_rep_loss)\n\n        return_results = False\n        for nr_run in range(self.num_rep_loss):\n            ### initialize for each run a new rng (--> not always have same noise in case of noisy models/simulations)\n   
         rng = np.random.default_rng()\n            ### run simulator with multiprocessign manager\n            proc = Process(\n                target=self._simulator, args=(fitparams, rng, m_list, return_results)\n            )\n            proc.start()\n            proc.join()\n            ### get simulation results/loss\n            lossAr[nr_run] = m_list[0]\n\n        ### calculate mean and std of loss\n        if self.num_rep_loss > 1:\n            loss = np.mean(lossAr)\n            std = np.std(lossAr)\n        else:\n            loss = lossAr[0]\n            std = None\n\n        ### return loss and other things for optimization\n        if self.num_rep_loss > 1:\n            return {\"status\": STATUS_OK, \"loss\": loss, \"loss_variance\": std}\n        else:\n            return {\"status\": STATUS_OK, \"loss\": loss}\n\n    def _sbi_simulation_wrapper(self, fitparams):\n        \"\"\"\n        This function is called by sbi. It calls the simulator function and\n        returns the loss and adjusts the format of the input parameters.\n\n        Args:\n            fitparams (tensor):\n                either a batch of parameters (tensor with two dimensions) or a single\n                parameter set\n\n        Returns:\n            loss (tensor):\n                loss as tensor for sbi inference\n        \"\"\"\n        fitparams = np.asarray(fitparams)\n        if len(fitparams.shape) == 2:\n            ### batch parameters!\n            data = []\n            for idx in range(fitparams.shape[0]):\n                data.append(self._run_simulator(fitparams[idx])[\"loss\"])\n        else:\n            ### single parameter set!\n            data = [self._run_simulator(fitparams)[\"loss\"]]\n\n        return torch.as_tensor(data)\n\n    def _run_simulator_with_results(self, fitparams, pop=None):\n        \"\"\"\n        Runs the function simulator with the multiprocessing manager (if function is\n        called multiple times this saves memory, otherwise 
same as calling simulator\n        directly) and also returns the results.\n\n        Args:\n            fitparams (list):\n                list with values for fitting parameters\n\n            pop (str, optional):\n                ANNarchy population name. Default: None, i.e., the tuned population\n                is used.\n\n        Returns:\n            return_dict (dict):\n                dictionary needed for optimization with hyperopt, containing the loss,\n                the loss variance (in case of noisy models with multiple runs per loss\n                calculation), and the status (STATUS_OK for hyperopt) and the results\n                generated by the experiment.\n        \"\"\"\n        ### check if pop is given\n        if pop is None:\n            pop = self.pop\n        ### initialize manager and generate m_list = dictionary to save data\n        manager = multiprocessing.Manager()\n        m_list = manager.dict()\n\n        ### in case of noisy models, here optionally run multiple simulations, to mean the loss\n        lossAr = np.zeros(self.num_rep_loss)\n        all_loss_list = []\n        return_results = True\n        for nr_run in range(self.num_rep_loss):\n            ### initialize for each run a new rng (--> not always have same noise in case of noisy models/simulations)\n            rng = np.random.default_rng()\n            ### run simulator with multiprocessign manager\n            proc = Process(\n                target=self._simulator,\n                args=(fitparams, rng, m_list, return_results, pop),\n            )\n            proc.start()\n            proc.join()\n            ### get simulation results/loss\n            lossAr[nr_run] = m_list[0]\n            results_ist = m_list[1]\n            all_loss_list.append(m_list[2])\n\n        all_loss_arr = np.array(all_loss_list)\n        ### calculate mean and std of loss\n        if self.num_rep_loss > 1:\n            loss = np.mean(lossAr)\n            std = np.std(lossAr)\n   
         all_loss = np.mean(all_loss_arr, 0)\n        else:\n            loss = lossAr[0]\n            std = None\n            all_loss = all_loss_arr[0]\n\n        ### return loss and other things for optimization and results\n        if self.num_rep_loss > 1:\n            return {\n                \"status\": STATUS_OK,\n                \"loss\": loss,\n                \"loss_variance\": std,\n                \"std\": std,\n                \"all_loss\": all_loss,\n                \"results\": results_ist,\n            }\n        else:\n            return {\n                \"status\": STATUS_OK,\n                \"loss\": loss,\n                \"std\": std,\n                \"all_loss\": all_loss,\n                \"results\": results_ist,\n            }\n\n    def _simulator(\n        self, fitparams, rng, m_list=[0, 0, 0], return_results=False, pop=None\n    ):\n        \"\"\"\n        Runs the experiment with the given parameters and 'returns' the loss and\n        optionally the results and all individual losses of the get_loss_function. The\n        'returned' values are saved in m_list.\n\n        Args:\n            fitparams (list):\n                list with values for fitting parameters\n\n            rng (numpy random generator):\n                random generator for the simulation\n\n            m_list (list, optional):\n                list with the loss, the results, and the all_loss. Default: [0, 0, 0].\n\n            return_results (bool, optional):\n                If True, the results are returned. Default: False.\n\n            pop (str, optional):\n                ANNarchy population name. 
Default: None, i.e., the tuned population\n                is used.\n        \"\"\"\n        ### TODO use rng here and add it to CompNeuroExp\n        ### check if pop is given\n        if pop is None:\n            pop = self.pop\n\n        ### set parameters which should not be optimized and parameters which should be\n        ### optimized before the experiment, they should not be resetted by the\n        ### experiment!\n        self._set_fitting_parameters(fitparams, pop=pop)\n\n        ### conduct loaded experiment\n        results = self.experiment.run(pop)\n\n        if self.results_soll is not None:\n            ### compute loss\n            all_loss = self.__get_loss__(results, self.results_soll)\n            if isinstance(all_loss, list) or isinstance(all_loss, type(np.zeros(1))):\n                loss = sum(all_loss)\n            else:\n                loss = all_loss\n        else:\n            all_loss = 999\n            loss = 999\n        ### \"return\" loss and other optional things\n        m_list[0] = loss\n        if return_results:\n            m_list[1] = results\n            m_list[2] = all_loss\n\n    def _set_fitting_parameters(\n        self,\n        fitparams,\n        pop=None,\n    ):\n        \"\"\"\n        Sets all given parameters for the population pop.\n\n        Args:\n            pop (str, optional):\n                ANNarchy population name. 
Default: None, i.e., the tuned population\n                is used.\n        \"\"\"\n        if pop is None:\n            pop = self.pop\n\n        ### get all variables dict (combine fitting variables and const variables)\n        all_variables_dict = self.const_params.copy()\n\n        for fitting_variable_idx, fitting_variable_name in enumerate(\n            self.fitting_variables_name_list\n        ):\n            all_variables_dict[fitting_variable_name] = fitparams[fitting_variable_idx]\n\n        ### evaluate variables defined by a str\n        for key, val in all_variables_dict.items():\n            if isinstance(val, str):\n                all_variables_dict[key] = ef.evaluate_expression_with_dict(\n                    val, all_variables_dict\n                )\n\n        ### only set parameters of the fitted neuron model (in case target neuron model is given)\n        if pop == self.pop:\n            ### set parameters\n            for param_name, param_val in all_variables_dict.items():\n                pop_parameter_names = get_population(pop).attributes\n                ### only if param_name in parameter attributes\n                if param_name in pop_parameter_names:\n                    setattr(\n                        get_population(pop),\n                        param_name,\n                        param_val,\n                    )\n\n    def _test_fit(self, fitparams_dict):\n        \"\"\"\n        Runs the experiment with the optimized parameters obtained with hyperopt and\n        returns the loss, the results and all individual losses of the\n        get_loss_function.\n\n        Args:\n            fitparams_dict (dict):\n                dictionary with parameter names (keys) and their values (values)\n\n        Returns:\n            fit (dict):\n                dictionary containing the loss, the loss variance (in case of noisy\n                models with multiple runs per loss calculation), and the status\n                (STATUS_OK for 
hyperopt) and the results generated by the experiment.\n        \"\"\"\n        return self._run_simulator_with_results(\n            [fitparams_dict[name] for name in self.fitting_variables_name_list]\n        )\n\n    def _run_with_sbi(self, max_evals, sbi_plot_file):\n        \"\"\"\n        Runs the optimization with sbi.\n\n        Args:\n            max_evals (int):\n                number of runs the optimization method performs\n\n            sbi_plot_file (str):\n                If you use \"sbi\": the name of the figure which will be saved and shows\n                the posterior.\n\n        Returns:\n            best (dict):\n                dictionary containing the optimized parameters and the posterior.\n        \"\"\"\n        ### get prior bounds\n        prior_min = []\n        prior_max = []\n        for _, param_bounds in self.variables_bounds.items():\n            if isinstance(param_bounds, list):\n                prior_min.append(param_bounds[0])\n                prior_max.append(param_bounds[1])\n\n        ### run sbi\n        simulator, prior = prepare_for_sbi(\n            self._sbi_simulation_wrapper,\n            self.prior,\n            {\n                \"lower_bound\": torch.as_tensor(prior_min),\n                \"upper_bound\": torch.as_tensor(prior_max),\n            },\n        )\n        inference = SNPE(prior, density_estimator=\"mdn\")\n        theta, x = simulate_for_sbi(\n            simulator=simulator,\n            proposal=prior,\n            num_simulations=max_evals,\n            num_workers=1,\n        )\n        density_estimator = inference.append_simulations(theta, x).train()\n        posterior = inference.build_posterior(density_estimator)\n        x_o = torch.as_tensor([0])  # data which should be obtained: loss==0\n        posterior = posterior.set_default_x(x_o)\n\n        ### get best params\n        posterior_samples = posterior.sample(\n            (10000,)\n        )  # posterior = distribution P(params|data) 
--> set data and then sample possible parameters\n        best_params = posterior_samples[\n            torch.argmax(posterior.log_prob(posterior_samples))\n        ].numpy()  # sampled parameters with highest prob in posterior\n\n        ### create best dict with best parameters\n        best = {}\n        for param_idx, param_name in enumerate(self.fitting_variables_name_list):\n            best[param_name] = best_params[param_idx]\n\n        ### also return posterior\n        best[\"posterior\"] = posterior\n\n        ### plot posterior\n        plot_limits = [\n            [prior_min[idx], prior_max[idx]] for idx in range(len(prior_max))\n        ]\n        analysis.pairplot(\n            posterior_samples,\n            limits=plot_limits,\n            ticks=plot_limits,\n            fig_size=(5, 5),\n            labels=self.fitting_variables_name_list,\n        )\n\n        ### save plot\n        sf.create_dir(\"/\".join(sbi_plot_file.split(\"/\")[:-1]))\n        plt.savefig(sbi_plot_file)\n\n        return best\n\n    @check_types()\n    def run(\n        self,\n        max_evals: int,\n        results_file_name: str = \"best\",\n        sbi_plot_file: str = \"posterior.svg\",\n    ):\n        \"\"\"\n        Runs the optimization.\n\n        Args:\n            max_evals (int):\n                number of runs the optimization method performs\n\n            results_file_name (str, optional):\n                name of the file which is saved. The file contains the optimized and\n                target results, the obtained parameters, the loss, and the SD of the\n                loss (in case of noisy models with multiple runs per loss calculation)\n                Default: \"best\".\n\n            sbi_plot_file (str, optional):\n                If you use \"sbi\": the name of the figure which will be saved and shows\n                the posterior. 
Default: \"posterior.svg\".\n\n        Returns:\n            best (dict):\n                dictionary containing the optimized parameters (as keys) and:\n\n                - \"loss\": the loss\n                - \"all_loss\": the individual losses of the get_loss_function\n                - \"std\": the SD of the loss (in case of noisy models with multiple\n                    runs per loss calculation)\n                - \"results\": the results generated by the experiment\n                - \"results_soll\": the target results\n        \"\"\"\n        if self.method == \"hyperopt\":\n            ### run optimization with hyperopt and return best dict\n            best = fmin(\n                fn=self._run_simulator,\n                space=self.fv_space,\n                algo=tpe.suggest,\n                max_evals=max_evals,\n            )\n        elif self.method == \"sbi\":\n            ### run optimization with sbi and return best dict\n            best = self._run_with_sbi(max_evals, sbi_plot_file)\n        else:\n            print(\"ERROR run; method should be 'hyperopt' or 'sbi'\")\n            quit()\n        fit = self._test_fit(best)\n        best[\"loss\"] = fit[\"loss\"]\n        if self.method == \"sbi\":\n            print(\"\\tbest loss:\", best[\"loss\"])\n        best[\"all_loss\"] = fit[\"all_loss\"]\n        best[\"std\"] = fit[\"std\"]\n        best[\"results\"] = fit[\"results\"]\n        best[\"results_soll\"] = self.results_soll\n        self.results = best\n\n        ### SAVE OPTIMIZED PARAMS AND LOSS\n        sf.save_variables([best], [results_file_name], \"parameter_fit\")\n\n        return best\n
"},{"location":"main/optimize_neuron/#CompNeuroPy.opt_neuron.OptNeuron.__init__","title":"__init__(experiment, get_loss_function, variables_bounds, neuron_model, results_soll=None, target_neuron_model=None, time_step=1.0, compile_folder_name='annarchy_OptNeuron', num_rep_loss=1, method='hyperopt', prior=None, fv_space=None, record=[])","text":"

This prepares the optimization. To run the optimization call the run function.

Parameters:

Name Type Description Default experiment CompNeuroExp class

CompNeuroExp class containing a 'run' function which defines the simulations and recordings

required get_loss_function function

function which takes results_ist and results_soll as arguments and calculates/returns the loss

required variables_bounds dict

Dictionary with parameter names (keys) and their bounds (values). If single values are given as values, the parameter is constant, i.e., not optimized. If a list is given as value, the parameter is optimized and the list contains the lower and upper bound of the parameter (order is not important).

required neuron_model ANNarchy Neuron

The neuron model whose parameters should be optimized.

required results_soll Any

Some variable which contains the target data and can be used by the get_loss_function (second argument of get_loss_function)

Warning

Either provide results_soll or a target_neuron_model not both!

Default: None.

None target_neuron_model ANNarchy Neuron

The neuron model which produces the target data by running the experiment.

Warning

Either provide results_soll or a target_neuron_model not both!

Default: None.

None time_step float

The time step for the simulation in ms. Default: 1.

1.0 compile_folder_name string

The name of the annarchy compilation folder within annarchy_folders/. Default: 'annarchy_OptNeuron'.

'annarchy_OptNeuron' num_rep_loss int

Only interesting for noisy simulations/models. How often should the simulaiton be run to calculate the loss (the defined number of losses is obtained and averaged). Default: 1.

1 method str

Either 'sbi' or 'hyperopt'. If 'sbi' is used, the optimization is performed with sbi. If 'hyperopt' is used, the optimization is performed with hyperopt. Default: 'hyperopt'.

'hyperopt' prior distribution

The prior distribution used by sbi. Default: None, i.e., uniform distributions between the variable bounds are assumed.

None fv_space list

The search space for hyperopt. Default: None, i.e., uniform distributions between the variable bounds are assumed.

None record list

List of strings which define what variables of the tuned neuron should be recorded. Default: [].

[] Source code in CompNeuroPy/opt_neuron.py
@check_types()\ndef __init__(\n    self,\n    experiment: Type[CompNeuroExp],\n    get_loss_function: Callable[[Any, Any], float | list[float]],\n    variables_bounds: dict[str, float | list[float]],\n    neuron_model: Neuron,\n    results_soll: Any | None = None,\n    target_neuron_model: Neuron | None = None,\n    time_step: float = 1.0,\n    compile_folder_name: str = \"annarchy_OptNeuron\",\n    num_rep_loss: int = 1,\n    method: str = \"hyperopt\",\n    prior=None,\n    fv_space: list = None,\n    record: list[str] = [],\n):\n    \"\"\"\n    This prepares the optimization. To run the optimization call the run function.\n\n    Args:\n        experiment (CompNeuroExp class):\n            CompNeuroExp class containing a 'run' function which defines the\n            simulations and recordings\n\n        get_loss_function (function):\n            function which takes results_ist and results_soll as arguments and\n            calculates/returns the loss\n\n        variables_bounds (dict):\n            Dictionary with parameter names (keys) and their bounds (values). If\n            single values are given as values, the parameter is constant, i.e., not\n            optimized. If a list is given as value, the parameter is optimized and\n            the list contains the lower and upper bound of the parameter (order is\n            not important).\n\n        neuron_model (ANNarchy Neuron):\n            The neuron model whose parameters should be optimized.\n\n        results_soll (Any, optional):\n            Some variable which contains the target data and can be used by the\n            get_loss_function (second argument of get_loss_function)\n            !!! warning\n                Either provide results_soll or a target_neuron_model not both!\n            Default: None.\n\n        target_neuron_model (ANNarchy Neuron, optional):\n            The neuron model which produces the target data by running the\n            experiment.\n            !!! 
warning\n                Either provide results_soll or a target_neuron_model not both!\n            Default: None.\n\n        time_step (float, optional):\n            The time step for the simulation in ms. Default: 1.\n\n        compile_folder_name (string, optional):\n            The name of the annarchy compilation folder within annarchy_folders/.\n            Default: 'annarchy_OptNeuron'.\n\n        num_rep_loss (int, optional):\n            Only interesting for noisy simulations/models. How often should the\n            simulaiton be run to calculate the loss (the defined number of losses\n            is obtained and averaged). Default: 1.\n\n        method (str, optional):\n            Either 'sbi' or 'hyperopt'. If 'sbi' is used, the optimization is\n            performed with sbi. If 'hyperopt' is used, the optimization is\n            performed with hyperopt. Default: 'hyperopt'.\n\n        prior (distribution, optional):\n            The prior distribution used by sbi. Default: None, i.e., uniform\n            distributions between the variable bounds are assumed.\n\n        fv_space (list, optional):\n            The search space for hyperopt. Default: None, i.e., uniform\n            distributions between the variable bounds are assumed.\n\n        record (list, optional):\n            List of strings which define what variables of the tuned neuron should\n            be recorded. Default: [].\n    \"\"\"\n\n    if len(self.opt_created) > 0:\n        print(\n            \"OptNeuron: Error: Already another OptNeuron created. Only create one per python session!\"\n        )\n        quit()\n    else:\n        print(\n            \"OptNeuron: Initialize OptNeuron... 
do not create anything with ANNarchy before!\"\n        )\n\n        ### set object variables\n        self.opt_created.append(1)\n        self.record = record\n        self.results_soll = results_soll\n        self.variables_bounds = variables_bounds\n        self.fitting_variables_name_list = self._get_fitting_variables_name_list()\n        self.method = method\n        if method == \"hyperopt\":\n            if fv_space is None:\n                self.fv_space = self._get_hyperopt_space()\n            else:\n                self.fv_space = fv_space\n        self.const_params = self._get_const_params()\n        self.num_rep_loss = num_rep_loss\n        self.neuron_model = neuron_model\n        if method == \"sbi\":\n            self.prior = self._get_prior(prior)\n        self.target_neuron = target_neuron_model\n        self.compile_folder_name = compile_folder_name\n        self.__get_loss__ = get_loss_function\n\n        ### check target_neuron/results_soll\n        self._check_target()\n        ### check neuron models\n        self._check_neuron_models()\n\n        ### setup ANNarchy\n        setup(dt=time_step)\n\n        ### create and compile model\n        ### if neuron models and target neuron model --> create both models then\n        ### test, then clear and create only model for neuron model\n        model, target_model, monitors = self._generate_models()\n\n        self.pop = model.populations[0]\n        if target_model is not None:\n            self.pop_target = target_model.populations[0]\n        else:\n            self.pop_target = None\n        ### create experiment with current monitors\n        self.experiment = experiment(monitors=monitors)\n\n        ### check variables of model\n        self._test_variables()\n\n        ### check neuron models, experiment, get_loss\n        ### if results_soll is None -_> generate results_soll\n        self._check_get_loss_function()\n\n        ### after checking neuron models, experiment, get_loss\n        
### if two models exist --> clear ANNarchy and create/compile again only\n        ### standard model, thus recreate also monitors and experiment\n        clear()\n        model, _, monitors = self._generate_models()\n        self.monitors = monitors\n        self.experiment = experiment(monitors=monitors)\n
"},{"location":"main/optimize_neuron/#CompNeuroPy.opt_neuron.OptNeuron.run","title":"run(max_evals, results_file_name='best', sbi_plot_file='posterior.svg')","text":"

Runs the optimization.

Parameters:

Name Type Description Default max_evals int

number of runs the optimization method performs

required results_file_name str

name of the file which is saved. The file contains the optimized and target results, the obtained parameters, the loss, and the SD of the loss (in case of noisy models with multiple runs per loss calculation) Default: \"best\".

'best' sbi_plot_file str

If you use \"sbi\": the name of the figure which will be saved and shows the posterior. Default: \"posterior.svg\".

'posterior.svg'

Returns:

Name Type Description best dict

dictionary containing the optimized parameters (as keys) and:

  • \"loss\": the loss
  • \"all_loss\": the individual losses of the get_loss_function
  • \"std\": the SD of the loss (in case of noisy models with multiple runs per loss calculation)
  • \"results\": the results generated by the experiment
  • \"results_soll\": the target results
Source code in CompNeuroPy/opt_neuron.py
@check_types()\ndef run(\n    self,\n    max_evals: int,\n    results_file_name: str = \"best\",\n    sbi_plot_file: str = \"posterior.svg\",\n):\n    \"\"\"\n    Runs the optimization.\n\n    Args:\n        max_evals (int):\n            number of runs the optimization method performs\n\n        results_file_name (str, optional):\n            name of the file which is saved. The file contains the optimized and\n            target results, the obtained parameters, the loss, and the SD of the\n            loss (in case of noisy models with multiple runs per loss calculation)\n            Default: \"best\".\n\n        sbi_plot_file (str, optional):\n            If you use \"sbi\": the name of the figure which will be saved and shows\n            the posterior. Default: \"posterior.svg\".\n\n    Returns:\n        best (dict):\n            dictionary containing the optimized parameters (as keys) and:\n\n            - \"loss\": the loss\n            - \"all_loss\": the individual losses of the get_loss_function\n            - \"std\": the SD of the loss (in case of noisy models with multiple\n                runs per loss calculation)\n            - \"results\": the results generated by the experiment\n            - \"results_soll\": the target results\n    \"\"\"\n    if self.method == \"hyperopt\":\n        ### run optimization with hyperopt and return best dict\n        best = fmin(\n            fn=self._run_simulator,\n            space=self.fv_space,\n            algo=tpe.suggest,\n            max_evals=max_evals,\n        )\n    elif self.method == \"sbi\":\n        ### run optimization with sbi and return best dict\n        best = self._run_with_sbi(max_evals, sbi_plot_file)\n    else:\n        print(\"ERROR run; method should be 'hyperopt' or 'sbi'\")\n        quit()\n    fit = self._test_fit(best)\n    best[\"loss\"] = fit[\"loss\"]\n    if self.method == \"sbi\":\n        print(\"\\tbest loss:\", best[\"loss\"])\n    best[\"all_loss\"] = fit[\"all_loss\"]\n    
best[\"std\"] = fit[\"std\"]\n    best[\"results\"] = fit[\"results\"]\n    best[\"results_soll\"] = self.results_soll\n    self.results = best\n\n    ### SAVE OPTIMIZED PARAMS AND LOSS\n    sf.save_variables([best], [results_file_name], \"parameter_fit\")\n\n    return best\n
"}]} \ No newline at end of file diff --git a/site/sitemap.xml.gz b/site/sitemap.xml.gz index 56ca1c86dae01de39d7ee0c61a6441418879c7e2..135302f6b38e558792641ebdaa8aae2c4cb7f586 100644 GIT binary patch delta 12 Tcmb=gXOr*d;P~h_k*yK{8Lb2k delta 12 Tcmb=gXOr*d;E;5l$W{pe6-fgt From 2e9f30e2235a72b8f3edac8eb7371533efddeffb Mon Sep 17 00:00:00 2001 From: olimaol Date: Mon, 15 Jan 2024 10:22:54 +0100 Subject: [PATCH 03/44] OptNeuron check types without warning --- src/CompNeuroPy/opt_neuron.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/CompNeuroPy/opt_neuron.py b/src/CompNeuroPy/opt_neuron.py index b07a154..3002dbc 100644 --- a/src/CompNeuroPy/opt_neuron.py +++ b/src/CompNeuroPy/opt_neuron.py @@ -40,7 +40,7 @@ class OptNeuron: opt_created = [] - @check_types() + @check_types(warnings=False) def __init__( self, experiment: Type[CompNeuroExp], From 1c827f6adfa6f539ad1ef98574eb7a6b577eb298 Mon Sep 17 00:00:00 2001 From: olimaol Date: Mon, 15 Jan 2024 14:25:55 +0100 Subject: [PATCH 04/44] OptNeuron TODO --- src/CompNeuroPy/opt_neuron.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/CompNeuroPy/opt_neuron.py b/src/CompNeuroPy/opt_neuron.py index 3002dbc..25ff1b7 100644 --- a/src/CompNeuroPy/opt_neuron.py +++ b/src/CompNeuroPy/opt_neuron.py @@ -32,6 +32,9 @@ ) sys.exit() +### TODO extend with some evolutionary algorithms (e.g. 
DEAP) +### https://efel.readthedocs.io/en/latest/deap_optimisation.html + class OptNeuron: """ From eb34c46cedc41a15cde27fdc914f1e55ea6b681b Mon Sep 17 00:00:00 2001 From: olmai Date: Tue, 16 Jan 2024 17:47:45 +0100 Subject: [PATCH 05/44] OptNeuron: started deap implementation --- .../opt_neuron/run_opt_neuron_from_data.py | 5 +- src/CompNeuroPy/opt_neuron.py | 245 +++++++++++++++++- 2 files changed, 243 insertions(+), 7 deletions(-) diff --git a/src/CompNeuroPy/examples/opt_neuron/run_opt_neuron_from_data.py b/src/CompNeuroPy/examples/opt_neuron/run_opt_neuron_from_data.py index e9a4eb8..6259b73 100644 --- a/src/CompNeuroPy/examples/opt_neuron/run_opt_neuron_from_data.py +++ b/src/CompNeuroPy/examples/opt_neuron/run_opt_neuron_from_data.py @@ -146,6 +146,9 @@ def run(self, population_name): ### Next, the OptNeuron class needs a function to calculate the loss. +# TODO add neuron_id to the arguments of the get_loss function, the fucntion has to be +# created that it works with multiple neurons in the population and the current +# neuron is selected by the neuron_id which is provided by OptNeuron def get_loss(results_ist: CompNeuroExp._ResultsCl, results_soll): """ Function which has to have the arguments results_ist and results_soll and should @@ -202,7 +205,7 @@ def main(): results_soll=experimental_data["results_soll"], time_step=experimental_data["time_step"], compile_folder_name="annarchy_opt_neuron_example_from_data", - method="hyperopt", + method="deap", record=["r"], ) diff --git a/src/CompNeuroPy/opt_neuron.py b/src/CompNeuroPy/opt_neuron.py index 25ff1b7..5992bb1 100644 --- a/src/CompNeuroPy/opt_neuron.py +++ b/src/CompNeuroPy/opt_neuron.py @@ -10,6 +10,7 @@ import sys from typing import Callable, Any, Type from typingchecker import check_types +import random # multiprocessing from multiprocessing import Process @@ -26,15 +27,27 @@ from sbi import analysis as analysis from sbi import utils as utils from sbi.inference import SNPE, prepare_for_sbi, 
simulate_for_sbi + + # efel + import efel + + # deap + import deap + import deap.gp + import deap.benchmarks + from deap import base + from deap import creator + from deap import tools + from deap import algorithms + from deap.tools import cxSimulatedBinaryBounded, mutPolynomialBounded + from deap.algorithms import varAnd + except: print( "OptNeuron: Error: You need to install hyperopt, torch, and sbi to use OptNeuron (e.g. use pip install hyperopt torch sbi))" ) sys.exit() -### TODO extend with some evolutionary algorithms (e.g. DEAP) -### https://efel.readthedocs.io/en/latest/deap_optimisation.html - class OptNeuron: """ @@ -155,7 +168,7 @@ def __init__( self.prior = self._get_prior(prior) self.target_neuron = target_neuron_model self.compile_folder_name = compile_folder_name - self.__get_loss__ = get_loss_function + self._get_loss = get_loss_function ### check target_neuron/results_soll self._check_target() @@ -392,7 +405,7 @@ def _check_get_loss_function(self): )["results"] try: - self.__get_loss__(results_ist, self.results_soll) + self._get_loss(results_ist, self.results_soll) except: print( "\nThe get_loss_function, experiment and neuron model(s) are not compatible:\n" @@ -522,6 +535,20 @@ def _sbi_simulation_wrapper(self, fitparams): return torch.as_tensor(data) + def _deap_simulation_wrapper(self, individual: list): + """ + Obtain the loss of the individual from the deap optimization. 
+ + Args: + individual (list): + list with values for fitting parameters + + Returns: + loss (tuple): + loss as tuple for deap optimization + """ + return (self._run_simulator(individual)["loss"],) + def _run_simulator_with_results(self, fitparams, pop=None): """ Runs the function simulator with the multiprocessing manager (if function is @@ -639,7 +666,7 @@ def _simulator( if self.results_soll is not None: ### compute loss - all_loss = self.__get_loss__(results, self.results_soll) + all_loss = self._get_loss(results, self.results_soll) if isinstance(all_loss, list) or isinstance(all_loss, type(np.zeros(1))): loss = sum(all_loss) else: @@ -842,6 +869,8 @@ def run( elif self.method == "sbi": ### run optimization with sbi and return best dict best = self._run_with_sbi(max_evals, sbi_plot_file) + elif self.method == "deap": + best = self._run_with_deap() else: print("ERROR run; method should be 'hyperopt' or 'sbi'") quit() @@ -860,6 +889,210 @@ def run( return best + def _run_with_deap(self): + # general paramters + MATE_FUNCTION_KEY = "cxSimulatedBinaryBounded" + MUTATE_FUNCTION_KEY = "mutPolynomialBounded" + VARIATE_FUNCTION_KEY = "varOr" + POP_SIZE = 100 + # mate, mutate and variate parameters + ETA = 20.0 + INDPB = 0.1 + LAMBDA = POP_SIZE * 2 + REPRODPB = 0.1 + CXPB = 0.7 * (1 - REPRODPB) + MUTPB = 0.3 * (1 - REPRODPB) + TOURNSIZE = max(int(round(POP_SIZE * 0.01)), 2) + # derived parameters + LOWER = [ + min(self.variables_bounds[name]) + for name in self.fitting_variables_name_list + ] + UPPER = [ + max(self.variables_bounds[name]) + for name in self.fitting_variables_name_list + ] + MATE_FUNCTION = { + "cxOnePoint": tools.cxOnePoint, + "cxTwoPoint": tools.cxTwoPoint, + "cxUniform": tools.cxUniform, + "cxPartialyMatched": tools.cxPartialyMatched, + "cxUniformPartialyMatched": tools.cxUniformPartialyMatched, + "cxOrdered": tools.cxOrdered, + "cxBlend": tools.cxBlend, + "cxSimulatedBinary": tools.cxSimulatedBinary, + "cxSimulatedBinaryBounded": 
tools.cxSimulatedBinaryBounded, + "cxMessyOnePoint": tools.cxMessyOnePoint, + }[MATE_FUNCTION_KEY] + MUTATE_FUNCTION = { + "mutGaussian": tools.mutGaussian, + "mutPolynomialBounded": tools.mutPolynomialBounded, + }[MUTATE_FUNCTION_KEY] + VARIATE_FUNCTION = { + "varOr": algorithms.varOr, + "varAnd": algorithms.varAnd, + }[VARIATE_FUNCTION_KEY] + SELECT_FUNCTION_KEY = "selTournament" + SELECT_FUNCTION = { + "selTournament": tools.selTournament, + "selBest": tools.selBest, + "selNSGA2": tools.selNSGA2, + "selSPEA2": tools.selSPEA2, + }[SELECT_FUNCTION_KEY] + + # init deap toolbox + toolbox = base.Toolbox() + + # define what individuals are (numbers for the fitting parameters), how their + # fitness is weighted (negative to get fitness = negative loss), and how they + # are created initially (uniformly between the bounds) + creator.create("Fitness", base.Fitness, weights=(-1.0,)) + creator.create("Individual", list, fitness=creator.Fitness) + toolbox.register("uniformparams", self.init_uniform_deap) + toolbox.register( + "Individual", tools.initIterate, creator.Individual, toolbox.uniformparams + ) + toolbox.register("population", tools.initRepeat, list, toolbox.Individual) + + # register evaluate function which gets an individual and returns the loss + # (which will be multiplied by -1.0 to get the fitness) + toolbox.register("evaluate", self._deap_simulation_wrapper) + + # register functions for the genetic algorithm + # mate (mate two individuals to create two children) TODO generalize MATE_FUNCTION kwargs + # mutate (mutate two individuals to create two new ones) TODO generalize MUTATE_FUNCTION kwargs + # variate (how crossover, variate and reproduce are applied to create an + # offspring population from the current population) + # select (how individuals are selected from the offspring population to create + # the next generation population) + toolbox.register( + "mate", + MATE_FUNCTION, + **{"eta": ETA, "low": LOWER, "up": UPPER}, + ) + toolbox.register( + 
"mutate", + MUTATE_FUNCTION, + **{"eta": ETA, "low": LOWER, "up": UPPER, "indpb": INDPB}, + ) + toolbox.register( + "variate", + VARIATE_FUNCTION, + **{"toolbox": toolbox, "lambda_": LAMBDA, "cxpb": CXPB, "mutpb": MUTPB}, + ) + toolbox.register( + "select", SELECT_FUNCTION, {"k": POP_SIZE, "tournsize": TOURNSIZE} + ) + + # initialize population (initial variables for POP_SIZE individuals/neurons) + pop = toolbox.population(n=POP_SIZE) + + # register some statistics we want to print during the run of the algorithm + stats = tools.Statistics(lambda ind: ind.fitness.values) + stats.register("avg", np.mean) + stats.register("std", np.std) + stats.register("min", np.min) + stats.register("max", np.max) + + # run the algorithm + pop, logbook = self._evolutionary_algorithm() + + def _evolutionary_algorithm( + self, + population, + toolbox, + cxpb, + mutpb, + ngen, + stats=None, + halloffame=None, + verbose=__debug__, + ): + """This algorithm reproduce the simplest evolutionary algorithm as + presented in chapter 7 of [Back2000]_. + + :param population: A list of individuals. + :param toolbox: A :class:`~deap.base.Toolbox` that contains the evolution + operators. + :param cxpb: The probability of mating two individuals. + :param mutpb: The probability of mutating an individual. + :param ngen: The number of generation. + :param stats: A :class:`~deap.tools.Statistics` object that is updated + inplace, optional. + :param halloffame: A :class:`~deap.tools.HallOfFame` object that will + contain the best individuals, optional. + :param verbose: Whether or not to log the statistics. 
+ :returns: The final population + :returns: A class:`~deap.tools.Logbook` with the statistics of the + evolution + + """ + # TODO + logbook = tools.Logbook() + logbook.header = ["gen", "nevals"] + (stats.fields if stats else []) + + # Evaluate the individuals with an invalid fitness + invalid_ind = [ind for ind in population if not ind.fitness.valid] + fitnesses = toolbox.map(toolbox.evaluate, invalid_ind) + for ind, fit in zip(invalid_ind, fitnesses): + ind.fitness.values = fit + + if halloffame is not None: + halloffame.update(population) + + record = stats.compile(population) if stats else {} + logbook.record(gen=0, nevals=len(invalid_ind), **record) + if verbose: + print(logbook.stream) + + # Begin the generational process + for gen in range(1, ngen + 1): + # Select the next generation individuals + offspring = toolbox.select(population, len(population)) + + # Vary the pool of individuals + offspring = varAnd(offspring, toolbox, cxpb, mutpb) + + # Evaluate the individuals with an invalid fitness + invalid_ind = [ind for ind in offspring if not ind.fitness.valid] + fitnesses = toolbox.map(toolbox.evaluate, invalid_ind) + for ind, fit in zip(invalid_ind, fitnesses): + ind.fitness.values = fit + + # Update the hall of fame with the generated individuals + if halloffame is not None: + halloffame.update(offspring) + + # Replace the current population by the offspring + population[:] = offspring + + # Append the current generation statistics to the logbook + record = stats.compile(population) if stats else {} + logbook.record(gen=gen, nevals=len(invalid_ind), **record) + if verbose: + print(logbook.stream) + + return population, logbook + + def init_uniform_deap(self): + """ + Returns: + uniform (list): + list with values for fitting parameters + """ + lower_list = [ + min(self.variables_bounds[name]) + for name in self.fitting_variables_name_list + ] + upper_list = [ + max(self.variables_bounds[name]) + for name in self.fitting_variables_name_list + ] + return [ + 
random.uniform(lower_list, upper_list) + for _ in range(len(self.fitting_variables_name_list)) + ] + ### old name for backward compatibility, TODO remove opt_neuron = OptNeuron From 657a7827c048a1ccbcd2729b009089129953d26e Mon Sep 17 00:00:00 2001 From: olmai Date: Wed, 17 Jan 2024 16:21:34 +0100 Subject: [PATCH 06/44] OptNeuron: implemented cma optimization --- .../opt_neuron/run_opt_neuron_from_data.py | 6 +- .../opt_neuron/run_opt_neuron_from_neuron.py | 18 +- src/CompNeuroPy/opt_neuron.py | 500 ++++++++++-------- 3 files changed, 303 insertions(+), 221 deletions(-) diff --git a/src/CompNeuroPy/examples/opt_neuron/run_opt_neuron_from_data.py b/src/CompNeuroPy/examples/opt_neuron/run_opt_neuron_from_data.py index 6259b73..e5eaf5c 100644 --- a/src/CompNeuroPy/examples/opt_neuron/run_opt_neuron_from_data.py +++ b/src/CompNeuroPy/examples/opt_neuron/run_opt_neuron_from_data.py @@ -7,15 +7,15 @@ import numpy as np from ANNarchy import Neuron, dt - +# TODO update docs for OptNeuron and examples, warn that neuron sjhoudl not have :population and that the experiment should be conductable for a population consisting of multiple neurons ### in this example we want to fit an ANNarchy neuron model to some data (which ca be ### somehow obtained by simulating the neuron and recording variables) for this example, ### we have the following simple neuron model my_neuron = Neuron( parameters=""" I_app = 0 - a = 0 : population - b = 0 : population + a = 0 + b = 0 """, equations=""" r = a*I_app + b diff --git a/src/CompNeuroPy/examples/opt_neuron/run_opt_neuron_from_neuron.py b/src/CompNeuroPy/examples/opt_neuron/run_opt_neuron_from_neuron.py index ab24d3c..c872509 100644 --- a/src/CompNeuroPy/examples/opt_neuron/run_opt_neuron_from_neuron.py +++ b/src/CompNeuroPy/examples/opt_neuron/run_opt_neuron_from_neuron.py @@ -20,15 +20,15 @@ complex_neuron = Neuron( parameters=""" I_app = 0 - f = 6.0542364610842572e-002 : population - e = 3.7144041714209490e+000 : population - d = 
-4.9446336126026436e-001 : population - c = 9.0909599124334911e-002 : population - b = -4.4497411506061648e-003 : population - a = -6.2239117460540167e-005 : population + m0 = 1 + m1 = 2 + m2 = 3 + n0 = 1 + n1 = 0 + n2 = -1 """, equations=""" - r = a*I_app**5 + b*I_app**4 + c*I_app**3 + d*I_app**2 + e*I_app**1 + f + r = m0*I_app + n0 + m1*I_app + n1 + m2*I_app + n2 """, ) @@ -91,14 +91,14 @@ def main(): target_neuron_model=complex_neuron, time_step=1, compile_folder_name="annarchy_opt_neuron_example_from_neuron", - method="hyperopt", + method="deap", record=["r"], ) ### run the optimization, define how often the experiment should be repeated fit = opt.run(max_evals=1000, results_file_name="best_from_neuron") - ### print optimized parameters, we should get around a=2.8 and b=0.28 + ### print optimized parameters, we should get around a=6 and b=0 print("a", fit["a"]) print("b", fit["b"]) print(list(fit.keys())) diff --git a/src/CompNeuroPy/opt_neuron.py b/src/CompNeuroPy/opt_neuron.py index 5992bb1..46b0290 100644 --- a/src/CompNeuroPy/opt_neuron.py +++ b/src/CompNeuroPy/opt_neuron.py @@ -11,6 +11,8 @@ from typing import Callable, Any, Type from typingchecker import check_types import random +from tqdm import tqdm +from copy import deepcopy # multiprocessing from multiprocessing import Process @@ -41,6 +43,7 @@ from deap import algorithms from deap.tools import cxSimulatedBinaryBounded, mutPolynomialBounded from deap.algorithms import varAnd + from deap import cma except: print( @@ -170,6 +173,12 @@ def __init__( self.compile_folder_name = compile_folder_name self._get_loss = get_loss_function + ### prepare deap and get popsize + if method == "deap": + self.deap_dict, self.popsize = self._get_deap_dict() + else: + self.popsize = 1 + ### check target_neuron/results_soll self._check_target() ### check neuron models @@ -181,7 +190,7 @@ def __init__( ### create and compile model ### if neuron models and target neuron model --> create both models then ### test, then clear 
and create only model for neuron model - model, target_model, monitors = self._generate_models() + model, target_model, monitors = self._generate_models(self.popsize) self.pop = model.populations[0] if target_model is not None: @@ -199,17 +208,21 @@ def __init__( self._check_get_loss_function() ### after checking neuron models, experiment, get_loss - ### if two models exist --> clear ANNarchy and create/compile again only + ### clear ANNarchy and create/compile again only ### standard model, thus recreate also monitors and experiment clear() - model, _, monitors = self._generate_models() + model, _, monitors = self._generate_models(self.popsize) self.monitors = monitors self.experiment = experiment(monitors=monitors) - def _generate_models(self): + def _generate_models(self, popsize=1): """ Generates the tuned model and the target_model (only if results_soll is None). + Args: + popsize (int, optional): + The number of neurons in the population(s). Default: 1. + Returns: model (CompNeuroModel): The model which is used for the optimization. 
@@ -230,7 +243,11 @@ def _generate_models(self): ### create two models model = CompNeuroModel( model_creation_function=self._raw_neuron, - model_kwargs={"neuron": self.neuron_model, "name": "model_neuron"}, + model_kwargs={ + "neuron": self.neuron_model, + "name": "model_neuron", + "size": popsize, + }, name="standard_model", do_create=True, do_compile=False, @@ -242,6 +259,7 @@ def _generate_models(self): model_kwargs={ "neuron": self.target_neuron, "name": "target_model_neuron", + "size": 1, }, name="target_model", do_create=True, @@ -265,7 +283,11 @@ def _generate_models(self): ### create one model model = CompNeuroModel( model_creation_function=self._raw_neuron, - model_kwargs={"neuron": self.neuron_model, "name": "model_neuron"}, + model_kwargs={ + "neuron": self.neuron_model, + "name": "model_neuron", + "size": popsize, + }, name="single_model", do_create=True, do_compile=True, @@ -405,7 +427,7 @@ def _check_get_loss_function(self): )["results"] try: - self._get_loss(results_ist, self.results_soll) + self._wrapper_get_loss(results_ist, self.results_soll) except: print( "\nThe get_loss_function, experiment and neuron model(s) are not compatible:\n" @@ -414,18 +436,66 @@ def _check_get_loss_function(self): quit() print("Done\n") - def _raw_neuron(self, neuron, name): + def _wrapper_get_loss(self, results_ist, results_soll): + """ + TODO + """ + ### + all_loss_list = [] + for neuron_idx in range(self.popsize): + results_ist_neuron = self._get_results_of_single_neuron( + results_ist, neuron_idx + ) + all_loss_list.append(self._get_loss(results_ist_neuron, results_soll)) + + return all_loss_list + + def _get_results_of_single_neuron(self, results, neuron_idx): + """ + TODO + """ + if self.popsize == 1: + return results + + results_neuron = deepcopy(results) + + for chunk in range(len(results_neuron.recordings)): + for rec_key in results_neuron.recordings[chunk].keys(): + if "spike" in rec_key and not ("target" in rec_key): + 
results_neuron.recordings[chunk][rec_key] = { + 0: results_neuron.recordings[chunk][rec_key][neuron_idx] + } + elif not ( + "period" in rec_key + or "parameter_dict" in rec_key + or "dt" in rec_key + or "target" in rec_key + ): + # print(rec_key) + # print(results_neuron.recordings[chunk][rec_key].shape) + results_neuron.recordings[chunk][ + rec_key + ] = results_neuron.recordings[chunk][rec_key][ + :, neuron_idx + ].reshape( + -1, 1 + ) + + return results_neuron + + def _raw_neuron(self, neuron, name, size): """ Generates a population with one neuron of the given neuron model. Args: neuron (ANNarchy Neuron): The neuron model. - name (str): The name of the population. + size (int): + The number of neurons in the population. """ - Population(1, neuron=neuron, name=name) + Population(size, neuron=neuron, name=name) def _test_variables(self): """ @@ -480,7 +550,7 @@ def _run_simulator(self, fitparams): m_list = manager.dict() ### in case of noisy models, here optionally run multiple simulations, to mean the loss - lossAr = np.zeros(self.num_rep_loss) + loss_list_over_runs = [] return_results = False for nr_run in range(self.num_rep_loss): @@ -493,17 +563,28 @@ def _run_simulator(self, fitparams): proc.start() proc.join() ### get simulation results/loss - lossAr[nr_run] = m_list[0] + loss_list_from_simulator = m_list[0] + loss_list_over_runs.append(loss_list_from_simulator) + loss_arr = np.array(loss_list_over_runs) ### calculate mean and std of loss if self.num_rep_loss > 1: - loss = np.mean(lossAr) - std = np.std(lossAr) + ### multiple runs, mean over runs + loss_ret_arr = np.mean(loss_arr, 0) + std_ret_arr = np.std(loss_arr, 0) + else: + loss_ret_arr = loss_arr[0] + std_ret_arr = [None] * self.popsize + + if self.popsize == 1: + loss = loss_ret_arr[0] + std = std_ret_arr[0] else: - loss = lossAr[0] - std = None + loss = loss_ret_arr + std = std_ret_arr - ### return loss and other things for optimization + ### return loss and other things for optimization, if 
multiple neurons + ### --> loss and std are arrays with loss/std for each neuron if self.num_rep_loss > 1: return {"status": STATUS_OK, "loss": loss, "loss_variance": std} else: @@ -535,19 +616,16 @@ def _sbi_simulation_wrapper(self, fitparams): return torch.as_tensor(data) - def _deap_simulation_wrapper(self, individual: list): + def _deap_simulation_wrapper(self, population: list): """ - Obtain the loss of the individual from the deap optimization. - - Args: - individual (list): - list with values for fitting parameters - - Returns: - loss (tuple): - loss as tuple for deap optimization + TODO """ - return (self._run_simulator(individual)["loss"],) + ### transpose population list + populationT = np.array(population).T.tolist() + ### get loss list + loss_list = self._run_simulator(populationT)["loss"] + + return [(loss_list[neuron_idx],) for neuron_idx in range(len(population))] def _run_simulator_with_results(self, fitparams, pop=None): """ @@ -578,8 +656,8 @@ def _run_simulator_with_results(self, fitparams, pop=None): m_list = manager.dict() ### in case of noisy models, here optionally run multiple simulations, to mean the loss - lossAr = np.zeros(self.num_rep_loss) - all_loss_list = [] + loss_list_over_runs = [] + all_loss_list_over_runs = [] return_results = True for nr_run in range(self.num_rep_loss): ### initialize for each run a new rng (--> not always have same noise in case of noisy models/simulations) @@ -592,21 +670,31 @@ def _run_simulator_with_results(self, fitparams, pop=None): proc.start() proc.join() ### get simulation results/loss - lossAr[nr_run] = m_list[0] + loss_list_over_runs.append(m_list[0]) results_ist = m_list[1] - all_loss_list.append(m_list[2]) + all_loss_list_over_runs.append(m_list[2]) - all_loss_arr = np.array(all_loss_list) - ### calculate mean and std of loss + all_loss_arr = np.array(all_loss_list_over_runs) + loss_arr = np.array(loss_list_over_runs) + ### calculate mean and std of loss over runs if self.num_rep_loss > 1: - loss = 
np.mean(lossAr) - std = np.std(lossAr) + loss = np.mean(loss_arr, 0) + std = np.std(loss_arr) all_loss = np.mean(all_loss_arr, 0) else: - loss = lossAr[0] - std = None + loss = loss_arr[0] + std = [None] * self.popsize all_loss = all_loss_arr[0] + if self.popsize == 1: + loss = loss[0] + std = std[0] + all_loss = all_loss[0] + else: + loss = loss + std = std + all_loss = all_loss + ### return loss and other things for optimization and results if self.num_rep_loss > 1: return { @@ -665,64 +753,91 @@ def _simulator( results = self.experiment.run(pop) if self.results_soll is not None: - ### compute loss - all_loss = self._get_loss(results, self.results_soll) - if isinstance(all_loss, list) or isinstance(all_loss, type(np.zeros(1))): - loss = sum(all_loss) - else: - loss = all_loss + ### compute loss_list, loss for each neuron + loss_list = [] + ### wrapper_get_loss returns list (neurons) of lists (individual losses) + all_loss_list = self._wrapper_get_loss(results, self.results_soll) + for all_loss in all_loss_list: + if isinstance(all_loss, list) or isinstance( + all_loss, type(np.zeros(1)) + ): + loss_list.append(sum(all_loss)) + else: + loss_list.append(all_loss) else: - all_loss = 999 - loss = 999 + all_loss_list = [999] * self.popsize + loss_list = [999] * self.popsize ### "return" loss and other optional things - m_list[0] = loss + m_list[0] = loss_list if return_results: m_list[1] = results - m_list[2] = all_loss + m_list[2] = all_loss_list def _set_fitting_parameters( self, fitparams, - pop=None, + pop, ): """ Sets all given parameters for the population pop. Args: + fitparams (list): + list with values for fitting parameters, either a single list or a list + of lists (first dimensio is the number of parameters, second dimension + is the number of neurons) pop (str, optional): ANNarchy population name. Default: None, i.e., the tuned population is used. 
""" - if pop is None: - pop = self.pop + ### only set parameters of the fitted neuron model + if pop != self.pop: + return ### get all variables dict (combine fitting variables and const variables) all_variables_dict = self.const_params.copy() + ### multiply const params for number of neurons + for const_param_key, const_param_val in all_variables_dict.items(): + if not (isinstance(const_param_val, str)): + all_variables_dict[const_param_key] = [ + all_variables_dict[const_param_key] + ] * self.popsize + for fitting_variable_idx, fitting_variable_name in enumerate( self.fitting_variables_name_list ): - all_variables_dict[fitting_variable_name] = fitparams[fitting_variable_idx] + if not (isinstance(fitparams[fitting_variable_idx], list)): + add_params = [fitparams[fitting_variable_idx]] * self.popsize + else: + add_params = fitparams[fitting_variable_idx] + all_variables_dict[fitting_variable_name] = add_params ### evaluate variables defined by a str for key, val in all_variables_dict.items(): if isinstance(val, str): - all_variables_dict[key] = ef.evaluate_expression_with_dict( - val, all_variables_dict - ) - - ### only set parameters of the fitted neuron model (in case target neuron model is given) - if pop == self.pop: - ### set parameters - for param_name, param_val in all_variables_dict.items(): - pop_parameter_names = get_population(pop).attributes - ### only if param_name in parameter attributes - if param_name in pop_parameter_names: - setattr( - get_population(pop), - param_name, - param_val, + all_variables_dict[key] = [ + ef.evaluate_expression_with_dict( + val, + { + all_variables_key: all_variables_dict[all_variables_key][ + neuron_idx + ] + for all_variables_key in all_variables_dict.keys() + }, ) + for neuron_idx in range(self.popsize) + ] + + ### set parameters + for param_name, param_val in all_variables_dict.items(): + pop_parameter_names = get_population(pop).attributes + ### only if param_name in parameter attributes + if param_name in 
pop_parameter_names: + if self.popsize == 1: + setattr(get_population(pop), param_name, param_val[0]) + else: + setattr(get_population(pop), param_name, param_val) def _test_fit(self, fitparams_dict): """ @@ -889,133 +1004,83 @@ def run( return best - def _run_with_deap(self): - # general paramters - MATE_FUNCTION_KEY = "cxSimulatedBinaryBounded" - MUTATE_FUNCTION_KEY = "mutPolynomialBounded" - VARIATE_FUNCTION_KEY = "varOr" - POP_SIZE = 100 - # mate, mutate and variate parameters - ETA = 20.0 - INDPB = 0.1 - LAMBDA = POP_SIZE * 2 - REPRODPB = 0.1 - CXPB = 0.7 * (1 - REPRODPB) - MUTPB = 0.3 * (1 - REPRODPB) - TOURNSIZE = max(int(round(POP_SIZE * 0.01)), 2) - # derived parameters - LOWER = [ - min(self.variables_bounds[name]) - for name in self.fitting_variables_name_list - ] - UPPER = [ - max(self.variables_bounds[name]) - for name in self.fitting_variables_name_list - ] - MATE_FUNCTION = { - "cxOnePoint": tools.cxOnePoint, - "cxTwoPoint": tools.cxTwoPoint, - "cxUniform": tools.cxUniform, - "cxPartialyMatched": tools.cxPartialyMatched, - "cxUniformPartialyMatched": tools.cxUniformPartialyMatched, - "cxOrdered": tools.cxOrdered, - "cxBlend": tools.cxBlend, - "cxSimulatedBinary": tools.cxSimulatedBinary, - "cxSimulatedBinaryBounded": tools.cxSimulatedBinaryBounded, - "cxMessyOnePoint": tools.cxMessyOnePoint, - }[MATE_FUNCTION_KEY] - MUTATE_FUNCTION = { - "mutGaussian": tools.mutGaussian, - "mutPolynomialBounded": tools.mutPolynomialBounded, - }[MUTATE_FUNCTION_KEY] - VARIATE_FUNCTION = { - "varOr": algorithms.varOr, - "varAnd": algorithms.varAnd, - }[VARIATE_FUNCTION_KEY] - SELECT_FUNCTION_KEY = "selTournament" - SELECT_FUNCTION = { - "selTournament": tools.selTournament, - "selBest": tools.selBest, - "selNSGA2": tools.selNSGA2, - "selSPEA2": tools.selSPEA2, - }[SELECT_FUNCTION_KEY] - - # init deap toolbox - toolbox = base.Toolbox() - - # define what individuals are (numbers for the fitting parameters), how their - # fitness is weighted (negative to get fitness = 
negative loss), and how they - # are created initially (uniformly between the bounds) - creator.create("Fitness", base.Fitness, weights=(-1.0,)) - creator.create("Individual", list, fitness=creator.Fitness) - toolbox.register("uniformparams", self.init_uniform_deap) - toolbox.register( - "Individual", tools.initIterate, creator.Individual, toolbox.uniformparams + def _get_deap_dict(self): + LOWER = np.array( + [ + min(self.variables_bounds[name]) + for name in self.fitting_variables_name_list + ] + ) + UPPER = np.array( + [ + max(self.variables_bounds[name]) + for name in self.fitting_variables_name_list + ] ) - toolbox.register("population", tools.initRepeat, list, toolbox.Individual) - # register evaluate function which gets an individual and returns the loss - # (which will be multiplied by -1.0 to get the fitness) + creator.create("FitnessMin", base.Fitness, weights=(-1.0,)) + creator.create("Individual", list, fitness=creator.FitnessMin) + + toolbox = base.Toolbox() toolbox.register("evaluate", self._deap_simulation_wrapper) - # register functions for the genetic algorithm - # mate (mate two individuals to create two children) TODO generalize MATE_FUNCTION kwargs - # mutate (mutate two individuals to create two new ones) TODO generalize MUTATE_FUNCTION kwargs - # variate (how crossover, variate and reproduce are applied to create an - # offspring population from the current population) - # select (how individuals are selected from the offspring population to create - # the next generation population) - toolbox.register( - "mate", - MATE_FUNCTION, - **{"eta": ETA, "low": LOWER, "up": UPPER}, - ) - toolbox.register( - "mutate", - MUTATE_FUNCTION, - **{"eta": ETA, "low": LOWER, "up": UPPER, "indpb": INDPB}, - ) - toolbox.register( - "variate", - VARIATE_FUNCTION, - **{"toolbox": toolbox, "lambda_": LAMBDA, "cxpb": CXPB, "mutpb": MUTPB}, - ) - toolbox.register( - "select", SELECT_FUNCTION, {"k": POP_SIZE, "tournsize": TOURNSIZE} + strategy = cma.Strategy( + 
centroid=(LOWER + UPPER) / 2, + sigma=UPPER - LOWER, ) - # initialize population (initial variables for POP_SIZE individuals/neurons) - pop = toolbox.population(n=POP_SIZE) + toolbox.register("generate", strategy.generate, creator.Individual) + toolbox.register("update", strategy.update) - # register some statistics we want to print during the run of the algorithm + hof = tools.HallOfFame(1) stats = tools.Statistics(lambda ind: ind.fitness.values) stats.register("avg", np.mean) stats.register("std", np.std) stats.register("min", np.min) stats.register("max", np.max) - # run the algorithm - pop, logbook = self._evolutionary_algorithm() + return { + "toolbox": toolbox, + "hof": hof, + "stats": stats, + }, strategy.lambda_ - def _evolutionary_algorithm( - self, - population, - toolbox, - cxpb, - mutpb, - ngen, - stats=None, - halloffame=None, - verbose=__debug__, + def _run_with_deap(self): + """ + TODO + """ + + pop, logbook = self._eaGenerateUpdate( + self.deap_dict["toolbox"], + ngen=500, + stats=self.deap_dict["stats"], + halloffame=self.deap_dict["hof"], + verbose=False, + ) + + best = {} + for param_idx, param_name in enumerate(self.fitting_variables_name_list): + best[param_name] = self.deap_dict["hof"][0][param_idx] + best["logbook"] = logbook + best["deap_pop"] = pop + + ### plot logbook + plt.figure() + plt.plot(logbook.select("gen"), logbook.select("min")) + plt.xlabel("generation") + plt.ylabel("loss") + plt.savefig("logbook.png") + + return best + + def _eaGenerateUpdate( + self, toolbox, ngen, halloffame=None, stats=None, verbose=__debug__ ): - """This algorithm reproduce the simplest evolutionary algorithm as - presented in chapter 7 of [Back2000]_. + """This is algorithm implements the ask-tell model proposed in + [Colette2010]_, where ask is called `generate` and tell is called `update`. - :param population: A list of individuals. :param toolbox: A :class:`~deap.base.Toolbox` that contains the evolution operators. 
- :param cxpb: The probability of mating two individuals. - :param mutpb: The probability of mutating an individual. :param ngen: The number of generation. :param stats: A :class:`~deap.tools.Statistics` object that is updated inplace, optional. @@ -1026,51 +1091,68 @@ def _evolutionary_algorithm( :returns: A class:`~deap.tools.Logbook` with the statistics of the evolution - """ - # TODO - logbook = tools.Logbook() - logbook.header = ["gen", "nevals"] + (stats.fields if stats else []) + The algorithm generates the individuals using the :func:`toolbox.generate` + function and updates the generation method with the :func:`toolbox.update` + function. It returns the optimized population and a + :class:`~deap.tools.Logbook` with the statistics of the evolution. The + logbook will contain the generation number, the number of evaluations for + each generation and the statistics if a :class:`~deap.tools.Statistics` is + given as argument. The pseudocode goes as follow :: - # Evaluate the individuals with an invalid fitness - invalid_ind = [ind for ind in population if not ind.fitness.valid] - fitnesses = toolbox.map(toolbox.evaluate, invalid_ind) - for ind, fit in zip(invalid_ind, fitnesses): - ind.fitness.values = fit + for g in range(ngen): + population = toolbox.generate() + evaluate(population) + toolbox.update(population) - if halloffame is not None: - halloffame.update(population) - record = stats.compile(population) if stats else {} - logbook.record(gen=0, nevals=len(invalid_ind), **record) - if verbose: - print(logbook.stream) + This function expects :meth:`toolbox.generate` and :meth:`toolbox.evaluate` aliases to be + registered in the toolbox. - # Begin the generational process - for gen in range(1, ngen + 1): - # Select the next generation individuals - offspring = toolbox.select(population, len(population)) + .. [Colette2010] Collette, Y., N. Hansen, G. Pujol, D. Salazar Aponte and + R. Le Riche (2010). 
On Object-Oriented Programming of Optimizers - + Examples in Scilab. In P. Breitkopf and R. F. Coelho, eds.: + Multidisciplinary Design Optimization in Computational Mechanics, + Wiley, pp. 527-565; - # Vary the pool of individuals - offspring = varAnd(offspring, toolbox, cxpb, mutpb) + """ + logbook = tools.Logbook() + logbook.header = ["gen", "nevals"] + (stats.fields if stats else []) - # Evaluate the individuals with an invalid fitness - invalid_ind = [ind for ind in offspring if not ind.fitness.valid] - fitnesses = toolbox.map(toolbox.evaluate, invalid_ind) - for ind, fit in zip(invalid_ind, fitnesses): + # define progress bar 1000/1000 [00:45<00:00, 22.17trial/s, best loss: 0.08673317798888838] + progress_bar = tqdm(range(ngen), total=ngen, unit="gen") + + for gen in progress_bar: + # Generate a new population + population = toolbox.generate() + # clip individuals of population to bounds + for ind in population: + for idx, param_name in enumerate(self.fitting_variables_name_list): + if ind[idx] < min(self.variables_bounds[param_name]): + ind[idx] = min(self.variables_bounds[param_name]) + elif ind[idx] > max(self.variables_bounds[param_name]): + ind[idx] = max(self.variables_bounds[param_name]) + # Evaluate the individuals + fitnesses = toolbox.evaluate(population) + for ind, fit in zip(population, fitnesses): ind.fitness.values = fit - # Update the hall of fame with the generated individuals - if halloffame is not None: - halloffame.update(offspring) + for ind in population: + nan_in_pop = np.isnan(ind.fitness.values[0]) - # Replace the current population by the offspring - population[:] = offspring + if halloffame is not None and not nan_in_pop: + halloffame.update(population) - # Append the current generation statistics to the logbook - record = stats.compile(population) if stats else {} - logbook.record(gen=gen, nevals=len(invalid_ind), **record) + # Update the strategy with the evaluated individuals + toolbox.update(population) + + record = 
stats.compile(population) if stats is not None else {} + logbook.record(gen=gen, nevals=len(population), **record) if verbose: print(logbook.stream) + # update progress bar with current best loss + progress_bar.set_postfix_str( + f"best loss: {halloffame[0].fitness.values[0]:.5f}" + ) return population, logbook From d487a999206f9ed1ccd4a4bd855ac8dbb64fb5eb Mon Sep 17 00:00:00 2001 From: olmai Date: Thu, 18 Jan 2024 10:54:58 +0100 Subject: [PATCH 07/44] documentated deap implementation adjusted docs for site and examples --- docs/examples/opt_neuron.md | 61 ++-- docs/main/optimize_neuron.md | 10 +- .../opt_neuron/run_opt_neuron_from_data.py | 19 +- .../opt_neuron/run_opt_neuron_from_neuron.py | 5 +- src/CompNeuroPy/opt_neuron.py | 312 +++++++++++------- 5 files changed, 258 insertions(+), 149 deletions(-) diff --git a/docs/examples/opt_neuron.md b/docs/examples/opt_neuron.md index 3239323..04bc2bd 100644 --- a/docs/examples/opt_neuron.md +++ b/docs/examples/opt_neuron.md @@ -11,15 +11,15 @@ from CompNeuroPy.opt_neuron import OptNeuron import numpy as np from ANNarchy import Neuron, dt - ### in this example we want to fit an ANNarchy neuron model to some data (which ca be ### somehow obtained by simulating the neuron and recording variables) for this example, -### we have the following simple neuron model +### we have the following simple neuron model, you must not use the :population flag +### for the parameters! my_neuron = Neuron( parameters=""" I_app = 0 - a = 0 : population - b = 0 : population + a = 0 + b = 0 """, equations=""" r = a*I_app + b @@ -88,12 +88,14 @@ class my_exp(CompNeuroExp): For using the CompNeuroExp for OptNeuron, the run function should have one argument which is the name of the population which is automatically created - by OptNeuron, containing a single neuron of the model which should be optimized. + by OptNeuron, containing a single or multiple neurons of the neuron model which + should be optimized. 
Thus, the run function should be able to run the experiment + with a single or multiple neurons in the given population! Args: population_name (str): - name of the population which contains a single neuron, this will be - automatically provided by OptNeuron + name of the population with neurons of the tuned neuron model, this will + be automatically provided by OptNeuron Returns: results (CompNeuroExp._ResultsCl): @@ -169,6 +171,8 @@ def get_loss(results_ist: CompNeuroExp._ResultsCl, results_soll): ### results_ist, we do not use all available information here, but you could rec_ist = results_ist.recordings pop_ist = results_ist.data["population_name"] + + ### the get_loss function should always calculate the loss for neuron rank 0! neuron = 0 ### get the data for calculating the loss from the results_soll @@ -206,12 +210,12 @@ def main(): results_soll=experimental_data["results_soll"], time_step=experimental_data["time_step"], compile_folder_name="annarchy_opt_neuron_example_from_data", - method="hyperopt", + method="deap", record=["r"], ) ### run the optimization, define how often the experiment should be repeated - fit = opt.run(max_evals=1000, results_file_name="best_from_data") + fit = opt.run(max_evals=100, results_file_name="best_from_data") ### print optimized parameters, we should get around a=0.8 and b=2 print("a", fit["a"]) @@ -231,10 +235,10 @@ OptNeuron: Initialize OptNeuron... do not create anything with ANNarchy before! OptNeuron: WARNING: attributes ['I_app', 'r'] are not used/initialized. 
checking neuron_models, experiment, get_loss...Done -100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1000/1000 [00:45<00:00, 21.99trial/s, best loss: 0.31922683758789056] -a 0.7609542202637395 -b 2.171783070482363 -['a', 'b', 'loss', 'all_loss', 'std', 'results', 'results_soll'] +100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 100/100 [00:03<00:00, 26.66gen/s, best loss: 0.00000] +a 0.8000000007960777 +b 1.9999999939091158 +['a', 'b', 'logbook', 'deap_pop', 'loss', 'all_loss', 'std', 'results', 'results_soll'] ``` ## Optimize neuron model from other neuron model @@ -262,15 +266,15 @@ from run_opt_neuron_from_data import my_neuron as simple_neuron complex_neuron = Neuron( parameters=""" I_app = 0 - f = 6.0542364610842572e-002 : population - e = 3.7144041714209490e+000 : population - d = -4.9446336126026436e-001 : population - c = 9.0909599124334911e-002 : population - b = -4.4497411506061648e-003 : population - a = -6.2239117460540167e-005 : population + m0 = 1 + m1 = 2 + m2 = 3 + n0 = 1 + n1 = 0 + n2 = -1 """, equations=""" - r = a*I_app**5 + b*I_app**4 + c*I_app**3 + d*I_app**2 + e*I_app**1 + f + r = m0*I_app + n0 + m1*I_app + n1 + m2*I_app + n2 """, ) @@ -302,6 +306,9 @@ def get_loss( pop_ist = results_ist.data["population_name"] rec_soll = results_soll.recordings pop_soll = results_soll.data["population_name"] + + ### the get_loss function should always calculate the loss for neuron rank 0! For + ### both, the target and the optimized neuron model. 
neuron = 0 ### get the data for calculating the loss from the recordings of the @@ -333,14 +340,14 @@ def main(): target_neuron_model=complex_neuron, time_step=1, compile_folder_name="annarchy_opt_neuron_example_from_neuron", - method="hyperopt", + method="deap", record=["r"], ) ### run the optimization, define how often the experiment should be repeated - fit = opt.run(max_evals=1000, results_file_name="best_from_neuron") + fit = opt.run(max_evals=100, results_file_name="best_from_neuron") - ### print optimized parameters, we should get around a=2.8 and b=0.28 + ### print optimized parameters, we should get around a=6 and b=0 print("a", fit["a"]) print("b", fit["b"]) print(list(fit.keys())) @@ -358,8 +365,8 @@ OptNeuron: Initialize OptNeuron... do not create anything with ANNarchy before! OptNeuron: WARNING: attributes ['I_app', 'r'] are not used/initialized. checking neuron_models, experiment, get_loss...Done -100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1000/1000 [00:47<00:00, 21.10trial/s, best loss: 0.5607444520201438] -a 2.8009641859311354 -b 0.22697565003968234 -['a', 'b', 'loss', 'all_loss', 'std', 'results', 'results_soll'] +100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 100/100 [00:03<00:00, 26.96gen/s, best loss: 0.00000] +a 5.99999999677215 +b 2.8379565285300652e-08 +['a', 'b', 'logbook', 'deap_pop', 'loss', 'all_loss', 'std', 'results', 'results_soll'] ``` \ No newline at end of file diff --git a/docs/main/optimize_neuron.md b/docs/main/optimize_neuron.md index a683ec1..1c4a659 100644 --- a/docs/main/optimize_neuron.md +++ b/docs/main/optimize_neuron.md @@ -14,6 +14,10 @@ Used optimization methods: Tejero-Cantero et al., (2020). sbi: A toolkit for simulation-based inference. 
Journal of Open Source Software, 5(52), 2505, [https://doi.org/10.21105/joss.02505](https://doi.org/10.21105/joss.02505) +- [deap](https://github.com/deap/deap) (using the [CMAES](https://deap.readthedocs.io/en/master/api/algo.html#module-deap.cma) strategy) + + Fortin, F. A., De Rainville, F. M., Gardner, M. A. G., Parizeau, M., & Gagné, C. (2012). DEAP: Evolutionary algorithms made easy. The Journal of Machine Learning Research, 13(1), 2171-2175. + ### Example: ```python opt = OptNeuron( @@ -40,7 +44,8 @@ You have to define a [`CompNeuroExp`](define_experiment.md#CompNeuroPy.experimen !!! warning While defining the [`CompNeuroExp`](define_experiment.md#CompNeuroPy.experiment.CompNeuroExp) _run()_ function for the optimization with [`OptNeuron`](#CompNeuroPy.opt_neuron.OptNeuron) you must observe the following rules: -- the _run()_ function has to take a single argument (besides self) which contains the name of the population consiting of a single neuron of the optimized neuron model (you can use this to access the population) +- the _run()_ function has to take a single argument (besides self) which contains the name of the population consiting of a single neuron or multiple neurons of the optimized neuron model (you can use this to access the population) +- thus, the simulation has to be compatible with a population consisting of a single or multiple neurons - call _self.reset(parameters=False)_ at the beginning of the run function, thus the neuron will be in its compile state (except the paramters) at the beginning of each simulation run - always set _parameters=False_ while calling the _self.reset()_ function (otherwise the parameter optimization will not work) - besides the optimized parameters and the loss, the results of the experiment (using the optimized parameters) will be available after the optimization, you can store any additional data in the _self.data_ attribute @@ -125,6 +130,9 @@ class my_exp(CompNeuroExp): ## The get_loss_function The 
_get_loss_function_ must have two arguments. When this function is called during optimization, the first argument is always the _results_ object returned by the _experiment_, i.e. the results of the neuron you want to optimize. The second argument depends on whether you have specified _results_soll_, i.e. data to be reproduced by the _neuron_model_, or whether you have specified a _target_neuron_model_ whose results are to be reproduced by the _neuron_model_. Thus, the second argument is either _results_soll_ provided to the [`OptNeuron`](#CompNeuroPy.opt_neuron.OptNeuron) class during initialization or another _results_ object (returned by the [`CompNeuroExp`](define_experiment.md#CompNeuroPy.experiment.CompNeuroExp) _run_ function), generated with the _target_neuron_model_. +!!! warning + You always have to work with the neuron rank 0 within the _get_loss_function_! + ### Example: In this example we assume, that _results_soll_ was provided during initialization of the [`OptNeuron`](#CompNeuroPy.opt_neuron.OptNeuron) class (no _target_neuron_model_ used). ```python diff --git a/src/CompNeuroPy/examples/opt_neuron/run_opt_neuron_from_data.py b/src/CompNeuroPy/examples/opt_neuron/run_opt_neuron_from_data.py index e5eaf5c..d08251a 100644 --- a/src/CompNeuroPy/examples/opt_neuron/run_opt_neuron_from_data.py +++ b/src/CompNeuroPy/examples/opt_neuron/run_opt_neuron_from_data.py @@ -7,10 +7,10 @@ import numpy as np from ANNarchy import Neuron, dt -# TODO update docs for OptNeuron and examples, warn that neuron sjhoudl not have :population and that the experiment should be conductable for a population consisting of multiple neurons ### in this example we want to fit an ANNarchy neuron model to some data (which ca be ### somehow obtained by simulating the neuron and recording variables) for this example, -### we have the following simple neuron model +### we have the following simple neuron model, you must not use the :population flag +### for the parameters! 
my_neuron = Neuron( parameters=""" I_app = 0 @@ -84,12 +84,14 @@ def run(self, population_name): For using the CompNeuroExp for OptNeuron, the run function should have one argument which is the name of the population which is automatically created - by OptNeuron, containing a single neuron of the model which should be optimized. + by OptNeuron, containing a single or multiple neurons of the neuron model which + should be optimized. Thus, the run function should be able to run the experiment + with a single or multiple neurons in the given population! Args: population_name (str): - name of the population which contains a single neuron, this will be - automatically provided by OptNeuron + name of the population with neurons of the tuned neuron model, this will + be automatically provided by OptNeuron Returns: results (CompNeuroExp._ResultsCl): @@ -146,9 +148,6 @@ def run(self, population_name): ### Next, the OptNeuron class needs a function to calculate the loss. -# TODO add neuron_id to the arguments of the get_loss function, the fucntion has to be -# created that it works with multiple neurons in the population and the current -# neuron is selected by the neuron_id which is provided by OptNeuron def get_loss(results_ist: CompNeuroExp._ResultsCl, results_soll): """ Function which has to have the arguments results_ist and results_soll and should @@ -168,6 +167,8 @@ def get_loss(results_ist: CompNeuroExp._ResultsCl, results_soll): ### results_ist, we do not use all available information here, but you could rec_ist = results_ist.recordings pop_ist = results_ist.data["population_name"] + + ### the get_loss function should always calculate the loss for neuron rank 0! 
neuron = 0 ### get the data for calculating the loss from the results_soll @@ -210,7 +211,7 @@ def main(): ) ### run the optimization, define how often the experiment should be repeated - fit = opt.run(max_evals=1000, results_file_name="best_from_data") + fit = opt.run(max_evals=100, results_file_name="best_from_data") ### print optimized parameters, we should get around a=0.8 and b=2 print("a", fit["a"]) diff --git a/src/CompNeuroPy/examples/opt_neuron/run_opt_neuron_from_neuron.py b/src/CompNeuroPy/examples/opt_neuron/run_opt_neuron_from_neuron.py index c872509..73b413e 100644 --- a/src/CompNeuroPy/examples/opt_neuron/run_opt_neuron_from_neuron.py +++ b/src/CompNeuroPy/examples/opt_neuron/run_opt_neuron_from_neuron.py @@ -60,6 +60,9 @@ def get_loss( pop_ist = results_ist.data["population_name"] rec_soll = results_soll.recordings pop_soll = results_soll.data["population_name"] + + ### the get_loss function should always calculate the loss for neuron rank 0! For + ### both, the target and the optimized neuron model. 
neuron = 0 ### get the data for calculating the loss from the recordings of the @@ -96,7 +99,7 @@ def main(): ) ### run the optimization, define how often the experiment should be repeated - fit = opt.run(max_evals=1000, results_file_name="best_from_neuron") + fit = opt.run(max_evals=100, results_file_name="best_from_neuron") ### print optimized parameters, we should get around a=6 and b=0 print("a", fit["a"]) diff --git a/src/CompNeuroPy/opt_neuron.py b/src/CompNeuroPy/opt_neuron.py index 46b0290..a9e1fa8 100644 --- a/src/CompNeuroPy/opt_neuron.py +++ b/src/CompNeuroPy/opt_neuron.py @@ -10,7 +10,6 @@ import sys from typing import Callable, Any, Type from typingchecker import check_types -import random from tqdm import tqdm from copy import deepcopy @@ -30,24 +29,15 @@ from sbi import utils as utils from sbi.inference import SNPE, prepare_for_sbi, simulate_for_sbi - # efel - import efel - # deap - import deap - import deap.gp - import deap.benchmarks from deap import base from deap import creator from deap import tools - from deap import algorithms - from deap.tools import cxSimulatedBinaryBounded, mutPolynomialBounded - from deap.algorithms import varAnd from deap import cma except: print( - "OptNeuron: Error: You need to install hyperopt, torch, and sbi to use OptNeuron (e.g. use pip install hyperopt torch sbi))" + "OptNeuron: Error: You need to install hyperopt, torch, sbi, and deap to use OptNeuron (e.g. use pip install hyperopt torch sbi deap))" ) sys.exit() @@ -71,7 +61,7 @@ def __init__( time_step: float = 1.0, compile_folder_name: str = "annarchy_OptNeuron", num_rep_loss: int = 1, - method: str = "hyperopt", + method: str = "deap", prior=None, fv_space: list = None, record: list[str] = [], @@ -125,9 +115,8 @@ def __init__( is obtained and averaged). Default: 1. method (str, optional): - Either 'sbi' or 'hyperopt'. If 'sbi' is used, the optimization is - performed with sbi. If 'hyperopt' is used, the optimization is - performed with hyperopt. 
Default: 'hyperopt'. + Either 'deap', 'sbi', or 'hyperopt'. Defines the tool which is used for + optimization. Default: 'deap'. prior (distribution, optional): The prior distribution used by sbi. Default: None, i.e., uniform @@ -438,9 +427,22 @@ def _check_get_loss_function(self): def _wrapper_get_loss(self, results_ist, results_soll): """ - TODO + Makes it possible to use the get_loss_function with multiple neurons. The + get_loss_function should always calculate the loss for neuron rank 0! + + Args: + results_ist (object): + the results object returned by the run function of experiment (see above) + it can contain recordings of multiple neurons + results_soll (any): + the target data directly provided to OptNeuron during initialization + it always contains only the recordings of a single neuron + + Returns: + all_loss_list (list): + list of lists containing the 'all_loss_list' for each neuron """ - ### + ### loop over neurons and calculate all_loss_list for each neuron all_loss_list = [] for neuron_idx in range(self.popsize): results_ist_neuron = self._get_results_of_single_neuron( @@ -452,27 +454,45 @@ def _wrapper_get_loss(self, results_ist, results_soll): def _get_results_of_single_neuron(self, results, neuron_idx): """ - TODO + Returns a results object which contains only the recordings of the given neuron + index. The defined neuron will be neuron rank 0 in the returned results object. + + Args: + results (object): + the results object returned by the run function of experiment (see above) + it can contain recordings of multiple neurons + neuron_idx (int): + index of the neuron whose recordings should be returned + + Returns: + results_neuron (object): + the results object as returned by the run function of experiment for a + single neuron """ + ### if only one neuron, simply return results if self.popsize == 1: return results + ### if multiple neurons, return results for single neuron, do not change + ### original results! 
results_neuron = deepcopy(results) + ### loop over chunks and recordings and select only the recordings of the + ### defined neuron for chunk in range(len(results_neuron.recordings)): for rec_key in results_neuron.recordings[chunk].keys(): + ### adjust spike dictionary if "spike" in rec_key and not ("target" in rec_key): results_neuron.recordings[chunk][rec_key] = { 0: results_neuron.recordings[chunk][rec_key][neuron_idx] } + ### adjust all recorded arrays elif not ( "period" in rec_key or "parameter_dict" in rec_key or "dt" in rec_key or "target" in rec_key ): - # print(rec_key) - # print(results_neuron.recordings[chunk][rec_key].shape) results_neuron.recordings[chunk][ rec_key ] = results_neuron.recordings[chunk][rec_key][ @@ -480,6 +500,20 @@ def _get_results_of_single_neuron(self, results, neuron_idx): ].reshape( -1, 1 ) + ### adjust parameter_dict + elif "parameter_dict" in rec_key and not ("target" in rec_key): + results_neuron.recordings[chunk][rec_key] = { + parameter_dict_key: np.array( + [ + results_neuron.recordings[chunk][rec_key][ + parameter_dict_key + ][neuron_idx] + ] + ) + for parameter_dict_key in results_neuron.recordings[chunk][ + rec_key + ].keys() + } return results_neuron @@ -536,7 +570,9 @@ def _run_simulator(self, fitparams): Args: fitparams (list): - list with values for fitting parameters + list with values for fitting parameters or list of lists with values + for fitting parameters (first dimension is the number of parameters, + second dimension is the number of neurons) Returns: return_dict (dict): @@ -553,7 +589,7 @@ def _run_simulator(self, fitparams): loss_list_over_runs = [] return_results = False - for nr_run in range(self.num_rep_loss): + for _ in range(self.num_rep_loss): ### initialize for each run a new rng (--> not always have same noise in case of noisy models/simulations) rng = np.random.default_rng() ### run simulator with multiprocessign manager @@ -562,20 +598,26 @@ def _run_simulator(self, fitparams): ) proc.start() 
proc.join() - ### get simulation results/loss - loss_list_from_simulator = m_list[0] - loss_list_over_runs.append(loss_list_from_simulator) + ### get simulation results/loss (list of losses for each neuron) + loss_list_over_runs.append(m_list[0]) + ### create loss array, first dimension is the number of runs, second dimension + ### is the number of neurons loss_arr = np.array(loss_list_over_runs) - ### calculate mean and std of loss + + ### calculate mean and std of loss over runs if self.num_rep_loss > 1: ### multiple runs, mean over runs + ### -> resulting in 1D arrays for neurons loss_ret_arr = np.mean(loss_arr, 0) std_ret_arr = np.std(loss_arr, 0) else: + ### just take the first entry (the only one) + ### -> resulting in 1D arrays for neurons loss_ret_arr = loss_arr[0] - std_ret_arr = [None] * self.popsize + std_ret_arr = np.array([None] * self.popsize) + ### if only one neuron, return loss and std as single values if self.popsize == 1: loss = loss_ret_arr[0] std = std_ret_arr[0] @@ -608,6 +650,10 @@ def _sbi_simulation_wrapper(self, fitparams): if len(fitparams.shape) == 2: ### batch parameters! data = [] + ### TODO the run_simulator_function can now handle multiple parameter sets + ### and directly can return the loss for each parameter set, but the model + ### has to have the corrects size, i.e., the number of neurons has to be + ### the same as the number of parameter sets, maybe adjust sbi to this for idx in range(fitparams.shape[0]): data.append(self._run_simulator(fitparams[idx])["loss"]) else: @@ -618,13 +664,20 @@ def _sbi_simulation_wrapper(self, fitparams): def _deap_simulation_wrapper(self, population: list): """ - TODO + This function is called by deap. It calls the simulator function and + returns the loss and adjusts the format of the input parameters. 
+ + Args: + population (list): + list of lists with values for fitting parameters (first dimension is + the number of neurons, second dimension is the number of parameters) + given by deap """ - ### transpose population list + ### transpose population list (now first dimension is the number of parameters,) populationT = np.array(population).T.tolist() ### get loss list loss_list = self._run_simulator(populationT)["loss"] - + ### return loss list as list of tuples (deap needs this format) return [(loss_list[neuron_idx],) for neuron_idx in range(len(population))] def _run_simulator_with_results(self, fitparams, pop=None): @@ -635,7 +688,9 @@ def _run_simulator_with_results(self, fitparams, pop=None): Args: fitparams (list): - list with values for fitting parameters + list with values for fitting parameters or list of lists with values + for fitting parameters (first dimension is the number of parameters, + second dimension is the number of neurons) pop (str, optional): ANNarchy population name. 
Default: None, i.e., the tuned population @@ -655,12 +710,14 @@ def _run_simulator_with_results(self, fitparams, pop=None): manager = multiprocessing.Manager() m_list = manager.dict() - ### in case of noisy models, here optionally run multiple simulations, to mean the loss + ### in case of noisy models, here optionally run multiple simulations, to mean + ### the loss loss_list_over_runs = [] all_loss_list_over_runs = [] return_results = True - for nr_run in range(self.num_rep_loss): - ### initialize for each run a new rng (--> not always have same noise in case of noisy models/simulations) + for _ in range(self.num_rep_loss): + ### initialize for each run a new rng (--> not always have same noise in + ### case of noisy models/simulations) rng = np.random.default_rng() ### run simulator with multiprocessign manager proc = Process( @@ -670,22 +727,38 @@ def _run_simulator_with_results(self, fitparams, pop=None): proc.start() proc.join() ### get simulation results/loss + ### list of losses for each neuron loss_list_over_runs.append(m_list[0]) + ### results object of experiment results_ist = m_list[1] + ### list of the all_loss_list for each neuron all_loss_list_over_runs.append(m_list[2]) - all_loss_arr = np.array(all_loss_list_over_runs) + ### create loss array, first dimension is the number of runs, second dimension + ### is the number of neurons loss_arr = np.array(loss_list_over_runs) + ### create all_loss array, first dimension is the number of runs, second + ### dimension is the number of neurons, third dimension is the number of + ### individual losses + all_loss_arr = np.array(all_loss_list_over_runs) + ### calculate mean and std of loss over runs if self.num_rep_loss > 1: + ### resulting in 1D arrays for neurons loss = np.mean(loss_arr, 0) std = np.std(loss_arr) + ### resulting in 2D array for neurons (1st dim) and individual losses (2nd dim) all_loss = np.mean(all_loss_arr, 0) else: + ### just take the first entry (the only one) + ### resulting in 1D arrays 
for neurons loss = loss_arr[0] - std = [None] * self.popsize + std = np.array([None] * self.popsize) + ### resulting in 2D array for neurons (1st dim) and individual losses (2nd dim) all_loss = all_loss_arr[0] + ### if only one neuron, return loss and std as single values and all_loss as + ### single 1D array (length is the number of individual losses) if self.popsize == 1: loss = loss[0] std = std[0] @@ -724,7 +797,9 @@ def _simulator( Args: fitparams (list): - list with values for fitting parameters + list with values for fitting parameters or list of lists with values + for fitting parameters (first dimension is the number of parameters, + second dimension is the number of neurons) rng (numpy random generator): random generator for the simulation @@ -757,16 +832,20 @@ def _simulator( loss_list = [] ### wrapper_get_loss returns list (neurons) of lists (individual losses) all_loss_list = self._wrapper_get_loss(results, self.results_soll) + ### loop over neurons for all_loss in all_loss_list: + ### if all_loss is list, sum up individual losses if isinstance(all_loss, list) or isinstance( all_loss, type(np.zeros(1)) ): loss_list.append(sum(all_loss)) + ### if all_loss is single value, just append to loss_list else: loss_list.append(all_loss) else: all_loss_list = [999] * self.popsize loss_list = [999] * self.popsize + ### "return" loss and other optional things m_list[0] = loss_list if return_results: @@ -784,7 +863,7 @@ def _set_fitting_parameters( Args: fitparams (list): list with values for fitting parameters, either a single list or a list - of lists (first dimensio is the number of parameters, second dimension + of lists (first dimension is the number of parameters, second dimension is the number of neurons) pop (str, optional): ANNarchy population name. 
Default: None, i.e., the tuned population @@ -855,9 +934,18 @@ def _test_fit(self, fitparams_dict): models with multiple runs per loss calculation), and the status (STATUS_OK for hyperopt) and the results generated by the experiment. """ - return self._run_simulator_with_results( + results = self._run_simulator_with_results( [fitparams_dict[name] for name in self.fitting_variables_name_list] ) + ### if self.popsize > 1 --> transform results, loss etc. to only 1 neuron + if self.popsize > 1: + results["loss"] = results["loss"][0] + results["std"] = results["std"][0] + results["all_loss"] = results["all_loss"][0] + results["results"] = self._get_results_of_single_neuron( + results["results"], 0 + ) + return results def _run_with_sbi(self, max_evals, sbi_plot_file): """ @@ -934,7 +1022,7 @@ def _run_with_sbi(self, max_evals, sbi_plot_file): ### save plot sf.create_dir("/".join(sbi_plot_file.split("/")[:-1])) - plt.savefig(sbi_plot_file) + plt.savefig(sbi_plot_file, dpi=300) return best @@ -943,7 +1031,8 @@ def run( self, max_evals: int, results_file_name: str = "best", - sbi_plot_file: str = "posterior.svg", + sbi_plot_file: str = "posterior.png", + deap_plot_file: str = "logbook.png", ): """ Runs the optimization. @@ -960,7 +1049,11 @@ def run( sbi_plot_file (str, optional): If you use "sbi": the name of the figure which will be saved and shows - the posterior. Default: "posterior.svg". + the posterior. Default: "posterior.png". + + deap_plot_file (str, optional): + If you use "deap": the name of the figure which will be saved and shows + the logbook. Default: "logbook.png". 
Returns: best (dict): @@ -985,14 +1078,13 @@ def run( ### run optimization with sbi and return best dict best = self._run_with_sbi(max_evals, sbi_plot_file) elif self.method == "deap": - best = self._run_with_deap() + best = self._run_with_deap(max_evals, deap_plot_file) else: print("ERROR run; method should be 'hyperopt' or 'sbi'") quit() + ### obtain loss for the best parameters fit = self._test_fit(best) best["loss"] = fit["loss"] - if self.method == "sbi": - print("\tbest loss:", best["loss"]) best["all_loss"] = fit["all_loss"] best["std"] = fit["std"] best["results"] = fit["results"] @@ -1005,6 +1097,10 @@ def run( return best def _get_deap_dict(self): + """ + Prepares the deap optimization. + """ + ### get lower and upper bounds LOWER = np.array( [ min(self.variables_bounds[name]) @@ -1018,21 +1114,26 @@ def _get_deap_dict(self): ] ) + ### create the individual class creator.create("FitnessMin", base.Fitness, weights=(-1.0,)) creator.create("Individual", list, fitness=creator.FitnessMin) + ### create the toolbox toolbox = base.Toolbox() + ### function calculating losses from individuals toolbox.register("evaluate", self._deap_simulation_wrapper) - + ### search strategy strategy = cma.Strategy( centroid=(LOWER + UPPER) / 2, sigma=UPPER - LOWER, ) - + ### function generating a population during optimization toolbox.register("generate", strategy.generate, creator.Individual) + ### function updating the search strategy toolbox.register("update", strategy.update) - + ### hall of fame to track best individual i.e. parameters hof = tools.HallOfFame(1) + ### statistics to track evolution of loss stats = tools.Statistics(lambda ind: ind.fitness.values) stats.register("avg", np.mean) stats.register("std", np.std) @@ -1045,19 +1146,27 @@ def _get_deap_dict(self): "stats": stats, }, strategy.lambda_ - def _run_with_deap(self): - """ - TODO + def _run_with_deap(self, max_evals, deap_plot_file): """ + Runs the optimization with deap. 
+ + Args: + max_evals (int): + number of runs (here generations) the optimization method performs - pop, logbook = self._eaGenerateUpdate( + deap_plot_file (str): + the name of the figure which will be saved and shows the logbook + """ + ### run the search algorithm with the prepared deap_dict + pop, logbook = self._ea_generate_update( self.deap_dict["toolbox"], - ngen=500, + ngen=max_evals, stats=self.deap_dict["stats"], halloffame=self.deap_dict["hof"], verbose=False, ) + ### get best parameters, last population of inidividuals and logbook best = {} for param_idx, param_name in enumerate(self.fitting_variables_name_list): best[param_name] = self.deap_dict["hof"][0][param_idx] @@ -1066,115 +1175,96 @@ def _run_with_deap(self): ### plot logbook plt.figure() - plt.plot(logbook.select("gen"), logbook.select("min")) + plt.plot(logbook.select("gen"), logbook.select("min"), "g", label="min") + plt.plot(logbook.select("gen"), logbook.select("avg"), "k", label="avg") + plt.plot(logbook.select("gen"), logbook.select("max"), "r", label="max") + plt.legend() plt.xlabel("generation") plt.ylabel("loss") - plt.savefig("logbook.png") + sf.create_dir("/".join(deap_plot_file.split("/")[:-1])) + plt.savefig(deap_plot_file, dpi=300) return best - def _eaGenerateUpdate( + def _ea_generate_update( self, toolbox, ngen, halloffame=None, stats=None, verbose=__debug__ ): - """This is algorithm implements the ask-tell model proposed in + """ + This function is copied from deap.algorithms.eaGenerateUpdate and modified. + This is algorithm implements the ask-tell model proposed in [Colette2010]_, where ask is called `generate` and tell is called `update`. - :param toolbox: A :class:`~deap.base.Toolbox` that contains the evolution - operators. - :param ngen: The number of generation. - :param stats: A :class:`~deap.tools.Statistics` object that is updated - inplace, optional. - :param halloffame: A :class:`~deap.tools.HallOfFame` object that will - contain the best individuals, optional. 
- :param verbose: Whether or not to log the statistics. - :returns: The final population - :returns: A class:`~deap.tools.Logbook` with the statistics of the - evolution - - The algorithm generates the individuals using the :func:`toolbox.generate` - function and updates the generation method with the :func:`toolbox.update` - function. It returns the optimized population and a - :class:`~deap.tools.Logbook` with the statistics of the evolution. The - logbook will contain the generation number, the number of evaluations for - each generation and the statistics if a :class:`~deap.tools.Statistics` is - given as argument. The pseudocode goes as follow :: - - for g in range(ngen): - population = toolbox.generate() - evaluate(population) - toolbox.update(population) - - - This function expects :meth:`toolbox.generate` and :meth:`toolbox.evaluate` aliases to be - registered in the toolbox. - .. [Colette2010] Collette, Y., N. Hansen, G. Pujol, D. Salazar Aponte and R. Le Riche (2010). On Object-Oriented Programming of Optimizers - Examples in Scilab. In P. Breitkopf and R. F. Coelho, eds.: Multidisciplinary Design Optimization in Computational Mechanics, Wiley, pp. 527-565; + Args: + toolbox: + A deap Toolbox object that contains the evolution operators. + ngen: + The number of generations to run. + halloffame: + A deap HallOfFame object that will to track the best individuals + stats: + A deap Statistics object to track the statistics of the evolution. + verbose: + Whether or not to print the statistics for each gen. + + Returns: + population: + A list of individuals. + logbook: + A Logbook() object that contains the evolution statistics. 
""" + ### init logbook logbook = tools.Logbook() logbook.header = ["gen", "nevals"] + (stats.fields if stats else []) - # define progress bar 1000/1000 [00:45<00:00, 22.17trial/s, best loss: 0.08673317798888838] + ### define progress bar progress_bar = tqdm(range(ngen), total=ngen, unit="gen") + ### loop over generations for gen in progress_bar: - # Generate a new population + ### Generate a new population population = toolbox.generate() - # clip individuals of population to bounds + ### clip individuals of population to variable bounds for ind in population: for idx, param_name in enumerate(self.fitting_variables_name_list): if ind[idx] < min(self.variables_bounds[param_name]): ind[idx] = min(self.variables_bounds[param_name]) elif ind[idx] > max(self.variables_bounds[param_name]): ind[idx] = max(self.variables_bounds[param_name]) - # Evaluate the individuals + ### Evaluate the individuals (here whole population at once) fitnesses = toolbox.evaluate(population) for ind, fit in zip(population, fitnesses): ind.fitness.values = fit + ### check if nan in population for ind in population: nan_in_pop = np.isnan(ind.fitness.values[0]) + ### Update the hall of fame with the generated individuals if halloffame is not None and not nan_in_pop: halloffame.update(population) - # Update the strategy with the evaluated individuals + ### Update the strategy with the evaluated individuals toolbox.update(population) + ### Append the current generation statistics to the logbook record = stats.compile(population) if stats is not None else {} logbook.record(gen=gen, nevals=len(population), **record) if verbose: print(logbook.stream) - # update progress bar with current best loss + + ### update progress bar with current best loss progress_bar.set_postfix_str( f"best loss: {halloffame[0].fitness.values[0]:.5f}" ) return population, logbook - def init_uniform_deap(self): - """ - Returns: - uniform (list): - list with values for fitting parameters - """ - lower_list = [ - 
min(self.variables_bounds[name]) - for name in self.fitting_variables_name_list - ] - upper_list = [ - max(self.variables_bounds[name]) - for name in self.fitting_variables_name_list - ] - return [ - random.uniform(lower_list, upper_list) - for _ in range(len(self.fitting_variables_name_list)) - ] - ### old name for backward compatibility, TODO remove opt_neuron = OptNeuron From 1977727bff1dd26e6e04bca83f2e777c6468d6bb Mon Sep 17 00:00:00 2001 From: olmai Date: Thu, 18 Jan 2024 11:03:38 +0100 Subject: [PATCH 08/44] removed site --- site/404.html | 1007 -- site/additional/analysis_functions/index.html | 6556 --------- site/additional/extra_functions/index.html | 4151 ------ site/additional/model_functions/index.html | 1618 --- .../simulation_functions/index.html | 2008 --- .../simulation_requirements/index.html | 1489 -- site/additional/system_functions/index.html | 2004 --- site/assets/_mkdocstrings.css | 64 - .../assets/javascripts/bundle.d7c377c4.min.js | 29 - .../javascripts/bundle.d7c377c4.min.js.map | 7 - .../javascripts/lunr/min/lunr.ar.min.js | 1 - .../javascripts/lunr/min/lunr.da.min.js | 18 - .../javascripts/lunr/min/lunr.de.min.js | 18 - .../javascripts/lunr/min/lunr.du.min.js | 18 - .../javascripts/lunr/min/lunr.el.min.js | 1 - .../javascripts/lunr/min/lunr.es.min.js | 18 - .../javascripts/lunr/min/lunr.fi.min.js | 18 - .../javascripts/lunr/min/lunr.fr.min.js | 18 - .../javascripts/lunr/min/lunr.he.min.js | 1 - .../javascripts/lunr/min/lunr.hi.min.js | 1 - .../javascripts/lunr/min/lunr.hu.min.js | 18 - .../javascripts/lunr/min/lunr.hy.min.js | 1 - .../javascripts/lunr/min/lunr.it.min.js | 18 - .../javascripts/lunr/min/lunr.ja.min.js | 1 - .../javascripts/lunr/min/lunr.jp.min.js | 1 - .../javascripts/lunr/min/lunr.kn.min.js | 1 - .../javascripts/lunr/min/lunr.ko.min.js | 1 - .../javascripts/lunr/min/lunr.multi.min.js | 1 - .../javascripts/lunr/min/lunr.nl.min.js | 18 - .../javascripts/lunr/min/lunr.no.min.js | 18 - .../javascripts/lunr/min/lunr.pt.min.js 
| 18 - .../javascripts/lunr/min/lunr.ro.min.js | 18 - .../javascripts/lunr/min/lunr.ru.min.js | 18 - .../javascripts/lunr/min/lunr.sa.min.js | 1 - .../lunr/min/lunr.stemmer.support.min.js | 1 - .../javascripts/lunr/min/lunr.sv.min.js | 18 - .../javascripts/lunr/min/lunr.ta.min.js | 1 - .../javascripts/lunr/min/lunr.te.min.js | 1 - .../javascripts/lunr/min/lunr.th.min.js | 1 - .../javascripts/lunr/min/lunr.tr.min.js | 18 - .../javascripts/lunr/min/lunr.vi.min.js | 1 - .../javascripts/lunr/min/lunr.zh.min.js | 1 - site/assets/javascripts/lunr/tinyseg.js | 206 - site/assets/javascripts/lunr/wordcut.js | 6708 --------- .../workers/search.f886a092.min.js | 42 - .../workers/search.f886a092.min.js.map | 7 - site/assets/stylesheets/main.45e1311d.min.css | 1 - .../stylesheets/main.45e1311d.min.css.map | 1 - .../stylesheets/palette.06af60db.min.css | 1 - .../stylesheets/palette.06af60db.min.css.map | 1 - site/built_in/models/index.html | 3495 ----- site/built_in/neuron_models/index.html | 11995 ---------------- site/built_in/synapse_models/index.html | 1242 -- site/examples/dbs/index.html | 2430 ---- site/examples/experiment/index.html | 1393 -- site/examples/generate_models/index.html | 1244 -- site/examples/monitor_recordings/index.html | 1532 -- site/examples/opt_neuron/index.html | 1603 --- site/examples/plot_recordings/index.html | 1233 -- .../run_and_monitor_simulations/index.html | 1393 -- site/index.html | 1049 -- site/installation/index.html | 1058 -- site/license/index.html | 1112 -- site/main/dbs_stimulator/index.html | 4209 ------ site/main/define_experiment/index.html | 2134 --- site/main/generate_models/index.html | 3026 ---- site/main/generate_simulations/index.html | 3641 ----- site/main/model_configurator/index.html | 1041 -- site/main/monitors_recordings/index.html | 5392 ------- site/main/optimize_neuron/index.html | 3945 ----- site/objects.inv | Bin 1756 -> 0 bytes site/search/search_index.json | 1 - site/sitemap.xml | 3 - site/sitemap.xml.gz | Bin 127 -> 
0 bytes site/stylesheets/extra.css | 3 - 75 files changed, 80361 deletions(-) delete mode 100644 site/404.html delete mode 100644 site/additional/analysis_functions/index.html delete mode 100644 site/additional/extra_functions/index.html delete mode 100644 site/additional/model_functions/index.html delete mode 100644 site/additional/simulation_functions/index.html delete mode 100644 site/additional/simulation_requirements/index.html delete mode 100644 site/additional/system_functions/index.html delete mode 100644 site/assets/_mkdocstrings.css delete mode 100644 site/assets/javascripts/bundle.d7c377c4.min.js delete mode 100644 site/assets/javascripts/bundle.d7c377c4.min.js.map delete mode 100644 site/assets/javascripts/lunr/min/lunr.ar.min.js delete mode 100644 site/assets/javascripts/lunr/min/lunr.da.min.js delete mode 100644 site/assets/javascripts/lunr/min/lunr.de.min.js delete mode 100644 site/assets/javascripts/lunr/min/lunr.du.min.js delete mode 100644 site/assets/javascripts/lunr/min/lunr.el.min.js delete mode 100644 site/assets/javascripts/lunr/min/lunr.es.min.js delete mode 100644 site/assets/javascripts/lunr/min/lunr.fi.min.js delete mode 100644 site/assets/javascripts/lunr/min/lunr.fr.min.js delete mode 100644 site/assets/javascripts/lunr/min/lunr.he.min.js delete mode 100644 site/assets/javascripts/lunr/min/lunr.hi.min.js delete mode 100644 site/assets/javascripts/lunr/min/lunr.hu.min.js delete mode 100644 site/assets/javascripts/lunr/min/lunr.hy.min.js delete mode 100644 site/assets/javascripts/lunr/min/lunr.it.min.js delete mode 100644 site/assets/javascripts/lunr/min/lunr.ja.min.js delete mode 100644 site/assets/javascripts/lunr/min/lunr.jp.min.js delete mode 100644 site/assets/javascripts/lunr/min/lunr.kn.min.js delete mode 100644 site/assets/javascripts/lunr/min/lunr.ko.min.js delete mode 100644 site/assets/javascripts/lunr/min/lunr.multi.min.js delete mode 100644 site/assets/javascripts/lunr/min/lunr.nl.min.js delete mode 100644 
site/assets/javascripts/lunr/min/lunr.no.min.js delete mode 100644 site/assets/javascripts/lunr/min/lunr.pt.min.js delete mode 100644 site/assets/javascripts/lunr/min/lunr.ro.min.js delete mode 100644 site/assets/javascripts/lunr/min/lunr.ru.min.js delete mode 100644 site/assets/javascripts/lunr/min/lunr.sa.min.js delete mode 100644 site/assets/javascripts/lunr/min/lunr.stemmer.support.min.js delete mode 100644 site/assets/javascripts/lunr/min/lunr.sv.min.js delete mode 100644 site/assets/javascripts/lunr/min/lunr.ta.min.js delete mode 100644 site/assets/javascripts/lunr/min/lunr.te.min.js delete mode 100644 site/assets/javascripts/lunr/min/lunr.th.min.js delete mode 100644 site/assets/javascripts/lunr/min/lunr.tr.min.js delete mode 100644 site/assets/javascripts/lunr/min/lunr.vi.min.js delete mode 100644 site/assets/javascripts/lunr/min/lunr.zh.min.js delete mode 100644 site/assets/javascripts/lunr/tinyseg.js delete mode 100644 site/assets/javascripts/lunr/wordcut.js delete mode 100644 site/assets/javascripts/workers/search.f886a092.min.js delete mode 100644 site/assets/javascripts/workers/search.f886a092.min.js.map delete mode 100644 site/assets/stylesheets/main.45e1311d.min.css delete mode 100644 site/assets/stylesheets/main.45e1311d.min.css.map delete mode 100644 site/assets/stylesheets/palette.06af60db.min.css delete mode 100644 site/assets/stylesheets/palette.06af60db.min.css.map delete mode 100644 site/built_in/models/index.html delete mode 100644 site/built_in/neuron_models/index.html delete mode 100644 site/built_in/synapse_models/index.html delete mode 100644 site/examples/dbs/index.html delete mode 100644 site/examples/experiment/index.html delete mode 100644 site/examples/generate_models/index.html delete mode 100644 site/examples/monitor_recordings/index.html delete mode 100644 site/examples/opt_neuron/index.html delete mode 100644 site/examples/plot_recordings/index.html delete mode 100644 site/examples/run_and_monitor_simulations/index.html delete 
mode 100644 site/index.html delete mode 100644 site/installation/index.html delete mode 100644 site/license/index.html delete mode 100644 site/main/dbs_stimulator/index.html delete mode 100644 site/main/define_experiment/index.html delete mode 100644 site/main/generate_models/index.html delete mode 100644 site/main/generate_simulations/index.html delete mode 100644 site/main/model_configurator/index.html delete mode 100644 site/main/monitors_recordings/index.html delete mode 100644 site/main/optimize_neuron/index.html delete mode 100644 site/objects.inv delete mode 100644 site/search/search_index.json delete mode 100644 site/sitemap.xml delete mode 100644 site/sitemap.xml.gz delete mode 100644 site/stylesheets/extra.css diff --git a/site/404.html b/site/404.html deleted file mode 100644 index 8f1a8f2..0000000 --- a/site/404.html +++ /dev/null @@ -1,1007 +0,0 @@ - - - - - - - - - - - - - - - - - - - CompNeuroPy - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
-
- -
- - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - -
-
-
- - - -
-
-
- - - -
-
- -

404 - Not found

- -
-
- - - -
- -
- - - -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/site/additional/analysis_functions/index.html b/site/additional/analysis_functions/index.html deleted file mode 100644 index f98a77f..0000000 --- a/site/additional/analysis_functions/index.html +++ /dev/null @@ -1,6556 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - Analysis Functions - CompNeuroPy - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - - - - - -
-
- - - - - - - -

Analysis Functions

- -
- - - - -
- - - -
- - - - - - - - -
- - - - -

- PlotRecordings - - -#

- - -
- - -

Plot recordings from CompNeuroMonitors.

-

TODO: CHeck if there are memory issues with large recordings or many subplots.

- -
- Source code in CompNeuroPy/analysis_functions.py -
1787
-1788
-1789
-1790
-1791
-1792
-1793
-1794
-1795
-1796
-1797
-1798
-1799
-1800
-1801
-1802
-1803
-1804
-1805
-1806
-1807
-1808
-1809
-1810
-1811
-1812
-1813
-1814
-1815
-1816
-1817
-1818
-1819
-1820
-1821
-1822
-1823
-1824
-1825
-1826
-1827
-1828
-1829
-1830
-1831
-1832
-1833
-1834
-1835
-1836
-1837
-1838
-1839
-1840
-1841
-1842
-1843
-1844
-1845
-1846
-1847
-1848
-1849
-1850
-1851
-1852
-1853
-1854
-1855
-1856
-1857
-1858
-1859
-1860
-1861
-1862
-1863
-1864
-1865
-1866
-1867
-1868
-1869
-1870
-1871
-1872
-1873
-1874
-1875
-1876
-1877
-1878
-1879
-1880
-1881
-1882
-1883
-1884
-1885
-1886
-1887
-1888
-1889
-1890
-1891
-1892
-1893
-1894
-1895
-1896
-1897
-1898
-1899
-1900
-1901
-1902
-1903
-1904
-1905
-1906
-1907
-1908
-1909
-1910
-1911
-1912
-1913
-1914
-1915
-1916
-1917
-1918
-1919
-1920
-1921
-1922
-1923
-1924
-1925
-1926
-1927
-1928
-1929
-1930
-1931
-1932
-1933
-1934
-1935
-1936
-1937
-1938
-1939
-1940
-1941
-1942
-1943
-1944
-1945
-1946
-1947
-1948
-1949
-1950
-1951
-1952
-1953
-1954
-1955
-1956
-1957
-1958
-1959
-1960
-1961
-1962
-1963
-1964
-1965
-1966
-1967
-1968
-1969
-1970
-1971
-1972
-1973
-1974
-1975
-1976
-1977
-1978
-1979
-1980
-1981
-1982
-1983
-1984
-1985
-1986
-1987
-1988
-1989
-1990
-1991
-1992
-1993
-1994
-1995
-1996
-1997
-1998
-1999
-2000
-2001
-2002
-2003
-2004
-2005
-2006
-2007
-2008
-2009
-2010
-2011
-2012
-2013
-2014
-2015
-2016
-2017
-2018
-2019
-2020
-2021
-2022
-2023
-2024
-2025
-2026
-2027
-2028
-2029
-2030
-2031
-2032
-2033
-2034
-2035
-2036
-2037
-2038
-2039
-2040
-2041
-2042
-2043
-2044
-2045
-2046
-2047
-2048
-2049
-2050
-2051
-2052
-2053
-2054
-2055
-2056
-2057
-2058
-2059
-2060
-2061
-2062
-2063
-2064
-2065
-2066
-2067
-2068
-2069
-2070
-2071
-2072
-2073
-2074
-2075
-2076
-2077
-2078
-2079
-2080
-2081
-2082
-2083
-2084
-2085
-2086
-2087
-2088
-2089
-2090
-2091
-2092
-2093
-2094
-2095
-2096
-2097
-2098
-2099
-2100
-2101
-2102
-2103
-2104
-2105
-2106
-2107
-2108
-2109
-2110
-2111
-2112
-2113
-2114
-2115
-2116
-2117
-2118
-2119
-2120
-2121
-2122
-2123
-2124
-2125
-2126
-2127
-2128
-2129
-2130
-2131
-2132
-2133
-2134
-2135
-2136
-2137
-2138
-2139
-2140
-2141
-2142
-2143
-2144
-2145
-2146
-2147
-2148
-2149
-2150
-2151
-2152
-2153
-2154
-2155
-2156
-2157
-2158
-2159
-2160
-2161
-2162
-2163
-2164
-2165
-2166
-2167
-2168
-2169
-2170
-2171
-2172
-2173
-2174
-2175
-2176
-2177
-2178
-2179
-2180
-2181
-2182
-2183
-2184
-2185
-2186
-2187
-2188
-2189
-2190
-2191
-2192
-2193
-2194
-2195
-2196
-2197
-2198
-2199
-2200
-2201
-2202
-2203
-2204
-2205
-2206
-2207
-2208
-2209
-2210
-2211
-2212
-2213
-2214
-2215
-2216
-2217
-2218
-2219
-2220
-2221
-2222
-2223
-2224
-2225
-2226
-2227
-2228
-2229
-2230
-2231
-2232
-2233
-2234
-2235
-2236
-2237
-2238
-2239
-2240
-2241
-2242
-2243
-2244
-2245
-2246
-2247
-2248
-2249
-2250
-2251
-2252
-2253
-2254
-2255
-2256
-2257
-2258
-2259
-2260
-2261
-2262
-2263
-2264
-2265
-2266
-2267
-2268
-2269
-2270
-2271
-2272
-2273
-2274
-2275
-2276
-2277
-2278
-2279
-2280
-2281
-2282
-2283
-2284
-2285
-2286
-2287
-2288
-2289
-2290
-2291
-2292
-2293
-2294
-2295
-2296
-2297
-2298
-2299
-2300
-2301
-2302
-2303
-2304
-2305
-2306
-2307
-2308
-2309
-2310
-2311
-2312
-2313
-2314
-2315
-2316
-2317
-2318
-2319
-2320
-2321
-2322
-2323
-2324
-2325
-2326
-2327
-2328
-2329
-2330
-2331
-2332
-2333
-2334
-2335
-2336
-2337
-2338
-2339
-2340
-2341
-2342
-2343
-2344
-2345
-2346
-2347
-2348
-2349
-2350
-2351
-2352
-2353
-2354
-2355
-2356
-2357
-2358
-2359
-2360
-2361
-2362
-2363
-2364
-2365
-2366
-2367
-2368
-2369
-2370
-2371
-2372
-2373
-2374
-2375
-2376
-2377
-2378
-2379
-2380
-2381
-2382
-2383
-2384
-2385
-2386
-2387
-2388
-2389
-2390
-2391
-2392
-2393
-2394
-2395
-2396
-2397
-2398
-2399
-2400
-2401
-2402
-2403
-2404
-2405
-2406
-2407
-2408
-2409
-2410
-2411
-2412
-2413
-2414
-2415
-2416
-2417
-2418
-2419
-2420
-2421
-2422
-2423
-2424
-2425
-2426
-2427
-2428
-2429
-2430
-2431
-2432
-2433
-2434
-2435
-2436
-2437
-2438
-2439
-2440
-2441
-2442
-2443
-2444
-2445
-2446
-2447
-2448
-2449
-2450
-2451
-2452
-2453
-2454
-2455
-2456
-2457
-2458
-2459
-2460
-2461
-2462
-2463
-2464
-2465
-2466
-2467
-2468
-2469
-2470
-2471
-2472
-2473
-2474
-2475
-2476
-2477
-2478
-2479
-2480
-2481
-2482
-2483
-2484
-2485
-2486
-2487
-2488
-2489
-2490
-2491
-2492
-2493
-2494
-2495
-2496
-2497
-2498
-2499
-2500
-2501
-2502
-2503
-2504
-2505
-2506
-2507
-2508
-2509
-2510
-2511
-2512
-2513
-2514
-2515
-2516
-2517
-2518
-2519
-2520
-2521
-2522
-2523
-2524
-2525
-2526
-2527
-2528
-2529
-2530
-2531
-2532
-2533
-2534
-2535
-2536
-2537
-2538
-2539
-2540
-2541
-2542
-2543
-2544
-2545
-2546
-2547
-2548
-2549
-2550
-2551
-2552
-2553
-2554
-2555
-2556
-2557
-2558
-2559
-2560
-2561
-2562
-2563
-2564
-2565
-2566
-2567
-2568
-2569
-2570
-2571
-2572
-2573
-2574
-2575
-2576
-2577
-2578
-2579
-2580
-2581
-2582
-2583
-2584
-2585
-2586
-2587
-2588
-2589
-2590
-2591
-2592
-2593
-2594
-2595
-2596
-2597
-2598
-2599
-2600
-2601
-2602
-2603
-2604
-2605
-2606
-2607
-2608
-2609
-2610
-2611
-2612
-2613
-2614
-2615
-2616
-2617
-2618
-2619
-2620
-2621
-2622
-2623
-2624
-2625
-2626
-2627
-2628
-2629
class PlotRecordings:
-    """
-    Plot recordings from CompNeuroMonitors.
-
-    TODO: CHeck if there are memory issues with large recordings or many subplots.
-    """
-
-    @check_types()
-    def __init__(
-        self,
-        figname: str,
-        recordings: list[dict],
-        recording_times: RecordingTimes,
-        shape: tuple[int, int],
-        plan: dict,
-        chunk: int = 0,
-        time_lim: None | tuple[float, float] = None,
-        dpi: int = 300,
-    ) -> None:
-        """
-        Create and save the plot.
-
-        Args:
-            figname (str):
-                The name of the figure to be saved.
-            recordings (list):
-                A recordings list obtained from CompNeuroMonitors.
-            recording_times (RecordingTimes):
-                The RecordingTimes object containing the recording times obtained from
-                CompNeuroMonitors.
-            shape (tuple):
-                The shape of the figure. (number of rows, number of columns)
-            plan (dict):
-                Defines which recordings are plotted in which subplot and how. The plan
-                has to contain the following keys: "position", "compartment",
-                "variable", "format". The values of the keys have to be lists of the
-                same length. The values of the key "position" have to be integers
-                between 1 and the number of subplots (defined by shape). The values of
-                the key "compartment" have to be the names of the model compartments as
-                strings. The values of the key "variable" have to be strings containing
-                the names of the recorded variables or equations using the recorded
-                variables. The values of the key "format" have to be strings defining
-                how the recordings are plotted. The following formats are available for
-                spike recordings: "raster", "mean", "hybrid", "interspike". The
-                following formats are available for other recordings: "line",
-                "line_mean", "matrix", "matrix_mean".
-            chunk (int, optional):
-                The chunk of the recordings to be plotted. Default: 0.
-            time_lim (tuple, optional):
-                Defines the x-axis for all subplots. The tuple contains two
-                numbers: start and end time in ms. The times have to be
-                within the chunk. Default: None, i.e., the whole chunk is plotted.
-            dpi (int, optional):
-                The dpi of the saved figure. Default: 300.
-        """
-        ### print start message
-        print(f"Generate fig {figname}", end="... ", flush=True)
-
-        ### set attributes
-        self.figname = figname
-        self.recordings = recordings
-        self.recording_times = recording_times
-        self.shape = shape
-        self.plan = plan
-        self.chunk = chunk
-        self.time_lim = time_lim
-        self.dpi = dpi
-
-        ### get available compartments (from recordings) and recorded variables for each
-        ### compartment
-        (
-            self._compartment_list,
-            self._compartment_recordings_dict,
-        ) = self._get_compartment_recordings()
-
-        ### check plan keys and values
-        self._check_plan()
-
-        ### get start and end time for plotting and timestep
-        self._start_time, self._end_time, self._time_step = self._get_start_end_time()
-
-        ### get compbined time array for recordings of each compartment
-        self._time_arr_list = self._get_time_arr_list()
-
-        ### get data from recordings for each subplot
-        self._raw_data_list = self._get_raw_data_list()
-
-        ### create plot
-        self._plot()
-
-        ### print end message
-        print("Done\n")
-
-    def _get_compartment_recordings(self):
-        """
-        Get available compartment names from recordings.
-        Get recorded variables (names) for each compartment.
-
-        Returns:
-            compartment_list (list):
-                List of compartment names.
-            compartment_recordings_dict (dict):
-                Dictionary with compartment names as keys and list of recorded variables
-                as values.
-        """
-        ### check if chunk is valid
-        if self.chunk >= len(self.recordings) or self.chunk < 0:
-            print(
-                f"\nERROR PlotRecordings: chunk {self.chunk} is not valid.\n"
-                f"Number of chunks: {len(self.recordings)}\n"
-            )
-            quit()
-
-        ### get compartment names and recorded variables for each compartment
-        compartment_list = []
-        compartment_recordings_dict = {}
-        for recordings_key in self.recordings[self.chunk].keys():
-            if ";" not in recordings_key:
-                continue
-
-            ### get compartment
-            compartment, recorded_variable = recordings_key.split(";")
-            if compartment not in compartment_list:
-                compartment_list.append(compartment)
-                compartment_recordings_dict[compartment] = []
-
-            ### get recordings for compartment
-            if recorded_variable != "period" and recorded_variable != "parameter_dict":
-                compartment_recordings_dict[compartment].append(recorded_variable)
-
-        return compartment_list, compartment_recordings_dict
-
-    def _check_plan(self):
-        """
-        Check if plan is valid.
-        """
-
-        ### check if plan keys are valid
-        valid_keys = ["position", "compartment", "variable", "format"]
-        for key in self.plan.keys():
-            if key not in valid_keys:
-                print(
-                    f"\nERROR PlotRecordings: plan key {key} is not valid.\n"
-                    f"Valid keys are {valid_keys}.\n"
-                )
-                quit()
-
-        ### check if plan values are valid (have same length)
-        for key in self.plan.keys():
-            if len(self.plan[key]) != len(self.plan["position"]):
-                print(
-                    f"\nERROR PlotRecordings: plan value of key '{key}' has not the same length as plan value of key 'position'.\n"
-                )
-                quit()
-
-        ### check if plan positions are valid
-        ### check if min and max are valid
-        if get_minimum(self.plan["position"]) < 1:
-            print(
-                f"\nERROR PlotRecordings: plan position has to be >= 1.\n"
-                f"plan position: {self.plan['position']}\n"
-            )
-            quit()
-        if get_maximum(self.plan["position"]) > self.shape[0] * self.shape[1]:
-            print(
-                f"\nERROR PlotRecordings: plan position has to be <= shape[0] * shape[1].\n"
-                f"plan position: {self.plan['position']}\n"
-                f"shape: {self.shape}\n"
-            )
-            quit()
-        ### check if plan positions are unique
-        if len(np.unique(self.plan["position"])) != len(self.plan["position"]):
-            print(
-                f"\nERROR PlotRecordings: plan position has to be unique.\n"
-                f"plan position: {self.plan['position']}\n"
-            )
-            quit()
-
-        ### check if plan compartments are valid
-        for compartment in self.plan["compartment"]:
-            if compartment not in self._compartment_list:
-                print(
-                    f"\nERROR PlotRecordings: plan compartment {compartment} is not valid.\n"
-                    f"Valid compartments are {self._compartment_list}.\n"
-                )
-                quit()
-
-        ### check if plan variables are valid
-        for plot_idx in range(len(self.plan["variable"])):
-            compartment = self.plan["compartment"][plot_idx]
-            variable: str = self.plan["variable"][plot_idx]
-            ### check if variable contains a mathematical expression
-            if "+" in variable or "-" in variable or "*" in variable or "/" in variable:
-                ### separate variables
-                variable = variable.replace(" ", "")
-                variable = variable.replace("+", " ")
-                variable = variable.replace("-", " ")
-                variable = variable.replace("*", " ")
-                variable = variable.replace("/", " ")
-                variables_list = variable.split(" ")
-                ### remove numbers
-                variables_list = [var for var in variables_list if not var.isdigit()]
-                ### spike and axon_spike are not allowed in equations
-                if "spike" in variables_list or "axon_spike" in variables_list:
-                    print(
-                        f"\nERROR PlotRecordings: plan variable {variable} is not valid.\n"
-                        f"Variables 'spike' and 'axon_spike' are not allowed in equations.\n"
-                    )
-                    quit()
-            else:
-                variables_list = [variable]
-            ### check if variables are valid
-            for var in variables_list:
-                if var not in self._compartment_recordings_dict[compartment]:
-                    print(
-                        f"\nERROR PlotRecordings: plan variable {var} is not valid for compartment {compartment}.\n"
-                        f"Valid variables are {self._compartment_recordings_dict[compartment]}.\n"
-                    )
-                    quit()
-
-        ### check if plan formats are valid
-        valid_formats_spike = ["raster", "mean", "hybrid", "interspike", "cv"]
-        valid_formats_other = ["line", "line_mean", "matrix", "matrix_mean"]
-        for plot_idx in range(len(self.plan["format"])):
-            variable = self.plan["variable"][plot_idx]
-            format = self.plan["format"][plot_idx]
-            ### check if format is valid
-            if variable == "spike" or variable == "axon_spike":
-                if format not in valid_formats_spike:
-                    print(
-                        f"\nERROR PlotRecordings: plan format {format} is not valid for variable {variable}.\n"
-                        f"Valid formats are {valid_formats_spike}.\n"
-                    )
-                    quit()
-            else:
-                if format not in valid_formats_other:
-                    print(
-                        f"\nERROR PlotRecordings: plan format {format} is not valid for variable {variable}.\n"
-                        f"Valid formats are {valid_formats_other}.\n"
-                    )
-                    quit()
-
-    def _get_start_end_time(self):
-        """
-        Check if time_lim is given and valid. If it's not given get it from recordings.
-        Get timestep from recordings.
-
-        Returns:
-            start_time (float):
-                The start time of the recordings.
-            end_time (float):
-                The end time of the recordings.
-            time_step (float):
-                The timestep of the recordings.
-
-        Raises:
-            ValueError: If given time_lim is not within the chunk.
-        """
-
-        chunk_time_lims = self.recording_times.time_lims(chunk=self.chunk)
-        ### check if time_lim is given
-        if isinstance(self.time_lim, type(None)):
-            ### get start and end time from recording_times
-            start_time, end_time = chunk_time_lims
-        else:
-            ### check if time_lim is within chunk
-            if (
-                self.time_lim[0] < chunk_time_lims[0]
-                or self.time_lim[1] > chunk_time_lims[1]
-            ):
-                raise ValueError(
-                    f"\nERROR PlotRecordings: time_lim {self.time_lim} is not within chunk.\n"
-                    f"chunk time lims: {chunk_time_lims[0]} - {chunk_time_lims[1]}\n"
-                )
-            start_time, end_time = self.time_lim
-
-        ### get timestep
-        time_step = self.recordings[self.chunk]["dt"]
-
-        return start_time, end_time, time_step
-
-    def _get_time_arr_list(self):
-        """
-        Get combined time array for each subplot of plan.
-
-        Returns:
-            time_arr_list (list):
-                List with time arrays for each subplot of plan.
-        """
-        ### loop over compartments of plan
-        time_arr_dict = {}
-        for compartment in np.unique(self.plan["compartment"]):
-            actual_period = self.recordings[self.chunk][f"{compartment};period"]
-
-            ### get time array for each recording period of the chunk
-            time_arr_period_list = []
-            nr_periods = self.recording_times._get_nr_periods(
-                chunk=self.chunk, compartment=compartment
-            )
-            for period in range(nr_periods):
-                time_lims = self.recording_times.time_lims(
-                    chunk=self.chunk, compartment=compartment, period=period
-                )
-                start_time_preiod = time_lims[0]
-                end_time_period = round(
-                    time_lims[1] + actual_period, get_number_of_decimals(actual_period)
-                )
-                time_arr_period_list.append(
-                    np.arange(start_time_preiod, end_time_period, actual_period)
-                )
-
-            ### combine time arrays of periods
-            time_arr_dict[compartment] = np.concatenate(time_arr_period_list)
-
-        ### get time array for each subplot of plan
-        time_arr_list = []
-        for plot_idx in range(len(self.plan["position"])):
-            compartment = self.plan["compartment"][plot_idx]
-            time_arr_list.append(time_arr_dict[compartment])
-
-        return time_arr_list
-
-    def _get_raw_data_list(self):
-        """
-        Get raw data for each subplot of plan.
-
-        Returns:
-            data_list (dict):
-                List with data for each subplot of plan.
-        """
-        data_list = []
-        ### loop over subplots of plan
-        for plot_idx in range(len(self.plan["position"])):
-            compartment = self.plan["compartment"][plot_idx]
-            variable: str = self.plan["variable"][plot_idx]
-            ### check if variable is equation
-            if "+" in variable or "-" in variable or "*" in variable or "/" in variable:
-                ### get the values of the recorded variables of the compartment, store
-                ### them in dict
-                value_dict = {
-                    rec_var_name: self.recordings[self.chunk][
-                        f"{compartment};{rec_var_name}"
-                    ]
-                    for rec_var_name in self._compartment_recordings_dict[compartment]
-                }
-                ### evaluate equation with these values
-                variable_data = ef.evaluate_expression_with_dict(
-                    expression=variable, value_dict=value_dict
-                )
-            else:
-                ### get data from recordings
-                variable_data = self.recordings[self.chunk][f"{compartment};{variable}"]
-            ### append data to data_list
-            data_list.append(variable_data)
-
-        return data_list
-
-    def _plot(self):
-        """
-        Create plot.
-        """
-        ### create figure
-        plt.figure(figsize=([6.4 * self.shape[1], 4.8 * self.shape[0]]))
-
-        ### loop over subplots of plan
-        for plot_idx in range(len(self.plan["position"])):
-            ### create subplot
-            plt.subplot(self.shape[0], self.shape[1], self.plan["position"][plot_idx])
-
-            ### fill subplot
-            self._fill_subplot(plot_idx)
-
-        ### save figure
-        plt.tight_layout()
-        figname_parts = self.figname.split("/")
-        if len(figname_parts) > 1:
-            save_dir = "/".join(figname_parts[:-1])
-            sf.create_dir(save_dir)
-        plt.savefig(self.figname, dpi=self.dpi)
-        plt.close()
-
-    def _fill_subplot(self, plot_idx):
-        """
-        Fill subplot with data.
-
-        Args:
-            plot_idx (int):
-                The index of the subplot in the plan.
-        """
-        variable: str = self.plan["variable"][plot_idx]
-
-        ### general subplot settings
-        plt.xlabel("time [ms]")
-        plt.xlim(self._start_time, self._end_time)
-
-        if variable == "spike" or variable == "axon_spike":
-            ### spike recordings
-            self._fill_subplot_spike(plot_idx)
-        else:
-            ### other (array) recordings
-            self._fill_subplot_other(plot_idx)
-
-    def _fill_subplot_spike(self, plot_idx):
-        """
-        Fill subplot with spike data.
-
-        Args:
-            plot_idx (int):
-                The index of the subplot in the plan.
-        """
-        ### get data
-        compartment = self.plan["compartment"][plot_idx]
-        format: str = self.plan["format"][plot_idx]
-        data = self._raw_data_list[plot_idx]
-
-        ### get spike times and ranks
-        spike_times, spike_ranks = my_raster_plot(data)
-        spike_times = spike_times * self._time_step
-
-        ### get spikes within time_lims
-        mask: np.ndarray = (
-            (spike_times >= self._start_time).astype(int)
-            * (spike_times <= self._end_time).astype(int)
-        ).astype(bool)
-
-        ### check if there are no spikes
-        if mask.size == 0:
-            ### set title
-            plt.title(f"Spikes {compartment}")
-            ### print warning
-            print(
-                f"\n  WARNING PlotRecordings: {compartment} does not contain any spikes in the given time interval."
-            )
-            ### plot text
-            plt.text(
-                0.5,
-                0.5,
-                f"{compartment} does not contain any spikes.",
-                va="center",
-                ha="center",
-            )
-            plt.xticks([])
-            plt.yticks([])
-            plt.xlim(0, 1)
-            plt.xlabel("")
-            return
-
-        ### plot raster plot
-        if format == "raster" or format == "hybrid":
-            self._raster_plot(compartment, spike_ranks, spike_times, mask)
-
-        ### plot mean firing rate
-        if format == "mean" or format == "hybrid":
-            self._mean_firing_rate_plot(compartment, data, format)
-
-        ### plot interspike interval histogram
-        if format == "interspike":
-            self._interspike_interval_plot(compartment, data)
-
-        ### plot coefficient of variation histogram
-        if format == "cv":
-            self._coefficient_of_variation_plot(compartment, data)
-
-    def _raster_plot(self, compartment, spike_ranks, spike_times, mask):
-        """
-        Plot raster plot.
-
-        Args:
-            compartment (str):
-                The name of the compartment.
-            spike_ranks (array):
-                The spike ranks.
-            spike_times (array):
-                The spike times.
-            mask (array):
-                The mask for the spike times.
-        """
-        ### set title
-        plt.title(f"Spikes {compartment} ({spike_ranks.max() + 1})")
-        ### check if there is only one neuron
-        if spike_ranks.max() == 0:
-            marker, size = ["|", 3000]
-        else:
-            marker, size = [".", 3]
-        ### plot spikes
-        plt.scatter(
-            spike_times[mask],
-            spike_ranks[mask],
-            color="k",
-            marker=marker,
-            s=size,
-            linewidth=0.1,
-        )
-        ### set limits
-        plt.ylim(-0.5, spike_ranks.max() + 0.5)
-        ### set ylabel
-        plt.ylabel("# neurons")
-        ### set yticks
-        if spike_ranks.max() == 0:
-            plt.yticks([0])
-        else:
-            plt.gca().yaxis.set_major_locator(MaxNLocator(integer=True))
-
-    def _mean_firing_rate_plot(self, compartment, data, format):
-        """
-        Plot mean firing rate.
-
-        Args:
-            compartment (str):
-                The name of the compartment.
-            data (array):
-                The spike data.
-            format (str):
-                The format of the plot.
-        """
-        ### set title
-        plt.title(f"Activity {compartment} ({len(data)})")
-        ### set axis
-        ax = plt.gca()
-        color = "k"
-        ### for hybrid format plot mean firing rate in second y-axis
-        if format == "hybrid":
-            ax = plt.gca().twinx()
-            color = "r"
-        ### get mean firing rate
-        time_arr, firing_rate = get_pop_rate(
-            spikes=data,
-            t_start=self._start_time,
-            t_end=self._end_time,
-            time_step=self._time_step,
-        )
-        ### plot mean firing rate
-        ax.plot(time_arr, firing_rate, color=color)
-        ### set limits
-        ax.set_xlim(self._start_time, self._end_time)
-        ### set ylabel
-        ax.set_ylabel("Mean firing rate [Hz]", color=color)
-        ax.tick_params(axis="y", colors=color)
-
-    def _interspike_interval_plot(self, compartment, data):
-        """
-        Plot interspike interval histogram.
-
-        Args:
-            compartment (str):
-                The name of the compartment.
-            data (dict):
-                The spike data.
-        """
-        ### set title
-        plt.title(f"Interspike interval histogram {compartment} ({len(data)})")
-        ### get interspike intervals
-        interspike_intervals_list = inter_spike_interval(spikes=data)
-        ### plot histogram
-        plt.hist(
-            interspike_intervals_list,
-            bins=100,
-            range=(0, 200),
-            density=True,
-            color="k",
-        )
-        ### set limits
-        plt.xlim(0, 200)
-        ### set ylabel
-        plt.ylabel("Probability")
-        plt.xlabel("Interspike interval [ms]")
-
-    def _coefficient_of_variation_plot(self, compartment, data):
-        """
-        Plot coefficient of variation histogram.
-
-        Args:
-            compartment (str):
-                The name of the compartment.
-            data (dict):
-                The spike data.
-        """
-        ### set title
-        plt.title(f"Coefficient of variation histogram {compartment} ({len(data)})")
-        ### get coefficient of variation
-        coefficient_of_variation_dict = coefficient_of_variation(
-            spikes=data,
-            per_neuron=True,
-        )
-        coefficient_of_variation_list = list(coefficient_of_variation_dict.values())
-        ### plot histogram
-        plt.hist(
-            coefficient_of_variation_list,
-            bins=100,
-            range=(0, 2),
-            density=True,
-            color="k",
-        )
-        ### set limits
-        plt.xlim(0, 2)
-        ### set ylabel
-        plt.ylabel("Probability")
-        plt.xlabel("Coefficient of variation")
-
-    def _fill_subplot_other(self, plot_idx):
-        """
-        Fill subplot with array data.
-
-        Args:
-            plot_idx (int):
-                The index of the subplot in the plan.
-        """
-        ### get data
-        compartment = self.plan["compartment"][plot_idx]
-        variable: str = self.plan["variable"][plot_idx]
-        format: str = self.plan["format"][plot_idx]
-        data_arr = self._raw_data_list[plot_idx]
-        time_arr = self._time_arr_list[plot_idx]
-
-        ### get data within time_lims
-        mask: np.ndarray = (
-            (time_arr >= self._start_time).astype(int)
-            * (time_arr <= self._end_time).astype(int)
-        ).astype(bool)
-
-        ### fill gaps in time_arr and data_arr with nan
-        time_arr, data_arr = time_data_add_nan(
-            time_arr=time_arr[mask], data_arr=data_arr[mask], axis=0
-        )
-
-        ### plot line plot
-        if "line" in format:
-            self._line_plot(
-                compartment,
-                variable,
-                time_arr,
-                data_arr,
-                plot_idx,
-                mean="mean" in format,
-            )
-
-        ### plot matrix plot
-        if "matrix" in format:
-            self._matrix_plot(
-                compartment,
-                variable,
-                time_arr,
-                data_arr,
-                plot_idx,
-                mean="mean" in format,
-            )
-
-    def _line_plot(self, compartment, variable, time_arr, data_arr, plot_idx, mean):
-        """
-        Plot line plot.
-
-        Args:
-            compartment (str):
-                The name of the compartment.
-            variable (str):
-                The name of the variable.
-            time_arr (array):
-                The time array.
-            data_arr (array):
-                The data array.
-            plot_idx (int):
-                The index of the subplot in the plan.
-            mean (bool):
-                If True, plot the mean of the data. Population: average over neurons.
-                Projection: average over preneurons (results in one line for each
-                postneuron).
-        """
-
-        ### set title
-        plt.title(f"Variable {variable} of {compartment} ({data_arr.shape[1]})")
-
-        ### Shape of data defines how to plot
-        ### 2D array where elements are no lists
-        ### = population data [time, neurons]
-        ### --> plot line for each neuron
-        if len(data_arr.shape) == 2 and isinstance(data_arr[0, 0], list) is not True:
-            ### mean -> average over neurons
-            if mean:
-                data_arr = np.mean(data_arr, 1, keepdims=True)
-            ### plot line for each neuron
-            for neuron in range(data_arr.shape[1]):
-                plt.plot(
-                    time_arr,
-                    data_arr[:, neuron],
-                    color="k",
-                )
-
-        ### 2D array where elements are lists
-        ### = projection data [time, postneurons][preneurons]
-        ### 3D array
-        ### = projection data [time, postneurons, preneurons]
-        ### --> plot line for each preneuron postneuron pair
-        elif len(data_arr.shape) == 3 or (
-            len(data_arr.shape) == 2 and isinstance(data_arr[0, 0], list) is True
-        ):
-            ### plot line for each preneuron postneuron pair
-            for post_neuron in range(data_arr.shape[1]):
-                ### the post_neuron has a constant number of preneurons
-                ### --> create array with preneuron indices [time, preneurons]
-                post_neuron_data = np.array(data_arr[:, post_neuron])
-                ### mean -> average over preneurons
-                if mean:
-                    post_neuron_data = np.mean(post_neuron_data, 1, keepdims=True)
-                for pre_neuron in range(post_neuron_data.shape[1]):
-                    plt.plot(
-                        time_arr,
-                        post_neuron_data[:, pre_neuron],
-                        color="k",
-                    )
-        else:
-            print(
-                f"\nERROR PlotRecordings: shape of data not supported, {compartment}, {variable} in plot {plot_idx}.\n"
-            )
-
-    def _matrix_plot(self, compartment, variable, time_arr, data_arr, plot_idx, mean):
-        """
-        Plot matrix plot.
-
-        Args:
-            compartment (str):
-                The name of the compartment.
-            variable (str):
-                The name of the variable.
-            time_arr (array):
-                The time array.
-            data_arr (array):
-                The data array.
-            plot_idx (int):
-                The index of the subplot in the plan.
-            mean (bool):
-                If True, plot the mean of the data. Population: average over neurons.
-                Projection: average over preneurons (results in one line for each
-                postneuron).
-        """
-        ### number of neurons i.e. postneurons
-        nr_neurons = data_arr.shape[1]
-
-        ### Shape of data defines how to plot
-        ### 2D array where elements are no lists
-        ### = population data [time, neurons]
-        ### --> plot matrix row for each neuron
-        ### mean -> average over neurons
-        if len(data_arr.shape) == 2 and isinstance(data_arr[0, 0], list) is not True:
-            ### mean -> average over neurons
-            if mean:
-                data_arr = np.mean(data_arr, 1, keepdims=True)
-
-        ### 2D array where elements are lists
-        ### = projection data [time, postneurons][preneurons]
-        ### 3D array
-        ### = projection data [time, postneurons, preneurons]
-        ### --> plot matrix row for each preneuron postneuron pair (has to reshape to 2D array [time, neuron pair])
-        ### mean -> average over preneurons
-        elif len(data_arr.shape) == 3 or (
-            len(data_arr.shape) == 2 and isinstance(data_arr[0, 0], list) is True
-        ):
-            array_2D_list = []
-            ### loop over postneurons
-            for post_neuron in range(data_arr.shape[1]):
-                ### the post_neuron has a constant number of preneurons
-                ### --> create array with preneuron indices [time, preneurons]
-                post_neuron_data = np.array(data_arr[:, post_neuron])
-                ### mean --> average over preneurons
-                if mean:
-                    post_neuron_data = np.mean(post_neuron_data, 1, keepdims=True)
-                ### append all preneurons arrays to array_2D_list
-                for pre_neuron in range(post_neuron_data.shape[1]):
-                    array_2D_list.append(post_neuron_data[:, pre_neuron])
-                ### append a None array to array_2D_list to separate postneurons
-                array_2D_list.append(np.empty(post_neuron_data.shape[0]) * np.nan)
-
-            ### convert array_2D_list to 2D array, not use last None array
-            data_arr = np.array(array_2D_list[:-1]).T
-
-        ### some other shape not supported
-        else:
-            print(
-                f"\nERROR PlotRecordings: shape of data not supported, {compartment}, {variable} in plot {plot_idx}.\n"
-            )
-
-        ### plot matrix row for each neuron or preneuron postneuron pair
-        plt.imshow(
-            data_arr.T,
-            aspect="auto",
-            vmin=np.nanmin(data_arr),
-            vmax=np.nanmax(data_arr),
-            extent=[
-                time_arr.min()
-                - self.recordings[self.chunk][f"{compartment};period"] / 2,
-                time_arr.max()
-                + self.recordings[self.chunk][f"{compartment};period"] / 2,
-                data_arr.shape[1] - 0.5,
-                -0.5,
-            ],
-            cmap="viridis",
-            interpolation="none",
-        )
-        if data_arr.shape[1] == 1:
-            plt.yticks([0])
-        else:
-            ### all y ticks
-            y_tick_positions_all_arr = np.arange(data_arr.shape[1])
-            ### boolean array of valid y ticks
-            valid_y_ticks = np.logical_not(np.isnan(data_arr).any(axis=0))
-            ### get y tick labels
-            if False in valid_y_ticks:
-                ### there are nan entries
-                ### split at nan entries
-                y_tick_positions_split_list = np.array_split(
-                    y_tick_positions_all_arr, np.where(np.logical_not(valid_y_ticks))[0]
-                )
-                ### decrease by 1 after each nan entry
-                y_tick_positions_split_list = [
-                    y_tick_positions_split - idx_split
-                    for idx_split, y_tick_positions_split in enumerate(
-                        y_tick_positions_split_list
-                    )
-                ]
-                ### join split arrays
-                y_tick_labels_all_arr = np.concatenate(y_tick_positions_split_list)
-            else:
-                y_tick_labels_all_arr = y_tick_positions_all_arr
-
-            valid_y_ticks_selected_idx_arr = np.linspace(
-                0,
-                np.sum(valid_y_ticks),
-                num=min([10, np.sum(valid_y_ticks)]),
-                dtype=int,
-                endpoint=False,
-            )
-            valid_y_ticks_selected_arr = y_tick_positions_all_arr[valid_y_ticks][
-                valid_y_ticks_selected_idx_arr
-            ]
-            valid_y_ticks_labels_selected_arr = y_tick_labels_all_arr[valid_y_ticks][
-                valid_y_ticks_selected_idx_arr
-            ]
-
-            plt.yticks(valid_y_ticks_selected_arr, valid_y_ticks_labels_selected_arr)
-
-        ### set title
-        plt.title(
-            f"Variable {variable} of {compartment} ({nr_neurons}) [{ef.sci(np.nanmin(data_arr))}, {ef.sci(np.nanmax(data_arr))}]"
-        )
-
-
- - - -
- - - - - - - - - - -
- - - - -

- __init__(figname, recordings, recording_times, shape, plan, chunk=0, time_lim=None, dpi=300) - -#

- - -
- -

Create and save the plot.

- - - -

Parameters:

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionDefault
figname - str - -
-

The name of the figure to be saved.

-
-
- required -
recordings - list - -
-

A recordings list obtained from CompNeuroMonitors.

-
-
- required -
recording_times - RecordingTimes - -
-

The RecordingTimes object containing the recording times obtained from -CompNeuroMonitors.

-
-
- required -
shape - tuple - -
-

The shape of the figure. (number of rows, number of columns)

-
-
- required -
plan - dict - -
-

Defines which recordings are plotted in which subplot and how. The plan -has to contain the following keys: "position", "compartment", -"variable", "format". The values of the keys have to be lists of the -same length. The values of the key "position" have to be integers -between 1 and the number of subplots (defined by shape). The values of -the key "compartment" have to be the names of the model compartments as -strings. The values of the key "variable" have to be strings containing -the names of the recorded variables or equations using the recorded -variables. The values of the key "format" have to be strings defining -how the recordings are plotted. The following formats are available for -spike recordings: "raster", "mean", "hybrid", "interspike". The -following formats are available for other recordings: "line", -"line_mean", "matrix", "matrix_mean".

-
-
- required -
chunk - int - -
-

The chunk of the recordings to be plotted. Default: 0.

-
-
- 0 -
time_lim - tuple - -
-

Defines the x-axis for all subplots. The tuple contains two -numbers: start and end time in ms. The times have to be -within the chunk. Default: None, i.e., the whole chunk is plotted.

-
-
- None -
dpi - int - -
-

The dpi of the saved figure. Default: 300.

-
-
- 300 -
- -
- Source code in CompNeuroPy/analysis_functions.py -
1794
-1795
-1796
-1797
-1798
-1799
-1800
-1801
-1802
-1803
-1804
-1805
-1806
-1807
-1808
-1809
-1810
-1811
-1812
-1813
-1814
-1815
-1816
-1817
-1818
-1819
-1820
-1821
-1822
-1823
-1824
-1825
-1826
-1827
-1828
-1829
-1830
-1831
-1832
-1833
-1834
-1835
-1836
-1837
-1838
-1839
-1840
-1841
-1842
-1843
-1844
-1845
-1846
-1847
-1848
-1849
-1850
-1851
-1852
-1853
-1854
-1855
-1856
-1857
-1858
-1859
-1860
-1861
-1862
-1863
-1864
-1865
-1866
-1867
-1868
-1869
-1870
-1871
-1872
-1873
-1874
-1875
-1876
-1877
-1878
@check_types()
-def __init__(
-    self,
-    figname: str,
-    recordings: list[dict],
-    recording_times: RecordingTimes,
-    shape: tuple[int, int],
-    plan: dict,
-    chunk: int = 0,
-    time_lim: None | tuple[float, float] = None,
-    dpi: int = 300,
-) -> None:
-    """
-    Create and save the plot.
-
-    Args:
-        figname (str):
-            The name of the figure to be saved.
-        recordings (list):
-            A recordings list obtained from CompNeuroMonitors.
-        recording_times (RecordingTimes):
-            The RecordingTimes object containing the recording times obtained from
-            CompNeuroMonitors.
-        shape (tuple):
-            The shape of the figure. (number of rows, number of columns)
-        plan (dict):
-            Defines which recordings are plotted in which subplot and how. The plan
-            has to contain the following keys: "position", "compartment",
-            "variable", "format". The values of the keys have to be lists of the
-            same length. The values of the key "position" have to be integers
-            between 1 and the number of subplots (defined by shape). The values of
-            the key "compartment" have to be the names of the model compartments as
-            strings. The values of the key "variable" have to be strings containing
-            the names of the recorded variables or equations using the recorded
-            variables. The values of the key "format" have to be strings defining
-            how the recordings are plotted. The following formats are available for
-            spike recordings: "raster", "mean", "hybrid", "interspike". The
-            following formats are available for other recordings: "line",
-            "line_mean", "matrix", "matrix_mean".
-        chunk (int, optional):
-            The chunk of the recordings to be plotted. Default: 0.
-        time_lim (tuple, optional):
-            Defines the x-axis for all subplots. The tuple contains two
-            numbers: start and end time in ms. The times have to be
-            within the chunk. Default: None, i.e., the whole chunk is plotted.
-        dpi (int, optional):
-            The dpi of the saved figure. Default: 300.
-    """
-    ### print start message
-    print(f"Generate fig {figname}", end="... ", flush=True)
-
-    ### set attributes
-    self.figname = figname
-    self.recordings = recordings
-    self.recording_times = recording_times
-    self.shape = shape
-    self.plan = plan
-    self.chunk = chunk
-    self.time_lim = time_lim
-    self.dpi = dpi
-
-    ### get available compartments (from recordings) and recorded variables for each
-    ### compartment
-    (
-        self._compartment_list,
-        self._compartment_recordings_dict,
-    ) = self._get_compartment_recordings()
-
-    ### check plan keys and values
-    self._check_plan()
-
-    ### get start and end time for plotting and timestep
-    self._start_time, self._end_time, self._time_step = self._get_start_end_time()
-
-    ### get compbined time array for recordings of each compartment
-    self._time_arr_list = self._get_time_arr_list()
-
-    ### get data from recordings for each subplot
-    self._raw_data_list = self._get_raw_data_list()
-
-    ### create plot
-    self._plot()
-
-    ### print end message
-    print("Done\n")
-
-
-
- -
- - - -
- -
- -
- - - -
- - - - -

- my_raster_plot(spikes) - -#

- - -
- -

Returns two vectors representing for each recorded spike 1) the spike times and 2) -the ranks of the neurons. The spike times are always in simulation steps (in -contrast to default ANNarchy raster_plot).

- - - -

Parameters:

- - - - - - - - - - - - - - - - - -
NameTypeDescriptionDefault
spikes - dict - -
-

ANNarchy spike dict of one population

-
-
- required -
- - - -

Returns:

- - - - - - - - - - - - - - - - - -
Name TypeDescription
t - array - -
-

spike times in simulation steps

-
-
n - array - -
-

ranks of the neurons

-
-
- -
- Source code in CompNeuroPy/analysis_functions.py -
15
-16
-17
-18
-19
-20
-21
-22
-23
-24
-25
-26
-27
-28
-29
-30
-31
-32
-33
-34
def my_raster_plot(spikes: dict):
-    """
-    Returns two vectors representing for each recorded spike 1) the spike times and 2)
-    the ranks of the neurons. The spike times are always in simulation steps (in
-    contrast to default ANNarchy raster_plot).
-
-    Args:
-        spikes (dict):
-            ANNarchy spike dict of one population
-
-    Returns:
-        t (array):
-            spike times in simulation steps
-        n (array):
-            ranks of the neurons
-    """
-    t, n = raster_plot(spikes)
-    np.zeros(10)
-    t = np.round(t / dt(), 0).astype(int)
-    return t, n
-
-
-
- -
- - -
- - - - -

- get_nanmean(a, axis=None, dtype=None) - -#

- - -
- -

Same as np.nanmean but without printing warnings.

- - - -

Parameters:

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionDefault
a - array_like - -
-

Array containing numbers whose mean is desired. If a is not an -array, a conversion is attempted.

-
-
- required -
axis - None or int or tuple of ints - -
-

Axis or axes along which the means are computed. The default is to -compute the mean of the flattened array.

-

.. numpy versionadded:: 1.7.0

-

If this is a tuple of ints, a mean is performed over multiple axes, -instead of a single axis or all the axes as before.

-
-
- None -
dtype - data - type - -
-

Type to use in computing the mean. For integer inputs, the default -is float64; for floating point inputs, it is the same as the -input dtype.

-
-
- None -
- - - -

Returns:

- - - - - - - - - - - - - -
Name TypeDescription
m - ndarray, see dtype parameter above - -
-

If out=None, returns a new array containing the mean values, -otherwise a reference to the output array is returned.

-
-
- -
- Source code in CompNeuroPy/analysis_functions.py -
37
-38
-39
-40
-41
-42
-43
-44
-45
-46
-47
-48
-49
-50
-51
-52
-53
-54
-55
-56
-57
-58
-59
-60
-61
-62
-63
-64
-65
-66
def get_nanmean(a, axis=None, dtype=None):
-    """
-    Same as np.nanmean but without printing warnings.
-
-    Args:
-        a (array_like):
-            Array containing numbers whose mean is desired. If `a` is not an
-            array, a conversion is attempted.
-        axis (None or int or tuple of ints, optional):
-            Axis or axes along which the means are computed. The default is to
-            compute the mean of the flattened array.
-
-            .. numpy versionadded:: 1.7.0
-
-            If this is a tuple of ints, a mean is performed over multiple axes,
-            instead of a single axis or all the axes as before.
-        dtype (data-type, optional):
-            Type to use in computing the mean.  For integer inputs, the default
-            is `float64`; for floating point inputs, it is the same as the
-            input dtype.
-
-    Returns:
-        m (ndarray, see dtype parameter above):
-            If `out=None`, returns a new array containing the mean values,
-            otherwise a reference to the output array is returned.
-    """
-    with warnings.catch_warnings():
-        warnings.simplefilter("ignore", category=RuntimeWarning)
-        ret = np.nanmean(a, axis=axis, dtype=dtype)
-    return ret
-
-
-
- -
- - -
- - - - -

- get_nanstd(a, axis=None, dtype=None) - -#

- - -
- -

Same as np.nanstd but without printing warnings.

- - - -

Parameters:

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionDefault
a - array_like - -
-

Calculate the standard deviation of these values.

-
-
- required -
axis - None or int or tuple of ints - -
-

Axis or axes along which the standard deviation is computed. The -default is to compute the standard deviation of the flattened array.

-

.. numpy versionadded:: 1.7.0

-

If this is a tuple of ints, a standard deviation is performed over -multiple axes, instead of a single axis or all the axes as before.

-
-
- None -
dtype - dtype - -
-

Type to use in computing the standard deviation. For arrays of -integer type the default is float64, for arrays of float types it is -the same as the array type.

-
-
- None -
- - - -

Returns:

- - - - - - - - - - - - - -
Name TypeDescription
standard_deviation - ndarray, see dtype parameter above - -
-

If out is None, return a new array containing the standard deviation, -otherwise return a reference to the output array.

-
-
- -
- Source code in CompNeuroPy/analysis_functions.py -
69
-70
-71
-72
-73
-74
-75
-76
-77
-78
-79
-80
-81
-82
-83
-84
-85
-86
-87
-88
-89
-90
-91
-92
-93
-94
-95
-96
-97
def get_nanstd(a, axis=None, dtype=None):
-    """
-    Same as np.nanstd but without printing warnings.
-
-    Args:
-        a (array_like):
-            Calculate the standard deviation of these values.
-        axis (None or int or tuple of ints, optional):
-            Axis or axes along which the standard deviation is computed. The
-            default is to compute the standard deviation of the flattened array.
-
-            .. numpy versionadded:: 1.7.0
-
-            If this is a tuple of ints, a standard deviation is performed over
-            multiple axes, instead of a single axis or all the axes as before.
-        dtype (dtype, optional):
-            Type to use in computing the standard deviation. For arrays of
-            integer type the default is float64, for arrays of float types it is
-            the same as the array type.
-
-    Returns:
-        standard_deviation (ndarray, see dtype parameter above):
-            If `out` is None, return a new array containing the standard deviation,
-            otherwise return a reference to the output array.
-    """
-    with warnings.catch_warnings():
-        warnings.simplefilter("ignore", category=RuntimeWarning)
-        ret = np.nanstd(a, axis=axis, dtype=dtype)
-    return ret
-
-
-
- -
- - -
- - - - -

- get_population_power_spectrum(spikes, time_step, t_start=None, t_end=None, fft_size=None) - -#

- - -
- -

Generates power spectrum of population spikes, returns frequency_arr and -power_spectrum_arr. Using the Welch methode from: Welch, P. (1967). The use of fast -Fourier transform for the estimation of power spectra: a method based on time -averaging over short, modified periodograms. IEEE Transactions on audio and -electroacoustics, 15(2), 70-73.

-

The spike arrays are splitted into multiple arrays and then multiple FFTs are -performed and the results are averaged.

-

Size of splitted signals and the time step of the simulation determine the frequency -resolution and the maximum frequency: - maximum frequency [Hz] = 500 / time_step - frequency resolution [Hz] = 1000 / (time_step * fftSize)

- - - -

Parameters:

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionDefault
spikes - dicitonary - -
-

ANNarchy spike dict of one population

-
-
- required -
time_step - float - -
-

time step of the simulation in ms

-
-
- required -
t_start - float or int - -
-

start time of analyzed data in ms. Default: time of first spike

-
-
- None -
t_end - float or int - -
-

end time of analyzed data in ms. Default: time of last spike

-
-
- None -
fft_size - int - -
-

signal size for the FFT (size of splitted arrays) -has to be a power of 2. Default: maximum

-
-
- None -
- - - -

Returns:

- - - - - - - - - - - - - - - - - -
Name TypeDescription
frequency_arr - array - -
-

array with frequencies

-
-
spectrum - array - -
-

array with power spectrum

-
-
- -
- Source code in CompNeuroPy/analysis_functions.py -
136
-137
-138
-139
-140
-141
-142
-143
-144
-145
-146
-147
-148
-149
-150
-151
-152
-153
-154
-155
-156
-157
-158
-159
-160
-161
-162
-163
-164
-165
-166
-167
-168
-169
-170
-171
-172
-173
-174
-175
-176
-177
-178
-179
-180
-181
-182
-183
-184
-185
-186
-187
-188
-189
-190
-191
-192
-193
-194
-195
-196
-197
-198
-199
-200
-201
-202
-203
-204
-205
-206
-207
-208
-209
-210
-211
-212
-213
-214
-215
-216
-217
-218
-219
-220
-221
-222
-223
-224
-225
-226
-227
-228
-229
-230
-231
-232
-233
-234
-235
-236
-237
-238
-239
-240
-241
-242
-243
-244
-245
-246
-247
-248
-249
-250
-251
-252
-253
-254
-255
-256
-257
-258
-259
-260
-261
-262
-263
-264
-265
-266
-267
-268
-269
-270
-271
-272
def get_population_power_spectrum(
-    spikes,
-    time_step,
-    t_start=None,
-    t_end=None,
-    fft_size=None,
-):
-    """
-    Generates power spectrum of population spikes, returns frequency_arr and
-    power_spectrum_arr. Using the Welch methode from: Welch, P. (1967). The use of fast
-    Fourier transform for the estimation of power spectra: a method based on time
-    averaging over short, modified periodograms. IEEE Transactions on audio and
-    electroacoustics, 15(2), 70-73.
-
-    The spike arrays are splitted into multiple arrays and then multiple FFTs are
-    performed and the results are averaged.
-
-    Size of splitted signals and the time step of the simulation determine the frequency
-    resolution and the maximum frequency:
-        maximum frequency [Hz] = 500 / time_step
-        frequency resolution [Hz] = 1000 / (time_step * fftSize)
-
-    Args:
-        spikes (dicitonary):
-            ANNarchy spike dict of one population
-        time_step (float):
-            time step of the simulation in ms
-        t_start (float or int, optional):
-            start time of analyzed data in ms. Default: time of first spike
-        t_end (float or int, optional):
-            end time of analyzed data in ms. Default: time of last spike
-        fft_size (int, optional):
-            signal size for the FFT (size of splitted arrays)
-            has to be a power of 2. Default: maximum
-
-    Returns:
-        frequency_arr (array):
-            array with frequencies
-        spectrum (array):
-            array with power spectrum
-    """
-
-    def ms_to_s(x):
-        return x / 1000
-
-    ### get population_size / sampling_frequency
-    populations_size = len(list(spikes.keys()))
-    sampling_frequency = 1 / ms_to_s(time_step)  # in Hz
-
-    ### check if there are spikes in data
-    t, _ = my_raster_plot(spikes)
-    if len(t) < 2:
-        ### there are no 2 spikes
-        print("WARNING: get_population_power_spectrum: <2 spikes!")
-        ### --> return None or zeros
-        if fft_size == None:
-            print(
-                "ERROR: get_population_power_spectrum: <2 spikes and no fft_size given!"
-            )
-            quit()
-        else:
-            frequency_arr = np.fft.fftfreq(fft_size, 1.0 / sampling_frequency)
-            frequency_arr_ret = frequency_arr[2 : int(fft_size / 2)]
-            spectrum_ret = np.zeros(frequency_arr_ret.shape)
-            return [frequency_arr_ret, spectrum_ret]
-
-    ### check if t_start / t_end are None
-    if t_start == None:
-        t_start = round(t.min() * time_step, get_number_of_decimals(time_step))
-    if t_end == None:
-        t_end = round(t.max() * time_step, get_number_of_decimals(time_step))
-
-    ### calculate time
-    simulation_time = round(t_end - t_start, get_number_of_decimals(time_step))  # in ms
-
-    ### get fft_size
-    ### if None --> as large as possible
-    if fft_size is None:
-        pow = 1
-        while (2 ** (pow + 1)) / sampling_frequency < ms_to_s(simulation_time):
-            pow = pow + 1
-        fft_size = 2**pow
-
-    if ms_to_s(simulation_time) < (fft_size / sampling_frequency):
-        ### catch a too large fft_size
-        print(
-            f"Too large fft_size {fft_size} for duration {simulation_time} ms. FFT_size has to be smaller than {int(ms_to_s(simulation_time)*sampling_frequency)}!"
-        )
-        return [np.zeros(int(fft_size / 2 - 2)), np.zeros(int(fft_size / 2 - 2))]
-    elif (np.log2(fft_size) - int(np.log2(fft_size))) != 0:
-        ### catch fft_size if its not power of 2
-        print("FFT_size hast to be power of 2!")
-        return [np.zeros(int(fft_size / 2 - 2)), np.zeros(int(fft_size / 2 - 2))]
-    else:
-        print(
-            f"power sepctrum, min = {1000 / (time_step * fft_size)}, max = {500 / time_step}"
-        )
-        ### calculate frequency powers
-        spectrum = np.zeros((populations_size, fft_size))
-        for neuron in range(populations_size):
-            ### sampling steps array
-            spiketrain = np.zeros(
-                int(np.round(ms_to_s(simulation_time) * sampling_frequency))
-            )
-            ### spike times as sampling steps
-            idx = (
-                np.round(
-                    ms_to_s((np.array(spikes[neuron]) * time_step)) * sampling_frequency
-                )
-            ).astype(np.int32)
-            ### cut the spikes before t_start and after t_end
-            idx_start = ms_to_s(t_start) * sampling_frequency
-            idx_end = ms_to_s(t_end) * sampling_frequency
-            mask = ((idx > idx_start).astype(int) * (idx < idx_end).astype(int)).astype(
-                bool
-            )
-            idx = (idx[mask] - idx_start).astype(np.int32)
-
-            ### set spiketrain array to one if there was a spike at sampling step
-            spiketrain[idx] = 1
-
-            ### generate multiple overlapping sequences out of the spike trains
-            spiketrain_sequences = _hanning_split_overlap(
-                spiketrain, fft_size, int(fft_size / 2)
-            )
-
-            ### generate power spectrum
-            spectrum[neuron] = get_nanmean(
-                np.abs(np.fft.fft(spiketrain_sequences)) ** 2, 0
-            )
-
-        ### mean spectrum over all neurons
-        spectrum = get_nanmean(spectrum, 0)
-
-        frequency_arr = np.fft.fftfreq(fft_size, 1.0 / sampling_frequency)
-
-        return (frequency_arr[2 : int(fft_size / 2)], spectrum[2 : int(fft_size / 2)])
-
-
-
- -
- - -
- - - - -

- get_power_spektrum_from_time_array(arr, presimulationTime, simulationTime, simulation_dt, samplingfrequency=250, fftSize=1024) - -#

- - -
- -

Generates power spectrum of time signal (returns frequencies_arr and power_arr). -Using the Welch methode (Welch,1967).

-

amplingfrequency: to sample the arr, in Hz --> max frequency = samplingfrequency / 2 -fftSize: signal size for FFT, duration (in s) = fftSize / samplingfrequency ---> frequency resolution = samplingfrequency / fftSize

- - - -

Parameters:

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionDefault
arr - array - -
-

time array, value for each timestep

-
-
- required -
presimulationTime - float or int - -
-

simulation time which will not be analyzed

-
-
- required -
simulationTime - float or int - -
-

analyzed simulation time

-
-
- required -
simulation_dt - float or int - -
-

simulation timestep

-
-
- required -
samplingfrequency - float or int - -
-

sampling frequency for sampling the time array. Default: 250

-
-
- 250 -
fftSize - int - -
-

signal size for the FFT (size of splitted arrays) -has to be a power of 2. Default: 1024

-
-
- 1024 -
- - - -

Returns:

- - - - - - - - - - - - - - - - - -
Name TypeDescription
frequency_arr - array - -
-

array with frequencies

-
-
spectrum - array - -
-

array with power spectrum

-
-
- -
- Source code in CompNeuroPy/analysis_functions.py -
275
-276
-277
-278
-279
-280
-281
-282
-283
-284
-285
-286
-287
-288
-289
-290
-291
-292
-293
-294
-295
-296
-297
-298
-299
-300
-301
-302
-303
-304
-305
-306
-307
-308
-309
-310
-311
-312
-313
-314
-315
-316
-317
-318
-319
-320
-321
-322
-323
-324
-325
-326
-327
-328
-329
-330
def get_power_spektrum_from_time_array(
-    arr,
-    presimulationTime,
-    simulationTime,
-    simulation_dt,
-    samplingfrequency=250,
-    fftSize=1024,
-):
-    """
-    Generates power spectrum of time signal (returns frequencies_arr and power_arr).
-    Using the Welch methode (Welch,1967).
-
-    amplingfrequency: to sample the arr, in Hz --> max frequency = samplingfrequency / 2
-    fftSize: signal size for FFT, duration (in s) = fftSize / samplingfrequency
-    --> frequency resolution = samplingfrequency / fftSize
-
-    Args:
-        arr (array):
-            time array, value for each timestep
-        presimulationTime (float or int):
-            simulation time which will not be analyzed
-        simulationTime (float or int):
-            analyzed simulation time
-        simulation_dt (float or int):
-            simulation timestep
-        samplingfrequency (float or int, optional):
-            sampling frequency for sampling the time array. Default: 250
-        fftSize (int, optional):
-            signal size for the FFT (size of splitted arrays)
-            has to be a power of 2. Default: 1024
-
-    Returns:
-        frequency_arr (array):
-            array with frequencies
-        spectrum (array):
-            array with power spectrum
-    """
-
-    if (simulationTime / 1000) < (fftSize / samplingfrequency):
-        print("Simulation time has to be >=", fftSize / samplingfrequency, "s for FFT!")
-        return [np.zeros(int(fftSize / 2 - 2)), np.zeros(int(fftSize / 2 - 2))]
-    else:
-        ### sampling steps array
-        sampling_arr = arr[0 :: int((1 / samplingfrequency) * 1000 / simulation_dt)]
-
-        ### generate multiple overlapping sequences
-        sampling_arr_sequences = _hanning_split_overlap(
-            sampling_arr, fftSize, int(fftSize / 2)
-        )
-
-        ### generate power spectrum
-        spektrum = get_nanmean(np.abs(np.fft.fft(sampling_arr_sequences)) ** 2, 0)
-
-        frequenzen = np.fft.fftfreq(fftSize, 1.0 / samplingfrequency)
-
-        return (frequenzen[2 : int(fftSize / 2)], spektrum[2 : int(fftSize / 2)])
-
-
-
- -
- - -
- - - - -

- get_pop_rate(spikes, t_start=None, t_end=None, time_step=1, t_smooth_ms=-1) - -#

- - -
- -

Generates a smoothed population firing rate. Returns a time array and a firing rate -array.

- - - -

Parameters:

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionDefault
spikes - dictionary - -
-

ANNarchy spike dict of one population

-
-
- required -
t_start - float or int - -
-

start time of analyzed data in ms. Default: time of first spike

-
-
- None -
t_end - float or int - -
-

end time of analyzed data in ms. Default: time of last spike

-
-
- None -
time_step - float or int - -
-

time step of the simulation in ms. Default: 1

-
-
- 1 -
t_smooth_ms - float or int - -
-

time window for firing rate calculation in ms, if -1 --> time window sizes -are automatically detected. Default: -1

-
-
- -1 -
- - - -

Returns:

- - - - - - - - - - - - - - - - - -
Name TypeDescription
time_arr - array - -
-

array with time steps in ms

-
-
rate - array - -
-

array with population rate in Hz for each time step

-
-
- -
- Source code in CompNeuroPy/analysis_functions.py -
628
-629
-630
-631
-632
-633
-634
-635
-636
-637
-638
-639
-640
-641
-642
-643
-644
-645
-646
-647
-648
-649
-650
-651
-652
-653
-654
-655
-656
-657
-658
-659
-660
-661
-662
-663
-664
-665
-666
-667
-668
-669
-670
-671
-672
-673
-674
-675
-676
-677
-678
-679
-680
-681
-682
-683
-684
-685
-686
-687
-688
-689
-690
-691
-692
-693
-694
-695
-696
-697
-698
-699
-700
-701
-702
-703
-704
-705
-706
-707
-708
-709
-710
-711
-712
-713
-714
-715
-716
-717
-718
-719
-720
-721
-722
-723
def get_pop_rate(
-    spikes: dict,
-    t_start: float | int | None = None,
-    t_end: float | int | None = None,
-    time_step: float | int = 1,
-    t_smooth_ms: float | int = -1,
-):
-    """
-    Generates a smoothed population firing rate. Returns a time array and a firing rate
-    array.
-
-    Args:
-        spikes (dictionary):
-            ANNarchy spike dict of one population
-        t_start (float or int, optional):
-            start time of analyzed data in ms. Default: time of first spike
-        t_end (float or int, optional):
-            end time of analyzed data in ms. Default: time of last spike
-        time_step (float or int, optional):
-            time step of the simulation in ms. Default: 1
-        t_smooth_ms (float or int, optional):
-            time window for firing rate calculation in ms, if -1 --> time window sizes
-            are automatically detected. Default: -1
-
-    Returns:
-        time_arr (array):
-            array with time steps in ms
-        rate (array):
-            array with population rate in Hz for each time step
-    """
-    dt = time_step
-
-    t, _ = my_raster_plot(spikes)
-
-    ### check if there are spikes in population at all
-    if len(t) > 1:
-        if t_start == None:
-            t_start = round(t.min() * time_step, get_number_of_decimals(time_step))
-        if t_end == None:
-            t_end = round(t.max() * time_step, get_number_of_decimals(time_step))
-
-        duration = round(t_end - t_start, get_number_of_decimals(time_step))
-
-        ### if t_smooth is given --> use classic time_window method
-        if t_smooth_ms > 0:
-            return _get_pop_rate_old(
-                spikes, duration, dt=dt, t_start=t_start, t_smooth_ms=t_smooth_ms
-            )
-        else:
-            ### concatenate all spike times and sort them
-            spike_arr = dt * np.sort(
-                np.concatenate(
-                    [np.array(spikes[neuron]).astype(int) for neuron in spikes.keys()]
-                )
-            )
-            nr_neurons = len(list(spikes.keys()))
-            nr_spikes = spike_arr.size
-
-            ### use _recursive_rate to get firing rate
-            ### spike array is splitted in time bins
-            ### time bins widths are automatically found
-            time_population_rate, population_rate = _recursive_rate(
-                spike_arr / 1000.0,
-                t0=t_start / 1000.0,
-                t1=(t_start + duration) / 1000.0,
-                duration_init=duration / 1000.0,
-                nr_neurons=nr_neurons,
-                nr_spikes=nr_spikes,
-            )
-            ### time_population_rate was returned in s --> transform it into ms
-            time_population_rate = time_population_rate * 1000
-            time_arr0 = np.arange(t_start, t_start + duration, dt)
-            if len(time_population_rate) > 1:
-                ### interpolate
-                interpolate_func = interp1d(
-                    time_population_rate,
-                    population_rate,
-                    kind="linear",
-                    bounds_error=False,
-                    fill_value=(population_rate[0], population_rate[-1]),
-                )
-                population_rate_arr = interpolate_func(time_arr0)
-            else:
-                population_rate_arr = np.zeros(len(time_arr0))
-                mask = time_arr0 == time_population_rate[0]
-                population_rate_arr[mask] = population_rate[0]
-
-            ret = population_rate_arr
-    else:
-        if t_start == None or t_end == None:
-            return None
-        else:
-            duration = t_end - t_start
-            ret = np.zeros(int(duration / dt))
-
-    return (np.arange(t_start, t_start + duration, dt), ret)
-
-
-
- -
- - -
- - - - -

- plot_recordings(figname, recordings, recording_times, chunk, shape, plan, time_lim=None, dpi=300) - -#

- - -
- -

Plots the recordings of a single chunk from recordings. Plotted variables are -specified in plan.

- - - -

Parameters:

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionDefault
figname - str - -
-

path + name of figure (e.g. "figures/my_figure.png")

-
-
- required -
recordings - list - -
-

a recordings list from CompNeuroPy obtained with the function -get_recordings() from a CompNeuroMonitors object.

-
-
- required -
recording_times - object - -
-

recording_times object from CompNeuroPy obtained with the -function get_recording_times() from a CompNeuroMonitors object.

-
-
- required -
chunk - int - -
-

which chunk of recordings should be used (the index of chunk)

-
-
- required -
shape - tuple - -
-

Defines the subplot arrangement e.g. (3,2) = 3 rows, 2 columns

-
-
- required -
plan - list of strings - -
-

Defines which recordings are plotted in which subplot and how. -Entries of the list have the structure: - "subplot_nr;model_component_name;variable_to_plot;format", - e.g. "1,my_pop1;v;line". - mode: defines how the data is plotted, available modes: - - for spike data: raster, mean, hybrid - - for other data: line, mean, matrix - - only for projection data: matrix_mean

-
-
- required -
time_lim - tuple - -
-

Defines the x-axis for all subplots. The list contains two -numbers: start and end time in ms. The times have to be -within the chunk. Default: None, i.e., time lims of chunk

-
-
- None -
dpi - int - -
-

The dpi of the saved figure. Default: 300

-
-
- 300 -
- -
- Source code in CompNeuroPy/analysis_functions.py -
726
-727
-728
-729
-730
-731
-732
-733
-734
-735
-736
-737
-738
-739
-740
-741
-742
-743
-744
-745
-746
-747
-748
-749
-750
-751
-752
-753
-754
-755
-756
-757
-758
-759
-760
-761
-762
-763
-764
-765
-766
-767
-768
-769
-770
-771
-772
-773
-774
-775
-776
-777
@check_types()
-def plot_recordings(
-    figname: str,
-    recordings: list,
-    recording_times: RecordingTimes,
-    chunk: int,
-    shape: tuple,
-    plan: list[str],
-    time_lim: None | tuple = None,
-    dpi: int = 300,
-):
-    """
-    Plots the recordings of a single chunk from recordings. Plotted variables are
-    specified in plan.
-
-    Args:
-        figname (str):
-            path + name of figure (e.g. "figures/my_figure.png")
-        recordings (list):
-            a recordings list from CompNeuroPy obtained with the function
-            get_recordings() from a CompNeuroMonitors object.
-        recording_times (object):
-            recording_times object from CompNeuroPy obtained with the
-            function get_recording_times() from a CompNeuroMonitors object.
-        chunk (int):
-            which chunk of recordings should be used (the index of chunk)
-        shape (tuple):
-            Defines the subplot arrangement e.g. (3,2) = 3 rows, 2 columns
-        plan (list of strings):
-            Defines which recordings are plotted in which subplot and how.
-            Entries of the list have the structure:
-                "subplot_nr;model_component_name;variable_to_plot;format",
-                e.g. "1,my_pop1;v;line".
-                mode: defines how the data is plotted, available modes:
-                    - for spike data: raster, mean, hybrid
-                    - for other data: line, mean, matrix
-                    - only for projection data: matrix_mean
-        time_lim (tuple, optional):
-            Defines the x-axis for all subplots. The list contains two
-            numbers: start and end time in ms. The times have to be
-            within the chunk. Default: None, i.e., time lims of chunk
-        dpi (int, optional):
-            The dpi of the saved figure. Default: 300
-    """
-    proc = Process(
-        target=_plot_recordings,
-        args=(figname, recordings, recording_times, chunk, shape, plan, time_lim, dpi),
-    )
-    proc.start()
-    proc.join()
-    if proc.exitcode != 0:
-        quit()
-
-
-
- -
- - -
- - - - -

- get_number_of_zero_decimals(nr) - -#

- - -
- -

For numbers which are smaller than zero get the number of digits after the decimal -point which are zero (plus 1). For the number 0 or numbers >=1 return zero, e.g.:

- - - -

Parameters:

- - - - - - - - - - - - - - - - - -
NameTypeDescriptionDefault
nr - float or int - -
-

the number from which the number of digits are obtained

-
-
- required -
- - - -

Returns:

- - - - - - - - - - - - - -
Name TypeDescription
decimals - int - -
-

number of digits after the decimal point which are zero (plus 1)

-
-
- - - -

Examples:

-
>>> get_number_of_zero_decimals(0.12)
-1
->>> get_number_of_zero_decimals(0.012)
-2
->>> get_number_of_zero_decimals(1.012)
-0
-
- -
- Source code in CompNeuroPy/analysis_functions.py -
1508
-1509
-1510
-1511
-1512
-1513
-1514
-1515
-1516
-1517
-1518
-1519
-1520
-1521
-1522
-1523
-1524
-1525
-1526
-1527
-1528
-1529
-1530
-1531
-1532
-1533
-1534
-1535
def get_number_of_zero_decimals(nr):
-    """
-    For numbers which are smaller than zero get the number of digits after the decimal
-    point which are zero (plus 1). For the number 0 or numbers >=1 return zero, e.g.:
-
-    Args:
-        nr (float or int):
-            the number from which the number of digits are obtained
-
-    Returns:
-        decimals (int):
-            number of digits after the decimal point which are zero (plus 1)
-
-    Examples:
-        >>> get_number_of_zero_decimals(0.12)
-        1
-        >>> get_number_of_zero_decimals(0.012)
-        2
-        >>> get_number_of_zero_decimals(1.012)
-        0
-    """
-    decimals = 0
-    if nr != 0:
-        while abs(nr) < 1:
-            nr = nr * 10
-            decimals = decimals + 1
-
-    return decimals
-
-
-
- -
- - -
- - - - -

- get_number_of_decimals(nr) - -#

- - -
- -

Get number of digits after the decimal point.

- - - -

Parameters:

- - - - - - - - - - - - - - - - - -
NameTypeDescriptionDefault
nr - float or int - -
-

the number from which the number of digits are obtained

-
-
- required -
- - - -

Returns:

- - - - - - - - - - - - - -
Name TypeDescription
decimals - int - -
-

number of digits after the decimal point

-
-
- - - -

Examples:

-
>>> get_number_of_decimals(5)
-0
->>> get_number_of_decimals(5.1)
-1
->>> get_number_of_decimals(0.0101)
-4
-
- -
- Source code in CompNeuroPy/analysis_functions.py -
1538
-1539
-1540
-1541
-1542
-1543
-1544
-1545
-1546
-1547
-1548
-1549
-1550
-1551
-1552
-1553
-1554
-1555
-1556
-1557
-1558
-1559
-1560
-1561
-1562
-1563
-1564
def get_number_of_decimals(nr):
-    """
-    Get number of digits after the decimal point.
-
-    Args:
-        nr (float or int):
-            the number from which the number of digits are obtained
-
-    Returns:
-        decimals (int):
-            number of digits after the decimal point
-
-    Examples:
-        >>> get_number_of_decimals(5)
-        0
-        >>> get_number_of_decimals(5.1)
-        1
-        >>> get_number_of_decimals(0.0101)
-        4
-    """
-
-    if nr != int(nr):
-        decimals = len(str(nr).split(".")[1])
-    else:
-        decimals = 0
-
-    return decimals
-
-
-
- -
- - -
- - - - -

- sample_data_with_timestep(time_arr, data_arr, timestep) - -#

- - -
- -

Samples a data array each timestep using interpolation

- - - -

Parameters:

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionDefault
time_arr - array - -
-

times of data_arr in ms

-
-
- required -
data_arr - array - -
-

array with data values from which will be sampled

-
-
- required -
timestep - float or int - -
-

timestep in ms for sampling

-
-
- required -
- - - -

Returns:

- - - - - - - - - - - - - - - - - -
Name TypeDescription
time_arr - array - -
-

sampled time array

-
-
data_arr - array - -
-

sampled data array

-
-
- -
- Source code in CompNeuroPy/analysis_functions.py -
1567
-1568
-1569
-1570
-1571
-1572
-1573
-1574
-1575
-1576
-1577
-1578
-1579
-1580
-1581
-1582
-1583
-1584
-1585
-1586
-1587
-1588
-1589
-1590
-1591
-1592
-1593
-1594
-1595
-1596
-1597
-1598
-1599
def sample_data_with_timestep(time_arr, data_arr, timestep):
-    """
-    Samples a data array each timestep using interpolation
-
-    Args:
-        time_arr (array):
-            times of data_arr in ms
-        data_arr (array):
-            array with data values from which will be sampled
-        timestep (float or int):
-            timestep in ms for sampling
-
-    Returns:
-        time_arr (array):
-            sampled time array
-        data_arr (array):
-            sampled data array
-    """
-    interpolate_func = interp1d(
-        time_arr, data_arr, bounds_error=False, fill_value="extrapolate"
-    )
-    min_time = round(
-        round(time_arr[0] / timestep, 0) * timestep,
-        get_number_of_decimals(timestep),
-    )
-    max_time = round(
-        round(time_arr[-1] / timestep, 0) * timestep,
-        get_number_of_decimals(timestep),
-    )
-    new_time_arr = np.arange(min_time, max_time + timestep, timestep)
-    new_data_arr = interpolate_func(new_time_arr)
-
-    return (new_time_arr, new_data_arr)
-
-
-
- -
- - -
- - - - -

- time_data_add_nan(time_arr, data_arr, fill_time_step=None, axis=0) - -#

- - -
- -

If there are gaps in time_arr --> fill them with respective time values. -Fill the corresponding data_arr values with nan.

-

By default it is tried to fill the time array with continuously increasing times -based on the smallest time difference found there can still be discontinuities after -filling the arrays (because existing time values are not changed).

-

But one can also give a fixed fill time step.

- - - -

Parameters:

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionDefault
time_arr - 1D array - -
-

times of data_arr in ms

-
-
- required -
data_arr - nD array - -
-

the size of the specified dimension of data array must have the same length -as time_arr

-
-
- required -
fill_time_step - number, optional, default=None - -
-

if there are gaps they are filled with this time step

-
-
- None -
axis - int - -
-

which dimension of the data_arr belongs to the time_arr

-
-
- 0 -
- - - -

Returns:

- - - - - - - - - - - - - - - - - -
Name TypeDescription
time_arr - 1D array - -
-

time array with gaps filled

-
-
data_arr - nD array - -
-

data array with gaps filled

-
-
- -
- Source code in CompNeuroPy/analysis_functions.py -
1602
-1603
-1604
-1605
-1606
-1607
-1608
-1609
-1610
-1611
-1612
-1613
-1614
-1615
-1616
-1617
-1618
-1619
-1620
-1621
-1622
-1623
-1624
-1625
-1626
-1627
-1628
-1629
-1630
-1631
-1632
-1633
-1634
-1635
-1636
-1637
-1638
-1639
-1640
-1641
-1642
-1643
-1644
-1645
-1646
-1647
-1648
-1649
-1650
-1651
-1652
-1653
-1654
-1655
-1656
-1657
-1658
-1659
-1660
-1661
-1662
-1663
-1664
-1665
-1666
-1667
-1668
-1669
-1670
-1671
-1672
-1673
-1674
-1675
-1676
-1677
-1678
-1679
-1680
-1681
-1682
-1683
-1684
def time_data_add_nan(time_arr, data_arr, fill_time_step=None, axis=0):
-    """
-    If there are gaps in time_arr --> fill them with respective time values.
-    Fill the corresponding data_arr values with nan.
-
-    By default it is tried to fill the time array with continuously increasing times
-    based on the smallest time difference found there can still be discontinuities after
-    filling the arrays (because existing time values are not changed).
-
-    But one can also give a fixed fill time step.
-
-    Args:
-        time_arr (1D array):
-            times of data_arr in ms
-        data_arr (nD array):
-            the size of the specified dimension of data array must have the same length
-            as time_arr
-        fill_time_step (number, optional, default=None):
-            if there are gaps they are filled with this time step
-        axis (int):
-            which dimension of the data_arr belongs to the time_arr
-
-    Returns:
-        time_arr (1D array):
-            time array with gaps filled
-        data_arr (nD array):
-            data array with gaps filled
-    """
-    time_arr = time_arr.astype(float)
-    data_arr = data_arr.astype(float)
-    data_arr_shape = data_arr.shape
-
-    if data_arr_shape[axis] != time_arr.size:
-        print(
-            "ERROR time_data_add_nan: time_arr must have same length as specified axis (default=0) of data_arr!"
-        )
-        quit()
-
-    ### find gaps
-    time_diff_arr = np.round(np.diff(time_arr), 6)
-    if isinstance(fill_time_step, type(None)):
-        time_diff_min = time_diff_arr.min()
-    else:
-        time_diff_min = fill_time_step
-    gaps_arr = time_diff_arr > time_diff_min
-
-    ### split arrays at gaps
-    time_arr_split = np.split(
-        time_arr, indices_or_sections=np.where(gaps_arr)[0] + 1, axis=0
-    )
-    data_arr_split = np.split(
-        data_arr, indices_or_sections=np.where(gaps_arr)[0] + 1, axis=axis
-    )
-
-    ### fill gaps between splits
-    data_arr_append_shape = list(data_arr_shape)
-    for split_arr_idx in range(len(time_arr_split) - 1):
-        ### get gaps boundaries
-        current_end = time_arr_split[split_arr_idx][-1]
-        next_start = time_arr_split[split_arr_idx + 1][0]
-        ### create gap filling arrays
-        time_arr_append = np.arange(
-            current_end + time_diff_min, next_start, time_diff_min
-        )
-        data_arr_append_shape[axis] = time_arr_append.size
-        data_arr_append = np.zeros(tuple(data_arr_append_shape)) * np.nan
-        ### append gap filling arrays to splitted arrays
-        time_arr_split[split_arr_idx] = np.append(
-            arr=time_arr_split[split_arr_idx],
-            values=time_arr_append,
-            axis=0,
-        )
-        data_arr_split[split_arr_idx] = np.append(
-            arr=data_arr_split[split_arr_idx],
-            values=data_arr_append,
-            axis=axis,
-        )
-
-    ### combine splitted arrays again
-    time_arr = np.concatenate(time_arr_split, axis=0)
-    data_arr = np.concatenate(data_arr_split, axis=axis)
-
-    return (time_arr, data_arr)
-
-
-
- -
- - -
- - - - -

- rmse(a, b) - -#

- - -
- -

Calculates the root-mean-square error between two arrays.

- - - -

Parameters:

- - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionDefault
a - array - -
-

first array

-
-
- required -
b - array - -
-

second array

-
-
- required -
- - - -

Returns:

- - - - - - - - - - - - - -
Name TypeDescription
rmse - float - -
-

root-mean-square error

-
-
- -
- Source code in CompNeuroPy/analysis_functions.py -
1687
-1688
-1689
-1690
-1691
-1692
-1693
-1694
-1695
-1696
-1697
-1698
-1699
-1700
-1701
-1702
def rmse(a, b):
-    """
-    Calculates the root-mean-square error between two arrays.
-
-    Args:
-        a (array):
-            first array
-        b (array):
-            second array
-
-    Returns:
-        rmse (float):
-            root-mean-square error
-    """
-
-    return np.sqrt(np.mean((a - b) ** 2))
-
-
-
- -
- - -
- - - - -

- rsse(a, b) - -#

- - -
- -

Calculates the root-sum-square error between two arrays.

- - - -

Parameters:

- - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionDefault
a - array - -
-

first array

-
-
- required -
b - array - -
-

second array

-
-
- required -
- - - -

Returns:

- - - - - - - - - - - - - -
Name TypeDescription
rsse - float - -
-

root-sum-square error

-
-
- -
- Source code in CompNeuroPy/analysis_functions.py -
1705
-1706
-1707
-1708
-1709
-1710
-1711
-1712
-1713
-1714
-1715
-1716
-1717
-1718
-1719
-1720
def rsse(a, b):
-    """
-    Calculates the root-sum-square error between two arrays.
-
-    Args:
-        a (array):
-            first array
-        b (array):
-            second array
-
-    Returns:
-        rsse (float):
-            root-sum-square error
-    """
-
-    return np.sqrt(np.sum((a - b) ** 2))
-
-
-
- -
- - -
- - - - -

- get_minimum(input_data) - -#

- - -
- -

Returns the minimum of the input data.

- - - -

Parameters:

- - - - - - - - - - - - - - - - - -
NameTypeDescriptionDefault
input_data - list, np.ndarray, tuple, or float - -
-

The input data from which the minimum is to be obtained.

-
-
- required -
- - - -

Returns:

- - - - - - - - - - - - - -
Name TypeDescription
minimum - float - -
-

The minimum of the input data.

-
-
- -
- Source code in CompNeuroPy/analysis_functions.py -
1723
-1724
-1725
-1726
-1727
-1728
-1729
-1730
-1731
-1732
-1733
-1734
-1735
-1736
-1737
-1738
-1739
-1740
-1741
-1742
-1743
-1744
-1745
-1746
-1747
def get_minimum(input_data: list | np.ndarray | tuple | float):
-    """
-    Returns the minimum of the input data.
-
-    Args:
-        input_data (list, np.ndarray, tuple, or float):
-            The input data from which the minimum is to be obtained.
-
-    Returns:
-        minimum (float):
-            The minimum of the input data.
-    """
-    if isinstance(input_data, (list, np.ndarray, tuple)):
-        # If the input is a list, numpy array, or tuple, we handle them as follows
-        flattened_list = [
-            item
-            for sublist in input_data
-            for item in (
-                sublist if isinstance(sublist, (list, np.ndarray, tuple)) else [sublist]
-            )
-        ]
-        return float(min(flattened_list))
-    else:
-        # If the input is a single value, return it as the minimum
-        return float(input_data)
-
-
-
- -
- - -
- - - - -

- get_maximum(input_data) - -#

- - -
- -

Returns the maximum of the input data.

- - - -

Parameters:

- - - - - - - - - - - - - - - - - -
NameTypeDescriptionDefault
input_data - list, np.ndarray, tuple, or float - -
-

The input data from which the maximum is to be obtained.

-
-
- required -
- - - -

Returns:

- - - - - - - - - - - - - -
Name TypeDescription
maximum - float - -
-

The maximum of the input data.

-
-
- -
- Source code in CompNeuroPy/analysis_functions.py -
1750
-1751
-1752
-1753
-1754
-1755
-1756
-1757
-1758
-1759
-1760
-1761
-1762
-1763
-1764
-1765
-1766
-1767
-1768
-1769
-1770
-1771
-1772
-1773
-1774
-1775
def get_maximum(input_data: list | np.ndarray | tuple | float):
-    """
-    Returns the maximum of the input data.
-
-    Args:
-        input_data (list, np.ndarray, tuple, or float):
-            The input data from which the maximum is to be obtained.
-
-    Returns:
-        maximum (float):
-            The maximum of the input data.
-    """
-
-    if isinstance(input_data, (list, np.ndarray, tuple)):
-        # If the input is a list, numpy array, or tuple, we handle them as follows
-        flattened_list = [
-            item
-            for sublist in input_data
-            for item in (
-                sublist if isinstance(sublist, (list, np.ndarray, tuple)) else [sublist]
-            )
-        ]
-        return float(max(flattened_list))
-    else:
-        # If the input is a single value, return it as the maximum
-        return float(input_data)
-
-
-
- -
- - - -
- -
- -
- - - - - - - - - - - - - -
-
- - - -
- -
- - - -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/site/additional/extra_functions/index.html b/site/additional/extra_functions/index.html deleted file mode 100644 index 84e9dc4..0000000 --- a/site/additional/extra_functions/index.html +++ /dev/null @@ -1,4151 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - Extra Functions - CompNeuroPy - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - - - - - -
-
- - - - - - - -

Extra Functions

- -
- - - - -
- - - -
- - - - - - - - -
- - - - -

- Cmap - - -#

- - -
- - -

Class to create a colormap with a given name and range. The colormap can be called -with a value between 0 and 1 to get the corresponding rgb value.

- -
- Source code in CompNeuroPy/extra_functions.py -
127
-128
-129
-130
-131
-132
-133
-134
-135
-136
-137
-138
-139
-140
-141
-142
-143
-144
-145
-146
-147
-148
-149
-150
-151
-152
-153
-154
-155
-156
-157
-158
-159
-160
-161
-162
-163
-164
-165
-166
-167
-168
-169
-170
-171
-172
-173
-174
-175
-176
-177
-178
-179
-180
-181
class Cmap:
-    """
-    Class to create a colormap with a given name and range. The colormap can be called
-    with a value between 0 and 1 to get the corresponding rgb value.
-    """
-
-    def __init__(self, cmap_name, vmin, vmax):
-        """
-        Args:
-            cmap_name (str):
-                Name of the colormap
-            vmin (float):
-                Lower limit of the colormap
-            vmax (float):
-                Upper limit of the colormap
-        """
-        self.cmap_name = cmap_name
-        self.cmap = plt.get_cmap(cmap_name)
-        self.norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
-        self.scalarMap = cm.ScalarMappable(norm=self.norm, cmap=self.cmap)
-
-    def __call__(self, x, alpha=1):
-        """
-        Returns the rgba value of the colormap at the given value.
-
-        Args:
-            x (float):
-                Value between 0 and 1
-            alpha (float):
-                Alpha value of the rgba value
-
-        Returns:
-            rgba (tuple):
-                RGBA value of the colormap at the given value
-        """
-        vals = self.get_rgb(x)
-        if isinstance(vals, tuple):
-            vals = vals[:3] + (alpha,)
-        else:
-            vals[:, -1] = alpha
-        return vals
-
-    def get_rgb(self, val):
-        """
-        Returns the rgb value of the colormap at the given value.
-
-        Args:
-            val (float):
-                Value between 0 and 1
-
-        Returns:
-            rgb (tuple):
-                RGB value of the colormap at the given value
-        """
-        return self.scalarMap.to_rgba(val)
-
-
- - - -
- - - - - - - - - - -
- - - - -

- __init__(cmap_name, vmin, vmax) - -#

- - -
- - - - -

Parameters:

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionDefault
cmap_name - str - -
-

Name of the colormap

-
-
- required -
vmin - float - -
-

Lower limit of the colormap

-
-
- required -
vmax - float - -
-

Upper limit of the colormap

-
-
- required -
- -
- Source code in CompNeuroPy/extra_functions.py -
133
-134
-135
-136
-137
-138
-139
-140
-141
-142
-143
-144
-145
-146
def __init__(self, cmap_name, vmin, vmax):
-    """
-    Args:
-        cmap_name (str):
-            Name of the colormap
-        vmin (float):
-            Lower limit of the colormap
-        vmax (float):
-            Upper limit of the colormap
-    """
-    self.cmap_name = cmap_name
-    self.cmap = plt.get_cmap(cmap_name)
-    self.norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
-    self.scalarMap = cm.ScalarMappable(norm=self.norm, cmap=self.cmap)
-
-
-
- -
- - -
- - - - -

- __call__(x, alpha=1) - -#

- - -
- -

Returns the rgba value of the colormap at the given value.

- - - -

Parameters:

- - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionDefault
x - float - -
-

Value between 0 and 1

-
-
- required -
alpha - float - -
-

Alpha value of the rgba value

-
-
- 1 -
- - - -

Returns:

- - - - - - - - - - - - - -
Name TypeDescription
rgba - tuple - -
-

RGBA value of the colormap at the given value

-
-
- -
- Source code in CompNeuroPy/extra_functions.py -
148
-149
-150
-151
-152
-153
-154
-155
-156
-157
-158
-159
-160
-161
-162
-163
-164
-165
-166
-167
def __call__(self, x, alpha=1):
-    """
-    Returns the rgba value of the colormap at the given value.
-
-    Args:
-        x (float):
-            Value between 0 and 1
-        alpha (float):
-            Alpha value of the rgba value
-
-    Returns:
-        rgba (tuple):
-            RGBA value of the colormap at the given value
-    """
-    vals = self.get_rgb(x)
-    if isinstance(vals, tuple):
-        vals = vals[:3] + (alpha,)
-    else:
-        vals[:, -1] = alpha
-    return vals
-
-
-
- -
- - -
- - - - -

- get_rgb(val) - -#

- - -
- -

Returns the rgb value of the colormap at the given value.

- - - -

Parameters:

- - - - - - - - - - - - - - - - - -
NameTypeDescriptionDefault
val - float - -
-

Value between 0 and 1

-
-
- required -
- - - -

Returns:

- - - - - - - - - - - - - -
Name TypeDescription
rgb - tuple - -
-

RGB value of the colormap at the given value

-
-
- -
- Source code in CompNeuroPy/extra_functions.py -
169
-170
-171
-172
-173
-174
-175
-176
-177
-178
-179
-180
-181
def get_rgb(self, val):
-    """
-    Returns the rgb value of the colormap at the given value.
-
-    Args:
-        val (float):
-            Value between 0 and 1
-
-    Returns:
-        rgb (tuple):
-            RGB value of the colormap at the given value
-    """
-    return self.scalarMap.to_rgba(val)
-
-
-
- -
- - - -
- -
- -
- -
- - - - -

- DecisionTree - - -#

- - -
- - -

Class to create a decision tree.

- -
- Source code in CompNeuroPy/extra_functions.py -
350
-351
-352
-353
-354
-355
-356
-357
-358
-359
-360
-361
-362
-363
-364
-365
-366
-367
-368
-369
-370
-371
-372
-373
-374
-375
-376
-377
-378
-379
-380
-381
-382
-383
-384
-385
-386
-387
-388
-389
-390
-391
-392
-393
-394
-395
-396
-397
-398
-399
-400
-401
-402
-403
-404
-405
-406
-407
-408
-409
-410
-411
-412
-413
-414
-415
-416
-417
-418
-419
-420
-421
-422
-423
-424
-425
-426
-427
-428
-429
-430
-431
-432
-433
-434
-435
-436
-437
-438
-439
-440
-441
-442
-443
-444
-445
class DecisionTree:
-    """
-    Class to create a decision tree.
-    """
-
-    def __init__(self):
-        """
-        Create a new empty decision tree.
-        """
-        ### node list is a list of lists
-        ### first idx = level of tree
-        ### second idx = all nodes in the level
-        self.node_list = [[]]
-
-    def node(self, parent=None, prob=0, name=None):
-        """
-        Create a new node in the decision tree.
-
-        Args:
-            parent (node object):
-                Parent node of the new node
-            prob (float):
-                Probability of the new node
-            name (str):
-                Name of the new node
-
-        Returns:
-            new_node (node object):
-                The new node
-        """
-
-        ### create new node
-        new_node = DecisionTreeNode(tree=self, parent=parent, prob=prob, name=name)
-        ### add it to node_list
-        if len(self.node_list) == new_node.level:
-            self.node_list.append([])
-        self.node_list[new_node.level].append(new_node)
-        ### return the node object
-        return new_node
-
-    def get_path_prod(self, name):
-        """
-        Get the path and path product of a node with a given name.
-
-        Args:
-            name (str):
-                Name of the node
-
-        Returns:
-            path (str):
-                Path to the node
-            path_prod (float):
-                Path product of the node
-        """
-
-        ### search for all nodes with name
-        ### start from behind
-        search_node_list = []
-        path_list = []
-        path_prod_list = []
-        for level in range(len(self.node_list) - 1, -1, -1):
-            for node in self.node_list[level]:
-                if node.name == name:
-                    search_node_list.append(node)
-        ### get the paths and path products for the found nodes
-        for node in search_node_list:
-            path, path_prod = self._get_path_prod_rec(node)
-            path_list.append(path)
-            path_prod_list.append(path_prod)
-        ### return the paths and path products
-        return [
-            [path_list[idx], path_prod_list[idx]]
-            for idx in range(len(search_node_list))
-        ]
-
-    def _get_path_prod_rec(self, node):
-        """
-        Recursive function to get the path and path product of a node.
-
-        Args:
-            node (node object):
-                Node to get the path and path product of
-
-        Returns:
-            path_str (str):
-                Path to the node
-            prob (float):
-                Path product of the node
-        """
-        node: DecisionTreeNode = node
-
-        if node.parent == None:
-            return ["/" + node.name, node.prob]
-        else:
-            path_str, prob = self._get_path_prod_rec(node.parent)
-            return [path_str + "/" + node.name, prob * node.prob]
-
-
- - - -
- - - - - - - - - - -
- - - - -

- __init__() - -#

- - -
- -

Create a new empty decision tree.

- -
- Source code in CompNeuroPy/extra_functions.py -
355
-356
-357
-358
-359
-360
-361
-362
def __init__(self):
-    """
-    Create a new empty decision tree.
-    """
-    ### node list is a list of lists
-    ### first idx = level of tree
-    ### second idx = all nodes in the level
-    self.node_list = [[]]
-
-
-
- -
- - -
- - - - -

- node(parent=None, prob=0, name=None) - -#

- - -
- -

Create a new node in the decision tree.

- - - -

Parameters:

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionDefault
parent - node object - -
-

Parent node of the new node

-
-
- None -
prob - float - -
-

Probability of the new node

-
-
- 0 -
name - str - -
-

Name of the new node

-
-
- None -
- - - -

Returns:

- - - - - - - - - - - - - -
Name TypeDescription
new_node - node object - -
-

The new node

-
-
- -
- Source code in CompNeuroPy/extra_functions.py -
364
-365
-366
-367
-368
-369
-370
-371
-372
-373
-374
-375
-376
-377
-378
-379
-380
-381
-382
-383
-384
-385
-386
-387
-388
def node(self, parent=None, prob=0, name=None):
-    """
-    Create a new node in the decision tree.
-
-    Args:
-        parent (node object):
-            Parent node of the new node
-        prob (float):
-            Probability of the new node
-        name (str):
-            Name of the new node
-
-    Returns:
-        new_node (node object):
-            The new node
-    """
-
-    ### create new node
-    new_node = DecisionTreeNode(tree=self, parent=parent, prob=prob, name=name)
-    ### add it to node_list
-    if len(self.node_list) == new_node.level:
-        self.node_list.append([])
-    self.node_list[new_node.level].append(new_node)
-    ### return the node object
-    return new_node
-
-
-
- -
- - -
- - - - -

- get_path_prod(name) - -#

- - -
- -

Get the path and path product of a node with a given name.

- - - -

Parameters:

- - - - - - - - - - - - - - - - - -
NameTypeDescriptionDefault
name - str - -
-

Name of the node

-
-
- required -
- - - -

Returns:

- - - - - - - - - - - - - - - - - -
Name TypeDescription
path - str - -
-

Path to the node

-
-
path_prod - float - -
-

Path product of the node

-
-
- -
- Source code in CompNeuroPy/extra_functions.py -
390
-391
-392
-393
-394
-395
-396
-397
-398
-399
-400
-401
-402
-403
-404
-405
-406
-407
-408
-409
-410
-411
-412
-413
-414
-415
-416
-417
-418
-419
-420
-421
-422
-423
def get_path_prod(self, name):
-    """
-    Get the path and path product of a node with a given name.
-
-    Args:
-        name (str):
-            Name of the node
-
-    Returns:
-        path (str):
-            Path to the node
-        path_prod (float):
-            Path product of the node
-    """
-
-    ### search for all nodes with name
-    ### start from behind
-    search_node_list = []
-    path_list = []
-    path_prod_list = []
-    for level in range(len(self.node_list) - 1, -1, -1):
-        for node in self.node_list[level]:
-            if node.name == name:
-                search_node_list.append(node)
-    ### get the paths and path products for the found nodes
-    for node in search_node_list:
-        path, path_prod = self._get_path_prod_rec(node)
-        path_list.append(path)
-        path_prod_list.append(path_prod)
-    ### return the paths and path products
-    return [
-        [path_list[idx], path_prod_list[idx]]
-        for idx in range(len(search_node_list))
-    ]
-
-
-
- -
- - - -
- -
- -
- -
- - - - -

- DecisionTreeNode - - -#

- - -
- - -

Class to create a node in a decision tree.

- -
- Source code in CompNeuroPy/extra_functions.py -
452
-453
-454
-455
-456
-457
-458
-459
-460
-461
-462
-463
-464
-465
-466
-467
-468
-469
-470
-471
-472
-473
-474
-475
-476
-477
-478
-479
-480
-481
-482
-483
-484
-485
-486
-487
-488
-489
-490
-491
-492
-493
-494
-495
-496
-497
-498
-499
-500
-501
-502
-503
-504
-505
-506
-507
-508
-509
-510
-511
-512
class DecisionTreeNode:
-    """
-    Class to create a node in a decision tree.
-    """
-
-    id_counter = 0
-
-    def __init__(self, tree: DecisionTree, parent=None, prob=0, name=""):
-        """
-        Create a new node in a decision tree.
-
-        Args:
-            tree (DecisionTree object):
-                Decision tree the node belongs to
-            parent (node object):
-                Parent node of the new node
-            prob (float):
-                Probability of the new node
-            name (str):
-                Name of the new node
-        """
-        self.tree = tree
-        parent: DecisionTreeNode = parent
-        self.parent = parent
-        self.prob = prob
-        self.name = name
-        self.id = int(self.id_counter)
-        self.id_counter += 1
-        if parent != None:
-            self.level = int(parent.level + 1)
-        else:
-            self.level = int(0)
-
-    def add(self, name, prob):
-        """
-        Add a child node to the node.
-
-        Args:
-            name (str):
-                Name of the new node
-            prob (float):
-                Probability of the new node
-
-        Returns:
-            new_node (node object):
-                The new node
-        """
-
-        return self.tree.node(parent=self, prob=prob, name=name)
-
-    def get_path_prod(self):
-        """
-        Get the path and path product of the node.
-
-        Returns:
-            path (str):
-                Path to the node
-            path_prod (float):
-                Path product of the node
-        """
-        return self.tree._get_path_prod_rec(self)
-
-
- - - -
- - - - - - - - - - -
- - - - -

- __init__(tree, parent=None, prob=0, name='') - -#

- - -
- -

Create a new node in a decision tree.

- - - -

Parameters:

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionDefault
tree - DecisionTree object - -
-

Decision tree the node belongs to

-
-
- required -
parent - node object - -
-

Parent node of the new node

-
-
- None -
prob - float - -
-

Probability of the new node

-
-
- 0 -
name - str - -
-

Name of the new node

-
-
- '' -
- -
- Source code in CompNeuroPy/extra_functions.py -
459
-460
-461
-462
-463
-464
-465
-466
-467
-468
-469
-470
-471
-472
-473
-474
-475
-476
-477
-478
-479
-480
-481
-482
-483
def __init__(self, tree: DecisionTree, parent=None, prob=0, name=""):
-    """
-    Create a new node in a decision tree.
-
-    Args:
-        tree (DecisionTree object):
-            Decision tree the node belongs to
-        parent (node object):
-            Parent node of the new node
-        prob (float):
-            Probability of the new node
-        name (str):
-            Name of the new node
-    """
-    self.tree = tree
-    parent: DecisionTreeNode = parent
-    self.parent = parent
-    self.prob = prob
-    self.name = name
-    self.id = int(self.id_counter)
-    self.id_counter += 1
-    if parent != None:
-        self.level = int(parent.level + 1)
-    else:
-        self.level = int(0)
-
-
-
- -
- - -
- - - - -

- add(name, prob) - -#

- - -
- -

Add a child node to the node.

- - - -

Parameters:

- - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionDefault
name - str - -
-

Name of the new node

-
-
- required -
prob - float - -
-

Probability of the new node

-
-
- required -
- - - -

Returns:

- - - - - - - - - - - - - -
Name TypeDescription
new_node - node object - -
-

The new node

-
-
- -
- Source code in CompNeuroPy/extra_functions.py -
485
-486
-487
-488
-489
-490
-491
-492
-493
-494
-495
-496
-497
-498
-499
-500
def add(self, name, prob):
-    """
-    Add a child node to the node.
-
-    Args:
-        name (str):
-            Name of the new node
-        prob (float):
-            Probability of the new node
-
-    Returns:
-        new_node (node object):
-            The new node
-    """
-
-    return self.tree.node(parent=self, prob=prob, name=name)
-
-
-
- -
- - -
- - - - -

- get_path_prod() - -#

- - -
- -

Get the path and path product of the node.

- - - -

Returns:

- - - - - - - - - - - - - - - - - -
Name TypeDescription
path - str - -
-

Path to the node

-
-
path_prod - float - -
-

Path product of the node

-
-
- -
- Source code in CompNeuroPy/extra_functions.py -
502
-503
-504
-505
-506
-507
-508
-509
-510
-511
-512
def get_path_prod(self):
-    """
-    Get the path and path product of the node.
-
-    Returns:
-        path (str):
-            Path to the node
-        path_prod (float):
-            Path product of the node
-    """
-    return self.tree._get_path_prod_rec(self)
-
-
-
- -
- - - -
- -
- -
- - - -
- - - - -

- print_df(df) - -#

- - -
- -

Prints the complete dataframe df

- - - -

Parameters:

- - - - - - - - - - - - - - - - - -
NameTypeDescriptionDefault
df - pandas dataframe - -
-

Dataframe to be printed

-
-
- required -
- -
- Source code in CompNeuroPy/extra_functions.py -
14
-15
-16
-17
-18
-19
-20
-21
-22
-23
-24
-25
def print_df(df):
-    """
-    Prints the complete dataframe df
-
-    Args:
-        df (pandas dataframe):
-            Dataframe to be printed
-    """
-    with pd.option_context(
-        "display.max_rows", None
-    ):  # more options can be specified also
-        print(df)
-
-
-
- -
- - -
- - - - -

- flatten_list(lst) - -#

- - -
- -

Retuns flattened list

- - - -

Parameters:

- - - - - - - - - - - - - - - - - -
NameTypeDescriptionDefault
lst - list of lists or mixed - -
-

values and lists): -List to be flattened

-
-
- required -
- - - -

Returns:

- - - - - - - - - - - - - -
Name TypeDescription
new_list - list - -
-

Flattened list

-
-
- -
- Source code in CompNeuroPy/extra_functions.py -
28
-29
-30
-31
-32
-33
-34
-35
-36
-37
-38
-39
-40
-41
-42
-43
-44
-45
-46
-47
-48
-49
-50
-51
-52
-53
-54
-55
-56
def flatten_list(lst):
-    """
-    Retuns flattened list
-
-    Args:
-        lst (list of lists or mixed: values and lists):
-            List to be flattened
-
-    Returns:
-        new_list (list):
-            Flattened list
-    """
-
-    ### if lists in lst --> upack them and retunr flatten_list of new list
-    new_lst = []
-    list_in_lst = False
-    for val in lst:
-        if isinstance(val, list):
-            list_in_lst = True
-            for sub_val in val:
-                new_lst.append(sub_val)
-        else:
-            new_lst.append(val)
-
-    if list_in_lst:
-        return flatten_list(new_lst)
-    ### else return lst
-    else:
-        return lst
-
-
-
- -
- - -
- - - - -

- remove_key(d, key) - -#

- - -
- -

Removes an element from a dict, returns the new dict

- - - -

Parameters:

- - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionDefault
d - dict - -
-

Dict to be modified

-
-
- required -
key - str - -
-

Key to be removed

-
-
- required -
- - - -

Returns:

- - - - - - - - - - - - - -
Name TypeDescription
r - dict - -
-

Modified dict

-
-
- -
- Source code in CompNeuroPy/extra_functions.py -
59
-60
-61
-62
-63
-64
-65
-66
-67
-68
-69
-70
-71
-72
-73
-74
-75
def remove_key(d, key):
-    """
-    Removes an element from a dict, returns the new dict
-
-    Args:
-        d (dict):
-            Dict to be modified
-        key (str):
-            Key to be removed
-
-    Returns:
-        r (dict):
-            Modified dict
-    """
-    r = dict(d)
-    del r[key]
-    return r
-
-
-
- -
- - -
- - - - -

- suppress_stdout() - -#

- - -
- -

Suppresses the print output of a function

- - - -

Examples:

-
with suppress_stdout():
-    print("this will not be printed")
-
- -
- Source code in CompNeuroPy/extra_functions.py -
78
-79
-80
-81
-82
-83
-84
-85
-86
-87
-88
-89
-90
-91
-92
-93
-94
-95
@contextmanager
-def suppress_stdout():
-    """
-    Suppresses the print output of a function
-
-    Examples:
-        ```python
-        with suppress_stdout():
-            print("this will not be printed")
-        ```
-    """
-    with open(os.devnull, "w") as devnull:
-        old_stdout = sys.stdout
-        sys.stdout = devnull
-        try:
-            yield
-        finally:
-            sys.stdout = old_stdout
-
-
-
- -
- - -
- - - - -

- sci(nr) - -#

- - -
- -

Rounds a number to a single decimal. -If number is smaller than 0 it is converted to scientific notation with 1 decimal.

- - - -

Parameters:

- - - - - - - - - - - - - - - - - -
NameTypeDescriptionDefault
nr - float or int - -
-

Number to be converted

-
-
- required -
- - - -

Returns:

- - - - - - - - - - - - - -
Name TypeDescription
str - str - -
-

String of the number in scientific notation

-
-
- - - -

Examples:

-
>>> sci(0.0001)
-'1.0e-4'
->>> sci(1.77)
-'1.8'
->>> sci(1.77e-5)
-'1.8e-5'
->>> sci(177.22)
-'177.2'
-
- -
- Source code in CompNeuroPy/extra_functions.py -
 98
- 99
-100
-101
-102
-103
-104
-105
-106
-107
-108
-109
-110
-111
-112
-113
-114
-115
-116
-117
-118
-119
-120
-121
-122
-123
-124
def sci(nr):
-    """
-    Rounds a number to a single decimal.
-    If number is smaller than 0 it is converted to scientific notation with 1 decimal.
-
-    Args:
-        nr (float or int):
-            Number to be converted
-
-    Returns:
-        str (str):
-            String of the number in scientific notation
-
-    Examples:
-        >>> sci(0.0001)
-        '1.0e-4'
-        >>> sci(1.77)
-        '1.8'
-        >>> sci(1.77e-5)
-        '1.8e-5'
-        >>> sci(177.22)
-        '177.2'
-    """
-    if af.get_number_of_zero_decimals(nr) == 0:
-        return str(round(nr, 1))
-    else:
-        return f"{nr*10**af.get_number_of_zero_decimals(nr):.1f}e-{af.get_number_of_zero_decimals(nr)}"
-
-
-
- -
- - -
- - - - -

- create_cm(colors, name='my_cmap', N=256, gamma=1.0, vmin=0, vmax=1) - -#

- - -
- -

Create a LinearSegmentedColormap from a list of colors.

- - - -

Parameters:

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionDefault
colors - array-like of colors or array-like of (value, color - -
-

If only colors are given, they are equidistantly mapped from the -range :math:[0, 1]; i.e. 0 maps to colors[0] and 1 maps to -colors[-1]. -If (value, color) pairs are given, the mapping is from value -to color. This can be used to divide the range unevenly.

-
-
- required -
name - str - -
-

The name of the colormap, by default 'my_cmap'.

-
-
- 'my_cmap' -
N - int - -
-

The number of rgb quantization levels, by default 256.

-
-
- 256 -
gamma - float - -
-

Gamma correction value, by default 1.0.

-
-
- 1.0 -
vmin - float - -
-

The minimum value of the colormap, by default 0.

-
-
- 0 -
vmax - float - -
-

The maximum value of the colormap, by default 1.

-
-
- 1 -
- - - -

Returns:

- - - - - - - - - - - - - -
Name TypeDescription
linear_colormap - _LinearColormapClass - -
-

The colormap object

-
-
- -
- Source code in CompNeuroPy/extra_functions.py -
203
-204
-205
-206
-207
-208
-209
-210
-211
-212
-213
-214
-215
-216
-217
-218
-219
-220
-221
-222
-223
-224
-225
-226
-227
-228
-229
-230
-231
-232
-233
-234
-235
-236
-237
-238
-239
-240
-241
-242
-243
-244
-245
-246
-247
-248
-249
-250
-251
-252
-253
-254
-255
-256
-257
-258
-259
-260
-261
-262
-263
-264
-265
-266
-267
-268
-269
-270
-271
-272
-273
-274
-275
-276
-277
-278
-279
-280
-281
-282
-283
-284
-285
-286
-287
-288
-289
-290
-291
def create_cm(colors, name="my_cmap", N=256, gamma=1.0, vmin=0, vmax=1):
-    """
-    Create a `LinearSegmentedColormap` from a list of colors.
-
-    Args:
-        colors (array-like of colors or array-like of (value, color)):
-            If only colors are given, they are equidistantly mapped from the
-            range :math:`[0, 1]`; i.e. 0 maps to ``colors[0]`` and 1 maps to
-            ``colors[-1]``.
-            If (value, color) pairs are given, the mapping is from *value*
-            to *color*. This can be used to divide the range unevenly.
-        name (str, optional):
-            The name of the colormap, by default 'my_cmap'.
-        N (int, optional):
-            The number of rgb quantization levels, by default 256.
-        gamma (float, optional):
-            Gamma correction value, by default 1.0.
-        vmin (float, optional):
-            The minimum value of the colormap, by default 0.
-        vmax (float, optional):
-            The maximum value of the colormap, by default 1.
-
-    Returns:
-        linear_colormap (_LinearColormapClass):
-            The colormap object
-    """
-    if not np.iterable(colors):
-        raise ValueError("colors must be iterable")
-
-    if (
-        isinstance(colors[0], Sized)
-        and len(colors[0]) == 2
-        and not isinstance(colors[0], str)
-    ):
-        # List of value, color pairs
-        vals, colors = zip(*colors)
-        vals = np.array(vals).astype(float)
-        colors = list(colors)
-        ### insert values for 0 and 1 if not given
-        ### they equal the colors of the borders of the given range
-        if vals.min() != 0.0:
-            colors = [colors[np.argmin(vals)]] + colors
-            vals = np.insert(vals, 0, 0.0)
-        if vals.max() != 1.0:
-            colors = colors + [colors[np.argmax(vals)]]
-            vals = np.insert(vals, len(vals), 1.0)
-    else:
-        vals = np.linspace(0, 1, len(colors))
-
-    ### sort values and colors, they have to increase
-    sort_idx = np.argsort(vals)
-    vals = vals[sort_idx]
-    colors = [colors[idx] for idx in sort_idx]
-
-    r_g_b_a = np.zeros((len(colors), 4))
-    for color_idx, color in enumerate(colors):
-        if isinstance(color, str):
-            ### color given by name
-            r_g_b_a[color_idx] = to_rgba_array(color)
-        else:
-            ### color given by rgb(maybe a) value
-            color = np.array(color).astype(float)
-            ### check color size
-            if len(color) != 3 and len(color) != 4:
-                raise ValueError(
-                    "colors must be names or consist of 3 (rgb) or 4 (rgba) numbers"
-                )
-            if color.max() > 1:
-                ### assume that max value is 255
-                color[:3] = color[:3] / 255
-            if len(color) == 4:
-                ### gamma already given
-                r_g_b_a[color_idx] = color
-            else:
-                ### add gamma
-                r_g_b_a[color_idx] = np.concatenate([color, np.array([gamma])])
-    r = r_g_b_a[:, 0]
-    g = r_g_b_a[:, 1]
-    b = r_g_b_a[:, 2]
-    a = r_g_b_a[:, 3]
-
-    cdict = {
-        "red": np.column_stack([vals, r, r]),
-        "green": np.column_stack([vals, g, g]),
-        "blue": np.column_stack([vals, b, b]),
-        "alpha": np.column_stack([vals, a, a]),
-    }
-
-    return _LinearColormapClass(name, cdict, N, gamma, vmin, vmax)
-
-
-
- -
- - -
- - - - -

- evaluate_expression_with_dict(expression, value_dict) - -#

- - -
- -

Evaluate a mathematical expression using values from a dictionary.

-

This function takes a mathematical expression as a string and a dictionary -containing variable names as keys and corresponding values as numpy arrays. -It replaces the variable names in the expression with their corresponding -values from the dictionary and evaluates the expression.

- - - -

Parameters:

- - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionDefault
expression - str - -
-

A mathematical expression to be evaluated. Variable -names in the expression should match the keys in the value_dict.

-
-
- required -
value_dict - dict - -
-

A dictionary containing variable names (strings) as -keys and corresponding numpy arrays or numbers as values.

-
-
- required -
- - - -

Returns:

- - - - - - - - - - - - - -
Name TypeDescription
result - value or array - -
-

The result of evaluating the expression using the provided values.

-
-
- - - -

Examples:

-
>>> my_dict = {"a": np.ones(10), "b": np.arange(10)}
->>> my_string = "a*2-b+10"
->>> evaluate_expression_with_dict(my_string, my_dict)
-array([12., 11., 10.,  9.,  8.,  7.,  6.,  5.,  4.,  3.])
-
- -
- Source code in CompNeuroPy/extra_functions.py -
519
-520
-521
-522
-523
-524
-525
-526
-527
-528
-529
-530
-531
-532
-533
-534
-535
-536
-537
-538
-539
-540
-541
-542
-543
-544
-545
-546
-547
-548
-549
-550
-551
-552
-553
-554
-555
-556
-557
def evaluate_expression_with_dict(expression, value_dict):
-    """
-    Evaluate a mathematical expression using values from a dictionary.
-
-    This function takes a mathematical expression as a string and a dictionary
-    containing variable names as keys and corresponding values as numpy arrays.
-    It replaces the variable names in the expression with their corresponding
-    values from the dictionary and evaluates the expression.
-
-    Args:
-        expression (str):
-            A mathematical expression to be evaluated. Variable
-            names in the expression should match the keys in the value_dict.
-        value_dict (dict):
-            A dictionary containing variable names (strings) as
-            keys and corresponding numpy arrays or numbers as values.
-
-    Returns:
-        result (value or array):
-            The result of evaluating the expression using the provided values.
-
-    Examples:
-        >>> my_dict = {"a": np.ones(10), "b": np.arange(10)}
-        >>> my_string = "a*2-b+10"
-        >>> evaluate_expression_with_dict(my_string, my_dict)
-        array([12., 11., 10.,  9.,  8.,  7.,  6.,  5.,  4.,  3.])
-    """
-    # Replace dictionary keys in the expression with their corresponding values
-    ### replace names with dict entries
-    expression = _replace_names_with_dict(
-        expression=expression, name_of_dict="value_dict", dictionary=value_dict
-    )
-
-    ### evaluate the new expression
-    try:
-        result = eval(expression)
-        return result
-    except Exception as e:
-        raise ValueError(f"Error while evaluating expression: {str(e)}")
-
-
-
- -
- - - -
- -
- -
- - - - - - - - - - - - - -
-
- - - -
- -
- - - -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/site/additional/model_functions/index.html b/site/additional/model_functions/index.html deleted file mode 100644 index 57f6809..0000000 --- a/site/additional/model_functions/index.html +++ /dev/null @@ -1,1618 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - Model Functions - CompNeuroPy - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - -
-
-
- - - -
-
-
- - - -
-
- - - - - - - -

Model Functions

- -
- - - - -
- - - -
- - - - - - - - - - -
- - - - -

- compile_in_folder(folder_name, net=None, clean=False, silent=False) - -#

- - -
- -

Creates the compilation folder in annarchy_folders/ or uses existing ones. Compiles -the current network.

- - - -

Parameters:

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionDefault
folder_name - str - -
-

Name of the folder within annarchy_folders/

-
-
- required -
net - ANNarchy network - -
-

ANNarchy network. Default: None.

-
-
- None -
clean - bool - -
-

If True, the library is recompiled entirely, else only the changes since -last compilation are compiled. Default: False.

-
-
- False -
silent - bool - -
-

Suppress output. Defaults to False.

-
-
- False -
- -
- Source code in CompNeuroPy/model_functions.py -
13
-14
-15
-16
-17
-18
-19
-20
-21
-22
-23
-24
-25
-26
-27
-28
-29
-30
-31
-32
-33
-34
-35
def compile_in_folder(folder_name, net=None, clean=False, silent=False):
-    """
-    Creates the compilation folder in annarchy_folders/ or uses existing ones. Compiles
-    the current network.
-
-    Args:
-        folder_name (str):
-            Name of the folder within annarchy_folders/
-        net (ANNarchy network, optional):
-            ANNarchy network. Default: None.
-        clean (bool, optional):
-            If True, the library is recompiled entirely, else only the changes since
-            last compilation are compiled. Default: False.
-        silent (bool, optional):
-            Suppress output. Defaults to False.
-    """
-    sf.create_dir("annarchy_folders/" + folder_name, print_info=False)
-    if isinstance(net, type(None)):
-        compile("annarchy_folders/" + folder_name, clean=clean, silent=silent)
-    else:
-        net.compile("annarchy_folders/" + folder_name, clean=clean, silent=silent)
-    if os.getcwd().split("/")[-1] == "annarchy_folders":
-        os.chdir("../")
-
-
-
- -
- - -
- - - - -

- annarchy_compiled(net_id=0) - -#

- - -
- -

Check if ANNarchy network was compiled.

- - - -

Parameters:

- - - - - - - - - - - - - - - - - -
NameTypeDescriptionDefault
net_id - int - -
-

Network ID. Default: 0.

-
-
- 0 -
- -
- Source code in CompNeuroPy/model_functions.py -
38
-39
-40
-41
-42
-43
-44
-45
-46
def annarchy_compiled(net_id=0):
-    """
-    Check if ANNarchy network was compiled.
-
-    Args:
-        net_id (int, optional):
-            Network ID. Default: 0.
-    """
-    return Global._network[net_id]["compiled"]
-
-
-
- -
- - -
- - - - -

- get_full_model() - -#

- - -
- -

Return all current population and projection names.

- - - -

Returns:

- - - - - - - - - - - - - -
Name TypeDescription
model_dict - dict - -
-

Dictionary with keys "populations" and "projections" and values lists of -population and projection names, respectively.

-
-
- -
- Source code in CompNeuroPy/model_functions.py -
49
-50
-51
-52
-53
-54
-55
-56
-57
-58
-59
-60
-61
def get_full_model():
-    """
-    Return all current population and projection names.
-
-    Returns:
-        model_dict (dict):
-            Dictionary with keys "populations" and "projections" and values lists of
-            population and projection names, respectively.
-    """
-    return {
-        "populations": [pop.name for pop in populations()],
-        "projections": [proj.name for proj in projections()],
-    }
-
-
-
- -
- - -
- - - - -

- cnp_clear(functions=True, neurons=True, synapses=True, constants=True) - -#

- - -
- -

Like clear with ANNarchy, but CompNeuroModel objects are also cleared.

- - - -

Parameters:

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionDefault
functions - bool - -
-

If True, all functions are cleared. Default: True.

-
-
- True -
neurons - bool - -
-

If True, all neurons are cleared. Default: True.

-
-
- True -
synapses - bool - -
-

If True, all synapses are cleared. Default: True.

-
-
- True -
constants - bool - -
-

If True, all constants are cleared. Default: True.

-
-
- True -
- -
- Source code in CompNeuroPy/model_functions.py -
64
-65
-66
-67
-68
-69
-70
-71
-72
-73
-74
-75
-76
-77
-78
-79
-80
-81
-82
def cnp_clear(functions=True, neurons=True, synapses=True, constants=True):
-    """
-    Like clear with ANNarchy, but CompNeuroModel objects are also cleared.
-
-    Args:
-        functions (bool, optional):
-            If True, all functions are cleared. Default: True.
-        neurons (bool, optional):
-            If True, all neurons are cleared. Default: True.
-        synapses (bool, optional):
-            If True, all synapses are cleared. Default: True.
-        constants (bool, optional):
-            If True, all constants are cleared. Default: True.
-    """
-    clear(functions=functions, neurons=neurons, synapses=synapses, constants=constants)
-    for model_name in CompNeuroModel._initialized_models.keys():
-        CompNeuroModel._initialized_models[model_name] = False
-    for model_name in CompNeuroModel._compiled_models.keys():
-        CompNeuroModel._compiled_models[model_name] = False
-
-
-
- -
- - - -
- -
- -
- - - - - - - - - - - - - -
-
- - - -
- -
- - - -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/site/additional/simulation_functions/index.html b/site/additional/simulation_functions/index.html deleted file mode 100644 index 98abd9d..0000000 --- a/site/additional/simulation_functions/index.html +++ /dev/null @@ -1,2008 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - Simulation Functions - CompNeuroPy - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - -
-
-
- - - -
-
-
- - - -
-
- - - - - - - -

Simulation Functions

- -
- - - - -
- - - -
- - - - - - - - - - -
- - - - -

- current_step(pop, t1=500, t2=500, a1=0, a2=100) - -#

- - -
- -

Stimulates a given population in two periods with two input currents.

- - - -

Parameters:

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionDefault
pop - str - -
-

population name of population, which should be stimulated with input current -neuron model of population has to contain "I_app" as input current

-
-
- required -
t1 - int - -
-

time in ms before current step

-
-
- 500 -
t2 - int - -
-

time in ms after current step

-
-
- 500 -
a1 - int - -
-

current amplitude before current step

-
-
- 0 -
a2 - int - -
-

current amplitude after current step

-
-
- 100 -
- - - -

Returns:

- - - - - - - - - - - - - -
Name TypeDescription
return_dict - dict - -
-

dictionary containing:

-
    -
  • duration (int): duration of the simulation
  • -
-
-
- -
- Source code in CompNeuroPy/simulation_functions.py -
 4
- 5
- 6
- 7
- 8
- 9
-10
-11
-12
-13
-14
-15
-16
-17
-18
-19
-20
-21
-22
-23
-24
-25
-26
-27
-28
-29
-30
-31
-32
-33
-34
-35
-36
-37
-38
-39
-40
-41
-42
-43
def current_step(pop, t1=500, t2=500, a1=0, a2=100):
-    """
-    Stimulates a given population in two periods with two input currents.
-
-    Args:
-        pop (str):
-            population name of population, which should be stimulated with input current
-            neuron model of population has to contain "I_app" as input current
-        t1 (int):
-            time in ms before current step
-        t2 (int):
-            time in ms after current step
-        a1 (int):
-            current amplitude before current step
-        a2 (int):
-            current amplitude after current step
-
-    Returns:
-        return_dict (dict):
-            dictionary containing:
-
-            - duration (int): duration of the simulation
-    """
-
-    ### save prev input current
-    I_prev = get_population(pop).I_app
-
-    ### first/pre current step simulation
-    get_population(pop).I_app = a1
-    simulate(t1)
-
-    ### second/post current step simulation
-    get_population(pop).I_app = a2
-    simulate(t2)
-
-    ### reset input current to previous value
-    get_population(pop).I_app = I_prev
-
-    ### return some additional information which could be usefull
-    return {"duration": t1 + t2}
-
-
-
- -
- - -
- - - - -

- current_stim(pop, t=500, a=100) - -#

- - -
- -

Stimulates a given population during specified period 't' with input current with -amplitude 'a', after this stimulation the current is reset to initial value -(before stimulation).

- - - -

Parameters:

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionDefault
pop - str - -
-

population name of population, which should be stimulated with input current -neuron model of population has to contain "I_app" as input current

-
-
- required -
t - int - -
-

duration in ms

-
-
- 500 -
a - int - -
-

current amplitude

-
-
- 100 -
- -
- Source code in CompNeuroPy/simulation_functions.py -
46
-47
-48
-49
-50
-51
-52
-53
-54
-55
-56
-57
-58
-59
-60
-61
-62
def current_stim(pop, t=500, a=100):
-    """
-    Stimulates a given population during specified period 't' with input current with
-    amplitude 'a', after this stimulation the current is reset to initial value
-    (before stimulation).
-
-    Args:
-        pop (str):
-            population name of population, which should be stimulated with input current
-            neuron model of population has to contain "I_app" as input current
-        t (int):
-            duration in ms
-        a (int):
-            current amplitude
-    """
-
-    return current_step(pop, t1=t, t2=0, a1=a, a2=0)
-
-
-
- -
- - -
- - - - -

- current_ramp(pop, a0, a1, dur, n) - -#

- - -
- -

Conducts multiple current stimulations with constantly changing current inputs. -After this current_ramp stimulation the current amplitude is reset to the initial -value (before current ramp).

- - - -

Parameters:

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionDefault
pop - str - -
-

population name of population, which should be stimulated with input current -neuron model of population has to contain "I_app" as input current

-
-
- required -
a0 - int - -
-

initial current amplitude (of first stimulation)

-
-
- required -
a1 - int - -
-

final current amplitude (of last stimulation)

-
-
- required -
dur - int - -
-

duration of the complete current ramp (all stimulations)

-
-
- required -
n - int - -
-

number of stimulations

-
-
- required -
-
-

Warning

-

dur/n should be divisible by the simulation time step without remainder

-
- - - -

Returns:

- - - - - - - - - - - - - -
Name TypeDescription
return_dict - dict - -
-

dictionary containing:

-
    -
  • da (int): current step size
  • -
  • dur_stim (int): duration of one stimulation
  • -
-
-
- - - -

Raises:

- - - - - - - - - - - - - -
TypeDescription
- AssertionError - -
-

if resulting duration of one stimulation is not divisible by the -simulation time step without remainder

-
-
- -
- Source code in CompNeuroPy/simulation_functions.py -
 65
- 66
- 67
- 68
- 69
- 70
- 71
- 72
- 73
- 74
- 75
- 76
- 77
- 78
- 79
- 80
- 81
- 82
- 83
- 84
- 85
- 86
- 87
- 88
- 89
- 90
- 91
- 92
- 93
- 94
- 95
- 96
- 97
- 98
- 99
-100
-101
-102
-103
-104
-105
-106
-107
-108
-109
-110
-111
-112
-113
-114
-115
def current_ramp(pop, a0, a1, dur, n):
-    """
-    Conducts multiple current stimulations with constantly changing current inputs.
-    After this current_ramp stimulation the current amplitude is reset to the initial
-    value (before current ramp).
-
-
-    Args:
-        pop (str):
-            population name of population, which should be stimulated with input current
-            neuron model of population has to contain "I_app" as input current
-        a0 (int):
-            initial current amplitude (of first stimulation)
-        a1 (int):
-            final current amplitude (of last stimulation)
-        dur (int):
-            duration of the complete current ramp (all stimulations)
-        n (int):
-            number of stimulations
-
-    !!! warning
-        dur/n should be divisible by the simulation time step without remainder
-
-    Returns:
-        return_dict (dict):
-            dictionary containing:
-
-            - da (int): current step size
-            - dur_stim (int): duration of one stimulation
-
-    Raises:
-        AssertionError: if resulting duration of one stimulation is not divisible by the
-            simulation time step without remainder
-    """
-
-    assert (dur / n) / dt() % 1 == 0, (
-        "ERROR current_ramp: dur/n should result in a duration (for a single stimulation) which is divisible by the simulation time step (without remainder)\ncurrent duration = "
-        + str(dur / n)
-        + ", timestep = "
-        + str(dt())
-        + "!\n"
-    )
-
-    da = (a1 - a0) / (n - 1)  # for n stimulations only n-1 steps occur
-    dur_stim = dur / n
-    amp = a0
-    for _ in range(n):
-        current_stim(pop, t=dur_stim, a=amp)
-        amp = amp + da
-
-    return {"da": da, "dur_stim": dur_stim}
-
-
-
- -
- - -
- - - - -

- increasing_current(pop, a0, da, nr_steps, dur_step) - -#

- - -
- -

Conducts multiple current stimulations with constantly increasing current inputs. -After this increasing_current stimulation the current amplitude is reset to the -initial value (before increasing_current).

- - - -

Parameters:

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionDefault
pop - str - -
-

population name of population, which should be stimulated with input current -neuron model of population has to contain "I_app" as input current

-
-
- required -
a0 - int - -
-

initial current amplitude (of first stimulation)

-
-
- required -
da - int - -
-

current step size

-
-
- required -
nr_steps - int - -
-

number of stimulations

-
-
- required -
dur_step - int - -
-

duration of one stimulation

-
-
- required -
- - - -

Returns:

- - - - - - - - - - - - - -
Name TypeDescription
return_dict - dict - -
-

dictionary containing:

-
    -
  • current_list (list): list of current amplitudes for each stimulation
  • -
-
-
- -
- Source code in CompNeuroPy/simulation_functions.py -
118
-119
-120
-121
-122
-123
-124
-125
-126
-127
-128
-129
-130
-131
-132
-133
-134
-135
-136
-137
-138
-139
-140
-141
-142
-143
-144
-145
-146
-147
-148
-149
-150
def increasing_current(pop, a0, da, nr_steps, dur_step):
-    """
-    Conducts multiple current stimulations with constantly increasing current inputs.
-    After this increasing_current stimulation the current amplitude is reset to the
-    initial value (before increasing_current).
-
-    Args:
-        pop (str):
-            population name of population, which should be stimulated with input current
-            neuron model of population has to contain "I_app" as input current
-        a0 (int):
-            initial current amplitude (of first stimulation)
-        da (int):
-            current step size
-        nr_steps (int):
-            number of stimulations
-        dur_step (int):
-            duration of one stimulation
-
-    Returns:
-        return_dict (dict):
-            dictionary containing:
-
-            - current_list (list): list of current amplitudes for each stimulation
-    """
-    current_list = []
-    a = a0
-    for _ in range(nr_steps):
-        current_list.append(a)
-        current_stim(pop, t=dur_step, a=a)
-        a += da
-
-    return {"current_list": current_list}
-
-
-
- -
- - - -
- -
- -
- - - - - - - - - - - - - -
-
- - - -
- -
- - - -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/site/additional/simulation_requirements/index.html b/site/additional/simulation_requirements/index.html deleted file mode 100644 index 4dba76c..0000000 --- a/site/additional/simulation_requirements/index.html +++ /dev/null @@ -1,1489 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - Simulation Requirements - CompNeuroPy - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - -
-
-
- - - -
-
-
- - - -
-
- - - - - - - -

Simulation Requirements

- -
- - - - -
- - - -
- - - - - - - - -
- - - - -

- ReqPopHasAttr - - -#

- - -
- - -

Checks if population(s) contains the attribute(s) (parameters or variables)

- -
- Source code in CompNeuroPy/simulation_requirements.py -
 4
- 5
- 6
- 7
- 8
- 9
-10
-11
-12
-13
-14
-15
-16
-17
-18
-19
-20
-21
-22
-23
-24
-25
-26
-27
-28
-29
-30
-31
-32
-33
-34
-35
-36
-37
-38
-39
-40
-41
-42
class ReqPopHasAttr:
-    """
-    Checks if population(s) contains the attribute(s) (parameters or variables)
-    """
-
-    def __init__(self, pop, attr):
-        """
-        Args:
-            pop (str or list of strings):
-                population name(s)
-            attr (str or list of strings):
-                attribute name(s)
-        """
-        self.pop_name_list = pop
-        self.attr_name_list = attr
-        ### convert single strings into list
-        if not (isinstance(pop, list)):
-            self.pop_name_list = [pop]
-        if not (isinstance(attr, list)):
-            self.attr_name_list = [attr]
-
-    def run(self):
-        """
-        Checks if population(s) contains the attribute(s) (parameters or variables)
-
-        Raises:
-            ValueError: if population(s) does not contain the attribute(s)
-        """
-        for attr_name in self.attr_name_list:
-            for pop_name in self.pop_name_list:
-                pop: Population = get_population(pop_name)
-                if not (attr_name in pop.attributes):
-                    raise ValueError(
-                        "Population "
-                        + pop_name
-                        + " does not contain attribute "
-                        + attr_name
-                        + "!\n"
-                    )
-
-
- - - -
- - - - - - - - - - -
- - - - -

- __init__(pop, attr) - -#

- - -
- - - - -

Parameters:

- - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionDefault
pop - str or list of strings - -
-

population name(s)

-
-
- required -
attr - str or list of strings - -
-

attribute name(s)

-
-
- required -
- -
- Source code in CompNeuroPy/simulation_requirements.py -
 9
-10
-11
-12
-13
-14
-15
-16
-17
-18
-19
-20
-21
-22
-23
def __init__(self, pop, attr):
-    """
-    Args:
-        pop (str or list of strings):
-            population name(s)
-        attr (str or list of strings):
-            attribute name(s)
-    """
-    self.pop_name_list = pop
-    self.attr_name_list = attr
-    ### convert single strings into list
-    if not (isinstance(pop, list)):
-        self.pop_name_list = [pop]
-    if not (isinstance(attr, list)):
-        self.attr_name_list = [attr]
-
-
-
- -
- - -
- - - - -

- run() - -#

- - -
- -

Checks if population(s) contains the attribute(s) (parameters or variables)

- - - -

Raises:

- - - - - - - - - - - - - -
TypeDescription
- ValueError - -
-

if population(s) does not contain the attribute(s)

-
-
- -
- Source code in CompNeuroPy/simulation_requirements.py -
25
-26
-27
-28
-29
-30
-31
-32
-33
-34
-35
-36
-37
-38
-39
-40
-41
-42
def run(self):
-    """
-    Checks if population(s) contains the attribute(s) (parameters or variables)
-
-    Raises:
-        ValueError: if population(s) does not contain the attribute(s)
-    """
-    for attr_name in self.attr_name_list:
-        for pop_name in self.pop_name_list:
-            pop: Population = get_population(pop_name)
-            if not (attr_name in pop.attributes):
-                raise ValueError(
-                    "Population "
-                    + pop_name
-                    + " does not contain attribute "
-                    + attr_name
-                    + "!\n"
-                )
-
-
-
- -
- - - -
- -
- -
- - - - -
- -
- -
- - - - - - - - - - - - - -
-
- - - -
- -
- - - -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/site/additional/system_functions/index.html b/site/additional/system_functions/index.html deleted file mode 100644 index 039bc76..0000000 --- a/site/additional/system_functions/index.html +++ /dev/null @@ -1,2004 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - System Functions - CompNeuroPy - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - -
-
-
- - - -
-
-
- - - -
-
- - - - - - - -

System Functions

- -
- - - - -
- - - -
- - - - - - - - - - -
- - - - -

- clear_dir(path) - -#

- - -
- -

Deletes all files and subdirectories in the specified folder.

- - - -

Parameters:

- - - - - - - - - - - - - - - - - -
NameTypeDescriptionDefault
path - str - -
-

Path to the folder to clear.

-
-
- required -
- -
- Source code in CompNeuroPy/system_functions.py -
 9
-10
-11
-12
-13
-14
-15
-16
-17
-18
-19
-20
-21
-22
-23
-24
-25
-26
-27
-28
-29
-30
-31
-32
-33
-34
def clear_dir(path):
-    """
-    Deletes all files and subdirectories in the specified folder.
-
-    Args:
-        path (str):
-            Path to the folder to clear.
-    """
-    try:
-        if not os.path.exists(path):
-            print(f"The folder '{path}' does not exist.")
-            return
-
-        for filename in os.listdir(path):
-            file_path = os.path.join(path, filename)
-            try:
-                if os.path.isfile(file_path) or os.path.islink(file_path):
-                    os.unlink(file_path)
-                elif os.path.isdir(file_path):
-                    shutil.rmtree(file_path)
-            except Exception:
-                print(traceback.format_exc())
-                print(f"Failed to delete {file_path}")
-    except Exception:
-        print(traceback.format_exc())
-        print(f"Failed to clear {path}")
-
-
-
- -
- - -
- - - - -

- create_dir(path, print_info=False, clear=False) - -#

- - -
- -

Creates a directory.

- - - -

Parameters:

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionDefault
path - str - -
-

Path to the directory to create.

-
-
- required -
print_info - bool - -
-

Whether to print information about the directory creation. Default: False.

-
-
- False -
clear - bool - -
-

Whether to clear the directory if it already exists. Default: False.

-
-
- False -
- -
- Source code in CompNeuroPy/system_functions.py -
37
-38
-39
-40
-41
-42
-43
-44
-45
-46
-47
-48
-49
-50
-51
-52
-53
-54
-55
-56
-57
-58
-59
-60
-61
-62
-63
-64
-65
-66
-67
-68
-69
-70
-71
-72
-73
-74
-75
-76
-77
-78
-79
def create_dir(path, print_info=False, clear=False):
-    """
-    Creates a directory.
-
-    Args:
-        path (str):
-            Path to the directory to create.
-
-        print_info (bool, optional):
-            Whether to print information about the directory creation. Default: False.
-
-        clear (bool, optional):
-            Whether to clear the directory if it already exists. Default: False.
-    """
-    try:
-        if isinstance(path, str):
-            if len(path) > 0:
-                os.makedirs(path)
-        else:
-            print("create_dir, ERROR: path is no str")
-    except Exception:
-        if os.path.isdir(path):
-            if print_info:
-                print(path + " already exists")
-            if clear:
-                ### clear folder
-                ### do you really want?
-                answer = input(f"Do you really want to clear {path} (y/n):")
-                while answer != "y" and answer != "n":
-                    print("please enter y or n")
-                    answer = input(f"Do you really want to clear {path} (y/n):")
-                ### clear or not depending on answer
-                if answer == "y":
-                    clear_dir(path)
-                    if print_info:
-                        print(path + " already exists and was cleared.")
-                else:
-                    if print_info:
-                        print(path + " already exists and was not cleared.")
-        else:
-            print(traceback.format_exc())
-            print("could not create " + path + " folder")
-            quit()
-
-
-
- -
- - -
- - - - -

- save_variables(variable_list, name_list, path='./') - -#

- - -
- - - - -

Parameters:

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionDefault
variable_list - list - -
-

variables to save

-
-
- required -
name_list - list - -
-

names of the save files of the variables

-
-
- required -
path - str or list - -
-

save path for all variables, or save path for each variable of the -variable_list. Default: "./"

-
-
- './' -
- - - -

Examples:

-
import numpy as np
-from CompNeuroPy import save_variables, load_variables
-
-### create variables
-var1 = np.random.rand(10)
-var2 = np.random.rand(10)
-
-### save variables
-save_variables([var1, var2], ["var1_file", "var2_file"], "my_variables_folder")
-
-### load variables
-loaded_variables = load_variables(["var1", "var2"], "my_variables_folder")
-
-### use loaded variables
-print(loaded_variables["var1_file"])
-print(loaded_variables["var2_file"])
-
- -
- Source code in CompNeuroPy/system_functions.py -
 82
- 83
- 84
- 85
- 86
- 87
- 88
- 89
- 90
- 91
- 92
- 93
- 94
- 95
- 96
- 97
- 98
- 99
-100
-101
-102
-103
-104
-105
-106
-107
-108
-109
-110
-111
-112
-113
-114
-115
-116
-117
-118
-119
-120
-121
-122
-123
-124
-125
-126
-127
-128
-129
def save_variables(variable_list: list, name_list: list, path: str | list = "./"):
-    """
-    Args:
-        variable_list (list):
-            variables to save
-        name_list (list):
-            names of the save files of the variables
-        path (str or list):
-            save path for all variables, or save path for each variable of the
-            variable_list. Default: "./"
-
-    Examples:
-        ```python
-        import numpy as np
-        from CompNeuroPy import save_variables, load_variables
-
-        ### create variables
-        var1 = np.random.rand(10)
-        var2 = np.random.rand(10)
-
-        ### save variables
-        save_variables([var1, var2], ["var1_file", "var2_file"], "my_variables_folder")
-
-        ### load variables
-        loaded_variables = load_variables(["var1", "var2"], "my_variables_folder")
-
-        ### use loaded variables
-        print(loaded_variables["var1_file"])
-        print(loaded_variables["var2_file"])
-        ```
-    """
-    for idx in range(len(variable_list)):
-        ### set save path
-        if isinstance(path, str):
-            save_path = path
-        else:
-            save_path = path[idx]
-        if save_path.endswith("/"):
-            save_path = save_path[:-1]
-        ### set file name
-        file_name = f"{name_list[idx]}.pkl"
-        ### set variable
-        variable = variable_list[idx]
-        ### generate save folder
-        create_dir(save_path)
-        ### Saving a variable to a file
-        with open(f"{save_path}/{file_name}", "wb") as file:
-            pickle.dump(variable, file)
-
-
-
- -
- - -
- - - - -

- load_variables(name_list, path='./') - -#

- - -
- - - - -

Parameters:

- - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionDefault
name_list - list - -
-

names of the save files of the variables

-
-
- required -
path - str or list - -
-

save path for all variables, or save path for each variable of the -variable_list. Default: "./"

-
-
- './' -
- - - -

Returns:

- - - - - - - - - - - - - -
Name TypeDescription
variable_dict - dict - -
-

dictionary with the loaded variables, keys are the names of the -files, values are the loaded variables

-
-
- - - -

Examples:

-
import numpy as np
-from CompNeuroPy import save_variables, load_variables
-
-### create variables
-var1 = np.random.rand(10)
-var2 = np.random.rand(10)
-
-### save variables
-save_variables([var1, var2], ["var1_file", "var2_file"], "my_variables_folder")
-
-### load variables
-loaded_variables = load_variables(["var1", "var2"], "my_variables_folder")
-
-### use loaded variables
-print(loaded_variables["var1_file"])
-print(loaded_variables["var2_file"])
-
- -
- Source code in CompNeuroPy/system_functions.py -
132
-133
-134
-135
-136
-137
-138
-139
-140
-141
-142
-143
-144
-145
-146
-147
-148
-149
-150
-151
-152
-153
-154
-155
-156
-157
-158
-159
-160
-161
-162
-163
-164
-165
-166
-167
-168
-169
-170
-171
-172
-173
-174
-175
-176
-177
-178
-179
-180
-181
-182
-183
def load_variables(name_list: list, path: str | list = "./"):
-    """
-    Args:
-        name_list (list):
-            names of the save files of the variables
-        path (str or list, optional):
-            save path for all variables, or save path for each variable of the
-            variable_list. Default: "./"
-
-    Returns:
-        variable_dict (dict):
-            dictionary with the loaded variables, keys are the names of the
-            files, values are the loaded variables
-
-    Examples:
-        ```python
-        import numpy as np
-        from CompNeuroPy import save_variables, load_variables
-
-        ### create variables
-        var1 = np.random.rand(10)
-        var2 = np.random.rand(10)
-
-        ### save variables
-        save_variables([var1, var2], ["var1_file", "var2_file"], "my_variables_folder")
-
-        ### load variables
-        loaded_variables = load_variables(["var1", "var2"], "my_variables_folder")
-
-        ### use loaded variables
-        print(loaded_variables["var1_file"])
-        print(loaded_variables["var2_file"])
-        ```
-    """
-    variable_dict = {}
-    for idx in range(len(name_list)):
-        ### set save path
-        if isinstance(path, str):
-            save_path = path
-        else:
-            save_path = path[idx]
-        if save_path.endswith("/"):
-            save_path = save_path[:-1]
-        ### set file name
-        file_name = f"{name_list[idx]}.pkl"
-        ### Loading the variable from the file
-        with open(f"{save_path}/{file_name}", "rb") as file:
-            loaded_variable = pickle.load(file)
-        ### store variable in variable_dict
-        variable_dict[name_list[idx]] = loaded_variable
-
-    return variable_dict
-
-
-
- -
- - -
- - - - -

- timing_decorator(threshold=0.1) - -#

- - -
- -

Decorator to measure the execution time of a function.

- - - -

Parameters:

- - - - - - - - - - - - - - - - - -
NameTypeDescriptionDefault
threshold - float - -
-

Threshold in seconds. If the execution time of the function is -larger than this threshold, the execution time is printed. Default: 0.1.

-
-
- 0.1 -
- -
- Source code in CompNeuroPy/system_functions.py -
186
-187
-188
-189
-190
-191
-192
-193
-194
-195
-196
-197
-198
-199
-200
-201
-202
-203
-204
-205
-206
-207
-208
-209
def timing_decorator(threshold=0.1):
-    """
-    Decorator to measure the execution time of a function.
-
-    Args:
-        threshold (float, optional):
-            Threshold in seconds. If the execution time of the function is
-            larger than this threshold, the execution time is printed. Default: 0.1.
-    """
-
-    def decorator(func):
-        @wraps(func)
-        def wrapper(*args, **kwargs):
-            start_time = time()
-            result = func(*args, **kwargs)
-            end_time = time()
-            execution_time = end_time - start_time
-            if execution_time >= threshold:
-                print(f"{func.__name__} took {execution_time:.4f} seconds")
-            return result
-
-        return wrapper
-
-    return decorator
-
-
-
- -
- - - -
- -
- -
- - - - - - - - - - - - - -
-
- - - -
- -
- - - -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/site/assets/_mkdocstrings.css b/site/assets/_mkdocstrings.css deleted file mode 100644 index 049a254..0000000 --- a/site/assets/_mkdocstrings.css +++ /dev/null @@ -1,64 +0,0 @@ - -/* Avoid breaking parameter names, etc. in table cells. */ -.doc-contents td code { - word-break: normal !important; -} - -/* No line break before first paragraph of descriptions. */ -.doc-md-description, -.doc-md-description>p:first-child { - display: inline; -} - -/* Max width for docstring sections tables. */ -.doc .md-typeset__table, -.doc .md-typeset__table table { - display: table !important; - width: 100%; -} - -.doc .md-typeset__table tr { - display: table-row; -} - -/* Defaults in Spacy table style. */ -.doc-param-default { - float: right; -} - -/* Keep headings consistent. */ -h1.doc-heading, -h2.doc-heading, -h3.doc-heading, -h4.doc-heading, -h5.doc-heading, -h6.doc-heading { - font-weight: 400; - line-height: 1.5; - color: inherit; - text-transform: none; -} - -h1.doc-heading { - font-size: 1.6rem; -} - -h2.doc-heading { - font-size: 1.2rem; -} - -h3.doc-heading { - font-size: 1.15rem; -} - -h4.doc-heading { - font-size: 1.10rem; -} - -h5.doc-heading { - font-size: 1.05rem; -} - -h6.doc-heading { - font-size: 1rem; -} \ No newline at end of file diff --git a/site/assets/javascripts/bundle.d7c377c4.min.js b/site/assets/javascripts/bundle.d7c377c4.min.js deleted file mode 100644 index 6a0bcf8..0000000 --- a/site/assets/javascripts/bundle.d7c377c4.min.js +++ /dev/null @@ -1,29 +0,0 @@ -"use strict";(()=>{var Mi=Object.create;var gr=Object.defineProperty;var Li=Object.getOwnPropertyDescriptor;var _i=Object.getOwnPropertyNames,Ft=Object.getOwnPropertySymbols,Ai=Object.getPrototypeOf,xr=Object.prototype.hasOwnProperty,ro=Object.prototype.propertyIsEnumerable;var to=(e,t,r)=>t in e?gr(e,t,{enumerable:!0,configurable:!0,writable:!0,value:r}):e[t]=r,P=(e,t)=>{for(var r in t||(t={}))xr.call(t,r)&&to(e,r,t[r]);if(Ft)for(var r 
of Ft(t))ro.call(t,r)&&to(e,r,t[r]);return e};var oo=(e,t)=>{var r={};for(var o in e)xr.call(e,o)&&t.indexOf(o)<0&&(r[o]=e[o]);if(e!=null&&Ft)for(var o of Ft(e))t.indexOf(o)<0&&ro.call(e,o)&&(r[o]=e[o]);return r};var yr=(e,t)=>()=>(t||e((t={exports:{}}).exports,t),t.exports);var Ci=(e,t,r,o)=>{if(t&&typeof t=="object"||typeof t=="function")for(let n of _i(t))!xr.call(e,n)&&n!==r&&gr(e,n,{get:()=>t[n],enumerable:!(o=Li(t,n))||o.enumerable});return e};var jt=(e,t,r)=>(r=e!=null?Mi(Ai(e)):{},Ci(t||!e||!e.__esModule?gr(r,"default",{value:e,enumerable:!0}):r,e));var no=(e,t,r)=>new Promise((o,n)=>{var i=c=>{try{a(r.next(c))}catch(p){n(p)}},s=c=>{try{a(r.throw(c))}catch(p){n(p)}},a=c=>c.done?o(c.value):Promise.resolve(c.value).then(i,s);a((r=r.apply(e,t)).next())});var ao=yr((Er,io)=>{(function(e,t){typeof Er=="object"&&typeof io!="undefined"?t():typeof define=="function"&&define.amd?define(t):t()})(Er,function(){"use strict";function e(r){var o=!0,n=!1,i=null,s={text:!0,search:!0,url:!0,tel:!0,email:!0,password:!0,number:!0,date:!0,month:!0,week:!0,time:!0,datetime:!0,"datetime-local":!0};function a(C){return!!(C&&C!==document&&C.nodeName!=="HTML"&&C.nodeName!=="BODY"&&"classList"in C&&"contains"in C.classList)}function c(C){var ct=C.type,Ve=C.tagName;return!!(Ve==="INPUT"&&s[ct]&&!C.readOnly||Ve==="TEXTAREA"&&!C.readOnly||C.isContentEditable)}function p(C){C.classList.contains("focus-visible")||(C.classList.add("focus-visible"),C.setAttribute("data-focus-visible-added",""))}function l(C){C.hasAttribute("data-focus-visible-added")&&(C.classList.remove("focus-visible"),C.removeAttribute("data-focus-visible-added"))}function f(C){C.metaKey||C.altKey||C.ctrlKey||(a(r.activeElement)&&p(r.activeElement),o=!0)}function u(C){o=!1}function d(C){a(C.target)&&(o||c(C.target))&&p(C.target)}function 
y(C){a(C.target)&&(C.target.classList.contains("focus-visible")||C.target.hasAttribute("data-focus-visible-added"))&&(n=!0,window.clearTimeout(i),i=window.setTimeout(function(){n=!1},100),l(C.target))}function b(C){document.visibilityState==="hidden"&&(n&&(o=!0),D())}function D(){document.addEventListener("mousemove",J),document.addEventListener("mousedown",J),document.addEventListener("mouseup",J),document.addEventListener("pointermove",J),document.addEventListener("pointerdown",J),document.addEventListener("pointerup",J),document.addEventListener("touchmove",J),document.addEventListener("touchstart",J),document.addEventListener("touchend",J)}function Q(){document.removeEventListener("mousemove",J),document.removeEventListener("mousedown",J),document.removeEventListener("mouseup",J),document.removeEventListener("pointermove",J),document.removeEventListener("pointerdown",J),document.removeEventListener("pointerup",J),document.removeEventListener("touchmove",J),document.removeEventListener("touchstart",J),document.removeEventListener("touchend",J)}function J(C){C.target.nodeName&&C.target.nodeName.toLowerCase()==="html"||(o=!1,Q())}document.addEventListener("keydown",f,!0),document.addEventListener("mousedown",u,!0),document.addEventListener("pointerdown",u,!0),document.addEventListener("touchstart",u,!0),document.addEventListener("visibilitychange",b,!0),D(),r.addEventListener("focus",d,!0),r.addEventListener("blur",y,!0),r.nodeType===Node.DOCUMENT_FRAGMENT_NODE&&r.host?r.host.setAttribute("data-js-focus-visible",""):r.nodeType===Node.DOCUMENT_NODE&&(document.documentElement.classList.add("js-focus-visible"),document.documentElement.setAttribute("data-js-focus-visible",""))}if(typeof window!="undefined"&&typeof document!="undefined"){window.applyFocusVisiblePolyfill=e;var t;try{t=new CustomEvent("focus-visible-polyfill-ready")}catch(r){t=document.createEvent("CustomEvent"),t.initCustomEvent("focus-visible-polyfill-ready",!1,!1,{})}window.dispatchEvent(t)}typeof 
document!="undefined"&&e(document)})});var Kr=yr((kt,qr)=>{/*! - * clipboard.js v2.0.11 - * https://clipboardjs.com/ - * - * Licensed MIT © Zeno Rocha - */(function(t,r){typeof kt=="object"&&typeof qr=="object"?qr.exports=r():typeof define=="function"&&define.amd?define([],r):typeof kt=="object"?kt.ClipboardJS=r():t.ClipboardJS=r()})(kt,function(){return function(){var e={686:function(o,n,i){"use strict";i.d(n,{default:function(){return Oi}});var s=i(279),a=i.n(s),c=i(370),p=i.n(c),l=i(817),f=i.n(l);function u(V){try{return document.execCommand(V)}catch(_){return!1}}var d=function(_){var O=f()(_);return u("cut"),O},y=d;function b(V){var _=document.documentElement.getAttribute("dir")==="rtl",O=document.createElement("textarea");O.style.fontSize="12pt",O.style.border="0",O.style.padding="0",O.style.margin="0",O.style.position="absolute",O.style[_?"right":"left"]="-9999px";var $=window.pageYOffset||document.documentElement.scrollTop;return O.style.top="".concat($,"px"),O.setAttribute("readonly",""),O.value=V,O}var D=function(_,O){var $=b(_);O.container.appendChild($);var N=f()($);return u("copy"),$.remove(),N},Q=function(_){var O=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body},$="";return typeof _=="string"?$=D(_,O):_ instanceof HTMLInputElement&&!["text","search","url","tel","password"].includes(_==null?void 0:_.type)?$=D(_.value,O):($=f()(_),u("copy")),$},J=Q;function C(V){"@babel/helpers - typeof";return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?C=function(O){return typeof O}:C=function(O){return O&&typeof Symbol=="function"&&O.constructor===Symbol&&O!==Symbol.prototype?"symbol":typeof O},C(V)}var ct=function(){var _=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{},O=_.action,$=O===void 0?"copy":O,N=_.container,Y=_.target,ke=_.text;if($!=="copy"&&$!=="cut")throw new Error('Invalid "action" value, use either "copy" or "cut"');if(Y!==void 
0)if(Y&&C(Y)==="object"&&Y.nodeType===1){if($==="copy"&&Y.hasAttribute("disabled"))throw new Error('Invalid "target" attribute. Please use "readonly" instead of "disabled" attribute');if($==="cut"&&(Y.hasAttribute("readonly")||Y.hasAttribute("disabled")))throw new Error(`Invalid "target" attribute. You can't cut text from elements with "readonly" or "disabled" attributes`)}else throw new Error('Invalid "target" value, use a valid Element');if(ke)return J(ke,{container:N});if(Y)return $==="cut"?y(Y):J(Y,{container:N})},Ve=ct;function Fe(V){"@babel/helpers - typeof";return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?Fe=function(O){return typeof O}:Fe=function(O){return O&&typeof Symbol=="function"&&O.constructor===Symbol&&O!==Symbol.prototype?"symbol":typeof O},Fe(V)}function vi(V,_){if(!(V instanceof _))throw new TypeError("Cannot call a class as a function")}function eo(V,_){for(var O=0;O<_.length;O++){var $=_[O];$.enumerable=$.enumerable||!1,$.configurable=!0,"value"in $&&($.writable=!0),Object.defineProperty(V,$.key,$)}}function gi(V,_,O){return _&&eo(V.prototype,_),O&&eo(V,O),V}function xi(V,_){if(typeof _!="function"&&_!==null)throw new TypeError("Super expression must either be null or a function");V.prototype=Object.create(_&&_.prototype,{constructor:{value:V,writable:!0,configurable:!0}}),_&&br(V,_)}function br(V,_){return br=Object.setPrototypeOf||function($,N){return $.__proto__=N,$},br(V,_)}function yi(V){var _=Ti();return function(){var $=Rt(V),N;if(_){var Y=Rt(this).constructor;N=Reflect.construct($,arguments,Y)}else N=$.apply(this,arguments);return Ei(this,N)}}function Ei(V,_){return _&&(Fe(_)==="object"||typeof _=="function")?_:wi(V)}function wi(V){if(V===void 0)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return V}function Ti(){if(typeof Reflect=="undefined"||!Reflect.construct||Reflect.construct.sham)return!1;if(typeof Proxy=="function")return!0;try{return 
Date.prototype.toString.call(Reflect.construct(Date,[],function(){})),!0}catch(V){return!1}}function Rt(V){return Rt=Object.setPrototypeOf?Object.getPrototypeOf:function(O){return O.__proto__||Object.getPrototypeOf(O)},Rt(V)}function vr(V,_){var O="data-clipboard-".concat(V);if(_.hasAttribute(O))return _.getAttribute(O)}var Si=function(V){xi(O,V);var _=yi(O);function O($,N){var Y;return vi(this,O),Y=_.call(this),Y.resolveOptions(N),Y.listenClick($),Y}return gi(O,[{key:"resolveOptions",value:function(){var N=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{};this.action=typeof N.action=="function"?N.action:this.defaultAction,this.target=typeof N.target=="function"?N.target:this.defaultTarget,this.text=typeof N.text=="function"?N.text:this.defaultText,this.container=Fe(N.container)==="object"?N.container:document.body}},{key:"listenClick",value:function(N){var Y=this;this.listener=p()(N,"click",function(ke){return Y.onClick(ke)})}},{key:"onClick",value:function(N){var Y=N.delegateTarget||N.currentTarget,ke=this.action(Y)||"copy",It=Ve({action:ke,container:this.container,target:this.target(Y),text:this.text(Y)});this.emit(It?"success":"error",{action:ke,text:It,trigger:Y,clearSelection:function(){Y&&Y.focus(),window.getSelection().removeAllRanges()}})}},{key:"defaultAction",value:function(N){return vr("action",N)}},{key:"defaultTarget",value:function(N){var Y=vr("target",N);if(Y)return document.querySelector(Y)}},{key:"defaultText",value:function(N){return vr("text",N)}},{key:"destroy",value:function(){this.listener.destroy()}}],[{key:"copy",value:function(N){var Y=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body};return J(N,Y)}},{key:"cut",value:function(N){return y(N)}},{key:"isSupported",value:function(){var N=arguments.length>0&&arguments[0]!==void 0?arguments[0]:["copy","cut"],Y=typeof N=="string"?[N]:N,ke=!!document.queryCommandSupported;return 
Y.forEach(function(It){ke=ke&&!!document.queryCommandSupported(It)}),ke}}]),O}(a()),Oi=Si},828:function(o){var n=9;if(typeof Element!="undefined"&&!Element.prototype.matches){var i=Element.prototype;i.matches=i.matchesSelector||i.mozMatchesSelector||i.msMatchesSelector||i.oMatchesSelector||i.webkitMatchesSelector}function s(a,c){for(;a&&a.nodeType!==n;){if(typeof a.matches=="function"&&a.matches(c))return a;a=a.parentNode}}o.exports=s},438:function(o,n,i){var s=i(828);function a(l,f,u,d,y){var b=p.apply(this,arguments);return l.addEventListener(u,b,y),{destroy:function(){l.removeEventListener(u,b,y)}}}function c(l,f,u,d,y){return typeof l.addEventListener=="function"?a.apply(null,arguments):typeof u=="function"?a.bind(null,document).apply(null,arguments):(typeof l=="string"&&(l=document.querySelectorAll(l)),Array.prototype.map.call(l,function(b){return a(b,f,u,d,y)}))}function p(l,f,u,d){return function(y){y.delegateTarget=s(y.target,f),y.delegateTarget&&d.call(l,y)}}o.exports=c},879:function(o,n){n.node=function(i){return i!==void 0&&i instanceof HTMLElement&&i.nodeType===1},n.nodeList=function(i){var s=Object.prototype.toString.call(i);return i!==void 0&&(s==="[object NodeList]"||s==="[object HTMLCollection]")&&"length"in i&&(i.length===0||n.node(i[0]))},n.string=function(i){return typeof i=="string"||i instanceof String},n.fn=function(i){var s=Object.prototype.toString.call(i);return s==="[object Function]"}},370:function(o,n,i){var s=i(879),a=i(438);function c(u,d,y){if(!u&&!d&&!y)throw new Error("Missing required arguments");if(!s.string(d))throw new TypeError("Second argument must be a String");if(!s.fn(y))throw new TypeError("Third argument must be a Function");if(s.node(u))return p(u,d,y);if(s.nodeList(u))return l(u,d,y);if(s.string(u))return f(u,d,y);throw new TypeError("First argument must be a String, HTMLElement, HTMLCollection, or NodeList")}function p(u,d,y){return u.addEventListener(d,y),{destroy:function(){u.removeEventListener(d,y)}}}function 
l(u,d,y){return Array.prototype.forEach.call(u,function(b){b.addEventListener(d,y)}),{destroy:function(){Array.prototype.forEach.call(u,function(b){b.removeEventListener(d,y)})}}}function f(u,d,y){return a(document.body,u,d,y)}o.exports=c},817:function(o){function n(i){var s;if(i.nodeName==="SELECT")i.focus(),s=i.value;else if(i.nodeName==="INPUT"||i.nodeName==="TEXTAREA"){var a=i.hasAttribute("readonly");a||i.setAttribute("readonly",""),i.select(),i.setSelectionRange(0,i.value.length),a||i.removeAttribute("readonly"),s=i.value}else{i.hasAttribute("contenteditable")&&i.focus();var c=window.getSelection(),p=document.createRange();p.selectNodeContents(i),c.removeAllRanges(),c.addRange(p),s=c.toString()}return s}o.exports=n},279:function(o){function n(){}n.prototype={on:function(i,s,a){var c=this.e||(this.e={});return(c[i]||(c[i]=[])).push({fn:s,ctx:a}),this},once:function(i,s,a){var c=this;function p(){c.off(i,p),s.apply(a,arguments)}return p._=s,this.on(i,p,a)},emit:function(i){var s=[].slice.call(arguments,1),a=((this.e||(this.e={}))[i]||[]).slice(),c=0,p=a.length;for(c;c{"use strict";/*! 
- * escape-html - * Copyright(c) 2012-2013 TJ Holowaychuk - * Copyright(c) 2015 Andreas Lubbe - * Copyright(c) 2015 Tiancheng "Timothy" Gu - * MIT Licensed - */var Wa=/["'&<>]/;Vn.exports=Ua;function Ua(e){var t=""+e,r=Wa.exec(t);if(!r)return t;var o,n="",i=0,s=0;for(i=r.index;i0&&i[i.length-1])&&(p[0]===6||p[0]===2)){r=0;continue}if(p[0]===3&&(!i||p[1]>i[0]&&p[1]=e.length&&(e=void 0),{value:e&&e[o++],done:!e}}};throw new TypeError(t?"Object is not iterable.":"Symbol.iterator is not defined.")}function z(e,t){var r=typeof Symbol=="function"&&e[Symbol.iterator];if(!r)return e;var o=r.call(e),n,i=[],s;try{for(;(t===void 0||t-- >0)&&!(n=o.next()).done;)i.push(n.value)}catch(a){s={error:a}}finally{try{n&&!n.done&&(r=o.return)&&r.call(o)}finally{if(s)throw s.error}}return i}function K(e,t,r){if(r||arguments.length===2)for(var o=0,n=t.length,i;o1||a(u,d)})})}function a(u,d){try{c(o[u](d))}catch(y){f(i[0][3],y)}}function c(u){u.value instanceof ot?Promise.resolve(u.value.v).then(p,l):f(i[0][2],u)}function p(u){a("next",u)}function l(u){a("throw",u)}function f(u,d){u(d),i.shift(),i.length&&a(i[0][0],i[0][1])}}function po(e){if(!Symbol.asyncIterator)throw new TypeError("Symbol.asyncIterator is not defined.");var t=e[Symbol.asyncIterator],r;return t?t.call(e):(e=typeof be=="function"?be(e):e[Symbol.iterator](),r={},o("next"),o("throw"),o("return"),r[Symbol.asyncIterator]=function(){return this},r);function o(i){r[i]=e[i]&&function(s){return new Promise(function(a,c){s=e[i](s),n(a,c,s.done,s.value)})}}function n(i,s,a,c){Promise.resolve(c).then(function(p){i({value:p,done:a})},s)}}function k(e){return typeof e=="function"}function pt(e){var t=function(o){Error.call(o),o.stack=new Error().stack},r=e(t);return r.prototype=Object.create(Error.prototype),r.prototype.constructor=r,r}var Ut=pt(function(e){return function(r){e(this),this.message=r?r.length+` errors occurred during unsubscription: -`+r.map(function(o,n){return n+1+") "+o.toString()}).join(` - 
`):"",this.name="UnsubscriptionError",this.errors=r}});function ze(e,t){if(e){var r=e.indexOf(t);0<=r&&e.splice(r,1)}}var je=function(){function e(t){this.initialTeardown=t,this.closed=!1,this._parentage=null,this._finalizers=null}return e.prototype.unsubscribe=function(){var t,r,o,n,i;if(!this.closed){this.closed=!0;var s=this._parentage;if(s)if(this._parentage=null,Array.isArray(s))try{for(var a=be(s),c=a.next();!c.done;c=a.next()){var p=c.value;p.remove(this)}}catch(b){t={error:b}}finally{try{c&&!c.done&&(r=a.return)&&r.call(a)}finally{if(t)throw t.error}}else s.remove(this);var l=this.initialTeardown;if(k(l))try{l()}catch(b){i=b instanceof Ut?b.errors:[b]}var f=this._finalizers;if(f){this._finalizers=null;try{for(var u=be(f),d=u.next();!d.done;d=u.next()){var y=d.value;try{lo(y)}catch(b){i=i!=null?i:[],b instanceof Ut?i=K(K([],z(i)),z(b.errors)):i.push(b)}}}catch(b){o={error:b}}finally{try{d&&!d.done&&(n=u.return)&&n.call(u)}finally{if(o)throw o.error}}}if(i)throw new Ut(i)}},e.prototype.add=function(t){var r;if(t&&t!==this)if(this.closed)lo(t);else{if(t instanceof e){if(t.closed||t._hasParent(this))return;t._addParent(this)}(this._finalizers=(r=this._finalizers)!==null&&r!==void 0?r:[]).push(t)}},e.prototype._hasParent=function(t){var r=this._parentage;return r===t||Array.isArray(r)&&r.includes(t)},e.prototype._addParent=function(t){var r=this._parentage;this._parentage=Array.isArray(r)?(r.push(t),r):r?[r,t]:t},e.prototype._removeParent=function(t){var r=this._parentage;r===t?this._parentage=null:Array.isArray(r)&&ze(r,t)},e.prototype.remove=function(t){var r=this._finalizers;r&&ze(r,t),t instanceof e&&t._removeParent(this)},e.EMPTY=function(){var t=new e;return t.closed=!0,t}(),e}();var Tr=je.EMPTY;function Nt(e){return e instanceof je||e&&"closed"in e&&k(e.remove)&&k(e.add)&&k(e.unsubscribe)}function lo(e){k(e)?e():e.unsubscribe()}var He={onUnhandledError:null,onStoppedNotification:null,Promise:void 
0,useDeprecatedSynchronousErrorHandling:!1,useDeprecatedNextContext:!1};var lt={setTimeout:function(e,t){for(var r=[],o=2;o0},enumerable:!1,configurable:!0}),t.prototype._trySubscribe=function(r){return this._throwIfClosed(),e.prototype._trySubscribe.call(this,r)},t.prototype._subscribe=function(r){return this._throwIfClosed(),this._checkFinalizedStatuses(r),this._innerSubscribe(r)},t.prototype._innerSubscribe=function(r){var o=this,n=this,i=n.hasError,s=n.isStopped,a=n.observers;return i||s?Tr:(this.currentObservers=null,a.push(r),new je(function(){o.currentObservers=null,ze(a,r)}))},t.prototype._checkFinalizedStatuses=function(r){var o=this,n=o.hasError,i=o.thrownError,s=o.isStopped;n?r.error(i):s&&r.complete()},t.prototype.asObservable=function(){var r=new I;return r.source=this,r},t.create=function(r,o){return new xo(r,o)},t}(I);var xo=function(e){se(t,e);function t(r,o){var n=e.call(this)||this;return n.destination=r,n.source=o,n}return t.prototype.next=function(r){var o,n;(n=(o=this.destination)===null||o===void 0?void 0:o.next)===null||n===void 0||n.call(o,r)},t.prototype.error=function(r){var o,n;(n=(o=this.destination)===null||o===void 0?void 0:o.error)===null||n===void 0||n.call(o,r)},t.prototype.complete=function(){var r,o;(o=(r=this.destination)===null||r===void 0?void 0:r.complete)===null||o===void 0||o.call(r)},t.prototype._subscribe=function(r){var o,n;return(n=(o=this.source)===null||o===void 0?void 0:o.subscribe(r))!==null&&n!==void 0?n:Tr},t}(x);var St={now:function(){return(St.delegate||Date).now()},delegate:void 0};var Ot=function(e){se(t,e);function t(r,o,n){r===void 0&&(r=1/0),o===void 0&&(o=1/0),n===void 0&&(n=St);var i=e.call(this)||this;return i._bufferSize=r,i._windowTime=o,i._timestampProvider=n,i._buffer=[],i._infiniteTimeWindow=!0,i._infiniteTimeWindow=o===1/0,i._bufferSize=Math.max(1,r),i._windowTime=Math.max(1,o),i}return t.prototype.next=function(r){var 
o=this,n=o.isStopped,i=o._buffer,s=o._infiniteTimeWindow,a=o._timestampProvider,c=o._windowTime;n||(i.push(r),!s&&i.push(a.now()+c)),this._trimBuffer(),e.prototype.next.call(this,r)},t.prototype._subscribe=function(r){this._throwIfClosed(),this._trimBuffer();for(var o=this._innerSubscribe(r),n=this,i=n._infiniteTimeWindow,s=n._buffer,a=s.slice(),c=0;c0?e.prototype.requestAsyncId.call(this,r,o,n):(r.actions.push(this),r._scheduled||(r._scheduled=ut.requestAnimationFrame(function(){return r.flush(void 0)})))},t.prototype.recycleAsyncId=function(r,o,n){var i;if(n===void 0&&(n=0),n!=null?n>0:this.delay>0)return e.prototype.recycleAsyncId.call(this,r,o,n);var s=r.actions;o!=null&&((i=s[s.length-1])===null||i===void 0?void 0:i.id)!==o&&(ut.cancelAnimationFrame(o),r._scheduled=void 0)},t}(zt);var wo=function(e){se(t,e);function t(){return e!==null&&e.apply(this,arguments)||this}return t.prototype.flush=function(r){this._active=!0;var o=this._scheduled;this._scheduled=void 0;var n=this.actions,i;r=r||n.shift();do if(i=r.execute(r.state,r.delay))break;while((r=n[0])&&r.id===o&&n.shift());if(this._active=!1,i){for(;(r=n[0])&&r.id===o&&n.shift();)r.unsubscribe();throw i}},t}(qt);var ge=new wo(Eo);var M=new I(function(e){return e.complete()});function Kt(e){return e&&k(e.schedule)}function Cr(e){return e[e.length-1]}function Ge(e){return k(Cr(e))?e.pop():void 0}function Ae(e){return Kt(Cr(e))?e.pop():void 0}function Qt(e,t){return typeof Cr(e)=="number"?e.pop():t}var dt=function(e){return e&&typeof e.length=="number"&&typeof e!="function"};function Yt(e){return k(e==null?void 0:e.then)}function Bt(e){return k(e[ft])}function Gt(e){return Symbol.asyncIterator&&k(e==null?void 0:e[Symbol.asyncIterator])}function Jt(e){return new TypeError("You provided "+(e!==null&&typeof e=="object"?"an invalid object":"'"+e+"'")+" where a stream was expected. 
You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.")}function Wi(){return typeof Symbol!="function"||!Symbol.iterator?"@@iterator":Symbol.iterator}var Xt=Wi();function Zt(e){return k(e==null?void 0:e[Xt])}function er(e){return co(this,arguments,function(){var r,o,n,i;return Wt(this,function(s){switch(s.label){case 0:r=e.getReader(),s.label=1;case 1:s.trys.push([1,,9,10]),s.label=2;case 2:return[4,ot(r.read())];case 3:return o=s.sent(),n=o.value,i=o.done,i?[4,ot(void 0)]:[3,5];case 4:return[2,s.sent()];case 5:return[4,ot(n)];case 6:return[4,s.sent()];case 7:return s.sent(),[3,2];case 8:return[3,10];case 9:return r.releaseLock(),[7];case 10:return[2]}})})}function tr(e){return k(e==null?void 0:e.getReader)}function F(e){if(e instanceof I)return e;if(e!=null){if(Bt(e))return Ui(e);if(dt(e))return Ni(e);if(Yt(e))return Di(e);if(Gt(e))return To(e);if(Zt(e))return Vi(e);if(tr(e))return zi(e)}throw Jt(e)}function Ui(e){return new I(function(t){var r=e[ft]();if(k(r.subscribe))return r.subscribe(t);throw new TypeError("Provided object does not correctly implement Symbol.observable")})}function Ni(e){return new I(function(t){for(var r=0;r=2;return function(o){return o.pipe(e?v(function(n,i){return e(n,i,o)}):pe,ue(1),r?$e(t):Uo(function(){return new or}))}}function Rr(e){return e<=0?function(){return M}:g(function(t,r){var o=[];t.subscribe(E(r,function(n){o.push(n),e=2,!0))}function de(e){e===void 0&&(e={});var t=e.connector,r=t===void 0?function(){return new x}:t,o=e.resetOnError,n=o===void 0?!0:o,i=e.resetOnComplete,s=i===void 0?!0:i,a=e.resetOnRefCountZero,c=a===void 0?!0:a;return function(p){var l,f,u,d=0,y=!1,b=!1,D=function(){f==null||f.unsubscribe(),f=void 0},Q=function(){D(),l=u=void 0,y=b=!1},J=function(){var C=l;Q(),C==null||C.unsubscribe()};return g(function(C,ct){d++,!b&&!y&&D();var Ve=u=u!=null?u:r();ct.add(function(){d--,d===0&&!b&&!y&&(f=jr(J,c))}),Ve.subscribe(ct),!l&&d>0&&(l=new it({next:function(Fe){return 
Ve.next(Fe)},error:function(Fe){b=!0,D(),f=jr(Q,n,Fe),Ve.error(Fe)},complete:function(){y=!0,D(),f=jr(Q,s),Ve.complete()}}),F(C).subscribe(l))})(p)}}function jr(e,t){for(var r=[],o=2;oe.next(document)),e}function W(e,t=document){return Array.from(t.querySelectorAll(e))}function U(e,t=document){let r=ce(e,t);if(typeof r=="undefined")throw new ReferenceError(`Missing element: expected "${e}" to be present`);return r}function ce(e,t=document){return t.querySelector(e)||void 0}function Ie(){return document.activeElement instanceof HTMLElement&&document.activeElement||void 0}var ca=L(h(document.body,"focusin"),h(document.body,"focusout")).pipe(ye(1),q(void 0),m(()=>Ie()||document.body),Z(1));function vt(e){return ca.pipe(m(t=>e.contains(t)),X())}function qo(e,t){return L(h(e,"mouseenter").pipe(m(()=>!0)),h(e,"mouseleave").pipe(m(()=>!1))).pipe(t?ye(t):pe,q(!1))}function Ue(e){return{x:e.offsetLeft,y:e.offsetTop}}function Ko(e){return L(h(window,"load"),h(window,"resize")).pipe(Le(0,ge),m(()=>Ue(e)),q(Ue(e)))}function ir(e){return{x:e.scrollLeft,y:e.scrollTop}}function et(e){return L(h(e,"scroll"),h(window,"resize")).pipe(Le(0,ge),m(()=>ir(e)),q(ir(e)))}function Qo(e,t){if(typeof t=="string"||typeof t=="number")e.innerHTML+=t.toString();else if(t instanceof Node)e.appendChild(t);else if(Array.isArray(t))for(let r of t)Qo(e,r)}function S(e,t,...r){let o=document.createElement(e);if(t)for(let n of Object.keys(t))typeof t[n]!="undefined"&&(typeof t[n]!="boolean"?o.setAttribute(n,t[n]):o.setAttribute(n,""));for(let n of r)Qo(o,n);return o}function ar(e){if(e>999){let t=+((e-950)%1e3>99);return`${((e+1e-6)/1e3).toFixed(t)}k`}else return e.toString()}function gt(e){let t=S("script",{src:e});return H(()=>(document.head.appendChild(t),L(h(t,"load"),h(t,"error").pipe(w(()=>kr(()=>new ReferenceError(`Invalid script: ${e}`))))).pipe(m(()=>{}),A(()=>document.head.removeChild(t)),ue(1))))}var Yo=new x,pa=H(()=>typeof 
ResizeObserver=="undefined"?gt("https://unpkg.com/resize-observer-polyfill"):R(void 0)).pipe(m(()=>new ResizeObserver(e=>{for(let t of e)Yo.next(t)})),w(e=>L(Ke,R(e)).pipe(A(()=>e.disconnect()))),Z(1));function le(e){return{width:e.offsetWidth,height:e.offsetHeight}}function Se(e){return pa.pipe(T(t=>t.observe(e)),w(t=>Yo.pipe(v(({target:r})=>r===e),A(()=>t.unobserve(e)),m(()=>le(e)))),q(le(e)))}function xt(e){return{width:e.scrollWidth,height:e.scrollHeight}}function sr(e){let t=e.parentElement;for(;t&&(e.scrollWidth<=t.scrollWidth&&e.scrollHeight<=t.scrollHeight);)t=(e=t).parentElement;return t?e:void 0}var Bo=new x,la=H(()=>R(new IntersectionObserver(e=>{for(let t of e)Bo.next(t)},{threshold:0}))).pipe(w(e=>L(Ke,R(e)).pipe(A(()=>e.disconnect()))),Z(1));function yt(e){return la.pipe(T(t=>t.observe(e)),w(t=>Bo.pipe(v(({target:r})=>r===e),A(()=>t.unobserve(e)),m(({isIntersecting:r})=>r))))}function Go(e,t=16){return et(e).pipe(m(({y:r})=>{let o=le(e),n=xt(e);return r>=n.height-o.height-t}),X())}var cr={drawer:U("[data-md-toggle=drawer]"),search:U("[data-md-toggle=search]")};function Jo(e){return cr[e].checked}function Ye(e,t){cr[e].checked!==t&&cr[e].click()}function Ne(e){let t=cr[e];return h(t,"change").pipe(m(()=>t.checked),q(t.checked))}function ma(e,t){switch(e.constructor){case HTMLInputElement:return e.type==="radio"?/^Arrow/.test(t):!0;case HTMLSelectElement:case HTMLTextAreaElement:return!0;default:return e.isContentEditable}}function fa(){return L(h(window,"compositionstart").pipe(m(()=>!0)),h(window,"compositionend").pipe(m(()=>!1))).pipe(q(!1))}function Xo(){let e=h(window,"keydown").pipe(v(t=>!(t.metaKey||t.ctrlKey)),m(t=>({mode:Jo("search")?"search":"global",type:t.key,claim(){t.preventDefault(),t.stopPropagation()}})),v(({mode:t,type:r})=>{if(t==="global"){let o=Ie();if(typeof o!="undefined")return!ma(o,r)}return!0}),de());return fa().pipe(w(t=>t?M:e))}function me(){return new URL(location.href)}function st(e,t=!1){if(G("navigation.instant")&&!t){let 
r=S("a",{href:e.href});document.body.appendChild(r),r.click(),r.remove()}else location.href=e.href}function Zo(){return new x}function en(){return location.hash.slice(1)}function pr(e){let t=S("a",{href:e});t.addEventListener("click",r=>r.stopPropagation()),t.click()}function ua(e){return L(h(window,"hashchange"),e).pipe(m(en),q(en()),v(t=>t.length>0),Z(1))}function tn(e){return ua(e).pipe(m(t=>ce(`[id="${t}"]`)),v(t=>typeof t!="undefined"))}function At(e){let t=matchMedia(e);return nr(r=>t.addListener(()=>r(t.matches))).pipe(q(t.matches))}function rn(){let e=matchMedia("print");return L(h(window,"beforeprint").pipe(m(()=>!0)),h(window,"afterprint").pipe(m(()=>!1))).pipe(q(e.matches))}function Dr(e,t){return e.pipe(w(r=>r?t():M))}function lr(e,t){return new I(r=>{let o=new XMLHttpRequest;o.open("GET",`${e}`),o.responseType="blob",o.addEventListener("load",()=>{o.status>=200&&o.status<300?(r.next(o.response),r.complete()):r.error(new Error(o.statusText))}),o.addEventListener("error",()=>{r.error(new Error("Network Error"))}),o.addEventListener("abort",()=>{r.error(new Error("Request aborted"))}),typeof(t==null?void 0:t.progress$)!="undefined"&&(o.addEventListener("progress",n=>{if(n.lengthComputable)t.progress$.next(n.loaded/n.total*100);else{let i=Number(o.getResponseHeader("Content-Length"))||0;t.progress$.next(n.loaded/i*100)}}),t.progress$.next(5)),o.send()})}function De(e,t){return lr(e,t).pipe(w(r=>r.text()),m(r=>JSON.parse(r)),Z(1))}function on(e,t){let r=new DOMParser;return lr(e,t).pipe(w(o=>o.text()),m(o=>r.parseFromString(o,"text/xml")),Z(1))}function nn(){return{x:Math.max(0,scrollX),y:Math.max(0,scrollY)}}function an(){return L(h(window,"scroll",{passive:!0}),h(window,"resize",{passive:!0})).pipe(m(nn),q(nn()))}function sn(){return{width:innerWidth,height:innerHeight}}function cn(){return h(window,"resize",{passive:!0}).pipe(m(sn),q(sn()))}function pn(){return B([an(),cn()]).pipe(m(([e,t])=>({offset:e,size:t})),Z(1))}function 
mr(e,{viewport$:t,header$:r}){let o=t.pipe(te("size")),n=B([o,r]).pipe(m(()=>Ue(e)));return B([r,t,n]).pipe(m(([{height:i},{offset:s,size:a},{x:c,y:p}])=>({offset:{x:s.x-c,y:s.y-p+i},size:a})))}function da(e){return h(e,"message",t=>t.data)}function ha(e){let t=new x;return t.subscribe(r=>e.postMessage(r)),t}function ln(e,t=new Worker(e)){let r=da(t),o=ha(t),n=new x;n.subscribe(o);let i=o.pipe(ee(),oe(!0));return n.pipe(ee(),Re(r.pipe(j(i))),de())}var ba=U("#__config"),Et=JSON.parse(ba.textContent);Et.base=`${new URL(Et.base,me())}`;function he(){return Et}function G(e){return Et.features.includes(e)}function we(e,t){return typeof t!="undefined"?Et.translations[e].replace("#",t.toString()):Et.translations[e]}function Oe(e,t=document){return U(`[data-md-component=${e}]`,t)}function ne(e,t=document){return W(`[data-md-component=${e}]`,t)}function va(e){let t=U(".md-typeset > :first-child",e);return h(t,"click",{once:!0}).pipe(m(()=>U(".md-typeset",e)),m(r=>({hash:__md_hash(r.innerHTML)})))}function mn(e){if(!G("announce.dismiss")||!e.childElementCount)return M;if(!e.hidden){let t=U(".md-typeset",e);__md_hash(t.innerHTML)===__md_get("__announce")&&(e.hidden=!0)}return H(()=>{let t=new x;return t.subscribe(({hash:r})=>{e.hidden=!0,__md_set("__announce",r)}),va(e).pipe(T(r=>t.next(r)),A(()=>t.complete()),m(r=>P({ref:e},r)))})}function ga(e,{target$:t}){return t.pipe(m(r=>({hidden:r!==e})))}function fn(e,t){let r=new x;return r.subscribe(({hidden:o})=>{e.hidden=o}),ga(e,t).pipe(T(o=>r.next(o)),A(()=>r.complete()),m(o=>P({ref:e},o)))}function Ct(e,t){return t==="inline"?S("div",{class:"md-tooltip md-tooltip--inline",id:e,role:"tooltip"},S("div",{class:"md-tooltip__inner md-typeset"})):S("div",{class:"md-tooltip",id:e,role:"tooltip"},S("div",{class:"md-tooltip__inner md-typeset"}))}function un(e,t){if(t=t?`${t}_annotation_${e}`:void 0,t){let r=t?`#${t}`:void 0;return 
S("aside",{class:"md-annotation",tabIndex:0},Ct(t),S("a",{href:r,class:"md-annotation__index",tabIndex:-1},S("span",{"data-md-annotation-id":e})))}else return S("aside",{class:"md-annotation",tabIndex:0},Ct(t),S("span",{class:"md-annotation__index",tabIndex:-1},S("span",{"data-md-annotation-id":e})))}function dn(e){return S("button",{class:"md-clipboard md-icon",title:we("clipboard.copy"),"data-clipboard-target":`#${e} > code`})}function Vr(e,t){let r=t&2,o=t&1,n=Object.keys(e.terms).filter(c=>!e.terms[c]).reduce((c,p)=>[...c,S("del",null,p)," "],[]).slice(0,-1),i=he(),s=new URL(e.location,i.base);G("search.highlight")&&s.searchParams.set("h",Object.entries(e.terms).filter(([,c])=>c).reduce((c,[p])=>`${c} ${p}`.trim(),""));let{tags:a}=he();return S("a",{href:`${s}`,class:"md-search-result__link",tabIndex:-1},S("article",{class:"md-search-result__article md-typeset","data-md-score":e.score.toFixed(2)},r>0&&S("div",{class:"md-search-result__icon md-icon"}),r>0&&S("h1",null,e.title),r<=0&&S("h2",null,e.title),o>0&&e.text.length>0&&e.text,e.tags&&e.tags.map(c=>{let p=a?c in a?`md-tag-icon md-tag--${a[c]}`:"md-tag-icon":"";return S("span",{class:`md-tag ${p}`},c)}),o>0&&n.length>0&&S("p",{class:"md-search-result__terms"},we("search.result.term.missing"),": ",...n)))}function hn(e){let t=e[0].score,r=[...e],o=he(),n=r.findIndex(l=>!`${new URL(l.location,o.base)}`.includes("#")),[i]=r.splice(n,1),s=r.findIndex(l=>l.scoreVr(l,1)),...c.length?[S("details",{class:"md-search-result__more"},S("summary",{tabIndex:-1},S("div",null,c.length>0&&c.length===1?we("search.result.more.one"):we("search.result.more.other",c.length))),...c.map(l=>Vr(l,1)))]:[]];return S("li",{class:"md-search-result__item"},p)}function bn(e){return S("ul",{class:"md-source__facts"},Object.entries(e).map(([t,r])=>S("li",{class:`md-source__fact md-source__fact--${t}`},typeof r=="number"?ar(r):r)))}function zr(e){let t=`tabbed-control tabbed-control--${e}`;return 
S("div",{class:t,hidden:!0},S("button",{class:"tabbed-button",tabIndex:-1,"aria-hidden":"true"}))}function vn(e){return S("div",{class:"md-typeset__scrollwrap"},S("div",{class:"md-typeset__table"},e))}function xa(e){let t=he(),r=new URL(`../${e.version}/`,t.base);return S("li",{class:"md-version__item"},S("a",{href:`${r}`,class:"md-version__link"},e.title))}function gn(e,t){return S("div",{class:"md-version"},S("button",{class:"md-version__current","aria-label":we("select.version")},t.title),S("ul",{class:"md-version__list"},e.map(xa)))}var ya=0;function Ea(e,t){document.body.append(e);let{width:r}=le(e);e.style.setProperty("--md-tooltip-width",`${r}px`),e.remove();let o=sr(t),n=typeof o!="undefined"?et(o):R({x:0,y:0}),i=L(vt(t),qo(t)).pipe(X());return B([i,n]).pipe(m(([s,a])=>{let{x:c,y:p}=Ue(t),l=le(t),f=t.closest("table");return f&&t.parentElement&&(c+=f.offsetLeft+t.parentElement.offsetLeft,p+=f.offsetTop+t.parentElement.offsetTop),{active:s,offset:{x:c-a.x+l.width/2-r/2,y:p-a.y+l.height+8}}}))}function Be(e){let t=e.title;if(!t.length)return M;let r=`__tooltip_${ya++}`,o=Ct(r,"inline"),n=U(".md-typeset",o);return n.innerHTML=t,H(()=>{let i=new x;return 
i.subscribe({next({offset:s}){o.style.setProperty("--md-tooltip-x",`${s.x}px`),o.style.setProperty("--md-tooltip-y",`${s.y}px`)},complete(){o.style.removeProperty("--md-tooltip-x"),o.style.removeProperty("--md-tooltip-y")}}),L(i.pipe(v(({active:s})=>s)),i.pipe(ye(250),v(({active:s})=>!s))).subscribe({next({active:s}){s?(e.insertAdjacentElement("afterend",o),e.setAttribute("aria-describedby",r),e.removeAttribute("title")):(o.remove(),e.removeAttribute("aria-describedby"),e.setAttribute("title",t))},complete(){o.remove(),e.removeAttribute("aria-describedby"),e.setAttribute("title",t)}}),i.pipe(Le(16,ge)).subscribe(({active:s})=>{o.classList.toggle("md-tooltip--active",s)}),i.pipe(_t(125,ge),v(()=>!!e.offsetParent),m(()=>e.offsetParent.getBoundingClientRect()),m(({x:s})=>s)).subscribe({next(s){s?o.style.setProperty("--md-tooltip-0",`${-s}px`):o.style.removeProperty("--md-tooltip-0")},complete(){o.style.removeProperty("--md-tooltip-0")}}),Ea(o,e).pipe(T(s=>i.next(s)),A(()=>i.complete()),m(s=>P({ref:e},s)))}).pipe(qe(ie))}function wa(e,t){let r=H(()=>B([Ko(e),et(t)])).pipe(m(([{x:o,y:n},i])=>{let{width:s,height:a}=le(e);return{x:o-i.x+s/2,y:n-i.y+a/2}}));return vt(e).pipe(w(o=>r.pipe(m(n=>({active:o,offset:n})),ue(+!o||1/0))))}function xn(e,t,{target$:r}){let[o,n]=Array.from(e.children);return H(()=>{let i=new x,s=i.pipe(ee(),oe(!0));return 
i.subscribe({next({offset:a}){e.style.setProperty("--md-tooltip-x",`${a.x}px`),e.style.setProperty("--md-tooltip-y",`${a.y}px`)},complete(){e.style.removeProperty("--md-tooltip-x"),e.style.removeProperty("--md-tooltip-y")}}),yt(e).pipe(j(s)).subscribe(a=>{e.toggleAttribute("data-md-visible",a)}),L(i.pipe(v(({active:a})=>a)),i.pipe(ye(250),v(({active:a})=>!a))).subscribe({next({active:a}){a?e.prepend(o):o.remove()},complete(){e.prepend(o)}}),i.pipe(Le(16,ge)).subscribe(({active:a})=>{o.classList.toggle("md-tooltip--active",a)}),i.pipe(_t(125,ge),v(()=>!!e.offsetParent),m(()=>e.offsetParent.getBoundingClientRect()),m(({x:a})=>a)).subscribe({next(a){a?e.style.setProperty("--md-tooltip-0",`${-a}px`):e.style.removeProperty("--md-tooltip-0")},complete(){e.style.removeProperty("--md-tooltip-0")}}),h(n,"click").pipe(j(s),v(a=>!(a.metaKey||a.ctrlKey))).subscribe(a=>{a.stopPropagation(),a.preventDefault()}),h(n,"mousedown").pipe(j(s),ae(i)).subscribe(([a,{active:c}])=>{var p;if(a.button!==0||a.metaKey||a.ctrlKey)a.preventDefault();else if(c){a.preventDefault();let l=e.parentElement.closest(".md-annotation");l instanceof HTMLElement?l.focus():(p=Ie())==null||p.blur()}}),r.pipe(j(s),v(a=>a===o),Qe(125)).subscribe(()=>e.focus()),wa(e,t).pipe(T(a=>i.next(a)),A(()=>i.complete()),m(a=>P({ref:e},a)))})}function Ta(e){return e.tagName==="CODE"?W(".c, .c1, .cm",e):[e]}function Sa(e){let t=[];for(let r of Ta(e)){let o=[],n=document.createNodeIterator(r,NodeFilter.SHOW_TEXT);for(let i=n.nextNode();i;i=n.nextNode())o.push(i);for(let i of o){let s;for(;s=/(\(\d+\))(!)?/.exec(i.textContent);){let[,a,c]=s;if(typeof c=="undefined"){let p=i.splitText(s.index);i=p.splitText(a.length),t.push(p)}else{i.textContent=a,t.push(i);break}}}}return t}function yn(e,t){t.append(...Array.from(e.childNodes))}function fr(e,t,{target$:r,print$:o}){let n=t.closest("[id]"),i=n==null?void 0:n.id,s=new Map;for(let a of Sa(t)){let[,c]=a.textContent.match(/\((\d+)\)/);ce(`:scope > 
li:nth-child(${c})`,e)&&(s.set(c,un(c,i)),a.replaceWith(s.get(c)))}return s.size===0?M:H(()=>{let a=new x,c=a.pipe(ee(),oe(!0)),p=[];for(let[l,f]of s)p.push([U(".md-typeset",f),U(`:scope > li:nth-child(${l})`,e)]);return o.pipe(j(c)).subscribe(l=>{e.hidden=!l,e.classList.toggle("md-annotation-list",l);for(let[f,u]of p)l?yn(f,u):yn(u,f)}),L(...[...s].map(([,l])=>xn(l,t,{target$:r}))).pipe(A(()=>a.complete()),de())})}function En(e){if(e.nextElementSibling){let t=e.nextElementSibling;if(t.tagName==="OL")return t;if(t.tagName==="P"&&!t.children.length)return En(t)}}function wn(e,t){return H(()=>{let r=En(e);return typeof r!="undefined"?fr(r,e,t):M})}var Tn=jt(Kr());var Oa=0;function Sn(e){if(e.nextElementSibling){let t=e.nextElementSibling;if(t.tagName==="OL")return t;if(t.tagName==="P"&&!t.children.length)return Sn(t)}}function Ma(e){return Se(e).pipe(m(({width:t})=>({scrollable:xt(e).width>t})),te("scrollable"))}function On(e,t){let{matches:r}=matchMedia("(hover)"),o=H(()=>{let n=new x,i=n.pipe(Rr(1));n.subscribe(({scrollable:c})=>{c&&r?e.setAttribute("tabindex","0"):e.removeAttribute("tabindex")});let s=[];if(Tn.default.isSupported()&&(e.closest(".copy")||G("content.code.copy")&&!e.closest(".no-copy"))){let c=e.closest("pre");c.id=`__code_${Oa++}`;let p=dn(c.id);c.insertBefore(p,e),G("content.tooltips")&&s.push(Be(p))}let a=e.closest(".highlight");if(a instanceof HTMLElement){let c=Sn(a);if(typeof c!="undefined"&&(a.classList.contains("annotate")||G("content.code.annotate"))){let p=fr(c,e,t);s.push(Se(a).pipe(j(i),m(({width:l,height:f})=>l&&f),X(),w(l=>l?p:M)))}}return Ma(e).pipe(T(c=>n.next(c)),A(()=>n.complete()),m(c=>P({ref:e},c)),Re(...s))});return G("content.lazy")?yt(e).pipe(v(n=>n),ue(1),w(()=>o)):o}function La(e,{target$:t,print$:r}){let o=!0;return L(t.pipe(m(n=>n.closest("details:not([open])")),v(n=>e===n),m(()=>({action:"open",reveal:!0}))),r.pipe(v(n=>n||!o),T(()=>o=e.open),m(n=>({action:n?"open":"close"}))))}function Mn(e,t){return H(()=>{let r=new 
x;return r.subscribe(({action:o,reveal:n})=>{e.toggleAttribute("open",o==="open"),n&&e.scrollIntoView()}),La(e,t).pipe(T(o=>r.next(o)),A(()=>r.complete()),m(o=>P({ref:e},o)))})}var Ln=".node circle,.node ellipse,.node path,.node polygon,.node rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}marker{fill:var(--md-mermaid-edge-color)!important}.edgeLabel .label rect{fill:#0000}.label{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.label foreignObject{line-height:normal;overflow:visible}.label div .edgeLabel{color:var(--md-mermaid-label-fg-color)}.edgeLabel,.edgeLabel rect,.label div .edgeLabel{background-color:var(--md-mermaid-label-bg-color)}.edgeLabel,.edgeLabel rect{fill:var(--md-mermaid-label-bg-color);color:var(--md-mermaid-edge-color)}.edgePath .path,.flowchart-link{stroke:var(--md-mermaid-edge-color);stroke-width:.05rem}.edgePath .arrowheadPath{fill:var(--md-mermaid-edge-color);stroke:none}.cluster rect{fill:var(--md-default-fg-color--lightest);stroke:var(--md-default-fg-color--lighter)}.cluster span{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}g #flowchart-circleEnd,g #flowchart-circleStart,g #flowchart-crossEnd,g #flowchart-crossStart,g #flowchart-pointEnd,g #flowchart-pointStart{stroke:none}g.classGroup line,g.classGroup rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}g.classGroup text{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.classLabel .box{fill:var(--md-mermaid-label-bg-color);background-color:var(--md-mermaid-label-bg-color);opacity:1}.classLabel .label{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.node .divider{stroke:var(--md-mermaid-node-fg-color)}.relation{stroke:var(--md-mermaid-edge-color)}.cardinality{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.cardinality text{fill:inherit!important}defs 
#classDiagram-compositionEnd,defs #classDiagram-compositionStart,defs #classDiagram-dependencyEnd,defs #classDiagram-dependencyStart,defs #classDiagram-extensionEnd,defs #classDiagram-extensionStart{fill:var(--md-mermaid-edge-color)!important;stroke:var(--md-mermaid-edge-color)!important}defs #classDiagram-aggregationEnd,defs #classDiagram-aggregationStart{fill:var(--md-mermaid-label-bg-color)!important;stroke:var(--md-mermaid-edge-color)!important}g.stateGroup rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}g.stateGroup .state-title{fill:var(--md-mermaid-label-fg-color)!important;font-family:var(--md-mermaid-font-family)}g.stateGroup .composit{fill:var(--md-mermaid-label-bg-color)}.nodeLabel{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.node circle.state-end,.node circle.state-start,.start-state{fill:var(--md-mermaid-edge-color);stroke:none}.end-state-inner,.end-state-outer{fill:var(--md-mermaid-edge-color)}.end-state-inner,.node circle.state-end{stroke:var(--md-mermaid-label-bg-color)}.transition{stroke:var(--md-mermaid-edge-color)}[id^=state-fork] rect,[id^=state-join] rect{fill:var(--md-mermaid-edge-color)!important;stroke:none!important}.statediagram-cluster.statediagram-cluster .inner{fill:var(--md-default-bg-color)}.statediagram-cluster rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}.statediagram-state rect.divider{fill:var(--md-default-fg-color--lightest);stroke:var(--md-default-fg-color--lighter)}defs 
#statediagram-barbEnd{stroke:var(--md-mermaid-edge-color)}.attributeBoxEven,.attributeBoxOdd{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}.entityBox{fill:var(--md-mermaid-label-bg-color);stroke:var(--md-mermaid-node-fg-color)}.entityLabel{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.relationshipLabelBox{fill:var(--md-mermaid-label-bg-color);fill-opacity:1;background-color:var(--md-mermaid-label-bg-color);opacity:1}.relationshipLabel{fill:var(--md-mermaid-label-fg-color)}.relationshipLine{stroke:var(--md-mermaid-edge-color)}defs #ONE_OR_MORE_END *,defs #ONE_OR_MORE_START *,defs #ONLY_ONE_END *,defs #ONLY_ONE_START *,defs #ZERO_OR_MORE_END *,defs #ZERO_OR_MORE_START *,defs #ZERO_OR_ONE_END *,defs #ZERO_OR_ONE_START *{stroke:var(--md-mermaid-edge-color)!important}defs #ZERO_OR_MORE_END circle,defs #ZERO_OR_MORE_START circle{fill:var(--md-mermaid-label-bg-color)}.actor{fill:var(--md-mermaid-sequence-actor-bg-color);stroke:var(--md-mermaid-sequence-actor-border-color)}text.actor>tspan{fill:var(--md-mermaid-sequence-actor-fg-color);font-family:var(--md-mermaid-font-family)}line{stroke:var(--md-mermaid-sequence-actor-line-color)}.actor-man circle,.actor-man line{fill:var(--md-mermaid-sequence-actorman-bg-color);stroke:var(--md-mermaid-sequence-actorman-line-color)}.messageLine0,.messageLine1{stroke:var(--md-mermaid-sequence-message-line-color)}.note{fill:var(--md-mermaid-sequence-note-bg-color);stroke:var(--md-mermaid-sequence-note-border-color)}.loopText,.loopText>tspan,.messageText,.noteText>tspan{stroke:none;font-family:var(--md-mermaid-font-family)!important}.messageText{fill:var(--md-mermaid-sequence-message-fg-color)}.loopText,.loopText>tspan{fill:var(--md-mermaid-sequence-loop-fg-color)}.noteText>tspan{fill:var(--md-mermaid-sequence-note-fg-color)}#arrowhead 
path{fill:var(--md-mermaid-sequence-message-line-color);stroke:none}.loopLine{fill:var(--md-mermaid-sequence-loop-bg-color);stroke:var(--md-mermaid-sequence-loop-border-color)}.labelBox{fill:var(--md-mermaid-sequence-label-bg-color);stroke:none}.labelText,.labelText>span{fill:var(--md-mermaid-sequence-label-fg-color);font-family:var(--md-mermaid-font-family)}.sequenceNumber{fill:var(--md-mermaid-sequence-number-fg-color)}rect.rect{fill:var(--md-mermaid-sequence-box-bg-color);stroke:none}rect.rect+text.text{fill:var(--md-mermaid-sequence-box-fg-color)}defs #sequencenumber{fill:var(--md-mermaid-sequence-number-bg-color)!important}";var Qr,Aa=0;function Ca(){return typeof mermaid=="undefined"||mermaid instanceof Element?gt("https://unpkg.com/mermaid@10.6.1/dist/mermaid.min.js"):R(void 0)}function _n(e){return e.classList.remove("mermaid"),Qr||(Qr=Ca().pipe(T(()=>mermaid.initialize({startOnLoad:!1,themeCSS:Ln,sequence:{actorFontSize:"16px",messageFontSize:"16px",noteFontSize:"16px"}})),m(()=>{}),Z(1))),Qr.subscribe(()=>no(this,null,function*(){e.classList.add("mermaid");let t=`__mermaid_${Aa++}`,r=S("div",{class:"mermaid"}),o=e.textContent,{svg:n,fn:i}=yield mermaid.render(t,o),s=r.attachShadow({mode:"closed"});s.innerHTML=n,e.replaceWith(r),i==null||i(s)})),Qr.pipe(m(()=>({ref:e})))}var An=S("table");function Cn(e){return e.replaceWith(An),An.replaceWith(vn(e)),R({ref:e})}function ka(e){let t=e.find(r=>r.checked)||e[0];return L(...e.map(r=>h(r,"change").pipe(m(()=>U(`label[for="${r.id}"]`))))).pipe(q(U(`label[for="${t.id}"]`)),m(r=>({active:r})))}function kn(e,{viewport$:t,target$:r}){let o=U(".tabbed-labels",e),n=W(":scope > input",e),i=zr("prev");e.append(i);let s=zr("next");return e.append(s),H(()=>{let a=new x,c=a.pipe(ee(),oe(!0));B([a,Se(e)]).pipe(j(c),Le(1,ge)).subscribe({next([{active:p},l]){let f=Ue(p),{width:u}=le(p);e.style.setProperty("--md-indicator-x",`${f.x}px`),e.style.setProperty("--md-indicator-width",`${u}px`);let 
d=ir(o);(f.xd.x+l.width)&&o.scrollTo({left:Math.max(0,f.x-16),behavior:"smooth"})},complete(){e.style.removeProperty("--md-indicator-x"),e.style.removeProperty("--md-indicator-width")}}),B([et(o),Se(o)]).pipe(j(c)).subscribe(([p,l])=>{let f=xt(o);i.hidden=p.x<16,s.hidden=p.x>f.width-l.width-16}),L(h(i,"click").pipe(m(()=>-1)),h(s,"click").pipe(m(()=>1))).pipe(j(c)).subscribe(p=>{let{width:l}=le(o);o.scrollBy({left:l*p,behavior:"smooth"})}),r.pipe(j(c),v(p=>n.includes(p))).subscribe(p=>p.click()),o.classList.add("tabbed-labels--linked");for(let p of n){let l=U(`label[for="${p.id}"]`);l.replaceChildren(S("a",{href:`#${l.htmlFor}`,tabIndex:-1},...Array.from(l.childNodes))),h(l.firstElementChild,"click").pipe(j(c),v(f=>!(f.metaKey||f.ctrlKey)),T(f=>{f.preventDefault(),f.stopPropagation()})).subscribe(()=>{history.replaceState({},"",`#${l.htmlFor}`),l.click()})}return G("content.tabs.link")&&a.pipe(Ee(1),ae(t)).subscribe(([{active:p},{offset:l}])=>{let f=p.innerText.trim();if(p.hasAttribute("data-md-switching"))p.removeAttribute("data-md-switching");else{let u=e.offsetTop-l.y;for(let y of W("[data-tabs]"))for(let b of W(":scope > input",y)){let D=U(`label[for="${b.id}"]`);if(D!==p&&D.innerText.trim()===f){D.setAttribute("data-md-switching",""),b.click();break}}window.scrollTo({top:e.offsetTop-u});let d=__md_get("__tabs")||[];__md_set("__tabs",[...new Set([f,...d])])}}),a.pipe(j(c)).subscribe(()=>{for(let p of W("audio, video",e))p.pause()}),ka(n).pipe(T(p=>a.next(p)),A(()=>a.complete()),m(p=>P({ref:e},p)))}).pipe(qe(ie))}function Hn(e,{viewport$:t,target$:r,print$:o}){return L(...W(".annotate:not(.highlight)",e).map(n=>wn(n,{target$:r,print$:o})),...W("pre:not(.mermaid) > 
code",e).map(n=>On(n,{target$:r,print$:o})),...W("pre.mermaid",e).map(n=>_n(n)),...W("table:not([class])",e).map(n=>Cn(n)),...W("details",e).map(n=>Mn(n,{target$:r,print$:o})),...W("[data-tabs]",e).map(n=>kn(n,{viewport$:t,target$:r})),...W("[title]",e).filter(()=>G("content.tooltips")).map(n=>Be(n)))}function Ha(e,{alert$:t}){return t.pipe(w(r=>L(R(!0),R(!1).pipe(Qe(2e3))).pipe(m(o=>({message:r,active:o})))))}function $n(e,t){let r=U(".md-typeset",e);return H(()=>{let o=new x;return o.subscribe(({message:n,active:i})=>{e.classList.toggle("md-dialog--active",i),r.textContent=n}),Ha(e,t).pipe(T(n=>o.next(n)),A(()=>o.complete()),m(n=>P({ref:e},n)))})}function $a({viewport$:e}){if(!G("header.autohide"))return R(!1);let t=e.pipe(m(({offset:{y:n}})=>n),Ce(2,1),m(([n,i])=>[nMath.abs(i-n.y)>100),m(([,[n]])=>n),X()),o=Ne("search");return B([e,o]).pipe(m(([{offset:n},i])=>n.y>400&&!i),X(),w(n=>n?r:R(!1)),q(!1))}function Pn(e,t){return H(()=>B([Se(e),$a(t)])).pipe(m(([{height:r},o])=>({height:r,hidden:o})),X((r,o)=>r.height===o.height&&r.hidden===o.hidden),Z(1))}function Rn(e,{header$:t,main$:r}){return H(()=>{let o=new x,n=o.pipe(ee(),oe(!0));o.pipe(te("active"),Ze(t)).subscribe(([{active:s},{hidden:a}])=>{e.classList.toggle("md-header--shadow",s&&!a),e.hidden=a});let i=fe(W("[title]",e)).pipe(v(()=>G("content.tooltips")),re(s=>Be(s)));return r.subscribe(o),t.pipe(j(n),m(s=>P({ref:e},s)),Re(i.pipe(j(n))))})}function Pa(e,{viewport$:t,header$:r}){return mr(e,{viewport$:t,header$:r}).pipe(m(({offset:{y:o}})=>{let{height:n}=le(e);return{active:o>=n}}),te("active"))}function In(e,t){return H(()=>{let r=new x;r.subscribe({next({active:n}){e.classList.toggle("md-header__title--active",n)},complete(){e.classList.remove("md-header__title--active")}});let o=ce(".md-content h1");return typeof o=="undefined"?M:Pa(o,t).pipe(T(n=>r.next(n)),A(()=>r.complete()),m(n=>P({ref:e},n)))})}function Fn(e,{viewport$:t,header$:r}){let 
o=r.pipe(m(({height:i})=>i),X()),n=o.pipe(w(()=>Se(e).pipe(m(({height:i})=>({top:e.offsetTop,bottom:e.offsetTop+i})),te("bottom"))));return B([o,n,t]).pipe(m(([i,{top:s,bottom:a},{offset:{y:c},size:{height:p}}])=>(p=Math.max(0,p-Math.max(0,s-c,i)-Math.max(0,p+c-a)),{offset:s-i,height:p,active:s-i<=c})),X((i,s)=>i.offset===s.offset&&i.height===s.height&&i.active===s.active))}function Ra(e){let t=__md_get("__palette")||{index:e.findIndex(r=>matchMedia(r.getAttribute("data-md-color-media")).matches)};return R(...e).pipe(re(r=>h(r,"change").pipe(m(()=>r))),q(e[Math.max(0,t.index)]),m(r=>({index:e.indexOf(r),color:{media:r.getAttribute("data-md-color-media"),scheme:r.getAttribute("data-md-color-scheme"),primary:r.getAttribute("data-md-color-primary"),accent:r.getAttribute("data-md-color-accent")}})),Z(1))}function jn(e){let t=W("input",e),r=S("meta",{name:"theme-color"});document.head.appendChild(r);let o=S("meta",{name:"color-scheme"});document.head.appendChild(o);let n=At("(prefers-color-scheme: light)");return H(()=>{let i=new x;return i.subscribe(s=>{if(document.body.setAttribute("data-md-color-switching",""),s.color.media==="(prefers-color-scheme)"){let a=matchMedia("(prefers-color-scheme: light)"),c=document.querySelector(a.matches?"[data-md-color-media='(prefers-color-scheme: light)']":"[data-md-color-media='(prefers-color-scheme: dark)']");s.color.scheme=c.getAttribute("data-md-color-scheme"),s.color.primary=c.getAttribute("data-md-color-primary"),s.color.accent=c.getAttribute("data-md-color-accent")}for(let[a,c]of Object.entries(s.color))document.body.setAttribute(`data-md-color-${a}`,c);for(let a=0;a{let s=Oe("header"),a=window.getComputedStyle(s);return 
o.content=a.colorScheme,a.backgroundColor.match(/\d+/g).map(c=>(+c).toString(16).padStart(2,"0")).join("")})).subscribe(s=>r.content=`#${s}`),i.pipe(Me(ie)).subscribe(()=>{document.body.removeAttribute("data-md-color-switching")}),Ra(t).pipe(j(n.pipe(Ee(1))),at(),T(s=>i.next(s)),A(()=>i.complete()),m(s=>P({ref:e},s)))})}function Wn(e,{progress$:t}){return H(()=>{let r=new x;return r.subscribe(({value:o})=>{e.style.setProperty("--md-progress-value",`${o}`)}),t.pipe(T(o=>r.next({value:o})),A(()=>r.complete()),m(o=>({ref:e,value:o})))})}var Yr=jt(Kr());function Ia(e){e.setAttribute("data-md-copying","");let t=e.closest("[data-copy]"),r=t?t.getAttribute("data-copy"):e.innerText;return e.removeAttribute("data-md-copying"),r.trimEnd()}function Un({alert$:e}){Yr.default.isSupported()&&new I(t=>{new Yr.default("[data-clipboard-target], [data-clipboard-text]",{text:r=>r.getAttribute("data-clipboard-text")||Ia(U(r.getAttribute("data-clipboard-target")))}).on("success",r=>t.next(r))}).pipe(T(t=>{t.trigger.focus()}),m(()=>we("clipboard.copied"))).subscribe(e)}function Fa(e){if(e.length<2)return[""];let[t,r]=[...e].sort((n,i)=>n.length-i.length).map(n=>n.replace(/[^/]+$/,"")),o=0;if(t===r)o=t.length;else for(;t.charCodeAt(o)===r.charCodeAt(o);)o++;return e.map(n=>n.replace(t.slice(0,o),""))}function ur(e){let t=__md_get("__sitemap",sessionStorage,e);if(t)return R(t);{let r=he();return on(new URL("sitemap.xml",e||r.base)).pipe(m(o=>Fa(W("loc",o).map(n=>n.textContent))),xe(()=>M),$e([]),T(o=>__md_set("__sitemap",o,sessionStorage,e)))}}function Nn(e){let t=ce("[rel=canonical]",e);typeof t!="undefined"&&(t.href=t.href.replace("//localhost:","//127.0.0.1:"));let r=new Map;for(let o of W(":scope > *",e)){let n=o.outerHTML;for(let i of["href","src"]){let s=o.getAttribute(i);if(s===null)continue;let a=new URL(s,t==null?void 0:t.href),c=o.cloneNode();c.setAttribute(i,`${a}`),n=c.outerHTML;break}r.set(n,o)}return r}function Dn({location$:e,viewport$:t,progress$:r}){let 
o=he();if(location.protocol==="file:")return M;let n=ur().pipe(m(l=>l.map(f=>`${new URL(f,o.base)}`))),i=h(document.body,"click").pipe(ae(n),w(([l,f])=>{if(!(l.target instanceof Element))return M;let u=l.target.closest("a");if(u===null)return M;if(u.target||l.metaKey||l.ctrlKey)return M;let d=new URL(u.href);return d.search=d.hash="",f.includes(`${d}`)?(l.preventDefault(),R(new URL(u.href))):M}),de());i.pipe(ue(1)).subscribe(()=>{let l=ce("link[rel=icon]");typeof l!="undefined"&&(l.href=l.href)}),h(window,"beforeunload").subscribe(()=>{history.scrollRestoration="auto"}),i.pipe(ae(t)).subscribe(([l,{offset:f}])=>{history.scrollRestoration="manual",history.replaceState(f,""),history.pushState(null,"",l)}),i.subscribe(e);let s=e.pipe(q(me()),te("pathname"),Ee(1),w(l=>lr(l,{progress$:r}).pipe(xe(()=>(st(l,!0),M))))),a=new DOMParser,c=s.pipe(w(l=>l.text()),w(l=>{let f=a.parseFromString(l,"text/html");for(let b of["[data-md-component=announce]","[data-md-component=container]","[data-md-component=header-topic]","[data-md-component=outdated]","[data-md-component=logo]","[data-md-component=skip]",...G("navigation.tabs.sticky")?["[data-md-component=tabs]"]:[]]){let D=ce(b),Q=ce(b,f);typeof D!="undefined"&&typeof Q!="undefined"&&D.replaceWith(Q)}let u=Nn(document.head),d=Nn(f.head);for(let[b,D]of d)D.getAttribute("rel")==="stylesheet"||D.hasAttribute("src")||(u.has(b)?u.delete(b):document.head.appendChild(D));for(let b of u.values())b.getAttribute("rel")==="stylesheet"||b.hasAttribute("src")||b.remove();let y=Oe("container");return We(W("script",y)).pipe(w(b=>{let D=f.createElement("script");if(b.src){for(let Q of b.getAttributeNames())D.setAttribute(Q,b.getAttribute(Q));return b.replaceWith(D),new I(Q=>{D.onload=()=>Q.complete()})}else return D.textContent=b.textContent,b.replaceWith(D),M}),ee(),oe(f))}),de());return h(window,"popstate").pipe(m(me)).subscribe(e),e.pipe(q(me()),Ce(2,1),v(([l,f])=>l.pathname===f.pathname&&l.hash!==f.hash),m(([,l])=>l)).subscribe(l=>{var 
f,u;history.state!==null||!l.hash?window.scrollTo(0,(u=(f=history.state)==null?void 0:f.y)!=null?u:0):(history.scrollRestoration="auto",pr(l.hash),history.scrollRestoration="manual")}),e.pipe(Ir(i),q(me()),Ce(2,1),v(([l,f])=>l.pathname===f.pathname&&l.hash===f.hash),m(([,l])=>l)).subscribe(l=>{history.scrollRestoration="auto",pr(l.hash),history.scrollRestoration="manual",history.back()}),c.pipe(ae(e)).subscribe(([,l])=>{var f,u;history.state!==null||!l.hash?window.scrollTo(0,(u=(f=history.state)==null?void 0:f.y)!=null?u:0):pr(l.hash)}),t.pipe(te("offset"),ye(100)).subscribe(({offset:l})=>{history.replaceState(l,"")}),c}var qn=jt(zn());function Kn(e){let t=e.separator.split("|").map(n=>n.replace(/(\(\?[!=<][^)]+\))/g,"").length===0?"\uFFFD":n).join("|"),r=new RegExp(t,"img"),o=(n,i,s)=>`${i}${s}`;return n=>{n=n.replace(/[\s*+\-:~^]+/g," ").trim();let i=new RegExp(`(^|${e.separator}|)(${n.replace(/[|\\{}()[\]^$+*?.-]/g,"\\$&").replace(r,"|")})`,"img");return s=>(0,qn.default)(s).replace(i,o).replace(/<\/mark>(\s+)]*>/img,"$1")}}function Ht(e){return e.type===1}function dr(e){return e.type===3}function Qn(e,t){let r=ln(e);return L(R(location.protocol!=="file:"),Ne("search")).pipe(Pe(o=>o),w(()=>t)).subscribe(({config:o,docs:n})=>r.next({type:0,data:{config:o,docs:n,options:{suggest:G("search.suggest")}}})),r}function Yn({document$:e}){let t=he(),r=De(new URL("../versions.json",t.base)).pipe(xe(()=>M)),o=r.pipe(m(n=>{let[,i]=t.base.match(/([^/]+)\/?$/);return n.find(({version:s,aliases:a})=>s===i||a.includes(i))||n[0]}));r.pipe(m(n=>new Map(n.map(i=>[`${new URL(`../${i.version}/`,t.base)}`,i]))),w(n=>h(document.body,"click").pipe(v(i=>!i.metaKey&&!i.ctrlKey),ae(o),w(([i,s])=>{if(i.target instanceof Element){let a=i.target.closest("a");if(a&&!a.target&&n.has(a.href)){let c=a.href;return!i.target.closest(".md-version")&&n.get(c)===s?M:(i.preventDefault(),R(c))}}return M}),w(i=>{let{version:s}=n.get(i);return ur(new URL(i)).pipe(m(a=>{let 
p=me().href.replace(t.base,"");return a.includes(p.split("#")[0])?new URL(`../${s}/${p}`,t.base):new URL(i)}))})))).subscribe(n=>st(n,!0)),B([r,o]).subscribe(([n,i])=>{U(".md-header__topic").appendChild(gn(n,i))}),e.pipe(w(()=>o)).subscribe(n=>{var s;let i=__md_get("__outdated",sessionStorage);if(i===null){i=!0;let a=((s=t.version)==null?void 0:s.default)||"latest";Array.isArray(a)||(a=[a]);e:for(let c of a)for(let p of n.aliases.concat(n.version))if(new RegExp(c,"i").test(p)){i=!1;break e}__md_set("__outdated",i,sessionStorage)}if(i)for(let a of ne("outdated"))a.hidden=!1})}function Da(e,{worker$:t}){let{searchParams:r}=me();r.has("q")&&(Ye("search",!0),e.value=r.get("q"),e.focus(),Ne("search").pipe(Pe(i=>!i)).subscribe(()=>{let i=me();i.searchParams.delete("q"),history.replaceState({},"",`${i}`)}));let o=vt(e),n=L(t.pipe(Pe(Ht)),h(e,"keyup"),o).pipe(m(()=>e.value),X());return B([n,o]).pipe(m(([i,s])=>({value:i,focus:s})),Z(1))}function Bn(e,{worker$:t}){let r=new x,o=r.pipe(ee(),oe(!0));B([t.pipe(Pe(Ht)),r],(i,s)=>s).pipe(te("value")).subscribe(({value:i})=>t.next({type:2,data:i})),r.pipe(te("focus")).subscribe(({focus:i})=>{i&&Ye("search",i)}),h(e.form,"reset").pipe(j(o)).subscribe(()=>e.focus());let n=U("header [for=__search]");return h(n,"click").subscribe(()=>e.focus()),Da(e,{worker$:t}).pipe(T(i=>r.next(i)),A(()=>r.complete()),m(i=>P({ref:e},i)),Z(1))}function Gn(e,{worker$:t,query$:r}){let o=new x,n=Go(e.parentElement).pipe(v(Boolean)),i=e.parentElement,s=U(":scope > :first-child",e),a=U(":scope > :last-child",e);Ne("search").subscribe(l=>a.setAttribute("role",l?"list":"presentation")),o.pipe(ae(r),Wr(t.pipe(Pe(Ht)))).subscribe(([{items:l},{value:f}])=>{switch(l.length){case 0:s.textContent=f.length?we("search.result.none"):we("search.result.placeholder");break;case 1:s.textContent=we("search.result.one");break;default:let u=ar(l.length);s.textContent=we("search.result.other",u)}});let 
c=o.pipe(T(()=>a.innerHTML=""),w(({items:l})=>L(R(...l.slice(0,10)),R(...l.slice(10)).pipe(Ce(4),Nr(n),w(([f])=>f)))),m(hn),de());return c.subscribe(l=>a.appendChild(l)),c.pipe(re(l=>{let f=ce("details",l);return typeof f=="undefined"?M:h(f,"toggle").pipe(j(o),m(()=>f))})).subscribe(l=>{l.open===!1&&l.offsetTop<=i.scrollTop&&i.scrollTo({top:l.offsetTop})}),t.pipe(v(dr),m(({data:l})=>l)).pipe(T(l=>o.next(l)),A(()=>o.complete()),m(l=>P({ref:e},l)))}function Va(e,{query$:t}){return t.pipe(m(({value:r})=>{let o=me();return o.hash="",r=r.replace(/\s+/g,"+").replace(/&/g,"%26").replace(/=/g,"%3D"),o.search=`q=${r}`,{url:o}}))}function Jn(e,t){let r=new x,o=r.pipe(ee(),oe(!0));return r.subscribe(({url:n})=>{e.setAttribute("data-clipboard-text",e.href),e.href=`${n}`}),h(e,"click").pipe(j(o)).subscribe(n=>n.preventDefault()),Va(e,t).pipe(T(n=>r.next(n)),A(()=>r.complete()),m(n=>P({ref:e},n)))}function Xn(e,{worker$:t,keyboard$:r}){let o=new x,n=Oe("search-query"),i=L(h(n,"keydown"),h(n,"focus")).pipe(Me(ie),m(()=>n.value),X());return o.pipe(Ze(i),m(([{suggest:a},c])=>{let p=c.split(/([\s-]+)/);if(a!=null&&a.length&&p[p.length-1]){let l=a[a.length-1];l.startsWith(p[p.length-1])&&(p[p.length-1]=l)}else p.length=0;return p})).subscribe(a=>e.innerHTML=a.join("").replace(/\s/g," ")),r.pipe(v(({mode:a})=>a==="search")).subscribe(a=>{switch(a.type){case"ArrowRight":e.innerText.length&&n.selectionStart===n.value.length&&(n.value=e.innerText);break}}),t.pipe(v(dr),m(({data:a})=>a)).pipe(T(a=>o.next(a)),A(()=>o.complete()),m(()=>({ref:e})))}function Zn(e,{index$:t,keyboard$:r}){let o=he();try{let n=Qn(o.search,t),i=Oe("search-query",e),s=Oe("search-result",e);h(e,"click").pipe(v(({target:c})=>c instanceof Element&&!!c.closest("a"))).subscribe(()=>Ye("search",!1)),r.pipe(v(({mode:c})=>c==="search")).subscribe(c=>{let p=Ie();switch(c.type){case"Enter":if(p===i){let l=new Map;for(let f of W(":first-child [href]",s)){let 
u=f.firstElementChild;l.set(f,parseFloat(u.getAttribute("data-md-score")))}if(l.size){let[[f]]=[...l].sort(([,u],[,d])=>d-u);f.click()}c.claim()}break;case"Escape":case"Tab":Ye("search",!1),i.blur();break;case"ArrowUp":case"ArrowDown":if(typeof p=="undefined")i.focus();else{let l=[i,...W(":not(details) > [href], summary, details[open] [href]",s)],f=Math.max(0,(Math.max(0,l.indexOf(p))+l.length+(c.type==="ArrowUp"?-1:1))%l.length);l[f].focus()}c.claim();break;default:i!==Ie()&&i.focus()}}),r.pipe(v(({mode:c})=>c==="global")).subscribe(c=>{switch(c.type){case"f":case"s":case"/":i.focus(),i.select(),c.claim();break}});let a=Bn(i,{worker$:n});return L(a,Gn(s,{worker$:n,query$:a})).pipe(Re(...ne("search-share",e).map(c=>Jn(c,{query$:a})),...ne("search-suggest",e).map(c=>Xn(c,{worker$:n,keyboard$:r}))))}catch(n){return e.hidden=!0,Ke}}function ei(e,{index$:t,location$:r}){return B([t,r.pipe(q(me()),v(o=>!!o.searchParams.get("h")))]).pipe(m(([o,n])=>Kn(o.config)(n.searchParams.get("h"))),m(o=>{var s;let n=new Map,i=document.createNodeIterator(e,NodeFilter.SHOW_TEXT);for(let a=i.nextNode();a;a=i.nextNode())if((s=a.parentElement)!=null&&s.offsetHeight){let c=a.textContent,p=o(c);p.length>c.length&&n.set(a,p)}for(let[a,c]of n){let{childNodes:p}=S("span",null,c);a.replaceWith(...Array.from(p))}return{ref:e,nodes:n}}))}function za(e,{viewport$:t,main$:r}){let o=e.closest(".md-grid"),n=o.offsetTop-o.parentElement.offsetTop;return B([r,t]).pipe(m(([{offset:i,height:s},{offset:{y:a}}])=>(s=s+Math.min(n,Math.max(0,a-i))-n,{height:s,locked:a>=i+n})),X((i,s)=>i.height===s.height&&i.locked===s.locked))}function Br(e,o){var n=o,{header$:t}=n,r=oo(n,["header$"]);let i=U(".md-sidebar__scrollwrap",e),{y:s}=Ue(i);return H(()=>{let a=new x,c=a.pipe(ee(),oe(!0)),p=a.pipe(Le(0,ge));return p.pipe(ae(t)).subscribe({next([{height:l},{height:f}]){i.style.height=`${l-2*s}px`,e.style.top=`${f}px`},complete(){i.style.height="",e.style.top=""}}),p.pipe(Pe()).subscribe(()=>{for(let l of 
W(".md-nav__link--active[href]",e)){if(!l.clientHeight)continue;let f=l.closest(".md-sidebar__scrollwrap");if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:d}=le(f);f.scrollTo({top:u-d/2})}}}),fe(W("label[tabindex]",e)).pipe(re(l=>h(l,"click").pipe(Me(ie),m(()=>l),j(c)))).subscribe(l=>{let f=U(`[id="${l.htmlFor}"]`);U(`[aria-labelledby="${l.id}"]`).setAttribute("aria-expanded",`${f.checked}`)}),za(e,r).pipe(T(l=>a.next(l)),A(()=>a.complete()),m(l=>P({ref:e},l)))})}function ti(e,t){if(typeof t!="undefined"){let r=`https://api.github.com/repos/${e}/${t}`;return Lt(De(`${r}/releases/latest`).pipe(xe(()=>M),m(o=>({version:o.tag_name})),$e({})),De(r).pipe(xe(()=>M),m(o=>({stars:o.stargazers_count,forks:o.forks_count})),$e({}))).pipe(m(([o,n])=>P(P({},o),n)))}else{let r=`https://api.github.com/users/${e}`;return De(r).pipe(m(o=>({repositories:o.public_repos})),$e({}))}}function ri(e,t){let r=`https://${e}/api/v4/projects/${encodeURIComponent(t)}`;return De(r).pipe(xe(()=>M),m(({star_count:o,forks_count:n})=>({stars:o,forks:n})),$e({}))}function oi(e){let t=e.match(/^.+github\.com\/([^/]+)\/?([^/]+)?/i);if(t){let[,r,o]=t;return ti(r,o)}if(t=e.match(/^.+?([^/]*gitlab[^/]+)\/(.+?)\/?$/i),t){let[,r,o]=t;return ri(r,o)}return M}var qa;function Ka(e){return qa||(qa=H(()=>{let t=__md_get("__source",sessionStorage);if(t)return R(t);if(ne("consent").length){let o=__md_get("__consent");if(!(o&&o.github))return M}return oi(e.href).pipe(T(o=>__md_set("__source",o,sessionStorage)))}).pipe(xe(()=>M),v(t=>Object.keys(t).length>0),m(t=>({facts:t})),Z(1)))}function ni(e){let t=U(":scope > :last-child",e);return H(()=>{let r=new x;return r.subscribe(({facts:o})=>{t.appendChild(bn(o)),t.classList.add("md-source__repository--active")}),Ka(e).pipe(T(o=>r.next(o)),A(()=>r.complete()),m(o=>P({ref:e},o)))})}function Qa(e,{viewport$:t,header$:r}){return Se(document.body).pipe(w(()=>mr(e,{header$:r,viewport$:t})),m(({offset:{y:o}})=>({hidden:o>=10})),te("hidden"))}function 
ii(e,t){return H(()=>{let r=new x;return r.subscribe({next({hidden:o}){e.hidden=o},complete(){e.hidden=!1}}),(G("navigation.tabs.sticky")?R({hidden:!1}):Qa(e,t)).pipe(T(o=>r.next(o)),A(()=>r.complete()),m(o=>P({ref:e},o)))})}function Ya(e,{viewport$:t,header$:r}){let o=new Map,n=W("[href^=\\#]",e);for(let a of n){let c=decodeURIComponent(a.hash.substring(1)),p=ce(`[id="${c}"]`);typeof p!="undefined"&&o.set(a,p)}let i=r.pipe(te("height"),m(({height:a})=>{let c=Oe("main"),p=U(":scope > :first-child",c);return a+.8*(p.offsetTop-c.offsetTop)}),de());return Se(document.body).pipe(te("height"),w(a=>H(()=>{let c=[];return R([...o].reduce((p,[l,f])=>{for(;c.length&&o.get(c[c.length-1]).tagName>=f.tagName;)c.pop();let u=f.offsetTop;for(;!u&&f.parentElement;)f=f.parentElement,u=f.offsetTop;let d=f.offsetParent;for(;d;d=d.offsetParent)u+=d.offsetTop;return p.set([...c=[...c,l]].reverse(),u)},new Map))}).pipe(m(c=>new Map([...c].sort(([,p],[,l])=>p-l))),Ze(i),w(([c,p])=>t.pipe(Fr(([l,f],{offset:{y:u},size:d})=>{let y=u+d.height>=Math.floor(a.height);for(;f.length;){let[,b]=f[0];if(b-p=u&&!y)f=[l.pop(),...f];else break}return[l,f]},[[],[...c]]),X((l,f)=>l[0]===f[0]&&l[1]===f[1])))))).pipe(m(([a,c])=>({prev:a.map(([p])=>p),next:c.map(([p])=>p)})),q({prev:[],next:[]}),Ce(2,1),m(([a,c])=>a.prev.length{let i=new x,s=i.pipe(ee(),oe(!0));if(i.subscribe(({prev:a,next:c})=>{for(let[p]of c)p.classList.remove("md-nav__link--passed"),p.classList.remove("md-nav__link--active");for(let[p,[l]]of a.entries())l.classList.add("md-nav__link--passed"),l.classList.toggle("md-nav__link--active",p===a.length-1)}),G("toc.follow")){let a=L(t.pipe(ye(1),m(()=>{})),t.pipe(ye(250),m(()=>"smooth")));i.pipe(v(({prev:c})=>c.length>0),Ze(o.pipe(Me(ie))),ae(a)).subscribe(([[{prev:c}],p])=>{let[l]=c[c.length-1];if(l.offsetHeight){let f=sr(l);if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:d}=le(f);f.scrollTo({top:u-d/2,behavior:p})}}})}return 
G("navigation.tracking")&&t.pipe(j(s),te("offset"),ye(250),Ee(1),j(n.pipe(Ee(1))),at({delay:250}),ae(i)).subscribe(([,{prev:a}])=>{let c=me(),p=a[a.length-1];if(p&&p.length){let[l]=p,{hash:f}=new URL(l.href);c.hash!==f&&(c.hash=f,history.replaceState({},"",`${c}`))}else c.hash="",history.replaceState({},"",`${c}`)}),Ya(e,{viewport$:t,header$:r}).pipe(T(a=>i.next(a)),A(()=>i.complete()),m(a=>P({ref:e},a)))})}function Ba(e,{viewport$:t,main$:r,target$:o}){let n=t.pipe(m(({offset:{y:s}})=>s),Ce(2,1),m(([s,a])=>s>a&&a>0),X()),i=r.pipe(m(({active:s})=>s));return B([i,n]).pipe(m(([s,a])=>!(s&&a)),X(),j(o.pipe(Ee(1))),oe(!0),at({delay:250}),m(s=>({hidden:s})))}function si(e,{viewport$:t,header$:r,main$:o,target$:n}){let i=new x,s=i.pipe(ee(),oe(!0));return i.subscribe({next({hidden:a}){e.hidden=a,a?(e.setAttribute("tabindex","-1"),e.blur()):e.removeAttribute("tabindex")},complete(){e.style.top="",e.hidden=!0,e.removeAttribute("tabindex")}}),r.pipe(j(s),te("height")).subscribe(({height:a})=>{e.style.top=`${a+16}px`}),h(e,"click").subscribe(a=>{a.preventDefault(),window.scrollTo({top:0})}),Ba(e,{viewport$:t,main$:o,target$:n}).pipe(T(a=>i.next(a)),A(()=>i.complete()),m(a=>P({ref:e},a)))}function ci({document$:e}){e.pipe(w(()=>W(".md-ellipsis")),re(t=>yt(t).pipe(j(e.pipe(Ee(1))),v(r=>r),m(()=>t),ue(1))),v(t=>t.offsetWidth{let r=t.innerText,o=t.closest("a")||t;return o.title=r,Be(o).pipe(j(e.pipe(Ee(1))),A(()=>o.removeAttribute("title")))})).subscribe(),e.pipe(w(()=>W(".md-status")),re(t=>Be(t))).subscribe()}function pi({document$:e,tablet$:t}){e.pipe(w(()=>W(".md-toggle--indeterminate")),T(r=>{r.indeterminate=!0,r.checked=!1}),re(r=>h(r,"change").pipe(Ur(()=>r.classList.contains("md-toggle--indeterminate")),m(()=>r))),ae(t)).subscribe(([r,o])=>{r.classList.remove("md-toggle--indeterminate"),o&&(r.checked=!1)})}function Ga(){return/(iPad|iPhone|iPod)/.test(navigator.userAgent)}function 
li({document$:e}){e.pipe(w(()=>W("[data-md-scrollfix]")),T(t=>t.removeAttribute("data-md-scrollfix")),v(Ga),re(t=>h(t,"touchstart").pipe(m(()=>t)))).subscribe(t=>{let r=t.scrollTop;r===0?t.scrollTop=1:r+t.offsetHeight===t.scrollHeight&&(t.scrollTop=r-1)})}function mi({viewport$:e,tablet$:t}){B([Ne("search"),t]).pipe(m(([r,o])=>r&&!o),w(r=>R(r).pipe(Qe(r?400:100))),ae(e)).subscribe(([r,{offset:{y:o}}])=>{if(r)document.body.setAttribute("data-md-scrolllock",""),document.body.style.top=`-${o}px`;else{let n=-1*parseInt(document.body.style.top,10);document.body.removeAttribute("data-md-scrolllock"),document.body.style.top="",n&&window.scrollTo(0,n)}})}Object.entries||(Object.entries=function(e){let t=[];for(let r of Object.keys(e))t.push([r,e[r]]);return t});Object.values||(Object.values=function(e){let t=[];for(let r of Object.keys(e))t.push(e[r]);return t});typeof Element!="undefined"&&(Element.prototype.scrollTo||(Element.prototype.scrollTo=function(e,t){typeof e=="object"?(this.scrollLeft=e.left,this.scrollTop=e.top):(this.scrollLeft=e,this.scrollTop=t)}),Element.prototype.replaceWith||(Element.prototype.replaceWith=function(...e){let t=this.parentNode;if(t){e.length===0&&t.removeChild(this);for(let r=e.length-1;r>=0;r--){let o=e[r];typeof o=="string"?o=document.createTextNode(o):o.parentNode&&o.parentNode.removeChild(o),r?t.insertBefore(this.previousSibling,o):t.replaceChild(o,this)}}}));function Ja(){return location.protocol==="file:"?gt(`${new URL("search/search_index.js",Gr.base)}`).pipe(m(()=>__index),Z(1)):De(new URL("search/search_index.json",Gr.base))}document.documentElement.classList.remove("no-js");document.documentElement.classList.add("js");var rt=zo(),Pt=Zo(),wt=tn(Pt),Jr=Xo(),_e=pn(),hr=At("(min-width: 960px)"),ui=At("(min-width: 1220px)"),di=rn(),Gr=he(),hi=document.forms.namedItem("search")?Ja():Ke,Xr=new x;Un({alert$:Xr});var Zr=new x;G("navigation.instant")&&Dn({location$:Pt,viewport$:_e,progress$:Zr}).subscribe(rt);var 
fi;((fi=Gr.version)==null?void 0:fi.provider)==="mike"&&Yn({document$:rt});L(Pt,wt).pipe(Qe(125)).subscribe(()=>{Ye("drawer",!1),Ye("search",!1)});Jr.pipe(v(({mode:e})=>e==="global")).subscribe(e=>{switch(e.type){case"p":case",":let t=ce("link[rel=prev]");typeof t!="undefined"&&st(t);break;case"n":case".":let r=ce("link[rel=next]");typeof r!="undefined"&&st(r);break;case"Enter":let o=Ie();o instanceof HTMLLabelElement&&o.click()}});ci({document$:rt});pi({document$:rt,tablet$:hr});li({document$:rt});mi({viewport$:_e,tablet$:hr});var tt=Pn(Oe("header"),{viewport$:_e}),$t=rt.pipe(m(()=>Oe("main")),w(e=>Fn(e,{viewport$:_e,header$:tt})),Z(1)),Xa=L(...ne("consent").map(e=>fn(e,{target$:wt})),...ne("dialog").map(e=>$n(e,{alert$:Xr})),...ne("header").map(e=>Rn(e,{viewport$:_e,header$:tt,main$:$t})),...ne("palette").map(e=>jn(e)),...ne("progress").map(e=>Wn(e,{progress$:Zr})),...ne("search").map(e=>Zn(e,{index$:hi,keyboard$:Jr})),...ne("source").map(e=>ni(e))),Za=H(()=>L(...ne("announce").map(e=>mn(e)),...ne("content").map(e=>Hn(e,{viewport$:_e,target$:wt,print$:di})),...ne("content").map(e=>G("search.highlight")?ei(e,{index$:hi,location$:Pt}):M),...ne("header-title").map(e=>In(e,{viewport$:_e,header$:tt})),...ne("sidebar").map(e=>e.getAttribute("data-md-type")==="navigation"?Dr(ui,()=>Br(e,{viewport$:_e,header$:tt,main$:$t})):Dr(hr,()=>Br(e,{viewport$:_e,header$:tt,main$:$t}))),...ne("tabs").map(e=>ii(e,{viewport$:_e,header$:tt})),...ne("toc").map(e=>ai(e,{viewport$:_e,header$:tt,main$:$t,target$:wt})),...ne("top").map(e=>si(e,{viewport$:_e,header$:tt,main$:$t,target$:wt})))),bi=rt.pipe(w(()=>Za),Re(Xa),Z(1));bi.subscribe();window.document$=rt;window.location$=Pt;window.target$=wt;window.keyboard$=Jr;window.viewport$=_e;window.tablet$=hr;window.screen$=ui;window.print$=di;window.alert$=Xr;window.progress$=Zr;window.component$=bi;})(); -//# sourceMappingURL=bundle.d7c377c4.min.js.map - diff --git a/site/assets/javascripts/bundle.d7c377c4.min.js.map 
b/site/assets/javascripts/bundle.d7c377c4.min.js.map deleted file mode 100644 index a57d388..0000000 --- a/site/assets/javascripts/bundle.d7c377c4.min.js.map +++ /dev/null @@ -1,7 +0,0 @@ -{ - "version": 3, - "sources": ["node_modules/focus-visible/dist/focus-visible.js", "node_modules/clipboard/dist/clipboard.js", "node_modules/escape-html/index.js", "src/templates/assets/javascripts/bundle.ts", "node_modules/rxjs/node_modules/tslib/tslib.es6.js", "node_modules/rxjs/src/internal/util/isFunction.ts", "node_modules/rxjs/src/internal/util/createErrorClass.ts", "node_modules/rxjs/src/internal/util/UnsubscriptionError.ts", "node_modules/rxjs/src/internal/util/arrRemove.ts", "node_modules/rxjs/src/internal/Subscription.ts", "node_modules/rxjs/src/internal/config.ts", "node_modules/rxjs/src/internal/scheduler/timeoutProvider.ts", "node_modules/rxjs/src/internal/util/reportUnhandledError.ts", "node_modules/rxjs/src/internal/util/noop.ts", "node_modules/rxjs/src/internal/NotificationFactories.ts", "node_modules/rxjs/src/internal/util/errorContext.ts", "node_modules/rxjs/src/internal/Subscriber.ts", "node_modules/rxjs/src/internal/symbol/observable.ts", "node_modules/rxjs/src/internal/util/identity.ts", "node_modules/rxjs/src/internal/util/pipe.ts", "node_modules/rxjs/src/internal/Observable.ts", "node_modules/rxjs/src/internal/util/lift.ts", "node_modules/rxjs/src/internal/operators/OperatorSubscriber.ts", "node_modules/rxjs/src/internal/scheduler/animationFrameProvider.ts", "node_modules/rxjs/src/internal/util/ObjectUnsubscribedError.ts", "node_modules/rxjs/src/internal/Subject.ts", "node_modules/rxjs/src/internal/scheduler/dateTimestampProvider.ts", "node_modules/rxjs/src/internal/ReplaySubject.ts", "node_modules/rxjs/src/internal/scheduler/Action.ts", "node_modules/rxjs/src/internal/scheduler/intervalProvider.ts", "node_modules/rxjs/src/internal/scheduler/AsyncAction.ts", "node_modules/rxjs/src/internal/Scheduler.ts", 
"node_modules/rxjs/src/internal/scheduler/AsyncScheduler.ts", "node_modules/rxjs/src/internal/scheduler/async.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameAction.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameScheduler.ts", "node_modules/rxjs/src/internal/scheduler/animationFrame.ts", "node_modules/rxjs/src/internal/observable/empty.ts", "node_modules/rxjs/src/internal/util/isScheduler.ts", "node_modules/rxjs/src/internal/util/args.ts", "node_modules/rxjs/src/internal/util/isArrayLike.ts", "node_modules/rxjs/src/internal/util/isPromise.ts", "node_modules/rxjs/src/internal/util/isInteropObservable.ts", "node_modules/rxjs/src/internal/util/isAsyncIterable.ts", "node_modules/rxjs/src/internal/util/throwUnobservableError.ts", "node_modules/rxjs/src/internal/symbol/iterator.ts", "node_modules/rxjs/src/internal/util/isIterable.ts", "node_modules/rxjs/src/internal/util/isReadableStreamLike.ts", "node_modules/rxjs/src/internal/observable/innerFrom.ts", "node_modules/rxjs/src/internal/util/executeSchedule.ts", "node_modules/rxjs/src/internal/operators/observeOn.ts", "node_modules/rxjs/src/internal/operators/subscribeOn.ts", "node_modules/rxjs/src/internal/scheduled/scheduleObservable.ts", "node_modules/rxjs/src/internal/scheduled/schedulePromise.ts", "node_modules/rxjs/src/internal/scheduled/scheduleArray.ts", "node_modules/rxjs/src/internal/scheduled/scheduleIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleAsyncIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleReadableStreamLike.ts", "node_modules/rxjs/src/internal/scheduled/scheduled.ts", "node_modules/rxjs/src/internal/observable/from.ts", "node_modules/rxjs/src/internal/observable/of.ts", "node_modules/rxjs/src/internal/observable/throwError.ts", "node_modules/rxjs/src/internal/util/EmptyError.ts", "node_modules/rxjs/src/internal/util/isDate.ts", "node_modules/rxjs/src/internal/operators/map.ts", "node_modules/rxjs/src/internal/util/mapOneOrManyArgs.ts", 
"node_modules/rxjs/src/internal/util/argsArgArrayOrObject.ts", "node_modules/rxjs/src/internal/util/createObject.ts", "node_modules/rxjs/src/internal/observable/combineLatest.ts", "node_modules/rxjs/src/internal/operators/mergeInternals.ts", "node_modules/rxjs/src/internal/operators/mergeMap.ts", "node_modules/rxjs/src/internal/operators/mergeAll.ts", "node_modules/rxjs/src/internal/operators/concatAll.ts", "node_modules/rxjs/src/internal/observable/concat.ts", "node_modules/rxjs/src/internal/observable/defer.ts", "node_modules/rxjs/src/internal/observable/fromEvent.ts", "node_modules/rxjs/src/internal/observable/fromEventPattern.ts", "node_modules/rxjs/src/internal/observable/timer.ts", "node_modules/rxjs/src/internal/observable/merge.ts", "node_modules/rxjs/src/internal/observable/never.ts", "node_modules/rxjs/src/internal/util/argsOrArgArray.ts", "node_modules/rxjs/src/internal/operators/filter.ts", "node_modules/rxjs/src/internal/observable/zip.ts", "node_modules/rxjs/src/internal/operators/audit.ts", "node_modules/rxjs/src/internal/operators/auditTime.ts", "node_modules/rxjs/src/internal/operators/bufferCount.ts", "node_modules/rxjs/src/internal/operators/catchError.ts", "node_modules/rxjs/src/internal/operators/scanInternals.ts", "node_modules/rxjs/src/internal/operators/combineLatest.ts", "node_modules/rxjs/src/internal/operators/combineLatestWith.ts", "node_modules/rxjs/src/internal/operators/debounceTime.ts", "node_modules/rxjs/src/internal/operators/defaultIfEmpty.ts", "node_modules/rxjs/src/internal/operators/take.ts", "node_modules/rxjs/src/internal/operators/ignoreElements.ts", "node_modules/rxjs/src/internal/operators/mapTo.ts", "node_modules/rxjs/src/internal/operators/delayWhen.ts", "node_modules/rxjs/src/internal/operators/delay.ts", "node_modules/rxjs/src/internal/operators/distinctUntilChanged.ts", "node_modules/rxjs/src/internal/operators/distinctUntilKeyChanged.ts", "node_modules/rxjs/src/internal/operators/throwIfEmpty.ts", 
"node_modules/rxjs/src/internal/operators/endWith.ts", "node_modules/rxjs/src/internal/operators/finalize.ts", "node_modules/rxjs/src/internal/operators/first.ts", "node_modules/rxjs/src/internal/operators/takeLast.ts", "node_modules/rxjs/src/internal/operators/merge.ts", "node_modules/rxjs/src/internal/operators/mergeWith.ts", "node_modules/rxjs/src/internal/operators/repeat.ts", "node_modules/rxjs/src/internal/operators/sample.ts", "node_modules/rxjs/src/internal/operators/scan.ts", "node_modules/rxjs/src/internal/operators/share.ts", "node_modules/rxjs/src/internal/operators/shareReplay.ts", "node_modules/rxjs/src/internal/operators/skip.ts", "node_modules/rxjs/src/internal/operators/skipUntil.ts", "node_modules/rxjs/src/internal/operators/startWith.ts", "node_modules/rxjs/src/internal/operators/switchMap.ts", "node_modules/rxjs/src/internal/operators/takeUntil.ts", "node_modules/rxjs/src/internal/operators/takeWhile.ts", "node_modules/rxjs/src/internal/operators/tap.ts", "node_modules/rxjs/src/internal/operators/throttle.ts", "node_modules/rxjs/src/internal/operators/throttleTime.ts", "node_modules/rxjs/src/internal/operators/withLatestFrom.ts", "node_modules/rxjs/src/internal/operators/zip.ts", "node_modules/rxjs/src/internal/operators/zipWith.ts", "src/templates/assets/javascripts/browser/document/index.ts", "src/templates/assets/javascripts/browser/element/_/index.ts", "src/templates/assets/javascripts/browser/element/focus/index.ts", "src/templates/assets/javascripts/browser/element/hover/index.ts", "src/templates/assets/javascripts/browser/element/offset/_/index.ts", "src/templates/assets/javascripts/browser/element/offset/content/index.ts", "src/templates/assets/javascripts/utilities/h/index.ts", "src/templates/assets/javascripts/utilities/round/index.ts", "src/templates/assets/javascripts/browser/script/index.ts", "src/templates/assets/javascripts/browser/element/size/_/index.ts", "src/templates/assets/javascripts/browser/element/size/content/index.ts", 
"src/templates/assets/javascripts/browser/element/visibility/index.ts", "src/templates/assets/javascripts/browser/toggle/index.ts", "src/templates/assets/javascripts/browser/keyboard/index.ts", "src/templates/assets/javascripts/browser/location/_/index.ts", "src/templates/assets/javascripts/browser/location/hash/index.ts", "src/templates/assets/javascripts/browser/media/index.ts", "src/templates/assets/javascripts/browser/request/index.ts", "src/templates/assets/javascripts/browser/viewport/offset/index.ts", "src/templates/assets/javascripts/browser/viewport/size/index.ts", "src/templates/assets/javascripts/browser/viewport/_/index.ts", "src/templates/assets/javascripts/browser/viewport/at/index.ts", "src/templates/assets/javascripts/browser/worker/index.ts", "src/templates/assets/javascripts/_/index.ts", "src/templates/assets/javascripts/components/_/index.ts", "src/templates/assets/javascripts/components/announce/index.ts", "src/templates/assets/javascripts/components/consent/index.ts", "src/templates/assets/javascripts/templates/tooltip/index.tsx", "src/templates/assets/javascripts/templates/annotation/index.tsx", "src/templates/assets/javascripts/templates/clipboard/index.tsx", "src/templates/assets/javascripts/templates/search/index.tsx", "src/templates/assets/javascripts/templates/source/index.tsx", "src/templates/assets/javascripts/templates/tabbed/index.tsx", "src/templates/assets/javascripts/templates/table/index.tsx", "src/templates/assets/javascripts/templates/version/index.tsx", "src/templates/assets/javascripts/components/tooltip/index.ts", "src/templates/assets/javascripts/components/content/annotation/_/index.ts", "src/templates/assets/javascripts/components/content/annotation/list/index.ts", "src/templates/assets/javascripts/components/content/annotation/block/index.ts", "src/templates/assets/javascripts/components/content/code/_/index.ts", "src/templates/assets/javascripts/components/content/details/index.ts", 
"src/templates/assets/javascripts/components/content/mermaid/index.css", "src/templates/assets/javascripts/components/content/mermaid/index.ts", "src/templates/assets/javascripts/components/content/table/index.ts", "src/templates/assets/javascripts/components/content/tabs/index.ts", "src/templates/assets/javascripts/components/content/_/index.ts", "src/templates/assets/javascripts/components/dialog/index.ts", "src/templates/assets/javascripts/components/header/_/index.ts", "src/templates/assets/javascripts/components/header/title/index.ts", "src/templates/assets/javascripts/components/main/index.ts", "src/templates/assets/javascripts/components/palette/index.ts", "src/templates/assets/javascripts/components/progress/index.ts", "src/templates/assets/javascripts/integrations/clipboard/index.ts", "src/templates/assets/javascripts/integrations/sitemap/index.ts", "src/templates/assets/javascripts/integrations/instant/index.ts", "src/templates/assets/javascripts/integrations/search/highlighter/index.ts", "src/templates/assets/javascripts/integrations/search/worker/message/index.ts", "src/templates/assets/javascripts/integrations/search/worker/_/index.ts", "src/templates/assets/javascripts/integrations/version/index.ts", "src/templates/assets/javascripts/components/search/query/index.ts", "src/templates/assets/javascripts/components/search/result/index.ts", "src/templates/assets/javascripts/components/search/share/index.ts", "src/templates/assets/javascripts/components/search/suggest/index.ts", "src/templates/assets/javascripts/components/search/_/index.ts", "src/templates/assets/javascripts/components/search/highlight/index.ts", "src/templates/assets/javascripts/components/sidebar/index.ts", "src/templates/assets/javascripts/components/source/facts/github/index.ts", "src/templates/assets/javascripts/components/source/facts/gitlab/index.ts", "src/templates/assets/javascripts/components/source/facts/_/index.ts", 
"src/templates/assets/javascripts/components/source/_/index.ts", "src/templates/assets/javascripts/components/tabs/index.ts", "src/templates/assets/javascripts/components/toc/index.ts", "src/templates/assets/javascripts/components/top/index.ts", "src/templates/assets/javascripts/patches/ellipsis/index.ts", "src/templates/assets/javascripts/patches/indeterminate/index.ts", "src/templates/assets/javascripts/patches/scrollfix/index.ts", "src/templates/assets/javascripts/patches/scrolllock/index.ts", "src/templates/assets/javascripts/polyfills/index.ts"], - "sourcesContent": ["(function (global, factory) {\n typeof exports === 'object' && typeof module !== 'undefined' ? factory() :\n typeof define === 'function' && define.amd ? define(factory) :\n (factory());\n}(this, (function () { 'use strict';\n\n /**\n * Applies the :focus-visible polyfill at the given scope.\n * A scope in this case is either the top-level Document or a Shadow Root.\n *\n * @param {(Document|ShadowRoot)} scope\n * @see https://github.com/WICG/focus-visible\n */\n function applyFocusVisiblePolyfill(scope) {\n var hadKeyboardEvent = true;\n var hadFocusVisibleRecently = false;\n var hadFocusVisibleRecentlyTimeout = null;\n\n var inputTypesAllowlist = {\n text: true,\n search: true,\n url: true,\n tel: true,\n email: true,\n password: true,\n number: true,\n date: true,\n month: true,\n week: true,\n time: true,\n datetime: true,\n 'datetime-local': true\n };\n\n /**\n * Helper function for legacy browsers and iframes which sometimes focus\n * elements like document, body, and non-interactive SVG.\n * @param {Element} el\n */\n function isValidFocusTarget(el) {\n if (\n el &&\n el !== document &&\n el.nodeName !== 'HTML' &&\n el.nodeName !== 'BODY' &&\n 'classList' in el &&\n 'contains' in el.classList\n ) {\n return true;\n }\n return false;\n }\n\n /**\n * Computes whether the given element should automatically trigger the\n * `focus-visible` class being added, i.e. 
whether it should always match\n * `:focus-visible` when focused.\n * @param {Element} el\n * @return {boolean}\n */\n function focusTriggersKeyboardModality(el) {\n var type = el.type;\n var tagName = el.tagName;\n\n if (tagName === 'INPUT' && inputTypesAllowlist[type] && !el.readOnly) {\n return true;\n }\n\n if (tagName === 'TEXTAREA' && !el.readOnly) {\n return true;\n }\n\n if (el.isContentEditable) {\n return true;\n }\n\n return false;\n }\n\n /**\n * Add the `focus-visible` class to the given element if it was not added by\n * the author.\n * @param {Element} el\n */\n function addFocusVisibleClass(el) {\n if (el.classList.contains('focus-visible')) {\n return;\n }\n el.classList.add('focus-visible');\n el.setAttribute('data-focus-visible-added', '');\n }\n\n /**\n * Remove the `focus-visible` class from the given element if it was not\n * originally added by the author.\n * @param {Element} el\n */\n function removeFocusVisibleClass(el) {\n if (!el.hasAttribute('data-focus-visible-added')) {\n return;\n }\n el.classList.remove('focus-visible');\n el.removeAttribute('data-focus-visible-added');\n }\n\n /**\n * If the most recent user interaction was via the keyboard;\n * and the key press did not include a meta, alt/option, or control key;\n * then the modality is keyboard. 
Otherwise, the modality is not keyboard.\n * Apply `focus-visible` to any current active element and keep track\n * of our keyboard modality state with `hadKeyboardEvent`.\n * @param {KeyboardEvent} e\n */\n function onKeyDown(e) {\n if (e.metaKey || e.altKey || e.ctrlKey) {\n return;\n }\n\n if (isValidFocusTarget(scope.activeElement)) {\n addFocusVisibleClass(scope.activeElement);\n }\n\n hadKeyboardEvent = true;\n }\n\n /**\n * If at any point a user clicks with a pointing device, ensure that we change\n * the modality away from keyboard.\n * This avoids the situation where a user presses a key on an already focused\n * element, and then clicks on a different element, focusing it with a\n * pointing device, while we still think we're in keyboard modality.\n * @param {Event} e\n */\n function onPointerDown(e) {\n hadKeyboardEvent = false;\n }\n\n /**\n * On `focus`, add the `focus-visible` class to the target if:\n * - the target received focus as a result of keyboard navigation, or\n * - the event target is an element that will likely require interaction\n * via the keyboard (e.g. 
a text box)\n * @param {Event} e\n */\n function onFocus(e) {\n // Prevent IE from focusing the document or HTML element.\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (hadKeyboardEvent || focusTriggersKeyboardModality(e.target)) {\n addFocusVisibleClass(e.target);\n }\n }\n\n /**\n * On `blur`, remove the `focus-visible` class from the target.\n * @param {Event} e\n */\n function onBlur(e) {\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (\n e.target.classList.contains('focus-visible') ||\n e.target.hasAttribute('data-focus-visible-added')\n ) {\n // To detect a tab/window switch, we look for a blur event followed\n // rapidly by a visibility change.\n // If we don't see a visibility change within 100ms, it's probably a\n // regular focus change.\n hadFocusVisibleRecently = true;\n window.clearTimeout(hadFocusVisibleRecentlyTimeout);\n hadFocusVisibleRecentlyTimeout = window.setTimeout(function() {\n hadFocusVisibleRecently = false;\n }, 100);\n removeFocusVisibleClass(e.target);\n }\n }\n\n /**\n * If the user changes tabs, keep track of whether or not the previously\n * focused element had .focus-visible.\n * @param {Event} e\n */\n function onVisibilityChange(e) {\n if (document.visibilityState === 'hidden') {\n // If the tab becomes active again, the browser will handle calling focus\n // on the element (Safari actually calls it twice).\n // If this tab change caused a blur on an element with focus-visible,\n // re-apply the class when the user switches back to the tab.\n if (hadFocusVisibleRecently) {\n hadKeyboardEvent = true;\n }\n addInitialPointerMoveListeners();\n }\n }\n\n /**\n * Add a group of listeners to detect usage of any pointing devices.\n * These listeners will be added when the polyfill first loads, and anytime\n * the window is blurred, so that they are active when the window regains\n * focus.\n */\n function addInitialPointerMoveListeners() {\n document.addEventListener('mousemove', onInitialPointerMove);\n 
document.addEventListener('mousedown', onInitialPointerMove);\n document.addEventListener('mouseup', onInitialPointerMove);\n document.addEventListener('pointermove', onInitialPointerMove);\n document.addEventListener('pointerdown', onInitialPointerMove);\n document.addEventListener('pointerup', onInitialPointerMove);\n document.addEventListener('touchmove', onInitialPointerMove);\n document.addEventListener('touchstart', onInitialPointerMove);\n document.addEventListener('touchend', onInitialPointerMove);\n }\n\n function removeInitialPointerMoveListeners() {\n document.removeEventListener('mousemove', onInitialPointerMove);\n document.removeEventListener('mousedown', onInitialPointerMove);\n document.removeEventListener('mouseup', onInitialPointerMove);\n document.removeEventListener('pointermove', onInitialPointerMove);\n document.removeEventListener('pointerdown', onInitialPointerMove);\n document.removeEventListener('pointerup', onInitialPointerMove);\n document.removeEventListener('touchmove', onInitialPointerMove);\n document.removeEventListener('touchstart', onInitialPointerMove);\n document.removeEventListener('touchend', onInitialPointerMove);\n }\n\n /**\n * When the polfyill first loads, assume the user is in keyboard modality.\n * If any event is received from a pointing device (e.g. mouse, pointer,\n * touch), turn off keyboard modality.\n * This accounts for situations where focus enters the page from the URL bar.\n * @param {Event} e\n */\n function onInitialPointerMove(e) {\n // Work around a Safari quirk that fires a mousemove on whenever the\n // window blurs, even if you're tabbing out of the page. \u00AF\\_(\u30C4)_/\u00AF\n if (e.target.nodeName && e.target.nodeName.toLowerCase() === 'html') {\n return;\n }\n\n hadKeyboardEvent = false;\n removeInitialPointerMoveListeners();\n }\n\n // For some kinds of state, we are interested in changes at the global scope\n // only. 
For example, global pointer input, global key presses and global\n // visibility change should affect the state at every scope:\n document.addEventListener('keydown', onKeyDown, true);\n document.addEventListener('mousedown', onPointerDown, true);\n document.addEventListener('pointerdown', onPointerDown, true);\n document.addEventListener('touchstart', onPointerDown, true);\n document.addEventListener('visibilitychange', onVisibilityChange, true);\n\n addInitialPointerMoveListeners();\n\n // For focus and blur, we specifically care about state changes in the local\n // scope. This is because focus / blur events that originate from within a\n // shadow root are not re-dispatched from the host element if it was already\n // the active element in its own scope:\n scope.addEventListener('focus', onFocus, true);\n scope.addEventListener('blur', onBlur, true);\n\n // We detect that a node is a ShadowRoot by ensuring that it is a\n // DocumentFragment and also has a host property. This check covers native\n // implementation and polyfill implementation transparently. If we only cared\n // about the native implementation, we could just check if the scope was\n // an instance of a ShadowRoot.\n if (scope.nodeType === Node.DOCUMENT_FRAGMENT_NODE && scope.host) {\n // Since a ShadowRoot is a special kind of DocumentFragment, it does not\n // have a root element to add a class to. 
So, we add this attribute to the\n // host element instead:\n scope.host.setAttribute('data-js-focus-visible', '');\n } else if (scope.nodeType === Node.DOCUMENT_NODE) {\n document.documentElement.classList.add('js-focus-visible');\n document.documentElement.setAttribute('data-js-focus-visible', '');\n }\n }\n\n // It is important to wrap all references to global window and document in\n // these checks to support server-side rendering use cases\n // @see https://github.com/WICG/focus-visible/issues/199\n if (typeof window !== 'undefined' && typeof document !== 'undefined') {\n // Make the polyfill helper globally available. This can be used as a signal\n // to interested libraries that wish to coordinate with the polyfill for e.g.,\n // applying the polyfill to a shadow root:\n window.applyFocusVisiblePolyfill = applyFocusVisiblePolyfill;\n\n // Notify interested libraries of the polyfill's presence, in case the\n // polyfill was loaded lazily:\n var event;\n\n try {\n event = new CustomEvent('focus-visible-polyfill-ready');\n } catch (error) {\n // IE11 does not support using CustomEvent as a constructor directly:\n event = document.createEvent('CustomEvent');\n event.initCustomEvent('focus-visible-polyfill-ready', false, false, {});\n }\n\n window.dispatchEvent(event);\n }\n\n if (typeof document !== 'undefined') {\n // Apply the polyfill to the global document, so that no JavaScript\n // coordination is required to use the polyfill in the top-level document:\n applyFocusVisiblePolyfill(document);\n }\n\n})));\n", "/*!\n * clipboard.js v2.0.11\n * https://clipboardjs.com/\n *\n * Licensed MIT \u00A9 Zeno Rocha\n */\n(function webpackUniversalModuleDefinition(root, factory) {\n\tif(typeof exports === 'object' && typeof module === 'object')\n\t\tmodule.exports = factory();\n\telse if(typeof define === 'function' && define.amd)\n\t\tdefine([], factory);\n\telse if(typeof exports === 'object')\n\t\texports[\"ClipboardJS\"] = 
factory();\n\telse\n\t\troot[\"ClipboardJS\"] = factory();\n})(this, function() {\nreturn /******/ (function() { // webpackBootstrap\n/******/ \tvar __webpack_modules__ = ({\n\n/***/ 686:\n/***/ (function(__unused_webpack_module, __webpack_exports__, __webpack_require__) {\n\n\"use strict\";\n\n// EXPORTS\n__webpack_require__.d(__webpack_exports__, {\n \"default\": function() { return /* binding */ clipboard; }\n});\n\n// EXTERNAL MODULE: ./node_modules/tiny-emitter/index.js\nvar tiny_emitter = __webpack_require__(279);\nvar tiny_emitter_default = /*#__PURE__*/__webpack_require__.n(tiny_emitter);\n// EXTERNAL MODULE: ./node_modules/good-listener/src/listen.js\nvar listen = __webpack_require__(370);\nvar listen_default = /*#__PURE__*/__webpack_require__.n(listen);\n// EXTERNAL MODULE: ./node_modules/select/src/select.js\nvar src_select = __webpack_require__(817);\nvar select_default = /*#__PURE__*/__webpack_require__.n(src_select);\n;// CONCATENATED MODULE: ./src/common/command.js\n/**\n * Executes a given operation type.\n * @param {String} type\n * @return {Boolean}\n */\nfunction command(type) {\n try {\n return document.execCommand(type);\n } catch (err) {\n return false;\n }\n}\n;// CONCATENATED MODULE: ./src/actions/cut.js\n\n\n/**\n * Cut action wrapper.\n * @param {String|HTMLElement} target\n * @return {String}\n */\n\nvar ClipboardActionCut = function ClipboardActionCut(target) {\n var selectedText = select_default()(target);\n command('cut');\n return selectedText;\n};\n\n/* harmony default export */ var actions_cut = (ClipboardActionCut);\n;// CONCATENATED MODULE: ./src/common/create-fake-element.js\n/**\n * Creates a fake textarea element with a value.\n * @param {String} value\n * @return {HTMLElement}\n */\nfunction createFakeElement(value) {\n var isRTL = document.documentElement.getAttribute('dir') === 'rtl';\n var fakeElement = document.createElement('textarea'); // Prevent zooming on iOS\n\n fakeElement.style.fontSize = '12pt'; // Reset box 
model\n\n fakeElement.style.border = '0';\n fakeElement.style.padding = '0';\n fakeElement.style.margin = '0'; // Move element out of screen horizontally\n\n fakeElement.style.position = 'absolute';\n fakeElement.style[isRTL ? 'right' : 'left'] = '-9999px'; // Move element to the same position vertically\n\n var yPosition = window.pageYOffset || document.documentElement.scrollTop;\n fakeElement.style.top = \"\".concat(yPosition, \"px\");\n fakeElement.setAttribute('readonly', '');\n fakeElement.value = value;\n return fakeElement;\n}\n;// CONCATENATED MODULE: ./src/actions/copy.js\n\n\n\n/**\n * Create fake copy action wrapper using a fake element.\n * @param {String} target\n * @param {Object} options\n * @return {String}\n */\n\nvar fakeCopyAction = function fakeCopyAction(value, options) {\n var fakeElement = createFakeElement(value);\n options.container.appendChild(fakeElement);\n var selectedText = select_default()(fakeElement);\n command('copy');\n fakeElement.remove();\n return selectedText;\n};\n/**\n * Copy action wrapper.\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @return {String}\n */\n\n\nvar ClipboardActionCopy = function ClipboardActionCopy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n var selectedText = '';\n\n if (typeof target === 'string') {\n selectedText = fakeCopyAction(target, options);\n } else if (target instanceof HTMLInputElement && !['text', 'search', 'url', 'tel', 'password'].includes(target === null || target === void 0 ? void 0 : target.type)) {\n // If input type doesn't support `setSelectionRange`. Simulate it. 
https://developer.mozilla.org/en-US/docs/Web/API/HTMLInputElement/setSelectionRange\n selectedText = fakeCopyAction(target.value, options);\n } else {\n selectedText = select_default()(target);\n command('copy');\n }\n\n return selectedText;\n};\n\n/* harmony default export */ var actions_copy = (ClipboardActionCopy);\n;// CONCATENATED MODULE: ./src/actions/default.js\nfunction _typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { _typeof = function _typeof(obj) { return typeof obj; }; } else { _typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? \"symbol\" : typeof obj; }; } return _typeof(obj); }\n\n\n\n/**\n * Inner function which performs selection from either `text` or `target`\n * properties and then executes copy or cut operations.\n * @param {Object} options\n */\n\nvar ClipboardActionDefault = function ClipboardActionDefault() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n // Defines base properties passed from constructor.\n var _options$action = options.action,\n action = _options$action === void 0 ? 'copy' : _options$action,\n container = options.container,\n target = options.target,\n text = options.text; // Sets the `action` to be performed which can be either 'copy' or 'cut'.\n\n if (action !== 'copy' && action !== 'cut') {\n throw new Error('Invalid \"action\" value, use either \"copy\" or \"cut\"');\n } // Sets the `target` property using an element that will be have its content copied.\n\n\n if (target !== undefined) {\n if (target && _typeof(target) === 'object' && target.nodeType === 1) {\n if (action === 'copy' && target.hasAttribute('disabled')) {\n throw new Error('Invalid \"target\" attribute. 
Please use \"readonly\" instead of \"disabled\" attribute');\n }\n\n if (action === 'cut' && (target.hasAttribute('readonly') || target.hasAttribute('disabled'))) {\n throw new Error('Invalid \"target\" attribute. You can\\'t cut text from elements with \"readonly\" or \"disabled\" attributes');\n }\n } else {\n throw new Error('Invalid \"target\" value, use a valid Element');\n }\n } // Define selection strategy based on `text` property.\n\n\n if (text) {\n return actions_copy(text, {\n container: container\n });\n } // Defines which selection strategy based on `target` property.\n\n\n if (target) {\n return action === 'cut' ? actions_cut(target) : actions_copy(target, {\n container: container\n });\n }\n};\n\n/* harmony default export */ var actions_default = (ClipboardActionDefault);\n;// CONCATENATED MODULE: ./src/clipboard.js\nfunction clipboard_typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { clipboard_typeof = function _typeof(obj) { return typeof obj; }; } else { clipboard_typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? 
\"symbol\" : typeof obj; }; } return clipboard_typeof(obj); }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } }\n\nfunction _createClass(Constructor, protoProps, staticProps) { if (protoProps) _defineProperties(Constructor.prototype, protoProps); if (staticProps) _defineProperties(Constructor, staticProps); return Constructor; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function\"); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, writable: true, configurable: true } }); if (superClass) _setPrototypeOf(subClass, superClass); }\n\nfunction _setPrototypeOf(o, p) { _setPrototypeOf = Object.setPrototypeOf || function _setPrototypeOf(o, p) { o.__proto__ = p; return o; }; return _setPrototypeOf(o, p); }\n\nfunction _createSuper(Derived) { var hasNativeReflectConstruct = _isNativeReflectConstruct(); return function _createSuperInternal() { var Super = _getPrototypeOf(Derived), result; if (hasNativeReflectConstruct) { var NewTarget = _getPrototypeOf(this).constructor; result = Reflect.construct(Super, arguments, NewTarget); } else { result = Super.apply(this, arguments); } return _possibleConstructorReturn(this, result); }; }\n\nfunction _possibleConstructorReturn(self, call) { if (call && (clipboard_typeof(call) === \"object\" || typeof call === \"function\")) { return call; } return _assertThisInitialized(self); }\n\nfunction _assertThisInitialized(self) { if 
(self === void 0) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return self; }\n\nfunction _isNativeReflectConstruct() { if (typeof Reflect === \"undefined\" || !Reflect.construct) return false; if (Reflect.construct.sham) return false; if (typeof Proxy === \"function\") return true; try { Date.prototype.toString.call(Reflect.construct(Date, [], function () {})); return true; } catch (e) { return false; } }\n\nfunction _getPrototypeOf(o) { _getPrototypeOf = Object.setPrototypeOf ? Object.getPrototypeOf : function _getPrototypeOf(o) { return o.__proto__ || Object.getPrototypeOf(o); }; return _getPrototypeOf(o); }\n\n\n\n\n\n\n/**\n * Helper function to retrieve attribute value.\n * @param {String} suffix\n * @param {Element} element\n */\n\nfunction getAttributeValue(suffix, element) {\n var attribute = \"data-clipboard-\".concat(suffix);\n\n if (!element.hasAttribute(attribute)) {\n return;\n }\n\n return element.getAttribute(attribute);\n}\n/**\n * Base class which takes one or more elements, adds event listeners to them,\n * and instantiates a new `ClipboardAction` on each click.\n */\n\n\nvar Clipboard = /*#__PURE__*/function (_Emitter) {\n _inherits(Clipboard, _Emitter);\n\n var _super = _createSuper(Clipboard);\n\n /**\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n * @param {Object} options\n */\n function Clipboard(trigger, options) {\n var _this;\n\n _classCallCheck(this, Clipboard);\n\n _this = _super.call(this);\n\n _this.resolveOptions(options);\n\n _this.listenClick(trigger);\n\n return _this;\n }\n /**\n * Defines if attributes would be resolved using internal setter functions\n * or custom functions that were passed in the constructor.\n * @param {Object} options\n */\n\n\n _createClass(Clipboard, [{\n key: \"resolveOptions\",\n value: function resolveOptions() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? 
arguments[0] : {};\n this.action = typeof options.action === 'function' ? options.action : this.defaultAction;\n this.target = typeof options.target === 'function' ? options.target : this.defaultTarget;\n this.text = typeof options.text === 'function' ? options.text : this.defaultText;\n this.container = clipboard_typeof(options.container) === 'object' ? options.container : document.body;\n }\n /**\n * Adds a click event listener to the passed trigger.\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n */\n\n }, {\n key: \"listenClick\",\n value: function listenClick(trigger) {\n var _this2 = this;\n\n this.listener = listen_default()(trigger, 'click', function (e) {\n return _this2.onClick(e);\n });\n }\n /**\n * Defines a new `ClipboardAction` on each click event.\n * @param {Event} e\n */\n\n }, {\n key: \"onClick\",\n value: function onClick(e) {\n var trigger = e.delegateTarget || e.currentTarget;\n var action = this.action(trigger) || 'copy';\n var text = actions_default({\n action: action,\n container: this.container,\n target: this.target(trigger),\n text: this.text(trigger)\n }); // Fires an event based on the copy operation result.\n\n this.emit(text ? 
'success' : 'error', {\n action: action,\n text: text,\n trigger: trigger,\n clearSelection: function clearSelection() {\n if (trigger) {\n trigger.focus();\n }\n\n window.getSelection().removeAllRanges();\n }\n });\n }\n /**\n * Default `action` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultAction\",\n value: function defaultAction(trigger) {\n return getAttributeValue('action', trigger);\n }\n /**\n * Default `target` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultTarget\",\n value: function defaultTarget(trigger) {\n var selector = getAttributeValue('target', trigger);\n\n if (selector) {\n return document.querySelector(selector);\n }\n }\n /**\n * Allow fire programmatically a copy action\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @returns Text copied.\n */\n\n }, {\n key: \"defaultText\",\n\n /**\n * Default `text` lookup function.\n * @param {Element} trigger\n */\n value: function defaultText(trigger) {\n return getAttributeValue('text', trigger);\n }\n /**\n * Destroy lifecycle.\n */\n\n }, {\n key: \"destroy\",\n value: function destroy() {\n this.listener.destroy();\n }\n }], [{\n key: \"copy\",\n value: function copy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n return actions_copy(target, options);\n }\n /**\n * Allow fire programmatically a cut action\n * @param {String|HTMLElement} target\n * @returns Text cutted.\n */\n\n }, {\n key: \"cut\",\n value: function cut(target) {\n return actions_cut(target);\n }\n /**\n * Returns the support of the given action, or all actions if no action is\n * given.\n * @param {String} [action]\n */\n\n }, {\n key: \"isSupported\",\n value: function isSupported() {\n var action = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : ['copy', 'cut'];\n var actions = typeof action === 'string' ? 
[action] : action;\n var support = !!document.queryCommandSupported;\n actions.forEach(function (action) {\n support = support && !!document.queryCommandSupported(action);\n });\n return support;\n }\n }]);\n\n return Clipboard;\n}((tiny_emitter_default()));\n\n/* harmony default export */ var clipboard = (Clipboard);\n\n/***/ }),\n\n/***/ 828:\n/***/ (function(module) {\n\nvar DOCUMENT_NODE_TYPE = 9;\n\n/**\n * A polyfill for Element.matches()\n */\nif (typeof Element !== 'undefined' && !Element.prototype.matches) {\n var proto = Element.prototype;\n\n proto.matches = proto.matchesSelector ||\n proto.mozMatchesSelector ||\n proto.msMatchesSelector ||\n proto.oMatchesSelector ||\n proto.webkitMatchesSelector;\n}\n\n/**\n * Finds the closest parent that matches a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @return {Function}\n */\nfunction closest (element, selector) {\n while (element && element.nodeType !== DOCUMENT_NODE_TYPE) {\n if (typeof element.matches === 'function' &&\n element.matches(selector)) {\n return element;\n }\n element = element.parentNode;\n }\n}\n\nmodule.exports = closest;\n\n\n/***/ }),\n\n/***/ 438:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar closest = __webpack_require__(828);\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} useCapture\n * @return {Object}\n */\nfunction _delegate(element, selector, type, callback, useCapture) {\n var listenerFn = listener.apply(this, arguments);\n\n element.addEventListener(type, listenerFn, useCapture);\n\n return {\n destroy: function() {\n element.removeEventListener(type, listenerFn, useCapture);\n }\n }\n}\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element|String|Array} [elements]\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} 
useCapture\n * @return {Object}\n */\nfunction delegate(elements, selector, type, callback, useCapture) {\n // Handle the regular Element usage\n if (typeof elements.addEventListener === 'function') {\n return _delegate.apply(null, arguments);\n }\n\n // Handle Element-less usage, it defaults to global delegation\n if (typeof type === 'function') {\n // Use `document` as the first parameter, then apply arguments\n // This is a short way to .unshift `arguments` without running into deoptimizations\n return _delegate.bind(null, document).apply(null, arguments);\n }\n\n // Handle Selector-based usage\n if (typeof elements === 'string') {\n elements = document.querySelectorAll(elements);\n }\n\n // Handle Array-like based usage\n return Array.prototype.map.call(elements, function (element) {\n return _delegate(element, selector, type, callback, useCapture);\n });\n}\n\n/**\n * Finds closest match and invokes callback.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Function}\n */\nfunction listener(element, selector, type, callback) {\n return function(e) {\n e.delegateTarget = closest(e.target, selector);\n\n if (e.delegateTarget) {\n callback.call(element, e);\n }\n }\n}\n\nmodule.exports = delegate;\n\n\n/***/ }),\n\n/***/ 879:\n/***/ (function(__unused_webpack_module, exports) {\n\n/**\n * Check if argument is a HTML element.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.node = function(value) {\n return value !== undefined\n && value instanceof HTMLElement\n && value.nodeType === 1;\n};\n\n/**\n * Check if argument is a list of HTML elements.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.nodeList = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return value !== undefined\n && (type === '[object NodeList]' || type === '[object HTMLCollection]')\n && ('length' in value)\n && (value.length === 0 || 
exports.node(value[0]));\n};\n\n/**\n * Check if argument is a string.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.string = function(value) {\n return typeof value === 'string'\n || value instanceof String;\n};\n\n/**\n * Check if argument is a function.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.fn = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return type === '[object Function]';\n};\n\n\n/***/ }),\n\n/***/ 370:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar is = __webpack_require__(879);\nvar delegate = __webpack_require__(438);\n\n/**\n * Validates all params and calls the right\n * listener function based on its target type.\n *\n * @param {String|HTMLElement|HTMLCollection|NodeList} target\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listen(target, type, callback) {\n if (!target && !type && !callback) {\n throw new Error('Missing required arguments');\n }\n\n if (!is.string(type)) {\n throw new TypeError('Second argument must be a String');\n }\n\n if (!is.fn(callback)) {\n throw new TypeError('Third argument must be a Function');\n }\n\n if (is.node(target)) {\n return listenNode(target, type, callback);\n }\n else if (is.nodeList(target)) {\n return listenNodeList(target, type, callback);\n }\n else if (is.string(target)) {\n return listenSelector(target, type, callback);\n }\n else {\n throw new TypeError('First argument must be a String, HTMLElement, HTMLCollection, or NodeList');\n }\n}\n\n/**\n * Adds an event listener to a HTML element\n * and returns a remove listener function.\n *\n * @param {HTMLElement} node\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNode(node, type, callback) {\n node.addEventListener(type, callback);\n\n return {\n destroy: function() {\n node.removeEventListener(type, callback);\n }\n }\n}\n\n/**\n * Add an event listener 
to a list of HTML elements\n * and returns a remove listener function.\n *\n * @param {NodeList|HTMLCollection} nodeList\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNodeList(nodeList, type, callback) {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.addEventListener(type, callback);\n });\n\n return {\n destroy: function() {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.removeEventListener(type, callback);\n });\n }\n }\n}\n\n/**\n * Add an event listener to a selector\n * and returns a remove listener function.\n *\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenSelector(selector, type, callback) {\n return delegate(document.body, selector, type, callback);\n}\n\nmodule.exports = listen;\n\n\n/***/ }),\n\n/***/ 817:\n/***/ (function(module) {\n\nfunction select(element) {\n var selectedText;\n\n if (element.nodeName === 'SELECT') {\n element.focus();\n\n selectedText = element.value;\n }\n else if (element.nodeName === 'INPUT' || element.nodeName === 'TEXTAREA') {\n var isReadOnly = element.hasAttribute('readonly');\n\n if (!isReadOnly) {\n element.setAttribute('readonly', '');\n }\n\n element.select();\n element.setSelectionRange(0, element.value.length);\n\n if (!isReadOnly) {\n element.removeAttribute('readonly');\n }\n\n selectedText = element.value;\n }\n else {\n if (element.hasAttribute('contenteditable')) {\n element.focus();\n }\n\n var selection = window.getSelection();\n var range = document.createRange();\n\n range.selectNodeContents(element);\n selection.removeAllRanges();\n selection.addRange(range);\n\n selectedText = selection.toString();\n }\n\n return selectedText;\n}\n\nmodule.exports = select;\n\n\n/***/ }),\n\n/***/ 279:\n/***/ (function(module) {\n\nfunction E () {\n // Keep this empty so it's easier to inherit from\n // (via https://github.com/lipsmack from 
https://github.com/scottcorgan/tiny-emitter/issues/3)\n}\n\nE.prototype = {\n on: function (name, callback, ctx) {\n var e = this.e || (this.e = {});\n\n (e[name] || (e[name] = [])).push({\n fn: callback,\n ctx: ctx\n });\n\n return this;\n },\n\n once: function (name, callback, ctx) {\n var self = this;\n function listener () {\n self.off(name, listener);\n callback.apply(ctx, arguments);\n };\n\n listener._ = callback\n return this.on(name, listener, ctx);\n },\n\n emit: function (name) {\n var data = [].slice.call(arguments, 1);\n var evtArr = ((this.e || (this.e = {}))[name] || []).slice();\n var i = 0;\n var len = evtArr.length;\n\n for (i; i < len; i++) {\n evtArr[i].fn.apply(evtArr[i].ctx, data);\n }\n\n return this;\n },\n\n off: function (name, callback) {\n var e = this.e || (this.e = {});\n var evts = e[name];\n var liveEvents = [];\n\n if (evts && callback) {\n for (var i = 0, len = evts.length; i < len; i++) {\n if (evts[i].fn !== callback && evts[i].fn._ !== callback)\n liveEvents.push(evts[i]);\n }\n }\n\n // Remove event from queue to prevent memory leak\n // Suggested by https://github.com/lazd\n // Ref: https://github.com/scottcorgan/tiny-emitter/commit/c6ebfaa9bc973b33d110a84a307742b7cf94c953#commitcomment-5024910\n\n (liveEvents.length)\n ? 
e[name] = liveEvents\n : delete e[name];\n\n return this;\n }\n};\n\nmodule.exports = E;\nmodule.exports.TinyEmitter = E;\n\n\n/***/ })\n\n/******/ \t});\n/************************************************************************/\n/******/ \t// The module cache\n/******/ \tvar __webpack_module_cache__ = {};\n/******/ \t\n/******/ \t// The require function\n/******/ \tfunction __webpack_require__(moduleId) {\n/******/ \t\t// Check if module is in cache\n/******/ \t\tif(__webpack_module_cache__[moduleId]) {\n/******/ \t\t\treturn __webpack_module_cache__[moduleId].exports;\n/******/ \t\t}\n/******/ \t\t// Create a new module (and put it into the cache)\n/******/ \t\tvar module = __webpack_module_cache__[moduleId] = {\n/******/ \t\t\t// no module.id needed\n/******/ \t\t\t// no module.loaded needed\n/******/ \t\t\texports: {}\n/******/ \t\t};\n/******/ \t\n/******/ \t\t// Execute the module function\n/******/ \t\t__webpack_modules__[moduleId](module, module.exports, __webpack_require__);\n/******/ \t\n/******/ \t\t// Return the exports of the module\n/******/ \t\treturn module.exports;\n/******/ \t}\n/******/ \t\n/************************************************************************/\n/******/ \t/* webpack/runtime/compat get default export */\n/******/ \t!function() {\n/******/ \t\t// getDefaultExport function for compatibility with non-harmony modules\n/******/ \t\t__webpack_require__.n = function(module) {\n/******/ \t\t\tvar getter = module && module.__esModule ?\n/******/ \t\t\t\tfunction() { return module['default']; } :\n/******/ \t\t\t\tfunction() { return module; };\n/******/ \t\t\t__webpack_require__.d(getter, { a: getter });\n/******/ \t\t\treturn getter;\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/define property getters */\n/******/ \t!function() {\n/******/ \t\t// define getter functions for harmony exports\n/******/ \t\t__webpack_require__.d = function(exports, definition) {\n/******/ \t\t\tfor(var key in definition) 
{\n/******/ \t\t\t\tif(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {\n/******/ \t\t\t\t\tObject.defineProperty(exports, key, { enumerable: true, get: definition[key] });\n/******/ \t\t\t\t}\n/******/ \t\t\t}\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/hasOwnProperty shorthand */\n/******/ \t!function() {\n/******/ \t\t__webpack_require__.o = function(obj, prop) { return Object.prototype.hasOwnProperty.call(obj, prop); }\n/******/ \t}();\n/******/ \t\n/************************************************************************/\n/******/ \t// module exports must be returned from runtime so entry inlining is disabled\n/******/ \t// startup\n/******/ \t// Load entry module and return exports\n/******/ \treturn __webpack_require__(686);\n/******/ })()\n.default;\n});", "/*!\n * escape-html\n * Copyright(c) 2012-2013 TJ Holowaychuk\n * Copyright(c) 2015 Andreas Lubbe\n * Copyright(c) 2015 Tiancheng \"Timothy\" Gu\n * MIT Licensed\n */\n\n'use strict';\n\n/**\n * Module variables.\n * @private\n */\n\nvar matchHtmlRegExp = /[\"'&<>]/;\n\n/**\n * Module exports.\n * @public\n */\n\nmodule.exports = escapeHtml;\n\n/**\n * Escape special characters in the given string of html.\n *\n * @param {string} string The string to escape for inserting into HTML\n * @return {string}\n * @public\n */\n\nfunction escapeHtml(string) {\n var str = '' + string;\n var match = matchHtmlRegExp.exec(str);\n\n if (!match) {\n return str;\n }\n\n var escape;\n var html = '';\n var index = 0;\n var lastIndex = 0;\n\n for (index = match.index; index < str.length; index++) {\n switch (str.charCodeAt(index)) {\n case 34: // \"\n escape = '"';\n break;\n case 38: // &\n escape = '&';\n break;\n case 39: // '\n escape = ''';\n break;\n case 60: // <\n escape = '<';\n break;\n case 62: // >\n escape = '>';\n break;\n default:\n continue;\n }\n\n if (lastIndex !== index) {\n html += str.substring(lastIndex, index);\n }\n\n lastIndex = 
index + 1;\n html += escape;\n }\n\n return lastIndex !== index\n ? html + str.substring(lastIndex, index)\n : html;\n}\n", "/*\n * Copyright (c) 2016-2023 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport \"focus-visible\"\n\nimport {\n EMPTY,\n NEVER,\n Observable,\n Subject,\n defer,\n delay,\n filter,\n map,\n merge,\n mergeWith,\n shareReplay,\n switchMap\n} from \"rxjs\"\n\nimport { configuration, feature } from \"./_\"\nimport {\n at,\n getActiveElement,\n getOptionalElement,\n requestJSON,\n setLocation,\n setToggle,\n watchDocument,\n watchKeyboard,\n watchLocation,\n watchLocationTarget,\n watchMedia,\n watchPrint,\n watchScript,\n watchViewport\n} from \"./browser\"\nimport {\n getComponentElement,\n getComponentElements,\n mountAnnounce,\n mountBackToTop,\n mountConsent,\n mountContent,\n mountDialog,\n mountHeader,\n mountHeaderTitle,\n mountPalette,\n mountProgress,\n mountSearch,\n 
mountSearchHiglight,\n mountSidebar,\n mountSource,\n mountTableOfContents,\n mountTabs,\n watchHeader,\n watchMain\n} from \"./components\"\nimport {\n SearchIndex,\n setupClipboardJS,\n setupInstantNavigation,\n setupVersionSelector\n} from \"./integrations\"\nimport {\n patchEllipsis,\n patchIndeterminate,\n patchScrollfix,\n patchScrolllock\n} from \"./patches\"\nimport \"./polyfills\"\n\n/* ----------------------------------------------------------------------------\n * Functions - @todo refactor\n * ------------------------------------------------------------------------- */\n\n/**\n * Fetch search index\n *\n * @returns Search index observable\n */\nfunction fetchSearchIndex(): Observable {\n if (location.protocol === \"file:\") {\n return watchScript(\n `${new URL(\"search/search_index.js\", config.base)}`\n )\n .pipe(\n // @ts-ignore - @todo fix typings\n map(() => __index),\n shareReplay(1)\n )\n } else {\n return requestJSON(\n new URL(\"search/search_index.json\", config.base)\n )\n }\n}\n\n/* ----------------------------------------------------------------------------\n * Application\n * ------------------------------------------------------------------------- */\n\n/* Yay, JavaScript is available */\ndocument.documentElement.classList.remove(\"no-js\")\ndocument.documentElement.classList.add(\"js\")\n\n/* Set up navigation observables and subjects */\nconst document$ = watchDocument()\nconst location$ = watchLocation()\nconst target$ = watchLocationTarget(location$)\nconst keyboard$ = watchKeyboard()\n\n/* Set up media observables */\nconst viewport$ = watchViewport()\nconst tablet$ = watchMedia(\"(min-width: 960px)\")\nconst screen$ = watchMedia(\"(min-width: 1220px)\")\nconst print$ = watchPrint()\n\n/* Retrieve search index, if search is enabled */\nconst config = configuration()\nconst index$ = document.forms.namedItem(\"search\")\n ? 
fetchSearchIndex()\n : NEVER\n\n/* Set up Clipboard.js integration */\nconst alert$ = new Subject()\nsetupClipboardJS({ alert$ })\n\n/* Set up progress indicator */\nconst progress$ = new Subject()\n\n/* Set up instant navigation, if enabled */\nif (feature(\"navigation.instant\"))\n setupInstantNavigation({ location$, viewport$, progress$ })\n .subscribe(document$)\n\n/* Set up version selector */\nif (config.version?.provider === \"mike\")\n setupVersionSelector({ document$ })\n\n/* Always close drawer and search on navigation */\nmerge(location$, target$)\n .pipe(\n delay(125)\n )\n .subscribe(() => {\n setToggle(\"drawer\", false)\n setToggle(\"search\", false)\n })\n\n/* Set up global keyboard handlers */\nkeyboard$\n .pipe(\n filter(({ mode }) => mode === \"global\")\n )\n .subscribe(key => {\n switch (key.type) {\n\n /* Go to previous page */\n case \"p\":\n case \",\":\n const prev = getOptionalElement(\"link[rel=prev]\")\n if (typeof prev !== \"undefined\")\n setLocation(prev)\n break\n\n /* Go to next page */\n case \"n\":\n case \".\":\n const next = getOptionalElement(\"link[rel=next]\")\n if (typeof next !== \"undefined\")\n setLocation(next)\n break\n\n /* Expand navigation, see https://bit.ly/3ZjG5io */\n case \"Enter\":\n const active = getActiveElement()\n if (active instanceof HTMLLabelElement)\n active.click()\n }\n })\n\n/* Set up patches */\npatchEllipsis({ document$ })\npatchIndeterminate({ document$, tablet$ })\npatchScrollfix({ document$ })\npatchScrolllock({ viewport$, tablet$ })\n\n/* Set up header and main area observable */\nconst header$ = watchHeader(getComponentElement(\"header\"), { viewport$ })\nconst main$ = document$\n .pipe(\n map(() => getComponentElement(\"main\")),\n switchMap(el => watchMain(el, { viewport$, header$ })),\n shareReplay(1)\n )\n\n/* Set up control component observables */\nconst control$ = merge(\n\n /* Consent */\n ...getComponentElements(\"consent\")\n .map(el => mountConsent(el, { target$ })),\n\n /* Dialog 
*/\n ...getComponentElements(\"dialog\")\n .map(el => mountDialog(el, { alert$ })),\n\n /* Header */\n ...getComponentElements(\"header\")\n .map(el => mountHeader(el, { viewport$, header$, main$ })),\n\n /* Color palette */\n ...getComponentElements(\"palette\")\n .map(el => mountPalette(el)),\n\n /* Progress bar */\n ...getComponentElements(\"progress\")\n .map(el => mountProgress(el, { progress$ })),\n\n /* Search */\n ...getComponentElements(\"search\")\n .map(el => mountSearch(el, { index$, keyboard$ })),\n\n /* Repository information */\n ...getComponentElements(\"source\")\n .map(el => mountSource(el))\n)\n\n/* Set up content component observables */\nconst content$ = defer(() => merge(\n\n /* Announcement bar */\n ...getComponentElements(\"announce\")\n .map(el => mountAnnounce(el)),\n\n /* Content */\n ...getComponentElements(\"content\")\n .map(el => mountContent(el, { viewport$, target$, print$ })),\n\n /* Search highlighting */\n ...getComponentElements(\"content\")\n .map(el => feature(\"search.highlight\")\n ? mountSearchHiglight(el, { index$, location$ })\n : EMPTY\n ),\n\n /* Header title */\n ...getComponentElements(\"header-title\")\n .map(el => mountHeaderTitle(el, { viewport$, header$ })),\n\n /* Sidebar */\n ...getComponentElements(\"sidebar\")\n .map(el => el.getAttribute(\"data-md-type\") === \"navigation\"\n ? 
at(screen$, () => mountSidebar(el, { viewport$, header$, main$ }))\n : at(tablet$, () => mountSidebar(el, { viewport$, header$, main$ }))\n ),\n\n /* Navigation tabs */\n ...getComponentElements(\"tabs\")\n .map(el => mountTabs(el, { viewport$, header$ })),\n\n /* Table of contents */\n ...getComponentElements(\"toc\")\n .map(el => mountTableOfContents(el, {\n viewport$, header$, main$, target$\n })),\n\n /* Back-to-top button */\n ...getComponentElements(\"top\")\n .map(el => mountBackToTop(el, { viewport$, header$, main$, target$ }))\n))\n\n/* Set up component observables */\nconst component$ = document$\n .pipe(\n switchMap(() => content$),\n mergeWith(control$),\n shareReplay(1)\n )\n\n/* Subscribe to all components */\ncomponent$.subscribe()\n\n/* ----------------------------------------------------------------------------\n * Exports\n * ------------------------------------------------------------------------- */\n\nwindow.document$ = document$ /* Document observable */\nwindow.location$ = location$ /* Location subject */\nwindow.target$ = target$ /* Location target observable */\nwindow.keyboard$ = keyboard$ /* Keyboard observable */\nwindow.viewport$ = viewport$ /* Viewport observable */\nwindow.tablet$ = tablet$ /* Media tablet observable */\nwindow.screen$ = screen$ /* Media screen observable */\nwindow.print$ = print$ /* Media print observable */\nwindow.alert$ = alert$ /* Alert subject */\nwindow.progress$ = progress$ /* Progress indicator subject */\nwindow.component$ = component$ /* Component observable */\n", "/*! *****************************************************************************\r\nCopyright (c) Microsoft Corporation.\r\n\r\nPermission to use, copy, modify, and/or distribute this software for any\r\npurpose with or without fee is hereby granted.\r\n\r\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH\r\nREGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY\r\nAND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,\r\nINDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM\r\nLOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR\r\nOTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR\r\nPERFORMANCE OF THIS SOFTWARE.\r\n***************************************************************************** */\r\n/* global Reflect, Promise */\r\n\r\nvar extendStatics = function(d, b) {\r\n extendStatics = Object.setPrototypeOf ||\r\n ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||\r\n function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };\r\n return extendStatics(d, b);\r\n};\r\n\r\nexport function __extends(d, b) {\r\n if (typeof b !== \"function\" && b !== null)\r\n throw new TypeError(\"Class extends value \" + String(b) + \" is not a constructor or null\");\r\n extendStatics(d, b);\r\n function __() { this.constructor = d; }\r\n d.prototype = b === null ? 
Object.create(b) : (__.prototype = b.prototype, new __());\r\n}\r\n\r\nexport var __assign = function() {\r\n __assign = Object.assign || function __assign(t) {\r\n for (var s, i = 1, n = arguments.length; i < n; i++) {\r\n s = arguments[i];\r\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p];\r\n }\r\n return t;\r\n }\r\n return __assign.apply(this, arguments);\r\n}\r\n\r\nexport function __rest(s, e) {\r\n var t = {};\r\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0)\r\n t[p] = s[p];\r\n if (s != null && typeof Object.getOwnPropertySymbols === \"function\")\r\n for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) {\r\n if (e.indexOf(p[i]) < 0 && Object.prototype.propertyIsEnumerable.call(s, p[i]))\r\n t[p[i]] = s[p[i]];\r\n }\r\n return t;\r\n}\r\n\r\nexport function __decorate(decorators, target, key, desc) {\r\n var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;\r\n if (typeof Reflect === \"object\" && typeof Reflect.decorate === \"function\") r = Reflect.decorate(decorators, target, key, desc);\r\n else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;\r\n return c > 3 && r && Object.defineProperty(target, key, r), r;\r\n}\r\n\r\nexport function __param(paramIndex, decorator) {\r\n return function (target, key) { decorator(target, key, paramIndex); }\r\n}\r\n\r\nexport function __metadata(metadataKey, metadataValue) {\r\n if (typeof Reflect === \"object\" && typeof Reflect.metadata === \"function\") return Reflect.metadata(metadataKey, metadataValue);\r\n}\r\n\r\nexport function __awaiter(thisArg, _arguments, P, generator) {\r\n function adopt(value) { return value instanceof P ? 
value : new P(function (resolve) { resolve(value); }); }\r\n return new (P || (P = Promise))(function (resolve, reject) {\r\n function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }\r\n function rejected(value) { try { step(generator[\"throw\"](value)); } catch (e) { reject(e); } }\r\n function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }\r\n step((generator = generator.apply(thisArg, _arguments || [])).next());\r\n });\r\n}\r\n\r\nexport function __generator(thisArg, body) {\r\n var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;\r\n return g = { next: verb(0), \"throw\": verb(1), \"return\": verb(2) }, typeof Symbol === \"function\" && (g[Symbol.iterator] = function() { return this; }), g;\r\n function verb(n) { return function (v) { return step([n, v]); }; }\r\n function step(op) {\r\n if (f) throw new TypeError(\"Generator is already executing.\");\r\n while (_) try {\r\n if (f = 1, y && (t = op[0] & 2 ? y[\"return\"] : op[0] ? 
y[\"throw\"] || ((t = y[\"return\"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;\r\n if (y = 0, t) op = [op[0] & 2, t.value];\r\n switch (op[0]) {\r\n case 0: case 1: t = op; break;\r\n case 4: _.label++; return { value: op[1], done: false };\r\n case 5: _.label++; y = op[1]; op = [0]; continue;\r\n case 7: op = _.ops.pop(); _.trys.pop(); continue;\r\n default:\r\n if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }\r\n if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }\r\n if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }\r\n if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }\r\n if (t[2]) _.ops.pop();\r\n _.trys.pop(); continue;\r\n }\r\n op = body.call(thisArg, _);\r\n } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }\r\n if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };\r\n }\r\n}\r\n\r\nexport var __createBinding = Object.create ? (function(o, m, k, k2) {\r\n if (k2 === undefined) k2 = k;\r\n Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } });\r\n}) : (function(o, m, k, k2) {\r\n if (k2 === undefined) k2 = k;\r\n o[k2] = m[k];\r\n});\r\n\r\nexport function __exportStar(m, o) {\r\n for (var p in m) if (p !== \"default\" && !Object.prototype.hasOwnProperty.call(o, p)) __createBinding(o, m, p);\r\n}\r\n\r\nexport function __values(o) {\r\n var s = typeof Symbol === \"function\" && Symbol.iterator, m = s && o[s], i = 0;\r\n if (m) return m.call(o);\r\n if (o && typeof o.length === \"number\") return {\r\n next: function () {\r\n if (o && i >= o.length) o = void 0;\r\n return { value: o && o[i++], done: !o };\r\n }\r\n };\r\n throw new TypeError(s ? 
\"Object is not iterable.\" : \"Symbol.iterator is not defined.\");\r\n}\r\n\r\nexport function __read(o, n) {\r\n var m = typeof Symbol === \"function\" && o[Symbol.iterator];\r\n if (!m) return o;\r\n var i = m.call(o), r, ar = [], e;\r\n try {\r\n while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);\r\n }\r\n catch (error) { e = { error: error }; }\r\n finally {\r\n try {\r\n if (r && !r.done && (m = i[\"return\"])) m.call(i);\r\n }\r\n finally { if (e) throw e.error; }\r\n }\r\n return ar;\r\n}\r\n\r\n/** @deprecated */\r\nexport function __spread() {\r\n for (var ar = [], i = 0; i < arguments.length; i++)\r\n ar = ar.concat(__read(arguments[i]));\r\n return ar;\r\n}\r\n\r\n/** @deprecated */\r\nexport function __spreadArrays() {\r\n for (var s = 0, i = 0, il = arguments.length; i < il; i++) s += arguments[i].length;\r\n for (var r = Array(s), k = 0, i = 0; i < il; i++)\r\n for (var a = arguments[i], j = 0, jl = a.length; j < jl; j++, k++)\r\n r[k] = a[j];\r\n return r;\r\n}\r\n\r\nexport function __spreadArray(to, from, pack) {\r\n if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {\r\n if (ar || !(i in from)) {\r\n if (!ar) ar = Array.prototype.slice.call(from, 0, i);\r\n ar[i] = from[i];\r\n }\r\n }\r\n return to.concat(ar || Array.prototype.slice.call(from));\r\n}\r\n\r\nexport function __await(v) {\r\n return this instanceof __await ? 
(this.v = v, this) : new __await(v);\r\n}\r\n\r\nexport function __asyncGenerator(thisArg, _arguments, generator) {\r\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\r\n var g = generator.apply(thisArg, _arguments || []), i, q = [];\r\n return i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i;\r\n function verb(n) { if (g[n]) i[n] = function (v) { return new Promise(function (a, b) { q.push([n, v, a, b]) > 1 || resume(n, v); }); }; }\r\n function resume(n, v) { try { step(g[n](v)); } catch (e) { settle(q[0][3], e); } }\r\n function step(r) { r.value instanceof __await ? Promise.resolve(r.value.v).then(fulfill, reject) : settle(q[0][2], r); }\r\n function fulfill(value) { resume(\"next\", value); }\r\n function reject(value) { resume(\"throw\", value); }\r\n function settle(f, v) { if (f(v), q.shift(), q.length) resume(q[0][0], q[0][1]); }\r\n}\r\n\r\nexport function __asyncDelegator(o) {\r\n var i, p;\r\n return i = {}, verb(\"next\"), verb(\"throw\", function (e) { throw e; }), verb(\"return\"), i[Symbol.iterator] = function () { return this; }, i;\r\n function verb(n, f) { i[n] = o[n] ? function (v) { return (p = !p) ? { value: __await(o[n](v)), done: n === \"return\" } : f ? f(v) : v; } : f; }\r\n}\r\n\r\nexport function __asyncValues(o) {\r\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\r\n var m = o[Symbol.asyncIterator], i;\r\n return m ? m.call(o) : (o = typeof __values === \"function\" ? 
__values(o) : o[Symbol.iterator](), i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i);\r\n function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }\r\n function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }\r\n}\r\n\r\nexport function __makeTemplateObject(cooked, raw) {\r\n if (Object.defineProperty) { Object.defineProperty(cooked, \"raw\", { value: raw }); } else { cooked.raw = raw; }\r\n return cooked;\r\n};\r\n\r\nvar __setModuleDefault = Object.create ? (function(o, v) {\r\n Object.defineProperty(o, \"default\", { enumerable: true, value: v });\r\n}) : function(o, v) {\r\n o[\"default\"] = v;\r\n};\r\n\r\nexport function __importStar(mod) {\r\n if (mod && mod.__esModule) return mod;\r\n var result = {};\r\n if (mod != null) for (var k in mod) if (k !== \"default\" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);\r\n __setModuleDefault(result, mod);\r\n return result;\r\n}\r\n\r\nexport function __importDefault(mod) {\r\n return (mod && mod.__esModule) ? mod : { default: mod };\r\n}\r\n\r\nexport function __classPrivateFieldGet(receiver, state, kind, f) {\r\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a getter\");\r\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot read private member from an object whose class did not declare it\");\r\n return kind === \"m\" ? f : kind === \"a\" ? f.call(receiver) : f ? 
f.value : state.get(receiver);\r\n}\r\n\r\nexport function __classPrivateFieldSet(receiver, state, value, kind, f) {\r\n if (kind === \"m\") throw new TypeError(\"Private method is not writable\");\r\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a setter\");\r\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot write private member to an object whose class did not declare it\");\r\n return (kind === \"a\" ? f.call(receiver, value) : f ? f.value = value : state.set(receiver, value)), value;\r\n}\r\n", "/**\n * Returns true if the object is a function.\n * @param value The value to check\n */\nexport function isFunction(value: any): value is (...args: any[]) => any {\n return typeof value === 'function';\n}\n", "/**\n * Used to create Error subclasses until the community moves away from ES5.\n *\n * This is because compiling from TypeScript down to ES5 has issues with subclassing Errors\n * as well as other built-in types: https://github.com/Microsoft/TypeScript/issues/12123\n *\n * @param createImpl A factory function to create the actual constructor implementation. The returned\n * function should be a named function that calls `_super` internally.\n */\nexport function createErrorClass(createImpl: (_super: any) => any): T {\n const _super = (instance: any) => {\n Error.call(instance);\n instance.stack = new Error().stack;\n };\n\n const ctorFunc = createImpl(_super);\n ctorFunc.prototype = Object.create(Error.prototype);\n ctorFunc.prototype.constructor = ctorFunc;\n return ctorFunc;\n}\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface UnsubscriptionError extends Error {\n readonly errors: any[];\n}\n\nexport interface UnsubscriptionErrorCtor {\n /**\n * @deprecated Internal implementation detail. 
Do not construct error instances.\n * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n */\n new (errors: any[]): UnsubscriptionError;\n}\n\n/**\n * An error thrown when one or more errors have occurred during the\n * `unsubscribe` of a {@link Subscription}.\n */\nexport const UnsubscriptionError: UnsubscriptionErrorCtor = createErrorClass(\n (_super) =>\n function UnsubscriptionErrorImpl(this: any, errors: (Error | string)[]) {\n _super(this);\n this.message = errors\n ? `${errors.length} errors occurred during unsubscription:\n${errors.map((err, i) => `${i + 1}) ${err.toString()}`).join('\\n ')}`\n : '';\n this.name = 'UnsubscriptionError';\n this.errors = errors;\n }\n);\n", "/**\n * Removes an item from an array, mutating it.\n * @param arr The array to remove the item from\n * @param item The item to remove\n */\nexport function arrRemove(arr: T[] | undefined | null, item: T) {\n if (arr) {\n const index = arr.indexOf(item);\n 0 <= index && arr.splice(index, 1);\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { UnsubscriptionError } from './util/UnsubscriptionError';\nimport { SubscriptionLike, TeardownLogic, Unsubscribable } from './types';\nimport { arrRemove } from './util/arrRemove';\n\n/**\n * Represents a disposable resource, such as the execution of an Observable. 
A\n * Subscription has one important method, `unsubscribe`, that takes no argument\n * and just disposes the resource held by the subscription.\n *\n * Additionally, subscriptions may be grouped together through the `add()`\n * method, which will attach a child Subscription to the current Subscription.\n * When a Subscription is unsubscribed, all its children (and its grandchildren)\n * will be unsubscribed as well.\n *\n * @class Subscription\n */\nexport class Subscription implements SubscriptionLike {\n /** @nocollapse */\n public static EMPTY = (() => {\n const empty = new Subscription();\n empty.closed = true;\n return empty;\n })();\n\n /**\n * A flag to indicate whether this Subscription has already been unsubscribed.\n */\n public closed = false;\n\n private _parentage: Subscription[] | Subscription | null = null;\n\n /**\n * The list of registered finalizers to execute upon unsubscription. Adding and removing from this\n * list occurs in the {@link #add} and {@link #remove} methods.\n */\n private _finalizers: Exclude[] | null = null;\n\n /**\n * @param initialTeardown A function executed first as part of the finalization\n * process that is kicked off when {@link #unsubscribe} is called.\n */\n constructor(private initialTeardown?: () => void) {}\n\n /**\n * Disposes the resources held by the subscription. 
May, for instance, cancel\n * an ongoing Observable execution or cancel any other type of work that\n * started when the Subscription was created.\n * @return {void}\n */\n unsubscribe(): void {\n let errors: any[] | undefined;\n\n if (!this.closed) {\n this.closed = true;\n\n // Remove this from it's parents.\n const { _parentage } = this;\n if (_parentage) {\n this._parentage = null;\n if (Array.isArray(_parentage)) {\n for (const parent of _parentage) {\n parent.remove(this);\n }\n } else {\n _parentage.remove(this);\n }\n }\n\n const { initialTeardown: initialFinalizer } = this;\n if (isFunction(initialFinalizer)) {\n try {\n initialFinalizer();\n } catch (e) {\n errors = e instanceof UnsubscriptionError ? e.errors : [e];\n }\n }\n\n const { _finalizers } = this;\n if (_finalizers) {\n this._finalizers = null;\n for (const finalizer of _finalizers) {\n try {\n execFinalizer(finalizer);\n } catch (err) {\n errors = errors ?? [];\n if (err instanceof UnsubscriptionError) {\n errors = [...errors, ...err.errors];\n } else {\n errors.push(err);\n }\n }\n }\n }\n\n if (errors) {\n throw new UnsubscriptionError(errors);\n }\n }\n }\n\n /**\n * Adds a finalizer to this subscription, so that finalization will be unsubscribed/called\n * when this subscription is unsubscribed. If this subscription is already {@link #closed},\n * because it has already been unsubscribed, then whatever finalizer is passed to it\n * will automatically be executed (unless the finalizer itself is also a closed subscription).\n *\n * Closed Subscriptions cannot be added as finalizers to any subscription. Adding a closed\n * subscription to a any subscription will result in no operation. (A noop).\n *\n * Adding a subscription to itself, or adding `null` or `undefined` will not perform any\n * operation at all. (A noop).\n *\n * `Subscription` instances that are added to this instance will automatically remove themselves\n * if they are unsubscribed. 
Functions and {@link Unsubscribable} objects that you wish to remove\n * will need to be removed manually with {@link #remove}\n *\n * @param teardown The finalization logic to add to this subscription.\n */\n add(teardown: TeardownLogic): void {\n // Only add the finalizer if it's not undefined\n // and don't add a subscription to itself.\n if (teardown && teardown !== this) {\n if (this.closed) {\n // If this subscription is already closed,\n // execute whatever finalizer is handed to it automatically.\n execFinalizer(teardown);\n } else {\n if (teardown instanceof Subscription) {\n // We don't add closed subscriptions, and we don't add the same subscription\n // twice. Subscription unsubscribe is idempotent.\n if (teardown.closed || teardown._hasParent(this)) {\n return;\n }\n teardown._addParent(this);\n }\n (this._finalizers = this._finalizers ?? []).push(teardown);\n }\n }\n }\n\n /**\n * Checks to see if a this subscription already has a particular parent.\n * This will signal that this subscription has already been added to the parent in question.\n * @param parent the parent to check for\n */\n private _hasParent(parent: Subscription) {\n const { _parentage } = this;\n return _parentage === parent || (Array.isArray(_parentage) && _parentage.includes(parent));\n }\n\n /**\n * Adds a parent to this subscription so it can be removed from the parent if it\n * unsubscribes on it's own.\n *\n * NOTE: THIS ASSUMES THAT {@link _hasParent} HAS ALREADY BEEN CHECKED.\n * @param parent The parent subscription to add\n */\n private _addParent(parent: Subscription) {\n const { _parentage } = this;\n this._parentage = Array.isArray(_parentage) ? (_parentage.push(parent), _parentage) : _parentage ? 
[_parentage, parent] : parent;\n }\n\n /**\n * Called on a child when it is removed via {@link #remove}.\n * @param parent The parent to remove\n */\n private _removeParent(parent: Subscription) {\n const { _parentage } = this;\n if (_parentage === parent) {\n this._parentage = null;\n } else if (Array.isArray(_parentage)) {\n arrRemove(_parentage, parent);\n }\n }\n\n /**\n * Removes a finalizer from this subscription that was previously added with the {@link #add} method.\n *\n * Note that `Subscription` instances, when unsubscribed, will automatically remove themselves\n * from every other `Subscription` they have been added to. This means that using the `remove` method\n * is not a common thing and should be used thoughtfully.\n *\n * If you add the same finalizer instance of a function or an unsubscribable object to a `Subscription` instance\n * more than once, you will need to call `remove` the same number of times to remove all instances.\n *\n * All finalizer instances are removed to free up memory upon unsubscription.\n *\n * @param teardown The finalizer to remove from this subscription\n */\n remove(teardown: Exclude): void {\n const { _finalizers } = this;\n _finalizers && arrRemove(_finalizers, teardown);\n\n if (teardown instanceof Subscription) {\n teardown._removeParent(this);\n }\n }\n}\n\nexport const EMPTY_SUBSCRIPTION = Subscription.EMPTY;\n\nexport function isSubscription(value: any): value is Subscription {\n return (\n value instanceof Subscription ||\n (value && 'closed' in value && isFunction(value.remove) && isFunction(value.add) && isFunction(value.unsubscribe))\n );\n}\n\nfunction execFinalizer(finalizer: Unsubscribable | (() => void)) {\n if (isFunction(finalizer)) {\n finalizer();\n } else {\n finalizer.unsubscribe();\n }\n}\n", "import { Subscriber } from './Subscriber';\nimport { ObservableNotification } from './types';\n\n/**\n * The {@link GlobalConfig} object for RxJS. 
It is used to configure things\n * like how to react on unhandled errors.\n */\nexport const config: GlobalConfig = {\n onUnhandledError: null,\n onStoppedNotification: null,\n Promise: undefined,\n useDeprecatedSynchronousErrorHandling: false,\n useDeprecatedNextContext: false,\n};\n\n/**\n * The global configuration object for RxJS, used to configure things\n * like how to react on unhandled errors. Accessible via {@link config}\n * object.\n */\nexport interface GlobalConfig {\n /**\n * A registration point for unhandled errors from RxJS. These are errors that\n * cannot were not handled by consuming code in the usual subscription path. For\n * example, if you have this configured, and you subscribe to an observable without\n * providing an error handler, errors from that subscription will end up here. This\n * will _always_ be called asynchronously on another job in the runtime. This is because\n * we do not want errors thrown in this user-configured handler to interfere with the\n * behavior of the library.\n */\n onUnhandledError: ((err: any) => void) | null;\n\n /**\n * A registration point for notifications that cannot be sent to subscribers because they\n * have completed, errored or have been explicitly unsubscribed. By default, next, complete\n * and error notifications sent to stopped subscribers are noops. However, sometimes callers\n * might want a different behavior. For example, with sources that attempt to report errors\n * to stopped subscribers, a caller can configure RxJS to throw an unhandled error instead.\n * This will _always_ be called asynchronously on another job in the runtime. 
This is because\n * we do not want errors thrown in this user-configured handler to interfere with the\n * behavior of the library.\n */\n onStoppedNotification: ((notification: ObservableNotification, subscriber: Subscriber) => void) | null;\n\n /**\n * The promise constructor used by default for {@link Observable#toPromise toPromise} and {@link Observable#forEach forEach}\n * methods.\n *\n * @deprecated As of version 8, RxJS will no longer support this sort of injection of a\n * Promise constructor. If you need a Promise implementation other than native promises,\n * please polyfill/patch Promise as you see appropriate. Will be removed in v8.\n */\n Promise?: PromiseConstructorLike;\n\n /**\n * If true, turns on synchronous error rethrowing, which is a deprecated behavior\n * in v6 and higher. This behavior enables bad patterns like wrapping a subscribe\n * call in a try/catch block. It also enables producer interference, a nasty bug\n * where a multicast can be broken for all observers by a downstream consumer with\n * an unhandled error. DO NOT USE THIS FLAG UNLESS IT'S NEEDED TO BUY TIME\n * FOR MIGRATION REASONS.\n *\n * @deprecated As of version 8, RxJS will no longer support synchronous throwing\n * of unhandled errors. All errors will be thrown on a separate call stack to prevent bad\n * behaviors described above. 
Will be removed in v8.\n */\n useDeprecatedSynchronousErrorHandling: boolean;\n\n /**\n * If true, enables an as-of-yet undocumented feature from v5: The ability to access\n * `unsubscribe()` via `this` context in `next` functions created in observers passed\n * to `subscribe`.\n *\n * This is being removed because the performance was severely problematic, and it could also cause\n * issues when types other than POJOs are passed to subscribe as subscribers, as they will likely have\n * their `this` context overwritten.\n *\n * @deprecated As of version 8, RxJS will no longer support altering the\n * context of next functions provided as part of an observer to Subscribe. Instead,\n * you will have access to a subscription or a signal or token that will allow you to do things like\n * unsubscribe and test closed status. Will be removed in v8.\n */\n useDeprecatedNextContext: boolean;\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetTimeoutFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearTimeoutFunction = (handle: TimerHandle) => void;\n\ninterface TimeoutProvider {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n delegate:\n | {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n }\n | undefined;\n}\n\nexport const timeoutProvider: TimeoutProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n setTimeout(handler: () => void, timeout?: number, ...args) {\n const { delegate } = timeoutProvider;\n if (delegate?.setTimeout) {\n return delegate.setTimeout(handler, timeout, ...args);\n }\n return setTimeout(handler, timeout, ...args);\n },\n clearTimeout(handle) {\n const { delegate } = timeoutProvider;\n return (delegate?.clearTimeout || clearTimeout)(handle as any);\n },\n delegate: undefined,\n};\n", "import { config } from '../config';\nimport { 
timeoutProvider } from '../scheduler/timeoutProvider';\n\n/**\n * Handles an error on another job either with the user-configured {@link onUnhandledError},\n * or by throwing it on that new job so it can be picked up by `window.onerror`, `process.on('error')`, etc.\n *\n * This should be called whenever there is an error that is out-of-band with the subscription\n * or when an error hits a terminal boundary of the subscription and no error handler was provided.\n *\n * @param err the error to report\n */\nexport function reportUnhandledError(err: any) {\n timeoutProvider.setTimeout(() => {\n const { onUnhandledError } = config;\n if (onUnhandledError) {\n // Execute the user-configured error handler.\n onUnhandledError(err);\n } else {\n // Throw so it is picked up by the runtime's uncaught error mechanism.\n throw err;\n }\n });\n}\n", "/* tslint:disable:no-empty */\nexport function noop() { }\n", "import { CompleteNotification, NextNotification, ErrorNotification } from './types';\n\n/**\n * A completion object optimized for memory use and created to be the\n * same \"shape\" as other notifications in v8.\n * @internal\n */\nexport const COMPLETE_NOTIFICATION = (() => createNotification('C', undefined, undefined) as CompleteNotification)();\n\n/**\n * Internal use only. Creates an optimized error notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function errorNotification(error: any): ErrorNotification {\n return createNotification('E', undefined, error) as any;\n}\n\n/**\n * Internal use only. 
Creates an optimized next notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function nextNotification(value: T) {\n return createNotification('N', value, undefined) as NextNotification;\n}\n\n/**\n * Ensures that all notifications created internally have the same \"shape\" in v8.\n *\n * TODO: This is only exported to support a crazy legacy test in `groupBy`.\n * @internal\n */\nexport function createNotification(kind: 'N' | 'E' | 'C', value: any, error: any) {\n return {\n kind,\n value,\n error,\n };\n}\n", "import { config } from '../config';\n\nlet context: { errorThrown: boolean; error: any } | null = null;\n\n/**\n * Handles dealing with errors for super-gross mode. Creates a context, in which\n * any synchronously thrown errors will be passed to {@link captureError}. Which\n * will record the error such that it will be rethrown after the call back is complete.\n * TODO: Remove in v8\n * @param cb An immediately executed function.\n */\nexport function errorContext(cb: () => void) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n const isRoot = !context;\n if (isRoot) {\n context = { errorThrown: false, error: null };\n }\n cb();\n if (isRoot) {\n const { errorThrown, error } = context!;\n context = null;\n if (errorThrown) {\n throw error;\n }\n }\n } else {\n // This is the general non-deprecated path for everyone that\n // isn't crazy enough to use super-gross mode (useDeprecatedSynchronousErrorHandling)\n cb();\n }\n}\n\n/**\n * Captures errors only in super-gross mode.\n * @param err the error to capture\n */\nexport function captureError(err: any) {\n if (config.useDeprecatedSynchronousErrorHandling && context) {\n context.errorThrown = true;\n context.error = err;\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { Observer, ObservableNotification } from './types';\nimport { isSubscription, Subscription } from './Subscription';\nimport { config } from './config';\nimport { 
reportUnhandledError } from './util/reportUnhandledError';\nimport { noop } from './util/noop';\nimport { nextNotification, errorNotification, COMPLETE_NOTIFICATION } from './NotificationFactories';\nimport { timeoutProvider } from './scheduler/timeoutProvider';\nimport { captureError } from './util/errorContext';\n\n/**\n * Implements the {@link Observer} interface and extends the\n * {@link Subscription} class. While the {@link Observer} is the public API for\n * consuming the values of an {@link Observable}, all Observers get converted to\n * a Subscriber, in order to provide Subscription-like capabilities such as\n * `unsubscribe`. Subscriber is a common type in RxJS, and crucial for\n * implementing operators, but it is rarely used as a public API.\n *\n * @class Subscriber\n */\nexport class Subscriber extends Subscription implements Observer {\n /**\n * A static factory for a Subscriber, given a (potentially partial) definition\n * of an Observer.\n * @param next The `next` callback of an Observer.\n * @param error The `error` callback of an\n * Observer.\n * @param complete The `complete` callback of an\n * Observer.\n * @return A Subscriber wrapping the (partially defined)\n * Observer represented by the given arguments.\n * @nocollapse\n * @deprecated Do not use. Will be removed in v8. There is no replacement for this\n * method, and there is no reason to be creating instances of `Subscriber` directly.\n * If you have a specific use case, please file an issue.\n */\n static create(next?: (x?: T) => void, error?: (e?: any) => void, complete?: () => void): Subscriber {\n return new SafeSubscriber(next, error, complete);\n }\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n protected isStopped: boolean = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. 
*/\n protected destination: Subscriber | Observer; // this `any` is the escape hatch to erase extra type param (e.g. R)\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * There is no reason to directly create an instance of Subscriber. This type is exported for typings reasons.\n */\n constructor(destination?: Subscriber | Observer) {\n super();\n if (destination) {\n this.destination = destination;\n // Automatically chain subscriptions together here.\n // if destination is a Subscription, then it is a Subscriber.\n if (isSubscription(destination)) {\n destination.add(this);\n }\n } else {\n this.destination = EMPTY_OBSERVER;\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `next` from\n * the Observable, with a value. The Observable may call this method 0 or more\n * times.\n * @param {T} [value] The `next` value.\n * @return {void}\n */\n next(value?: T): void {\n if (this.isStopped) {\n handleStoppedNotification(nextNotification(value), this);\n } else {\n this._next(value!);\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `error` from\n * the Observable, with an attached `Error`. Notifies the Observer that\n * the Observable has experienced an error condition.\n * @param {any} [err] The `error` exception.\n * @return {void}\n */\n error(err?: any): void {\n if (this.isStopped) {\n handleStoppedNotification(errorNotification(err), this);\n } else {\n this.isStopped = true;\n this._error(err);\n }\n }\n\n /**\n * The {@link Observer} callback to receive a valueless notification of type\n * `complete` from the Observable. 
Notifies the Observer that the Observable\n * has finished sending push-based notifications.\n * @return {void}\n */\n complete(): void {\n if (this.isStopped) {\n handleStoppedNotification(COMPLETE_NOTIFICATION, this);\n } else {\n this.isStopped = true;\n this._complete();\n }\n }\n\n unsubscribe(): void {\n if (!this.closed) {\n this.isStopped = true;\n super.unsubscribe();\n this.destination = null!;\n }\n }\n\n protected _next(value: T): void {\n this.destination.next(value);\n }\n\n protected _error(err: any): void {\n try {\n this.destination.error(err);\n } finally {\n this.unsubscribe();\n }\n }\n\n protected _complete(): void {\n try {\n this.destination.complete();\n } finally {\n this.unsubscribe();\n }\n }\n}\n\n/**\n * This bind is captured here because we want to be able to have\n * compatibility with monoid libraries that tend to use a method named\n * `bind`. In particular, a library called Monio requires this.\n */\nconst _bind = Function.prototype.bind;\n\nfunction bind any>(fn: Fn, thisArg: any): Fn {\n return _bind.call(fn, thisArg);\n}\n\n/**\n * Internal optimization only, DO NOT EXPOSE.\n * @internal\n */\nclass ConsumerObserver implements Observer {\n constructor(private partialObserver: Partial>) {}\n\n next(value: T): void {\n const { partialObserver } = this;\n if (partialObserver.next) {\n try {\n partialObserver.next(value);\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n\n error(err: any): void {\n const { partialObserver } = this;\n if (partialObserver.error) {\n try {\n partialObserver.error(err);\n } catch (error) {\n handleUnhandledError(error);\n }\n } else {\n handleUnhandledError(err);\n }\n }\n\n complete(): void {\n const { partialObserver } = this;\n if (partialObserver.complete) {\n try {\n partialObserver.complete();\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n}\n\nexport class SafeSubscriber extends Subscriber {\n constructor(\n observerOrNext?: Partial> | ((value: T) => void) | 
null,\n error?: ((e?: any) => void) | null,\n complete?: (() => void) | null\n ) {\n super();\n\n let partialObserver: Partial>;\n if (isFunction(observerOrNext) || !observerOrNext) {\n // The first argument is a function, not an observer. The next\n // two arguments *could* be observers, or they could be empty.\n partialObserver = {\n next: (observerOrNext ?? undefined) as (((value: T) => void) | undefined),\n error: error ?? undefined,\n complete: complete ?? undefined,\n };\n } else {\n // The first argument is a partial observer.\n let context: any;\n if (this && config.useDeprecatedNextContext) {\n // This is a deprecated path that made `this.unsubscribe()` available in\n // next handler functions passed to subscribe. This only exists behind a flag\n // now, as it is *very* slow.\n context = Object.create(observerOrNext);\n context.unsubscribe = () => this.unsubscribe();\n partialObserver = {\n next: observerOrNext.next && bind(observerOrNext.next, context),\n error: observerOrNext.error && bind(observerOrNext.error, context),\n complete: observerOrNext.complete && bind(observerOrNext.complete, context),\n };\n } else {\n // The \"normal\" path. 
Just use the partial observer directly.\n partialObserver = observerOrNext;\n }\n }\n\n // Wrap the partial observer to ensure it's a full observer, and\n // make sure proper error handling is accounted for.\n this.destination = new ConsumerObserver(partialObserver);\n }\n}\n\nfunction handleUnhandledError(error: any) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n captureError(error);\n } else {\n // Ideal path, we report this as an unhandled error,\n // which is thrown on a new call stack.\n reportUnhandledError(error);\n }\n}\n\n/**\n * An error handler used when no error handler was supplied\n * to the SafeSubscriber -- meaning no error handler was supplied\n * do the `subscribe` call on our observable.\n * @param err The error to handle\n */\nfunction defaultErrorHandler(err: any) {\n throw err;\n}\n\n/**\n * A handler for notifications that cannot be sent to a stopped subscriber.\n * @param notification The notification being sent\n * @param subscriber The stopped subscriber\n */\nfunction handleStoppedNotification(notification: ObservableNotification, subscriber: Subscriber) {\n const { onStoppedNotification } = config;\n onStoppedNotification && timeoutProvider.setTimeout(() => onStoppedNotification(notification, subscriber));\n}\n\n/**\n * The observer used as a stub for subscriptions where the user did not\n * pass any arguments to `subscribe`. Comes with the default error handling\n * behavior.\n */\nexport const EMPTY_OBSERVER: Readonly> & { closed: true } = {\n closed: true,\n next: noop,\n error: defaultErrorHandler,\n complete: noop,\n};\n", "/**\n * Symbol.observable or a string \"@@observable\". 
Used for interop\n *\n * @deprecated We will no longer be exporting this symbol in upcoming versions of RxJS.\n * Instead polyfill and use Symbol.observable directly *or* use https://www.npmjs.com/package/symbol-observable\n */\nexport const observable: string | symbol = (() => (typeof Symbol === 'function' && Symbol.observable) || '@@observable')();\n", "/**\n * This function takes one parameter and just returns it. Simply put,\n * this is like `(x: T): T => x`.\n *\n * ## Examples\n *\n * This is useful in some cases when using things like `mergeMap`\n *\n * ```ts\n * import { interval, take, map, range, mergeMap, identity } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(5));\n *\n * const result$ = source$.pipe(\n * map(i => range(i)),\n * mergeMap(identity) // same as mergeMap(x => x)\n * );\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * Or when you want to selectively apply an operator\n *\n * ```ts\n * import { interval, take, identity } from 'rxjs';\n *\n * const shouldLimit = () => Math.random() < 0.5;\n *\n * const source$ = interval(1000);\n *\n * const result$ = source$.pipe(shouldLimit() ? 
take(5) : identity);\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * @param x Any value that is returned by this function\n * @returns The value passed as the first parameter to this function\n */\nexport function identity(x: T): T {\n return x;\n}\n", "import { identity } from './identity';\nimport { UnaryFunction } from '../types';\n\nexport function pipe(): typeof identity;\nexport function pipe(fn1: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction, fn3: UnaryFunction): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction,\n ...fns: UnaryFunction[]\n): 
UnaryFunction;\n\n/**\n * pipe() can be called on one or more functions, each of which can take one argument (\"UnaryFunction\")\n * and uses it to return a value.\n * It returns a function that takes one argument, passes it to the first UnaryFunction, and then\n * passes the result to the next one, passes that result to the next one, and so on. \n */\nexport function pipe(...fns: Array>): UnaryFunction {\n return pipeFromArray(fns);\n}\n\n/** @internal */\nexport function pipeFromArray(fns: Array>): UnaryFunction {\n if (fns.length === 0) {\n return identity as UnaryFunction;\n }\n\n if (fns.length === 1) {\n return fns[0];\n }\n\n return function piped(input: T): R {\n return fns.reduce((prev: any, fn: UnaryFunction) => fn(prev), input as any);\n };\n}\n", "import { Operator } from './Operator';\nimport { SafeSubscriber, Subscriber } from './Subscriber';\nimport { isSubscription, Subscription } from './Subscription';\nimport { TeardownLogic, OperatorFunction, Subscribable, Observer } from './types';\nimport { observable as Symbol_observable } from './symbol/observable';\nimport { pipeFromArray } from './util/pipe';\nimport { config } from './config';\nimport { isFunction } from './util/isFunction';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A representation of any set of values over any amount of time. This is the most basic building block\n * of RxJS.\n *\n * @class Observable\n */\nexport class Observable implements Subscribable {\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n */\n source: Observable | undefined;\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n */\n operator: Operator | undefined;\n\n /**\n * @constructor\n * @param {Function} subscribe the function that is called when the Observable is\n * initially subscribed to. 
This function is given a Subscriber, to which new values\n * can be `next`ed, or an `error` method can be called to raise an error, or\n * `complete` can be called to notify of a successful completion.\n */\n constructor(subscribe?: (this: Observable, subscriber: Subscriber) => TeardownLogic) {\n if (subscribe) {\n this._subscribe = subscribe;\n }\n }\n\n // HACK: Since TypeScript inherits static properties too, we have to\n // fight against TypeScript here so Subject can have a different static create signature\n /**\n * Creates a new Observable by calling the Observable constructor\n * @owner Observable\n * @method create\n * @param {Function} subscribe? the subscriber function to be passed to the Observable constructor\n * @return {Observable} a new observable\n * @nocollapse\n * @deprecated Use `new Observable()` instead. Will be removed in v8.\n */\n static create: (...args: any[]) => any = (subscribe?: (subscriber: Subscriber) => TeardownLogic) => {\n return new Observable(subscribe);\n };\n\n /**\n * Creates a new Observable, with this Observable instance as the source, and the passed\n * operator defined as the new observable's operator.\n * @method lift\n * @param operator the operator defining the operation to take on the observable\n * @return a new observable with the Operator applied\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * If you have implemented an operator using `lift`, it is recommended that you create an\n * operator by simply returning `new Observable()` directly. 
See \"Creating new operators from\n * scratch\" section here: https://rxjs.dev/guide/operators\n */\n lift(operator?: Operator): Observable {\n const observable = new Observable();\n observable.source = this;\n observable.operator = operator;\n return observable;\n }\n\n subscribe(observerOrNext?: Partial> | ((value: T) => void)): Subscription;\n /** @deprecated Instead of passing separate callback arguments, use an observer argument. Signatures taking separate callback arguments will be removed in v8. Details: https://rxjs.dev/deprecations/subscribe-arguments */\n subscribe(next?: ((value: T) => void) | null, error?: ((error: any) => void) | null, complete?: (() => void) | null): Subscription;\n /**\n * Invokes an execution of an Observable and registers Observer handlers for notifications it will emit.\n *\n * Use it when you have all these Observables, but still nothing is happening.\n *\n * `subscribe` is not a regular operator, but a method that calls Observable's internal `subscribe` function. It\n * might be for example a function that you passed to Observable's constructor, but most of the time it is\n * a library implementation, which defines what will be emitted by an Observable, and when it be will emitted. This means\n * that calling `subscribe` is actually the moment when Observable starts its work, not when it is created, as it is often\n * the thought.\n *\n * Apart from starting the execution of an Observable, this method allows you to listen for values\n * that an Observable emits, as well as for when it completes or errors. You can achieve this in two\n * of the following ways.\n *\n * The first way is creating an object that implements {@link Observer} interface. It should have methods\n * defined by that interface, but note that it should be just a regular JavaScript object, which you can create\n * yourself in any way you want (ES6 class, classic function constructor, object literal etc.). 
In particular, do\n * not attempt to use any RxJS implementation details to create Observers - you don't need them. Remember also\n * that your object does not have to implement all methods. If you find yourself creating a method that doesn't\n * do anything, you can simply omit it. Note however, if the `error` method is not provided and an error happens,\n * it will be thrown asynchronously. Errors thrown asynchronously cannot be caught using `try`/`catch`. Instead,\n * use the {@link onUnhandledError} configuration option or use a runtime handler (like `window.onerror` or\n * `process.on('error)`) to be notified of unhandled errors. Because of this, it's recommended that you provide\n * an `error` method to avoid missing thrown errors.\n *\n * The second way is to give up on Observer object altogether and simply provide callback functions in place of its methods.\n * This means you can provide three functions as arguments to `subscribe`, where the first function is equivalent\n * of a `next` method, the second of an `error` method and the third of a `complete` method. Just as in case of an Observer,\n * if you do not need to listen for something, you can omit a function by passing `undefined` or `null`,\n * since `subscribe` recognizes these functions by where they were placed in function call. When it comes\n * to the `error` function, as with an Observer, if not provided, errors emitted by an Observable will be thrown asynchronously.\n *\n * You can, however, subscribe with no parameters at all. This may be the case where you're not interested in terminal events\n * and you also handled emissions internally by using operators (e.g. using `tap`).\n *\n * Whichever style of calling `subscribe` you use, in both cases it returns a Subscription object.\n * This object allows you to call `unsubscribe` on it, which in turn will stop the work that an Observable does and will clean\n * up all resources that an Observable used. 
Note that cancelling a subscription will not call `complete` callback\n * provided to `subscribe` function, which is reserved for a regular completion signal that comes from an Observable.\n *\n * Remember that callbacks provided to `subscribe` are not guaranteed to be called asynchronously.\n * It is an Observable itself that decides when these functions will be called. For example {@link of}\n * by default emits all its values synchronously. Always check documentation for how given Observable\n * will behave when subscribed and if its default behavior can be modified with a `scheduler`.\n *\n * #### Examples\n *\n * Subscribe with an {@link guide/observer Observer}\n *\n * ```ts\n * import { of } from 'rxjs';\n *\n * const sumObserver = {\n * sum: 0,\n * next(value) {\n * console.log('Adding: ' + value);\n * this.sum = this.sum + value;\n * },\n * error() {\n * // We actually could just remove this method,\n * // since we do not really care about errors right now.\n * },\n * complete() {\n * console.log('Sum equals: ' + this.sum);\n * }\n * };\n *\n * of(1, 2, 3) // Synchronously emits 1, 2, 3 and then completes.\n * .subscribe(sumObserver);\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Subscribe with functions ({@link deprecations/subscribe-arguments deprecated})\n *\n * ```ts\n * import { of } from 'rxjs'\n *\n * let sum = 0;\n *\n * of(1, 2, 3).subscribe(\n * value => {\n * console.log('Adding: ' + value);\n * sum = sum + value;\n * },\n * undefined,\n * () => console.log('Sum equals: ' + sum)\n * );\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Cancel a subscription\n *\n * ```ts\n * import { interval } from 'rxjs';\n *\n * const subscription = interval(1000).subscribe({\n * next(num) {\n * console.log(num)\n * },\n * complete() {\n * // Will not be called, even when cancelling subscription.\n * console.log('completed!');\n * 
}\n * });\n *\n * setTimeout(() => {\n * subscription.unsubscribe();\n * console.log('unsubscribed!');\n * }, 2500);\n *\n * // Logs:\n * // 0 after 1s\n * // 1 after 2s\n * // 'unsubscribed!' after 2.5s\n * ```\n *\n * @param {Observer|Function} observerOrNext (optional) Either an observer with methods to be called,\n * or the first of three possible handlers, which is the handler for each value emitted from the subscribed\n * Observable.\n * @param {Function} error (optional) A handler for a terminal event resulting from an error. If no error handler is provided,\n * the error will be thrown asynchronously as unhandled.\n * @param {Function} complete (optional) A handler for a terminal event resulting from successful completion.\n * @return {Subscription} a subscription reference to the registered handlers\n * @method subscribe\n */\n subscribe(\n observerOrNext?: Partial> | ((value: T) => void) | null,\n error?: ((error: any) => void) | null,\n complete?: (() => void) | null\n ): Subscription {\n const subscriber = isSubscriber(observerOrNext) ? observerOrNext : new SafeSubscriber(observerOrNext, error, complete);\n\n errorContext(() => {\n const { operator, source } = this;\n subscriber.add(\n operator\n ? // We're dealing with a subscription in the\n // operator chain to one of our lifted operators.\n operator.call(subscriber, source)\n : source\n ? // If `source` has a value, but `operator` does not, something that\n // had intimate knowledge of our API, like our `Subject`, must have\n // set it. 
We're going to just call `_subscribe` directly.\n this._subscribe(subscriber)\n : // In all other cases, we're likely wrapping a user-provided initializer\n // function, so we need to catch errors and handle them appropriately.\n this._trySubscribe(subscriber)\n );\n });\n\n return subscriber;\n }\n\n /** @internal */\n protected _trySubscribe(sink: Subscriber): TeardownLogic {\n try {\n return this._subscribe(sink);\n } catch (err) {\n // We don't need to return anything in this case,\n // because it's just going to try to `add()` to a subscription\n // above.\n sink.error(err);\n }\n }\n\n /**\n * Used as a NON-CANCELLABLE means of subscribing to an observable, for use with\n * APIs that expect promises, like `async/await`. You cannot unsubscribe from this.\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. 
To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * #### Example\n *\n * ```ts\n * import { interval, take } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(4));\n *\n * async function getTotal() {\n * let total = 0;\n *\n * await source$.forEach(value => {\n * total += value;\n * console.log('observable -> ' + value);\n * });\n *\n * return total;\n * }\n *\n * getTotal().then(\n * total => console.log('Total: ' + total)\n * );\n *\n * // Expected:\n * // 'observable -> 0'\n * // 'observable -> 1'\n * // 'observable -> 2'\n * // 'observable -> 3'\n * // 'Total: 6'\n * ```\n *\n * @param next a handler for each value emitted by the observable\n * @return a promise that either resolves on observable completion or\n * rejects with the handled error\n */\n forEach(next: (value: T) => void): Promise;\n\n /**\n * @param next a handler for each value emitted by the observable\n * @param promiseCtor a constructor function used to instantiate the Promise\n * @return a promise that either resolves on observable completion or\n * rejects with the handled error\n * @deprecated Passing a Promise constructor will no longer be available\n * in upcoming versions of RxJS. This is because it adds weight to the library, for very\n * little benefit. If you need this functionality, it is recommended that you either\n * polyfill Promise, or you create an adapter to convert the returned native promise\n * to whatever promise implementation you wanted. 
Will be removed in v8.\n */\n forEach(next: (value: T) => void, promiseCtor: PromiseConstructorLike): Promise;\n\n forEach(next: (value: T) => void, promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n const subscriber = new SafeSubscriber({\n next: (value) => {\n try {\n next(value);\n } catch (err) {\n reject(err);\n subscriber.unsubscribe();\n }\n },\n error: reject,\n complete: resolve,\n });\n this.subscribe(subscriber);\n }) as Promise;\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): TeardownLogic {\n return this.source?.subscribe(subscriber);\n }\n\n /**\n * An interop point defined by the es7-observable spec https://github.com/zenparsing/es-observable\n * @method Symbol.observable\n * @return {Observable} this instance of the observable\n */\n [Symbol_observable]() {\n return this;\n }\n\n /* tslint:disable:max-line-length */\n pipe(): Observable;\n pipe(op1: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction, op3: OperatorFunction): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: 
OperatorFunction,\n op8: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction,\n ...operations: OperatorFunction[]\n ): Observable;\n /* tslint:enable:max-line-length */\n\n /**\n * Used to stitch together functional operators into a chain.\n * @method pipe\n * @return {Observable} the Observable result of all of the operators having\n * been called in the order they were passed in.\n *\n * ## Example\n *\n * ```ts\n * import { interval, filter, map, scan } from 'rxjs';\n *\n * interval(1000)\n * .pipe(\n * filter(x => x % 2 === 0),\n * map(x => x + x),\n * scan((acc, x) => acc + x)\n * )\n * .subscribe(x => console.log(x));\n * ```\n */\n pipe(...operations: OperatorFunction[]): Observable {\n return pipeFromArray(operations)(this);\n }\n\n /* tslint:disable:max-line-length */\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(PromiseCtor: typeof Promise): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. 
Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(PromiseCtor: PromiseConstructorLike): Promise;\n /* tslint:enable:max-line-length */\n\n /**\n * Subscribe to this Observable and get a Promise resolving on\n * `complete` with the last emission (if any).\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * @method toPromise\n * @param [promiseCtor] a constructor function used to instantiate\n * the Promise\n * @return A Promise that resolves with the last value emit, or\n * rejects on an error. If there were no emissions, Promise\n * resolves with undefined.\n * @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise\n */\n toPromise(promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n let value: T | undefined;\n this.subscribe(\n (x: T) => (value = x),\n (err: any) => reject(err),\n () => resolve(value)\n );\n }) as Promise;\n }\n}\n\n/**\n * Decides between a passed promise constructor from consuming code,\n * A default configured promise constructor, and the native promise\n * constructor and returns it. If nothing can be found, it will throw\n * an error.\n * @param promiseCtor The optional promise constructor to passed by consuming code\n */\nfunction getPromiseCtor(promiseCtor: PromiseConstructorLike | undefined) {\n return promiseCtor ?? config.Promise ?? 
Promise;\n}\n\nfunction isObserver(value: any): value is Observer {\n return value && isFunction(value.next) && isFunction(value.error) && isFunction(value.complete);\n}\n\nfunction isSubscriber(value: any): value is Subscriber {\n return (value && value instanceof Subscriber) || (isObserver(value) && isSubscription(value));\n}\n", "import { Observable } from '../Observable';\nimport { Subscriber } from '../Subscriber';\nimport { OperatorFunction } from '../types';\nimport { isFunction } from './isFunction';\n\n/**\n * Used to determine if an object is an Observable with a lift function.\n */\nexport function hasLift(source: any): source is { lift: InstanceType['lift'] } {\n return isFunction(source?.lift);\n}\n\n/**\n * Creates an `OperatorFunction`. Used to define operators throughout the library in a concise way.\n * @param init The logic to connect the liftedSource to the subscriber at the moment of subscription.\n */\nexport function operate(\n init: (liftedSource: Observable, subscriber: Subscriber) => (() => void) | void\n): OperatorFunction {\n return (source: Observable) => {\n if (hasLift(source)) {\n return source.lift(function (this: Subscriber, liftedSource: Observable) {\n try {\n return init(liftedSource, this);\n } catch (err) {\n this.error(err);\n }\n });\n }\n throw new TypeError('Unable to lift unknown Observable type');\n };\n}\n", "import { Subscriber } from '../Subscriber';\n\n/**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and send to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. 
Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional teardown logic here. This will only be called on teardown if the\n * subscriber itself is not already closed. This is called after all other teardown logic is executed.\n */\nexport function createOperatorSubscriber(\n destination: Subscriber,\n onNext?: (value: T) => void,\n onComplete?: () => void,\n onError?: (err: any) => void,\n onFinalize?: () => void\n): Subscriber {\n return new OperatorSubscriber(destination, onNext, onComplete, onError, onFinalize);\n}\n\n/**\n * A generic helper for allowing operators to be created with a Subscriber and\n * use closures to capture necessary state from the operator function itself.\n */\nexport class OperatorSubscriber extends Subscriber {\n /**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and send to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional finalization logic here. This will only be called on finalization if the\n * subscriber itself is not already closed. 
This is called after all other finalization logic is executed.\n * @param shouldUnsubscribe An optional check to see if an unsubscribe call should truly unsubscribe.\n * NOTE: This currently **ONLY** exists to support the strange behavior of {@link groupBy}, where unsubscription\n * to the resulting observable does not actually disconnect from the source if there are active subscriptions\n * to any grouped observable. (DO NOT EXPOSE OR USE EXTERNALLY!!!)\n */\n constructor(\n destination: Subscriber,\n onNext?: (value: T) => void,\n onComplete?: () => void,\n onError?: (err: any) => void,\n private onFinalize?: () => void,\n private shouldUnsubscribe?: () => boolean\n ) {\n // It's important - for performance reasons - that all of this class's\n // members are initialized and that they are always initialized in the same\n // order. This will ensure that all OperatorSubscriber instances have the\n // same hidden class in V8. This, in turn, will help keep the number of\n // hidden classes involved in property accesses within the base class as\n // low as possible. If the number of hidden classes involved exceeds four,\n // the property accesses will become megamorphic and performance penalties\n // will be incurred - i.e. inline caches won't be used.\n //\n // The reasons for ensuring all instances have the same hidden class are\n // further discussed in this blog post from Benedikt Meurer:\n // https://benediktmeurer.de/2018/03/23/impact-of-polymorphism-on-component-based-frameworks-like-react/\n super(destination);\n this._next = onNext\n ? function (this: OperatorSubscriber, value: T) {\n try {\n onNext(value);\n } catch (err) {\n destination.error(err);\n }\n }\n : super._next;\n this._error = onError\n ? 
function (this: OperatorSubscriber, err: any) {\n try {\n onError(err);\n } catch (err) {\n // Send any errors that occur down stream.\n destination.error(err);\n } finally {\n // Ensure finalization.\n this.unsubscribe();\n }\n }\n : super._error;\n this._complete = onComplete\n ? function (this: OperatorSubscriber) {\n try {\n onComplete();\n } catch (err) {\n // Send any errors that occur down stream.\n destination.error(err);\n } finally {\n // Ensure finalization.\n this.unsubscribe();\n }\n }\n : super._complete;\n }\n\n unsubscribe() {\n if (!this.shouldUnsubscribe || this.shouldUnsubscribe()) {\n const { closed } = this;\n super.unsubscribe();\n // Execute additional teardown if we have any and we didn't already do so.\n !closed && this.onFinalize?.();\n }\n }\n}\n", "import { Subscription } from '../Subscription';\n\ninterface AnimationFrameProvider {\n schedule(callback: FrameRequestCallback): Subscription;\n requestAnimationFrame: typeof requestAnimationFrame;\n cancelAnimationFrame: typeof cancelAnimationFrame;\n delegate:\n | {\n requestAnimationFrame: typeof requestAnimationFrame;\n cancelAnimationFrame: typeof cancelAnimationFrame;\n }\n | undefined;\n}\n\nexport const animationFrameProvider: AnimationFrameProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n schedule(callback) {\n let request = requestAnimationFrame;\n let cancel: typeof cancelAnimationFrame | undefined = cancelAnimationFrame;\n const { delegate } = animationFrameProvider;\n if (delegate) {\n request = delegate.requestAnimationFrame;\n cancel = delegate.cancelAnimationFrame;\n }\n const handle = request((timestamp) => {\n // Clear the cancel function. 
The request has been fulfilled, so\n // attempting to cancel the request upon unsubscription would be\n // pointless.\n cancel = undefined;\n callback(timestamp);\n });\n return new Subscription(() => cancel?.(handle));\n },\n requestAnimationFrame(...args) {\n const { delegate } = animationFrameProvider;\n return (delegate?.requestAnimationFrame || requestAnimationFrame)(...args);\n },\n cancelAnimationFrame(...args) {\n const { delegate } = animationFrameProvider;\n return (delegate?.cancelAnimationFrame || cancelAnimationFrame)(...args);\n },\n delegate: undefined,\n};\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface ObjectUnsubscribedError extends Error {}\n\nexport interface ObjectUnsubscribedErrorCtor {\n /**\n * @deprecated Internal implementation detail. Do not construct error instances.\n * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n */\n new (): ObjectUnsubscribedError;\n}\n\n/**\n * An error thrown when an action is invalid because the object has been\n * unsubscribed.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n *\n * @class ObjectUnsubscribedError\n */\nexport const ObjectUnsubscribedError: ObjectUnsubscribedErrorCtor = createErrorClass(\n (_super) =>\n function ObjectUnsubscribedErrorImpl(this: any) {\n _super(this);\n this.name = 'ObjectUnsubscribedError';\n this.message = 'object unsubscribed';\n }\n);\n", "import { Operator } from './Operator';\nimport { Observable } from './Observable';\nimport { Subscriber } from './Subscriber';\nimport { Subscription, EMPTY_SUBSCRIPTION } from './Subscription';\nimport { Observer, SubscriptionLike, TeardownLogic } from './types';\nimport { ObjectUnsubscribedError } from './util/ObjectUnsubscribedError';\nimport { arrRemove } from './util/arrRemove';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A Subject is a special type of Observable that allows values to be\n * multicasted to many Observers. 
Subjects are like EventEmitters.\n *\n * Every Subject is an Observable and an Observer. You can subscribe to a\n * Subject, and you can call next to feed values as well as error and complete.\n */\nexport class Subject extends Observable implements SubscriptionLike {\n closed = false;\n\n private currentObservers: Observer[] | null = null;\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n observers: Observer[] = [];\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n isStopped = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n hasError = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n thrownError: any = null;\n\n /**\n * Creates a \"subject\" by basically gluing an observer to an observable.\n *\n * @nocollapse\n * @deprecated Recommended you do not use. Will be removed at some point in the future. Plans for replacement still under discussion.\n */\n static create: (...args: any[]) => any = (destination: Observer, source: Observable): AnonymousSubject => {\n return new AnonymousSubject(destination, source);\n };\n\n constructor() {\n // NOTE: This must be here to obscure Observable's constructor.\n super();\n }\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. 
*/\n lift(operator: Operator): Observable {\n const subject = new AnonymousSubject(this, this);\n subject.operator = operator as any;\n return subject as any;\n }\n\n /** @internal */\n protected _throwIfClosed() {\n if (this.closed) {\n throw new ObjectUnsubscribedError();\n }\n }\n\n next(value: T) {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n if (!this.currentObservers) {\n this.currentObservers = Array.from(this.observers);\n }\n for (const observer of this.currentObservers) {\n observer.next(value);\n }\n }\n });\n }\n\n error(err: any) {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n this.hasError = this.isStopped = true;\n this.thrownError = err;\n const { observers } = this;\n while (observers.length) {\n observers.shift()!.error(err);\n }\n }\n });\n }\n\n complete() {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n this.isStopped = true;\n const { observers } = this;\n while (observers.length) {\n observers.shift()!.complete();\n }\n }\n });\n }\n\n unsubscribe() {\n this.isStopped = this.closed = true;\n this.observers = this.currentObservers = null!;\n }\n\n get observed() {\n return this.observers?.length > 0;\n }\n\n /** @internal */\n protected _trySubscribe(subscriber: Subscriber): TeardownLogic {\n this._throwIfClosed();\n return super._trySubscribe(subscriber);\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n this._throwIfClosed();\n this._checkFinalizedStatuses(subscriber);\n return this._innerSubscribe(subscriber);\n }\n\n /** @internal */\n protected _innerSubscribe(subscriber: Subscriber) {\n const { hasError, isStopped, observers } = this;\n if (hasError || isStopped) {\n return EMPTY_SUBSCRIPTION;\n }\n this.currentObservers = null;\n observers.push(subscriber);\n return new Subscription(() => {\n this.currentObservers = null;\n arrRemove(observers, subscriber);\n });\n }\n\n /** @internal */\n protected 
_checkFinalizedStatuses(subscriber: Subscriber) {\n const { hasError, thrownError, isStopped } = this;\n if (hasError) {\n subscriber.error(thrownError);\n } else if (isStopped) {\n subscriber.complete();\n }\n }\n\n /**\n * Creates a new Observable with this Subject as the source. You can do this\n * to create custom Observer-side logic of the Subject and conceal it from\n * code that uses the Observable.\n * @return {Observable} Observable that the Subject casts to\n */\n asObservable(): Observable {\n const observable: any = new Observable();\n observable.source = this;\n return observable;\n }\n}\n\n/**\n * @class AnonymousSubject\n */\nexport class AnonymousSubject extends Subject {\n constructor(\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n public destination?: Observer,\n source?: Observable\n ) {\n super();\n this.source = source;\n }\n\n next(value: T) {\n this.destination?.next?.(value);\n }\n\n error(err: any) {\n this.destination?.error?.(err);\n }\n\n complete() {\n this.destination?.complete?.();\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n return this.source?.subscribe(subscriber) ?? 
EMPTY_SUBSCRIPTION;\n }\n}\n", "import { TimestampProvider } from '../types';\n\ninterface DateTimestampProvider extends TimestampProvider {\n delegate: TimestampProvider | undefined;\n}\n\nexport const dateTimestampProvider: DateTimestampProvider = {\n now() {\n // Use the variable rather than `this` so that the function can be called\n // without being bound to the provider.\n return (dateTimestampProvider.delegate || Date).now();\n },\n delegate: undefined,\n};\n", "import { Subject } from './Subject';\nimport { TimestampProvider } from './types';\nimport { Subscriber } from './Subscriber';\nimport { Subscription } from './Subscription';\nimport { dateTimestampProvider } from './scheduler/dateTimestampProvider';\n\n/**\n * A variant of {@link Subject} that \"replays\" old values to new subscribers by emitting them when they first subscribe.\n *\n * `ReplaySubject` has an internal buffer that will store a specified number of values that it has observed. Like `Subject`,\n * `ReplaySubject` \"observes\" values by having them passed to its `next` method. When it observes a value, it will store that\n * value for a time determined by the configuration of the `ReplaySubject`, as passed to its constructor.\n *\n * When a new subscriber subscribes to the `ReplaySubject` instance, it will synchronously emit all values in its buffer in\n * a First-In-First-Out (FIFO) manner. The `ReplaySubject` will also complete, if it has observed completion; and it will\n * error if it has observed an error.\n *\n * There are two main configuration items to be concerned with:\n *\n * 1. `bufferSize` - This will determine how many items are stored in the buffer, defaults to infinite.\n * 2. `windowTime` - The amount of time to hold a value in the buffer before removing it from the buffer.\n *\n * Both configurations may exist simultaneously. 
So if you would like to buffer a maximum of 3 values, as long as the values\n * are less than 2 seconds old, you could do so with a `new ReplaySubject(3, 2000)`.\n *\n * ### Differences with BehaviorSubject\n *\n * `BehaviorSubject` is similar to `new ReplaySubject(1)`, with a couple of exceptions:\n *\n * 1. `BehaviorSubject` comes \"primed\" with a single value upon construction.\n * 2. `ReplaySubject` will replay values, even after observing an error, where `BehaviorSubject` will not.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n * @see {@link shareReplay}\n */\nexport class ReplaySubject extends Subject {\n private _buffer: (T | number)[] = [];\n private _infiniteTimeWindow = true;\n\n /**\n * @param bufferSize The size of the buffer to replay on subscription\n * @param windowTime The amount of time the buffered items will stay buffered\n * @param timestampProvider An object with a `now()` method that provides the current timestamp. This is used to\n * calculate the amount of time something has been buffered.\n */\n constructor(\n private _bufferSize = Infinity,\n private _windowTime = Infinity,\n private _timestampProvider: TimestampProvider = dateTimestampProvider\n ) {\n super();\n this._infiniteTimeWindow = _windowTime === Infinity;\n this._bufferSize = Math.max(1, _bufferSize);\n this._windowTime = Math.max(1, _windowTime);\n }\n\n next(value: T): void {\n const { isStopped, _buffer, _infiniteTimeWindow, _timestampProvider, _windowTime } = this;\n if (!isStopped) {\n _buffer.push(value);\n !_infiniteTimeWindow && _buffer.push(_timestampProvider.now() + _windowTime);\n }\n this._trimBuffer();\n super.next(value);\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n this._throwIfClosed();\n this._trimBuffer();\n\n const subscription = this._innerSubscribe(subscriber);\n\n const { _infiniteTimeWindow, _buffer } = this;\n // We use a copy here, so reentrant code does not mutate our array while we're\n // 
emitting it to a new subscriber.\n const copy = _buffer.slice();\n for (let i = 0; i < copy.length && !subscriber.closed; i += _infiniteTimeWindow ? 1 : 2) {\n subscriber.next(copy[i] as T);\n }\n\n this._checkFinalizedStatuses(subscriber);\n\n return subscription;\n }\n\n private _trimBuffer() {\n const { _bufferSize, _timestampProvider, _buffer, _infiniteTimeWindow } = this;\n // If we don't have an infinite buffer size, and we're over the length,\n // use splice to truncate the old buffer values off. Note that we have to\n // double the size for instances where we're not using an infinite time window\n // because we're storing the values and the timestamps in the same array.\n const adjustedBufferSize = (_infiniteTimeWindow ? 1 : 2) * _bufferSize;\n _bufferSize < Infinity && adjustedBufferSize < _buffer.length && _buffer.splice(0, _buffer.length - adjustedBufferSize);\n\n // Now, if we're not in an infinite time window, remove all values where the time is\n // older than what is allowed.\n if (!_infiniteTimeWindow) {\n const now = _timestampProvider.now();\n let last = 0;\n // Search the array for the first timestamp that isn't expired and\n // truncate the buffer up to that point.\n for (let i = 1; i < _buffer.length && (_buffer[i] as number) <= now; i += 2) {\n last = i;\n }\n last && _buffer.splice(0, last + 1);\n }\n }\n}\n", "import { Scheduler } from '../Scheduler';\nimport { Subscription } from '../Subscription';\nimport { SchedulerAction } from '../types';\n\n/**\n * A unit of work to be executed in a `scheduler`. 
An action is typically\n * created from within a {@link SchedulerLike} and an RxJS user does not need to concern\n * themselves about creating and manipulating an Action.\n *\n * ```ts\n * class Action extends Subscription {\n * new (scheduler: Scheduler, work: (state?: T) => void);\n * schedule(state?: T, delay: number = 0): Subscription;\n * }\n * ```\n *\n * @class Action\n */\nexport class Action extends Subscription {\n constructor(scheduler: Scheduler, work: (this: SchedulerAction, state?: T) => void) {\n super();\n }\n /**\n * Schedules this action on its parent {@link SchedulerLike} for execution. May be passed\n * some context object, `state`. May happen at some point in the future,\n * according to the `delay` parameter, if specified.\n * @param {T} [state] Some contextual data that the `work` function uses when\n * called by the Scheduler.\n * @param {number} [delay] Time to wait before executing the work, where the\n * time unit is implicit and defined by the Scheduler.\n * @return {void}\n */\n public schedule(state?: T, delay: number = 0): Subscription {\n return this;\n }\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetIntervalFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearIntervalFunction = (handle: TimerHandle) => void;\n\ninterface IntervalProvider {\n setInterval: SetIntervalFunction;\n clearInterval: ClearIntervalFunction;\n delegate:\n | {\n setInterval: SetIntervalFunction;\n clearInterval: ClearIntervalFunction;\n }\n | undefined;\n}\n\nexport const intervalProvider: IntervalProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n setInterval(handler: () => void, timeout?: number, ...args) {\n const { delegate } = intervalProvider;\n if (delegate?.setInterval) {\n return delegate.setInterval(handler, timeout, ...args);\n }\n return setInterval(handler, timeout, ...args);\n 
},\n clearInterval(handle) {\n const { delegate } = intervalProvider;\n return (delegate?.clearInterval || clearInterval)(handle as any);\n },\n delegate: undefined,\n};\n", "import { Action } from './Action';\nimport { SchedulerAction } from '../types';\nimport { Subscription } from '../Subscription';\nimport { AsyncScheduler } from './AsyncScheduler';\nimport { intervalProvider } from './intervalProvider';\nimport { arrRemove } from '../util/arrRemove';\nimport { TimerHandle } from './timerHandle';\n\nexport class AsyncAction extends Action {\n public id: TimerHandle | undefined;\n public state?: T;\n // @ts-ignore: Property has no initializer and is not definitely assigned\n public delay: number;\n protected pending: boolean = false;\n\n constructor(protected scheduler: AsyncScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n public schedule(state?: T, delay: number = 0): Subscription {\n if (this.closed) {\n return this;\n }\n\n // Always replace the current state with the new state.\n this.state = state;\n\n const id = this.id;\n const scheduler = this.scheduler;\n\n //\n // Important implementation note:\n //\n // Actions only execute once by default, unless rescheduled from within the\n // scheduled callback. This allows us to implement single and repeat\n // actions via the same code path, without adding API surface area, as well\n // as mimic traditional recursion but across asynchronous boundaries.\n //\n // However, JS runtimes and timers distinguish between intervals achieved by\n // serial `setTimeout` calls vs. a single `setInterval` call. An interval of\n // serial `setTimeout` calls can be individually delayed, which delays\n // scheduling the next `setTimeout`, and so on. 
`setInterval` attempts to\n // guarantee the interval callback will be invoked more precisely to the\n // interval period, regardless of load.\n //\n // Therefore, we use `setInterval` to schedule single and repeat actions.\n // If the action reschedules itself with the same delay, the interval is not\n // canceled. If the action doesn't reschedule, or reschedules with a\n // different delay, the interval will be canceled after scheduled callback\n // execution.\n //\n if (id != null) {\n this.id = this.recycleAsyncId(scheduler, id, delay);\n }\n\n // Set the pending flag indicating that this action has been scheduled, or\n // has recursively rescheduled itself.\n this.pending = true;\n\n this.delay = delay;\n // If this action has already an async Id, don't request a new one.\n this.id = this.id ?? this.requestAsyncId(scheduler, this.id, delay);\n\n return this;\n }\n\n protected requestAsyncId(scheduler: AsyncScheduler, _id?: TimerHandle, delay: number = 0): TimerHandle {\n return intervalProvider.setInterval(scheduler.flush.bind(scheduler, this), delay);\n }\n\n protected recycleAsyncId(_scheduler: AsyncScheduler, id?: TimerHandle, delay: number | null = 0): TimerHandle | undefined {\n // If this action is rescheduled with the same delay time, don't clear the interval id.\n if (delay != null && this.delay === delay && this.pending === false) {\n return id;\n }\n // Otherwise, if the action's delay time is different from the current delay,\n // or the action has been rescheduled before it's executed, clear the interval id\n if (id != null) {\n intervalProvider.clearInterval(id);\n }\n\n return undefined;\n }\n\n /**\n * Immediately executes this action and the `work` it contains.\n * @return {any}\n */\n public execute(state: T, delay: number): any {\n if (this.closed) {\n return new Error('executing a cancelled action');\n }\n\n this.pending = false;\n const error = this._execute(state, delay);\n if (error) {\n return error;\n } else if (this.pending === false 
&& this.id != null) {\n // Dequeue if the action didn't reschedule itself. Don't call\n // unsubscribe(), because the action could reschedule later.\n // For example:\n // ```\n // scheduler.schedule(function doWork(counter) {\n // /* ... I'm a busy worker bee ... */\n // var originalAction = this;\n // /* wait 100ms before rescheduling the action */\n // setTimeout(function () {\n // originalAction.schedule(counter + 1);\n // }, 100);\n // }, 1000);\n // ```\n this.id = this.recycleAsyncId(this.scheduler, this.id, null);\n }\n }\n\n protected _execute(state: T, _delay: number): any {\n let errored: boolean = false;\n let errorValue: any;\n try {\n this.work(state);\n } catch (e) {\n errored = true;\n // HACK: Since code elsewhere is relying on the \"truthiness\" of the\n // return here, we can't have it return \"\" or 0 or false.\n // TODO: Clean this up when we refactor schedulers mid-version-8 or so.\n errorValue = e ? e : new Error('Scheduled action threw falsy error');\n }\n if (errored) {\n this.unsubscribe();\n return errorValue;\n }\n }\n\n unsubscribe() {\n if (!this.closed) {\n const { id, scheduler } = this;\n const { actions } = scheduler;\n\n this.work = this.state = this.scheduler = null!;\n this.pending = false;\n\n arrRemove(actions, this);\n if (id != null) {\n this.id = this.recycleAsyncId(scheduler, id, null);\n }\n\n this.delay = null!;\n super.unsubscribe();\n }\n }\n}\n", "import { Action } from './scheduler/Action';\nimport { Subscription } from './Subscription';\nimport { SchedulerLike, SchedulerAction } from './types';\nimport { dateTimestampProvider } from './scheduler/dateTimestampProvider';\n\n/**\n * An execution context and a data structure to order tasks and schedule their\n * execution. 
Provides a notion of (potentially virtual) time, through the\n * `now()` getter method.\n *\n * Each unit of work in a Scheduler is called an `Action`.\n *\n * ```ts\n * class Scheduler {\n * now(): number;\n * schedule(work, delay?, state?): Subscription;\n * }\n * ```\n *\n * @class Scheduler\n * @deprecated Scheduler is an internal implementation detail of RxJS, and\n * should not be used directly. Rather, create your own class and implement\n * {@link SchedulerLike}. Will be made internal in v8.\n */\nexport class Scheduler implements SchedulerLike {\n public static now: () => number = dateTimestampProvider.now;\n\n constructor(private schedulerActionCtor: typeof Action, now: () => number = Scheduler.now) {\n this.now = now;\n }\n\n /**\n * A getter method that returns a number representing the current time\n * (at the time this function was called) according to the scheduler's own\n * internal clock.\n * @return {number} A number that represents the current time. May or may not\n * have a relation to wall-clock time. May or may not refer to a time unit\n * (e.g. milliseconds).\n */\n public now: () => number;\n\n /**\n * Schedules a function, `work`, for execution. May happen at some point in\n * the future, according to the `delay` parameter, if specified. 
May be passed\n * some context object, `state`, which will be passed to the `work` function.\n *\n * The given arguments will be processed an stored as an Action object in a\n * queue of actions.\n *\n * @param {function(state: ?T): ?Subscription} work A function representing a\n * task, or some unit of work to be executed by the Scheduler.\n * @param {number} [delay] Time to wait before executing the work, where the\n * time unit is implicit and defined by the Scheduler itself.\n * @param {T} [state] Some contextual data that the `work` function uses when\n * called by the Scheduler.\n * @return {Subscription} A subscription in order to be able to unsubscribe\n * the scheduled work.\n */\n public schedule(work: (this: SchedulerAction, state?: T) => void, delay: number = 0, state?: T): Subscription {\n return new this.schedulerActionCtor(this, work).schedule(state, delay);\n }\n}\n", "import { Scheduler } from '../Scheduler';\nimport { Action } from './Action';\nimport { AsyncAction } from './AsyncAction';\nimport { TimerHandle } from './timerHandle';\n\nexport class AsyncScheduler extends Scheduler {\n public actions: Array> = [];\n /**\n * A flag to indicate whether the Scheduler is currently executing a batch of\n * queued actions.\n * @type {boolean}\n * @internal\n */\n public _active: boolean = false;\n /**\n * An internal ID used to track the latest asynchronous task such as those\n * coming from `setTimeout`, `setInterval`, `requestAnimationFrame`, and\n * others.\n * @type {any}\n * @internal\n */\n public _scheduled: TimerHandle | undefined;\n\n constructor(SchedulerAction: typeof Action, now: () => number = Scheduler.now) {\n super(SchedulerAction, now);\n }\n\n public flush(action: AsyncAction): void {\n const { actions } = this;\n\n if (this._active) {\n actions.push(action);\n return;\n }\n\n let error: any;\n this._active = true;\n\n do {\n if ((error = action.execute(action.state, action.delay))) {\n break;\n }\n } while ((action = 
actions.shift()!)); // exhaust the scheduler queue\n\n this._active = false;\n\n if (error) {\n while ((action = actions.shift()!)) {\n action.unsubscribe();\n }\n throw error;\n }\n }\n}\n", "import { AsyncAction } from './AsyncAction';\nimport { AsyncScheduler } from './AsyncScheduler';\n\n/**\n *\n * Async Scheduler\n *\n * Schedule task as if you used setTimeout(task, duration)\n *\n * `async` scheduler schedules tasks asynchronously, by putting them on the JavaScript\n * event loop queue. It is best used to delay tasks in time or to schedule tasks repeating\n * in intervals.\n *\n * If you just want to \"defer\" task, that is to perform it right after currently\n * executing synchronous code ends (commonly achieved by `setTimeout(deferredTask, 0)`),\n * better choice will be the {@link asapScheduler} scheduler.\n *\n * ## Examples\n * Use async scheduler to delay task\n * ```ts\n * import { asyncScheduler } from 'rxjs';\n *\n * const task = () => console.log('it works!');\n *\n * asyncScheduler.schedule(task, 2000);\n *\n * // After 2 seconds logs:\n * // \"it works!\"\n * ```\n *\n * Use async scheduler to repeat task in intervals\n * ```ts\n * import { asyncScheduler } from 'rxjs';\n *\n * function task(state) {\n * console.log(state);\n * this.schedule(state + 1, 1000); // `this` references currently executing Action,\n * // which we reschedule with new state and delay\n * }\n *\n * asyncScheduler.schedule(task, 3000, 0);\n *\n * // Logs:\n * // 0 after 3s\n * // 1 after 4s\n * // 2 after 5s\n * // 3 after 6s\n * ```\n */\n\nexport const asyncScheduler = new AsyncScheduler(AsyncAction);\n\n/**\n * @deprecated Renamed to {@link asyncScheduler}. 
Will be removed in v8.\n */\nexport const async = asyncScheduler;\n", "import { AsyncAction } from './AsyncAction';\nimport { AnimationFrameScheduler } from './AnimationFrameScheduler';\nimport { SchedulerAction } from '../types';\nimport { animationFrameProvider } from './animationFrameProvider';\nimport { TimerHandle } from './timerHandle';\n\nexport class AnimationFrameAction extends AsyncAction {\n constructor(protected scheduler: AnimationFrameScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n protected requestAsyncId(scheduler: AnimationFrameScheduler, id?: TimerHandle, delay: number = 0): TimerHandle {\n // If delay is greater than 0, request as an async action.\n if (delay !== null && delay > 0) {\n return super.requestAsyncId(scheduler, id, delay);\n }\n // Push the action to the end of the scheduler queue.\n scheduler.actions.push(this);\n // If an animation frame has already been requested, don't request another\n // one. If an animation frame hasn't been requested yet, request one. Return\n // the current animation frame request id.\n return scheduler._scheduled || (scheduler._scheduled = animationFrameProvider.requestAnimationFrame(() => scheduler.flush(undefined)));\n }\n\n protected recycleAsyncId(scheduler: AnimationFrameScheduler, id?: TimerHandle, delay: number = 0): TimerHandle | undefined {\n // If delay exists and is greater than 0, or if the delay is null (the\n // action wasn't rescheduled) but was originally scheduled as an async\n // action, then recycle as an async action.\n if (delay != null ? 
delay > 0 : this.delay > 0) {\n return super.recycleAsyncId(scheduler, id, delay);\n }\n // If the scheduler queue has no remaining actions with the same async id,\n // cancel the requested animation frame and set the scheduled flag to\n // undefined so the next AnimationFrameAction will request its own.\n const { actions } = scheduler;\n if (id != null && actions[actions.length - 1]?.id !== id) {\n animationFrameProvider.cancelAnimationFrame(id as number);\n scheduler._scheduled = undefined;\n }\n // Return undefined so the action knows to request a new async id if it's rescheduled.\n return undefined;\n }\n}\n", "import { AsyncAction } from './AsyncAction';\nimport { AsyncScheduler } from './AsyncScheduler';\n\nexport class AnimationFrameScheduler extends AsyncScheduler {\n public flush(action?: AsyncAction): void {\n this._active = true;\n // The async id that effects a call to flush is stored in _scheduled.\n // Before executing an action, it's necessary to check the action's async\n // id to determine whether it's supposed to be executed in the current\n // flush.\n // Previous implementations of this method used a count to determine this,\n // but that was unsound, as actions that are unsubscribed - i.e. 
cancelled -\n // are removed from the actions array and that can shift actions that are\n // scheduled to be executed in a subsequent flush into positions at which\n // they are executed within the current flush.\n const flushId = this._scheduled;\n this._scheduled = undefined;\n\n const { actions } = this;\n let error: any;\n action = action || actions.shift()!;\n\n do {\n if ((error = action.execute(action.state, action.delay))) {\n break;\n }\n } while ((action = actions[0]) && action.id === flushId && actions.shift());\n\n this._active = false;\n\n if (error) {\n while ((action = actions[0]) && action.id === flushId && actions.shift()) {\n action.unsubscribe();\n }\n throw error;\n }\n }\n}\n", "import { AnimationFrameAction } from './AnimationFrameAction';\nimport { AnimationFrameScheduler } from './AnimationFrameScheduler';\n\n/**\n *\n * Animation Frame Scheduler\n *\n * Perform task when `window.requestAnimationFrame` would fire\n *\n * When `animationFrame` scheduler is used with delay, it will fall back to {@link asyncScheduler} scheduler\n * behaviour.\n *\n * Without delay, `animationFrame` scheduler can be used to create smooth browser animations.\n * It makes sure scheduled task will happen just before next browser content repaint,\n * thus performing animations as efficiently as possible.\n *\n * ## Example\n * Schedule div height animation\n * ```ts\n * // html:
\n * import { animationFrameScheduler } from 'rxjs';\n *\n * const div = document.querySelector('div');\n *\n * animationFrameScheduler.schedule(function(height) {\n * div.style.height = height + \"px\";\n *\n * this.schedule(height + 1); // `this` references currently executing Action,\n * // which we reschedule with new state\n * }, 0, 0);\n *\n * // You will see a div element growing in height\n * ```\n */\n\nexport const animationFrameScheduler = new AnimationFrameScheduler(AnimationFrameAction);\n\n/**\n * @deprecated Renamed to {@link animationFrameScheduler}. Will be removed in v8.\n */\nexport const animationFrame = animationFrameScheduler;\n", "import { Observable } from '../Observable';\nimport { SchedulerLike } from '../types';\n\n/**\n * A simple Observable that emits no items to the Observer and immediately\n * emits a complete notification.\n *\n * Just emits 'complete', and nothing else.\n *\n * ![](empty.png)\n *\n * A simple Observable that only emits the complete notification. It can be used\n * for composing with other Observables, such as in a {@link mergeMap}.\n *\n * ## Examples\n *\n * Log complete notification\n *\n * ```ts\n * import { EMPTY } from 'rxjs';\n *\n * EMPTY.subscribe({\n * next: () => console.log('Next'),\n * complete: () => console.log('Complete!')\n * });\n *\n * // Outputs\n * // Complete!\n * ```\n *\n * Emit the number 7, then complete\n *\n * ```ts\n * import { EMPTY, startWith } from 'rxjs';\n *\n * const result = EMPTY.pipe(startWith(7));\n * result.subscribe(x => console.log(x));\n *\n * // Outputs\n * // 7\n * ```\n *\n * Map and flatten only odd numbers to the sequence `'a'`, `'b'`, `'c'`\n *\n * ```ts\n * import { interval, mergeMap, of, EMPTY } from 'rxjs';\n *\n * const interval$ = interval(1000);\n * const result = interval$.pipe(\n * mergeMap(x => x % 2 === 1 ? 
of('a', 'b', 'c') : EMPTY),\n * );\n * result.subscribe(x => console.log(x));\n *\n * // Results in the following to the console:\n * // x is equal to the count on the interval, e.g. (0, 1, 2, 3, ...)\n * // x will occur every 1000ms\n * // if x % 2 is equal to 1, print a, b, c (each on its own)\n * // if x % 2 is not equal to 1, nothing will be output\n * ```\n *\n * @see {@link Observable}\n * @see {@link NEVER}\n * @see {@link of}\n * @see {@link throwError}\n */\nexport const EMPTY = new Observable((subscriber) => subscriber.complete());\n\n/**\n * @param scheduler A {@link SchedulerLike} to use for scheduling\n * the emission of the complete notification.\n * @deprecated Replaced with the {@link EMPTY} constant or {@link scheduled} (e.g. `scheduled([], scheduler)`). Will be removed in v8.\n */\nexport function empty(scheduler?: SchedulerLike) {\n return scheduler ? emptyScheduled(scheduler) : EMPTY;\n}\n\nfunction emptyScheduled(scheduler: SchedulerLike) {\n return new Observable((subscriber) => scheduler.schedule(() => subscriber.complete()));\n}\n", "import { SchedulerLike } from '../types';\nimport { isFunction } from './isFunction';\n\nexport function isScheduler(value: any): value is SchedulerLike {\n return value && isFunction(value.schedule);\n}\n", "import { SchedulerLike } from '../types';\nimport { isFunction } from './isFunction';\nimport { isScheduler } from './isScheduler';\n\nfunction last(arr: T[]): T | undefined {\n return arr[arr.length - 1];\n}\n\nexport function popResultSelector(args: any[]): ((...args: unknown[]) => unknown) | undefined {\n return isFunction(last(args)) ? args.pop() : undefined;\n}\n\nexport function popScheduler(args: any[]): SchedulerLike | undefined {\n return isScheduler(last(args)) ? args.pop() : undefined;\n}\n\nexport function popNumber(args: any[], defaultValue: number): number {\n return typeof last(args) === 'number' ? args.pop()! 
: defaultValue;\n}\n", "export const isArrayLike = ((x: any): x is ArrayLike => x && typeof x.length === 'number' && typeof x !== 'function');", "import { isFunction } from \"./isFunction\";\n\n/**\n * Tests to see if the object is \"thennable\".\n * @param value the object to test\n */\nexport function isPromise(value: any): value is PromiseLike {\n return isFunction(value?.then);\n}\n", "import { InteropObservable } from '../types';\nimport { observable as Symbol_observable } from '../symbol/observable';\nimport { isFunction } from './isFunction';\n\n/** Identifies an input as being Observable (but not necessary an Rx Observable) */\nexport function isInteropObservable(input: any): input is InteropObservable {\n return isFunction(input[Symbol_observable]);\n}\n", "import { isFunction } from './isFunction';\n\nexport function isAsyncIterable(obj: any): obj is AsyncIterable {\n return Symbol.asyncIterator && isFunction(obj?.[Symbol.asyncIterator]);\n}\n", "/**\n * Creates the TypeError to throw if an invalid object is passed to `from` or `scheduled`.\n * @param input The object that was passed.\n */\nexport function createInvalidObservableTypeError(input: any) {\n // TODO: We should create error codes that can be looked up, so this can be less verbose.\n return new TypeError(\n `You provided ${\n input !== null && typeof input === 'object' ? 'an invalid object' : `'${input}'`\n } where a stream was expected. 
You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.`\n );\n}\n", "export function getSymbolIterator(): symbol {\n if (typeof Symbol !== 'function' || !Symbol.iterator) {\n return '@@iterator' as any;\n }\n\n return Symbol.iterator;\n}\n\nexport const iterator = getSymbolIterator();\n", "import { iterator as Symbol_iterator } from '../symbol/iterator';\nimport { isFunction } from './isFunction';\n\n/** Identifies an input as being an Iterable */\nexport function isIterable(input: any): input is Iterable {\n return isFunction(input?.[Symbol_iterator]);\n}\n", "import { ReadableStreamLike } from '../types';\nimport { isFunction } from './isFunction';\n\nexport async function* readableStreamLikeToAsyncGenerator(readableStream: ReadableStreamLike): AsyncGenerator {\n const reader = readableStream.getReader();\n try {\n while (true) {\n const { value, done } = await reader.read();\n if (done) {\n return;\n }\n yield value!;\n }\n } finally {\n reader.releaseLock();\n }\n}\n\nexport function isReadableStreamLike(obj: any): obj is ReadableStreamLike {\n // We don't want to use instanceof checks because they would return\n // false for instances from another Realm, like an