From bfb676340ec1ffabacbcf8017ce3470b8638098f Mon Sep 17 00:00:00 2001 From: Alex Dewar Date: Tue, 23 Apr 2024 16:03:04 +0100 Subject: [PATCH 1/2] pre-commit: Add codespell hook --- .pre-commit-config.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 1f36b368a..a80d7b6be 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -35,3 +35,7 @@ repos: hooks: - id: pretty-format-toml args: [--autofix, --indent, "4", --no-sort] + - repo: https://github.com/codespell-project/codespell + rev: v2.2.6 + hooks: + - id: codespell From 6c343655c785cf9a79bcd26b6df1627fad7a082c Mon Sep 17 00:00:00 2001 From: Alex Dewar Date: Tue, 23 Apr 2024 16:24:11 +0100 Subject: [PATCH 2/2] Run codespell hook --- .codespell_ignore.txt | 3 +++ .github/ISSUE_TEMPLATE/bug_report.md | 2 +- .pre-commit-config.yaml | 1 + CHANGELOG.md | 4 +-- docs/advanced-guide/extending-muse.ipynb | 10 ++++--- docs/application-flow.rst | 2 +- docs/glossary.rst | 2 +- docs/inputs/agents.rst | 2 +- docs/inputs/commodities.rst | 4 +-- docs/inputs/technodata.rst | 4 +-- docs/inputs/technodata_timeslices.rst | 2 +- docs/inputs/toml.rst | 26 +++++++++---------- docs/inputs/toml_primer.rst | 4 +-- docs/installation/pipx-based.rst | 6 ++++- docs/muse-components.rst | 4 +-- docs/overview.rst | 4 +-- .../add-gdp-correlation-demand.ipynb | 2 +- docs/user-guide/add-solar.ipynb | 2 +- .../min-max-timeslice-constraints.ipynb | 2 +- docs/user-guide/modify-timing-data.ipynb | 2 +- src/muse/agents/agent.py | 18 ++++++------- src/muse/carbon_budget.py | 4 +-- src/muse/commodities.py | 2 +- src/muse/constraints.py | 16 ++++++------ src/muse/decisions.py | 8 +++--- src/muse/demand_matching.py | 16 ++++++------ src/muse/demand_share.py | 2 +- src/muse/errors.py | 6 ++--- src/muse/examples.py | 2 +- src/muse/hooks.py | 6 ++--- src/muse/investments.py | 2 +- src/muse/mca.py | 2 +- src/muse/objectives.py | 4 +-- src/muse/outputs/cache.py | 4 +-- 
src/muse/production.py | 6 ++--- src/muse/quantities.py | 6 ++--- src/muse/readers/csv.py | 6 ++--- src/muse/readers/toml.py | 10 +++---- src/muse/registration.py | 2 +- src/muse/regressions.py | 6 ++--- src/muse/sectors/__init__.py | 2 +- src/muse/sectors/legacy_sector.py | 10 +++---- src/muse/sectors/sector.py | 2 +- src/muse/sectors/subsector.py | 2 +- src/muse/timeslices.py | 2 +- src/muse/utilities.py | 22 ++++++++-------- tests/conftest.py | 4 +-- tests/test_aggregoutput.py | 6 ++--- tests/test_decisions.py | 2 +- tests/test_demand_share.py | 2 +- tests/test_mca.py | 2 +- tests/test_outputs.py | 4 +-- 52 files changed, 143 insertions(+), 133 deletions(-) create mode 100644 .codespell_ignore.txt diff --git a/.codespell_ignore.txt b/.codespell_ignore.txt new file mode 100644 index 000000000..e8de7e0e6 --- /dev/null +++ b/.codespell_ignore.txt @@ -0,0 +1,3 @@ +datas +raison +fom diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 77bb7e3b2..ab0814cf3 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -13,7 +13,7 @@ A clear and concise description of what the bug is, including error messages. ## To Reproduce -Steps to reproduce the behavior. Attache any input file that might be required. +Steps to reproduce the behavior. Attach any input file that might be required. 
## Expected behavior diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a80d7b6be..5d98ff04c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -39,3 +39,4 @@ repos: rev: v2.2.6 hooks: - id: codespell + args: [--ignore-words, .codespell_ignore.txt] diff --git a/CHANGELOG.md b/CHANGELOG.md index 12910cfc9..343aec3cc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,8 +9,8 @@ - Tutorials in the documentation are run as tests ([#177](https://github.com/SGIModel/MUSE_OS/pull/177)) - Clean notebooks before running them as tests in the documentaition ([#173](https://github.com/EnergySystemsModellingLab/MUSE_OS/pull/173)) - Add technology-granularity to sectoral outputs ([#214](https://github.com/SGIModel/MUSE_OS/pull/214)) -- Add descripion to bisection method ([#149](https://github.com/SGIModel/MUSE_OS/pull/149)) -- Add descripion to demo cases ([#139](https://github.com/SGIModel/MUSE_OS/pull/139)) +- Add description to bisection method ([#149](https://github.com/SGIModel/MUSE_OS/pull/149)) +- Add description to demo cases ([#139](https://github.com/SGIModel/MUSE_OS/pull/139)) - Run link-checker only once a week ([#148](https://github.com/SGIModel/MUSE_OS/pull/148)) - Update documentation for installing MUSE ([#138](https://github.com/SGIModel/MUSE_OS/pull/138)) - Updating pyproject.toml with valid python versions ([#121](https://github.com/SGIModel/MUSE_OS/pull/121)) diff --git a/docs/advanced-guide/extending-muse.ipynb b/docs/advanced-guide/extending-muse.ipynb index 6590fede6..67b92467a 100644 --- a/docs/advanced-guide/extending-muse.ipynb +++ b/docs/advanced-guide/extending-muse.ipynb @@ -255,12 +255,12 @@ "source": [ "### Cached quantities\n", "\n", - "The result of intermediate calculations are often useful for post-morten analysis or\n", + "The result of intermediate calculations are often useful for post-mortem analysis or\n", "simply to have a more detailed picture of the evolution of the calculation over time.\n", "The 
process of adding a new quantity to cache and output has three steps:\n", "\n", "1. Register the function with `register_cached_quantity` that will deal with the \n", - " consolidation of the cached quantity prior to outputing in such a way it can be\n", + " consolidation of the cached quantity prior to outputting in such a way it can be\n", " accepted by one of the sinks. It can also be used to modify what is saved, filtering\n", " by technologies or agents, for example.\n", "2. Cache the quantity in each iteration of the market using\n", @@ -318,7 +318,7 @@ "source": [ "The above function is nearly identical to `muse.outputs.cache.capacity` but filtering\n", "the output such that only information related to retorfit agents is included in the\n", - "output. As a function with the same name intended to chache the `capacity` already\n", + "output. As a function with the same name intended to cache the `capacity` already\n", "exists, we have to set `overwrite = True` in the decorator, so that it replaces the\n", "built in version.\n", "\n", @@ -581,7 +581,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "As previously demonstrated, we can easily add new functionality to MUSE. However, running a jupyter notebook is not always the best approach. It is also possible to store functions in an arbitrary pthon file, such as the following:" + "As previously demonstrated, we can easily add new functionality to MUSE. However,\n", + "running a jupyter notebook is not always the best approach. 
It is also possible to store\n", + "functions in an arbitrary Python file, such as the following:" ] }, { diff --git a/docs/application-flow.rst b/docs/application-flow.rst index 4ed4a63af..6e20a7429 100644 --- a/docs/application-flow.rst +++ b/docs/application-flow.rst @@ -318,7 +318,7 @@ An overall picture of this process can be seen in the following chart, but there {node [shape=""]; start; end;} exclude [label="Exclude\ncommodities\nfrom market"]; single_year [label="Single year\niteration", fillcolor="lightgrey", style="rounded,filled"] - maxiter [label="Maxium iter?", shape=diamond, style=""] + maxiter [label="Maximum iter?", shape=diamond, style=""] converged [label="Converged?", shape=diamond, style=""] prices [label="Update with\nconverged prices"] {node [label="Update with not\nconverged prices"]; prices1; prices2;} diff --git a/docs/glossary.rst b/docs/glossary.rst index bf331eeb0..e188558ae 100644 --- a/docs/glossary.rst +++ b/docs/glossary.rst @@ -39,7 +39,7 @@ Here we provide a glossary for some of the frequently used terms in this documen This is where an agent does not know everything needed to make a perfect decision. Levelised cost of electricity - The levelised cost of electricty is a measure of the average net present cost of electricity generation for a generating plant over its lifetime. + The levelised cost of electricity is a measure of the average net present cost of electricity generation for a generating plant over its lifetime. Limited foresight Limited foresight is the condition that an agent is unable to predict the entire future perfectly. The agent is only able to predict the future either imperfectly, or a limited time ahead. diff --git a/docs/inputs/agents.rst b/docs/inputs/agents.rst index ccdc22ddd..97740963a 100644 --- a/docs/inputs/agents.rst +++ b/docs/inputs/agents.rst @@ -41,7 +41,7 @@ Type technologies they can consider (by :ref:`SearchRule `). 
"New" agents invest on the rest of the demand, and can often consider more general sets of technologies. If only "New" agents are included, they will also invest to make up for - decomissioned assets, but the end mix might be different than using a specialised + decommissioned assets, but the end mix might be different than using a specialised "Retrofit" agent for that. AgentShare diff --git a/docs/inputs/commodities.rst b/docs/inputs/commodities.rst index 56ddc83bd..fe9c8b459 100644 --- a/docs/inputs/commodities.rst +++ b/docs/inputs/commodities.rst @@ -25,10 +25,10 @@ CommodityType which are either extracted, transformed from one to another, or used in the energy system. The "service" type includes commodities such as space heating or hot water which correspond to selected - poples' needs whose fulflment requires energy uses. + poples' needs whose fulfillment requires energy uses. The "material" type represent non-energy inputs for energy technologies, such as limestone or oxygen. - The "environmental" type refers to non-energy commodities whichare used to quantify an inpact on the environment, + The "environmental" type refers to non-energy commodities whichare used to quantify an impact on the environment, such as greenhouse gases or CO2. They can be subjected to different types of environmental fees or taxes. CommodityName diff --git a/docs/inputs/technodata.rst b/docs/inputs/technodata.rst index 06b0a3ae1..913d8a8c9 100644 --- a/docs/inputs/technodata.rst +++ b/docs/inputs/technodata.rst @@ -151,11 +151,11 @@ Agent_0, ..., Agent_N resBoilerElectric, region1, 2010, ..., 1 resBoilerElectric, region1, 2030, ..., 1 - In a two-agent simulation, a new column neeeds to be added for each retrofit agent belonging to the new-retrofit agent pair. + In a two-agent simulation, a new column needs to be added for each retrofit agent belonging to the new-retrofit agent pair. 
The column heading refers each retrofit agent "AgentShare" as defined in the agents' definition (see :ref:`inputs-agents`). Assuming a split of the initial capacity into 30 \% and 70 \% for each retrofit agent, the model table would be setup as follows. The values of the "AgetnShare" needs to reflect the demand split represented by the "Quantity" attribute (see :ref:`inputs-agents`), - to make sure that the initial demand is fullfilled with the initial stock. + to make sure that the initial demand is fulfilled with the initial stock. .. csv-table:: Techno-data: AgentShare - 2 agents :header: ProcessName, RegionName, Time, ..., Agent_2, Agent_4 diff --git a/docs/inputs/technodata_timeslices.rst b/docs/inputs/technodata_timeslices.rst index 9c2516119..3e2b9e29f 100644 --- a/docs/inputs/technodata_timeslices.rst +++ b/docs/inputs/technodata_timeslices.rst @@ -3,7 +3,7 @@ ====================== Techno-data Timeslices ====================== -The techno-data timeslices is an optinal file which contains information on technologies, their region, timeslices, utilization factors and minimum service factor. The objective of this file is to link the utilization factor and minimum service factor to timeslices. For instance, if you were to model solar photovoltaics, you would probably want to specify that they can not produce any electricty at night, or if you're modelling a nuclear power plant, that they must generate a minimum amount of electricity. The techno-data timeslice file enables you to do that. Note, that if this file is not present, the utilization facto will be used from the technodata file. +The techno-data timeslices is an optional file which contains information on technologies, their region, timeslices, utilization factors and minimum service factor. The objective of this file is to link the utilization factor and minimum service factor to timeslices. 
For instance, if you were to model solar photovoltaics, you would probably want to specify that they can not produce any electricity at night, or if you're modelling a nuclear power plant, that they must generate a minimum amount of electricity. The techno-data timeslice file enables you to do that. Note, that if this file is not present, the utilization facto will be used from the technodata file. .. csv-table:: Techno-data diff --git a/docs/inputs/toml.rst b/docs/inputs/toml.rst index a1a6a3074..9a0653b18 100644 --- a/docs/inputs/toml.rst +++ b/docs/inputs/toml.rst @@ -11,7 +11,7 @@ described in this :ref:`previous section `. Here, however, we focus attributes that are specific to MUSE. The TOML file can be read using :py:func:`~readers.toml.read_settings`. The resulting -data is used to construt the market clearing algorithm directly in the :py:meth:`MCA's +data is used to construct the market clearing algorithm directly in the :py:meth:`MCA's factory function `. ------------ @@ -90,7 +90,7 @@ a whole. Carbon market ------------- -This section containts the settings related to the modelling of the carbon market. If omitted, it defaults to not +This section contains the settings related to the modelling of the carbon market. If omitted, it defaults to not including the carbon market in the simulation. Example @@ -109,7 +109,7 @@ Example *method* Method used to equilibrate the carbon market. Available options are `fitting` and `bisection`, however this can be expanded with the `@register_carbon_budget_method` hook in `muse.carbon_budget`. - The market-clearing algortihm iterates over the sectors until the market reaches an equilibrium in the foresight period (the period next to the one analysed). + The market-clearing algorithm iterates over the sectors until the market reaches an equilibrium in the foresight period (the period next to the one analysed). 
This is represented by a stable variation of a commodity demand (or price) between iterations below a defined tolerance. The market-clearing algorithm samples a user-defined set of carbon prices. @@ -145,7 +145,7 @@ Example `price_too_high_threshold`, a user-defined threshold based on heuristics on the values of the carbon price, reflecting typical historical trends. *fitter* - `fitter` specifies the regression model fit. The regresion approximates the model emissions. Predefined options are `linear` and `exponential`. Further options can be defined using the `@register_carbon_budget_fitter` hook in `muse.carbon_budget`. + `fitter` specifies the regression model fit. The regression approximates the model emissions. Predefined options are `linear` and `exponential`. Further options can be defined using the `@register_carbon_budget_fitter` hook in `muse.carbon_budget`. ------------------ Global input files @@ -259,7 +259,7 @@ See section on `Timeslices_`. *outputs_cache* This option behaves exactly like `outputs` for sectors and accepts the same options but controls the output of cached quantities instead. This option is NOT available for - sectors themselves (i.e using `[[sector.comercial.outputs_cache]]` will have no effect). See + sectors themselves (i.e using `[[sector.commercial.outputs_cache]]` will have no effect). See :py:mod:`muse.outputs.cache` for more details. A single row looks like this: @@ -292,13 +292,13 @@ the user, since it will not affect the model itself. Sectors are defined in :py:class:`~muse.sectors.Sector`. -A sector accepts these atributes: +A sector accepts these attributes: .. _sector-type: *type* Defines the kind of sector this is. *Standard* sectors are those with type - "default". This value corresponds to the name with which a sector class is registerd + "default". This value corresponds to the name with which a sector class is registered with MUSE, via :py:meth:`~muse.sectors.register_sector`. [INSERT OTHER OPTIONS HERE] .. 
_sector-priority: @@ -441,7 +441,7 @@ Sectors contain a number of subsections: See :ref:`inputs-iocomms`. Once the finest timeslice and its aggregates are given, it is possible for each sector -to define the timeslice simply by refering to the slices it will use at each level. +to define the timeslice simply by referring to the slices it will use at each level. .. _sector-timeslices: @@ -508,7 +508,7 @@ to define the timeslice simply by refering to the slices it will use at each lev writing, three are available: - an "adhoc" solver: Simple in-house solver that ranks the technologies - according to cost and sevice the demand incrementally. + according to cost and service the demand incrementally. - "scipy" solver: Formulates investment as a true LP problem and solves it using the `scipy solver`_. @@ -518,7 +518,7 @@ to define the timeslice simply by refering to the slices it will use at each lev Users can install it with ``pip install cvxopt`` or ``conda install cvxopt``. *demand_share* - A method used to split the MCA demand into seperate parts to be serviced by + A method used to split the MCA demand into separate parts to be serviced by specific agents. A basic distinction is between *new* and *retrofit* agents: the former asked to respond to an increase of commodity demand investing in new assets; the latter asked to invest in new asset to balance the decommissined @@ -626,7 +626,7 @@ simulation. Preset sectors are defined in :py:class:`~muse.sectors.PresetSector`. -The three components, production, consumption, and prices, can be set independantly and +The three components, production, consumption, and prices, can be set independently and not all three need to be set. Production and consumption default to zero, and prices default to leaving things unchanged. @@ -661,7 +661,7 @@ The following attributes are accepted: *consumption_path* CSV output files, one per year. This attribute can include wild cards, i.e. '*', - which can match anything. 
For instance: `consumption_path = "{cwd}/Consumtion*.csv"` will match any csv file starting with "Consumption" in the + which can match anything. For instance: `consumption_path = "{cwd}/Consumption*.csv"` will match any csv file starting with "Consumption" in the current working directory. The file names must include the year for which it defines the consumption, e.g. `Consumption2015.csv`. @@ -787,4 +787,4 @@ itself can use the following attributes. Path to a technodata CSV file. See. :ref:`inputs-technodata`. *output_path* - Path to a diretory where the sector will write output files. + Path to a directory where the sector will write output files. diff --git a/docs/inputs/toml_primer.rst b/docs/inputs/toml_primer.rst index 7a1669ab3..90a220f13 100644 --- a/docs/inputs/toml_primer.rst +++ b/docs/inputs/toml_primer.rst @@ -58,13 +58,13 @@ dictionary). [[some_table_of_data]] a_key = "another value" -.. Since MUSE requires a number of data files, paths to file can be formated quite +.. Since MUSE requires a number of data files, paths to file can be formatted quite .. flexibly. A `path` any key-value where the value ends with `.csv` or `.toml`, .. as well any key which ends in `_path`, `_file`, or `_dir`, e.g. `data_path` or .. `sector_dir`. Paths can be formatted with shorthands for specific directories. .. Shorth-hands are specified by curly-brackets: -As MUSE requires a number of data file, paths to files can be formated in a flexible manner. Paths can be formatted with shorthands for specific directories and are defined with curly-brackets. For example: +As MUSE requires a number of data file, paths to files can be formatted in a flexible manner. Paths can be formatted with shorthands for specific directories and are defined with curly-brackets. For example: .. 
code-block:: TOML diff --git a/docs/installation/pipx-based.rst b/docs/installation/pipx-based.rst index 3e4d7a620..bbcf5e01b 100644 --- a/docs/installation/pipx-based.rst +++ b/docs/installation/pipx-based.rst @@ -71,7 +71,11 @@ MUSE needs Python to run but, for now, it only works with versions 3.8 and 3.9, .. note:: - Windows users should disable the alias for Python that comes by default with Windows and that will try to install Python from the Microsoft Store everytime we write ``python`` in the terminal. To do so, press the ``Windows key`` and start typing ``alias``, when it shows up, click in ``Manage app execution aliases``. In the window that opens, disable all the entries related to Python, like in the image. + Windows users should disable the alias for Python that comes by default with Windows + and that will try to install Python from the Microsoft Store every time we write + ``python`` in the terminal. To do so, press the ``Windows key`` and start typing + ``alias``, when it shows up, click in ``Manage app execution aliases``. In the + window that opens, disable all the entries related to Python, like in the image. .. image:: ../figures/disable_python_alias.png :width: 400 diff --git a/docs/muse-components.rst b/docs/muse-components.rst index 8a00bd902..754227957 100644 --- a/docs/muse-components.rst +++ b/docs/muse-components.rst @@ -53,7 +53,7 @@ Technologies, and their parameters are defined in the Technodata.csv file. For a Sectors ------- -Sectors typically group areas of economic activity together, such as the residential sector, which might include all energy conusming activies of households. Possible examples of sectors are: +Sectors typically group areas of economic activity together, such as the residential sector, which might include all energy conusming activities of households. 
Possible examples of sectors are: - Gas sector @@ -70,7 +70,7 @@ Each of the technologies, which consume a commodity, also output a different com Agents ------ -Agents represent the investment decision makers in an energy system, for example consumers or companies. They invest in technologies that meet service demands, like heating, or produce other needed energy commodities, like electricity. These agents can be heterogenous, meaning that their investment priorities have the ability to differ. +Agents represent the investment decision makers in an energy system, for example consumers or companies. They invest in technologies that meet service demands, like heating, or produce other needed energy commodities, like electricity. These agents can be heterogeneous, meaning that their investment priorities have the ability to differ. As an example, a generation company could compare potential power generators based on their levelized cost of electricity, their net present value, by minimising the total capital cost, a mixture of these and/or any user-defined approach. This approach more closely matches the behaviour of real-life agents in the energy market, where companies, or people, have different priorities and constraints. diff --git a/docs/overview.rst b/docs/overview.rst index 934ab879a..115032d9e 100644 --- a/docs/overview.rst +++ b/docs/overview.rst @@ -31,11 +31,11 @@ MUSE is an open source agent-based modelling environment that can be used to sim MUSE can incorporate residential, power, industrial and conversion sectors, meaning many questions can be explored using MUSE, as per the wishes of the user. -MUSE is an agent-based modelling environment, where the agents are investors and consumers. In MUSE, this means that investment decisions are made from the point of view of the investor and consumer. These agents can be heterogenous, enabling for differering investment strategies between agents, as in the real world. 
+MUSE is an agent-based modelling environment, where the agents are investors and consumers. In MUSE, this means that investment decisions are made from the point of view of the investor and consumer. These agents can be heterogeneous, enabling for differering investment strategies between agents, as in the real world. MUSE is technology rich and can model energy production, conversion and end-use technologies. So, for example, MUSE can enable the user to develop a power sector with solar photovoltaics, wind turbines and gas power plants which produce energy for appliances like electric stoves, heaters and lighting in the residential sector. Agents invest within these sectors, investing in technologies such as electric stoves in the residential sector or gas power plants in the power sectors. The investments made depend on the agent's investment strategies. -Every sector is a user configurable module. This means that a user can configure any number of sectors, cointaining custom, user-defined technologies and commodities. MUSE is fully data-driven, meaning that the configuration of the model is carried out using a selection of :ref:`input-files`. This means that you are able to customise MUSE to your wishes by modifying these input files. Within a benchmark year, MUSE allows for a user-defined temporal granularity. This allows for the benchmark year to be split into different seasons and times, where energy demand may differ. Thus allowing us to model diurnal peaks in the demand, varying weekly and seasonally. +Every sector is a user configurable module. This means that a user can configure any number of sectors, containing custom, user-defined technologies and commodities. MUSE is fully data-driven, meaning that the configuration of the model is carried out using a selection of :ref:`input-files`. This means that you are able to customise MUSE to your wishes by modifying these input files. 
Within a benchmark year, MUSE allows for a user-defined temporal granularity. This allows for the benchmark year to be split into different seasons and times, where energy demand may differ. Thus allowing us to model diurnal peaks in the demand, varying weekly and seasonally. MUSE is highly configurable, but it has been built with medium and long-term scenarios in mind; for the short-term, MUSE can be linked with more detailed models. As the number of time steps and regions increase, the computational time also increases, which is something to keep in mind when building highly complex models. diff --git a/docs/user-guide/add-gdp-correlation-demand.ipynb b/docs/user-guide/add-gdp-correlation-demand.ipynb index 6fadbb803..f10f528aa 100644 --- a/docs/user-guide/add-gdp-correlation-demand.ipynb +++ b/docs/user-guide/add-gdp-correlation-demand.ipynb @@ -71,7 +71,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Editting the TOML file to include this can be done relatively quickly if we know the variable names.\n", + "Editing the TOML file to include this can be done relatively quickly if we know the variable names.\n", "\n", "In the second bottom section of the toml file, you will see the following section: \n", "\n", diff --git a/docs/user-guide/add-solar.ipynb b/docs/user-guide/add-solar.ipynb index 60b47747d..6249687a3 100644 --- a/docs/user-guide/add-solar.ipynb +++ b/docs/user-guide/add-solar.ipynb @@ -146,7 +146,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Finally, the ```technodata.csv``` containts parametrisation data for the technology, such as the cost, growth constraints, lifetime of the power plant and fuel used. The technodata file is too long for it all to be displayed here, so we will truncate the full version." + "Finally, the ```technodata.csv``` contains parametrisation data for the technology, such as the cost, growth constraints, lifetime of the power plant and fuel used. 
The technodata file is too long for it all to be displayed here, so we will truncate the full version." ] }, { diff --git a/docs/user-guide/min-max-timeslice-constraints.ipynb b/docs/user-guide/min-max-timeslice-constraints.ipynb index a1a48ca43..613a94517 100644 --- a/docs/user-guide/min-max-timeslice-constraints.ipynb +++ b/docs/user-guide/min-max-timeslice-constraints.ipynb @@ -22,7 +22,7 @@ "source": [ "## Minimum timeslice\n", "\n", - "In this tutorial we will be amending the default example, which you can find [here](https://github.com/SGIModel/MUSE_OS/tree/main/src/muse/data/example/default). Firstly, we will be imposing a minimium service factor for gasCCGT in the power sector. This is the minimum that a technology can output per timeslice. \n", + "In this tutorial we will be amending the default example, which you can find [here](https://github.com/SGIModel/MUSE_OS/tree/main/src/muse/data/example/default). Firstly, we will be imposing a minimum service factor for gasCCGT in the power sector. This is the minimum that a technology can output per timeslice. \n", "\n", "To do this, we will need to create a new `csv` file that specifies the minimum service factor per timeslice.\n", "\n", diff --git a/docs/user-guide/modify-timing-data.ipynb b/docs/user-guide/modify-timing-data.ipynb index bdfb8984c..e0f7bd784 100644 --- a/docs/user-guide/modify-timing-data.ipynb +++ b/docs/user-guide/modify-timing-data.ipynb @@ -185,7 +185,7 @@ "|gasboiler|R2|2020|…|**60**|**0.5**|**120**|…|0|\n", "|heatpump|R2|2020|…|**60**|**0.5**|**120**|…|0|\n", "\n", - "It must be noted, that this is a toy example. For modelling a real life scenario, data should be sought to ensure that these constriants remain realistic.\n", + "It must be noted, that this is a toy example. 
For modelling a real life scenario, data should be sought to ensure that these constraints remain realistic.\n", "\n", "For the full power sector ```technodata.csv``` file click [here](https://github.com/SGIModel/MUSE_OS/blob/main/docs/tutorial-code/4-modify-timing-data/2-modify-time-framework/technodata/power/Technodata.csv), and for the full residential sector ```technodata.csv``` file click [here](https://github.com/SGIModel/MUSE_OS/blob/main/docs/tutorial-code/4-modify-timing-data/2-modify-time-framework/technodata/residential/Technodata.csv).\n", "\n", diff --git a/src/muse/agents/agent.py b/src/muse/agents/agent.py index 21f1ffc8c..4a2a545f8 100644 --- a/src/muse/agents/agent.py +++ b/src/muse/agents/agent.py @@ -126,14 +126,14 @@ def __init__( region: Region where the agent operates, used for cross-referencing external tables. search_rules: method used to filter the search space - maturity_threshhold: threshhold when filtering replacement + maturity_threshhold: threshold when filtering replacement technologies with respect to market share year: year the agent is created / current year forecast: Number of years the agent will forecast housekeeping: transform applied to the assets at the start of iteration. Defaults to doing nothing. merge_transform: transform merging current and newly invested assets - together. Defaults to replacing old assets completly. + together. Defaults to replacing old assets completely. demand_threshhold: criteria below which the demand is zero. category: optional attribute that could be used to classify different agents together. @@ -155,7 +155,7 @@ def __init__( self.year = year """ Current year. - The year is incremented by one everytime next is called. + The year is incremented by one every time next is called. """ self.forecast = forecast """Number of years to look into the future for forecating purposed.""" @@ -170,9 +170,9 @@ def __init__( used to filter the search space. 
""" self.maturity_threshhold = maturity_threshhold - """ Market share threshhold. + """ Market share threshold. - Threshhold when and if filtering replacement technologies with respect + Threshold when and if filtering replacement technologies with respect to market share. """ if kwargs is not None: @@ -189,7 +189,7 @@ def __init__( if housekeeping is None: housekeeping = housekeeping_factory() self._housekeeping = housekeeping - """Tranforms applied on the assets at the start of each iteration. + """Transforms applied on the assets at the start of each iteration. It could mean keeping the assets as are, or removing assets with no capacity in the current year and beyond, etc... @@ -199,20 +199,20 @@ def __init__( if merge_transform is None: merge_transform = asset_merge_factory() self.merge_transform = merge_transform - """Tranforms applied on the old and new assets. + """Transforms applied on the old and new assets. It could mean using only the new assets, or merging old and new, etc... It can be any function registered with :py:func:`~muse.hooks.register_final_asset_transform`. """ self.demand_threshhold = demand_threshhold - """Threshhold below which the demand share is zero. + """Threshold below which the demand share is zero. This criteria avoids fulfilling demand for very small values. If None, then the criteria is not applied. """ self.asset_threshhold = asset_threshhold - """Threshhold below which assets are not added.""" + """Threshold below which assets are not added.""" @property def forecast_year(self): diff --git a/src/muse/carbon_budget.py b/src/muse/carbon_budget.py index 4403db871..3f647af9a 100644 --- a/src/muse/carbon_budget.py +++ b/src/muse/carbon_budget.py @@ -44,7 +44,7 @@ def update_carbon_budget( under: bool = True, ) -> float: """Adjust the carbon budget in the far future if emissions too high or low. - This feature can allow to simulate overshoot shifing. + This feature can allow to simulate overshoot shifting. 
Arguments: carbon_budget: budget for future year, emissions: emission for future year, @@ -498,7 +498,7 @@ def min_max_bisect( threshold: float, ): """Refines bisection algorithm to escalate carbon price and meet the budget. - As emissions can be a discontinuous fucntion of the carbon price, + As emissions can be a discontinuous function of the carbon price, this method is used to improve the solution search when discountinuities are met, improving the bounds search. diff --git a/src/muse/commodities.py b/src/muse/commodities.py index 162d4f7aa..c0fa8ecb7 100644 --- a/src/muse/commodities.py +++ b/src/muse/commodities.py @@ -12,7 +12,7 @@ class CommodityUsage(IntFlag): For details on how ``enum``'s work, see `python's documentation`__. In practice, :py:class:`CommodityUsage` centralizes in one place the different kinds of - commodities that are meaningfull to the generalized sector, e.g. commodities that + commodities that are meaningful to the generalized sector, e.g. commodities that are consumed by the sector, and commodities that produced by the sectors, as well commodities that are, somehow, *environmental*. diff --git a/src/muse/constraints.py b/src/muse/constraints.py index dc901fb2b..4f4ba2b8d 100644 --- a/src/muse/constraints.py +++ b/src/muse/constraints.py @@ -49,7 +49,7 @@ reduction or matrix multiplication. There are two additional rules. However, they are likely to be the result of an -inefficient defininition of :math:`A_c`, :math:`A_p` and :math:`b`. +inefficient definition of :math:`A_c`, :math:`A_p` and :math:`b`. - Any dimension in :math:`A_c` (:math:`A_b`) that is neither in :math:`b` nor in :math:`x_c` (:math:`x_p`) is reduced by summation before consideration for the @@ -139,7 +139,7 @@ class ConstraintKind(Enum): Where :math:`~` is one of :math:`=,\\leq,\\geq`. A constraint should contain a data-array `b` corresponding to right-hand-side vector -of the contraint. 
It should also contain a data-array `capacity` corresponding to the +of the constraint. It should also contain a data-array `capacity` corresponding to the left-hand-side matrix operator which will be applied to the capacity-related decision variables. It should contain a similar matrix `production` corresponding to the left-hand-side matrix operator which will be applied to the production-related @@ -558,7 +558,7 @@ def lp_costs( Example: - We can now construct example inputs to the funtion from the sample model. The + We can now construct example inputs to the function from the sample model. The costs will be a matrix where each assets has a candidate replacement technology. >>> from muse import examples @@ -897,7 +897,7 @@ class ScipyAdapter: >>> assert constraint.capacity.data == np.array(1) >>> assert len(constraint.capacity.dims) == 0 - And the upperbound is exanded over the replacement technologies, + And the upperbound is expanded over the replacement technologies, but not over the assets. Hence the assets will be summed over in the final constraint: @@ -907,9 +907,9 @@ class ScipyAdapter: As shown above, it does not bind the production decision variables. Hence, production is zero. The matrix operator for the capacity is simply the identity. - Hence it can be inputed as the dimensionless scalar 1. The upper bound is simply - the maximum for replacement technology (and region, if that particular dimension - exists in the problem). + Hence it can be inputted as the dimensionless scalar 1. The upper bound is + simply the maximum for replacement technology (and region, if that particular + dimension exists in the problem). 
The lp problem then becomes: @@ -1010,7 +1010,7 @@ def kwargs(self): def _unified_dataset( technologies: xr.Dataset, lpcosts: xr.Dataset, *constraints: Constraint ) -> xr.Dataset: - """Creates single xr.Dataset from costs and contraints.""" + """Creates single xr.Dataset from costs and constraints.""" from xarray import merge assert "year" not in technologies.dims diff --git a/src/muse/decisions.py b/src/muse/decisions.py index 8017d89aa..307974a5f 100644 --- a/src/muse/decisions.py +++ b/src/muse/decisions.py @@ -12,7 +12,7 @@ def weighted_sum(objectives: Dataset, parameters: Any, **kwargs) -> DataArray: Arguments: objectives: An dataset where each array is a separate objective - parameters: parameters, such as weigths, whether to minimize or maximize, the names + parameters: parameters, such as weights, whether to minimize or maximize, the names of objectives to consider, etc. kwargs: Extra input parameters. These parameters are expected to be set from the input file. @@ -183,7 +183,7 @@ def lexical_comparison( Finally, the objectives are ranked lexographically, in the order given by the parameters. - The result is an array of tuples which can subsquently be compared + The result is an array of tuples which can subsequently be compared lexicographically. """ from muse.utilities import lexical_comparison @@ -211,7 +211,7 @@ def retro_lexical_comparison( largest constraint. Finally, the objectives are ranked lexographically, in the order given by the parameters. - The result is an array of tuples which can subsquently be compared + The result is an array of tuples which can subsequently be compared lexicographically. """ from muse.utilities import lexical_comparison @@ -252,7 +252,7 @@ def epsilon_constraints( r"""Minimizes first objective subject to constraints on other objectives. 
The parameters are a sequence of tuples `(name, minimize, epsilon)`, where - `name` is the name of the objective, `minimze` is `True` if minimizing and + `name` is the name of the objective, `minimize` is `True` if minimizing and false if maximizing that objective, and `epsilon` is the constraint. The first objective is the one that will be minimized according to: diff --git a/src/muse/demand_matching.py b/src/muse/demand_matching.py index d871f46b4..e4793655f 100644 --- a/src/muse/demand_matching.py +++ b/src/muse/demand_matching.py @@ -18,7 +18,7 @@ The basic algorithm proceeds as follows: -#. sort all costs :math:`C_{d, i}` accross both :math:`d` and :math:`i` +#. sort all costs :math:`C_{d, i}` across both :math:`d` and :math:`i` #. for each cost :math:`c_0` in order: @@ -57,7 +57,7 @@ def demand_matching( *constraints: DataArray, protected_dims: Optional[Set] = None, ) -> DataArray: - r"""Demand matching over heterogenous dimensions. + r"""Demand matching over heterogeneous dimensions. This algorithm enables demand matching while enforcing constraints on how much an asset can produce. Any set of dimensions can be matched. The algorithm is general @@ -86,7 +86,7 @@ def demand_matching( constraint. Hence, the solution will depend on the order in which the constraints are given. - #. sort all costs :math:`C_{d, m}` accross both :math:`d` and :math:`m` + #. sort all costs :math:`C_{d, m}` across both :math:`d` and :math:`m` #. for each cost :math:`c_0` in order: @@ -142,7 +142,7 @@ def demand_matching( #. Set :math:`\delta X = \max(0, \delta X - \delta X\prime)` - A more complex problem would see independant dimensions for each quantity. In that, + A more complex problem would see independent dimensions for each quantity. In that, case we can reduce to the original problem as shown here .. 
math:: X\prime_{d, d\prime, r, m, i} = \frac{M\prime_{r, m}}{M_r} \frac{D\prime_{d, d\prime}}{D_d} X_{d, i} A dimension could be shared by all quantities, in which case each point along that - dimension is treated as independant. + dimension is treated as independent. Similarly, if a dimension is shared only by the demand and a constraint but not by - the cost, then the problem can be reduced a set of problems independant along that + the cost, then the problem can be reduced to a set of problems independent along that direction. Arguments: demand: Demand to match with production. It should have the same physical units as `max_production`. - cost: Cost to minimize while fulfiling the demand. - *constraints: each item is a seperate constraint :math:`M_r`. + cost: Cost to minimize while fulfilling the demand. + *constraints: each item is a separate constraint :math:`M_r`. Returns: An array with the joint dimensionality of `max_production`, `cost`, and diff --git a/src/muse/demand_share.py b/src/muse/demand_share.py index 6a68fd267..0b99c977f 100644 --- a/src/muse/demand_share.py +++ b/src/muse/demand_share.py @@ -535,7 +535,7 @@ def new_consumption( ) -> xr.DataArray: r"""Computes share of the demand attributed to new agents. - The new agents service the demand that can be attributed specificaly to growth and + The new agents service the demand that can be attributed specifically to growth and that cannot be serviced by existing assets. In other words: .. math:: diff --git a/src/muse/errors.py b/src/muse/errors.py index 867145102..2e9ce4221 100644 --- a/src/muse/errors.py +++ b/src/muse/errors.py @@ -17,7 +17,7 @@ class UnitsConflictInCommodities(Exception): """Indicates that there is a conflcit in the commodity units between files.""" msg = """The units of “CommIn” “CommOut” and “GlobalCommodities” files must be the -same, including the casing. 
Check the consistency of the units across those three files. """ def __str__(self): @@ -71,7 +71,7 @@ class AgentWithNoAssetsInDemandShare(Exception): msg = """This error refers to an agent with no assets. To fix this error, check the -capacity assigment to the agents. One possibility is that you have decided not +capacity assignment to the agents. One possibility is that you have decided not to use "Retrofit" agents, as such you may have already removed them from the agent definition file and the file of technodata, the system TOML file should change the demand_share to "standard_demand" function in each subsector @@ -84,7 +84,7 @@ def __str__(self): class NoInteractionsFound(Exception): msg = """A network with no interactions has been found. This might be the case if there are no retrofit agents and yet a 'new_to_retro' network has been defined for a -particular sector. Asses the existance of both new and retrofit agents for all sectors +particular sector. Assess the existence of both new and retrofit agents for all sectors and remove the new_to_retro interacton network if it is not needed """ def __str__(self): diff --git a/src/muse/examples.py b/src/muse/examples.py index bd0f1f564..e52a7223b 100644 --- a/src/muse/examples.py +++ b/src/muse/examples.py @@ -16,7 +16,7 @@ python -m muse --help -The same models can be instanciated in a python script as follows: +The same models can be instantiated in a python script as follows: .. code-block:: Python diff --git a/src/muse/hooks.py b/src/muse/hooks.py index cf88b2635..eaca96c7f 100644 --- a/src/muse/hooks.py +++ b/src/muse/hooks.py @@ -19,9 +19,9 @@ from muse.registration import registrator INITIAL_ASSET_TRANSFORM: MutableMapping[Text, Callable] = {} -""" Tranform at the start of each step. """ +""" Transform at the start of each step. """ FINAL_ASSET_TRANSFORM: MutableMapping[Text, Callable] = {} -""" Tranform at the end of each step, including new assets. 
""" +""" Transform at the end of each step, including new assets. """ def housekeeping_factory(settings: Union[Text, Mapping] = "noop") -> Callable: @@ -149,7 +149,7 @@ def old_assets_only(old_assets: Dataset, new_assets: Dataset) -> Dataset: def merge_assets(old_assets: Dataset, new_assets: Dataset) -> Dataset: """Adds new assets to old along asset dimension. - New assets are assumed to be unequivalent to any old_assets. Indeed, + New assets are assumed to be nonequivalent to any old_assets. Indeed, it is expected that the asset dimension does not have coordinates (i.e. it is a combination of coordinates, such as technology and installation year). diff --git a/src/muse/investments.py b/src/muse/investments.py index af110928a..5970d8a19 100644 --- a/src/muse/investments.py +++ b/src/muse/investments.py @@ -195,7 +195,7 @@ def cliff_retirement_profile( rewritten as ``technical_life * n`` with ``n = int(protected // technical_life) + 1``. - We could just return an array where each year is repesented. Instead, to save + We could just return an array where each year is represented. Instead, to save memory, we return a compact view of the same where years where no change happens are removed. diff --git a/src/muse/mca.py b/src/muse/mca.py index ae742b909..0f6d862b0 100644 --- a/src/muse/mca.py +++ b/src/muse/mca.py @@ -368,7 +368,7 @@ def run(self) -> None: ) def calibrate_legacy_sectors(self): - """Run a calibration step in the lagacy sectors + """Run a calibration step in the legacy sectors Run historical years """ from copy import deepcopy diff --git a/src/muse/objectives.py b/src/muse/objectives.py index 729a167fd..cd9acee5d 100644 --- a/src/muse/objectives.py +++ b/src/muse/objectives.py @@ -481,7 +481,7 @@ def annual_levelized_cost_of_energy( """Annual cost of energy (LCOE) of technologies - not dependent on production. It needs to be used for trade agents where the actual service is unknown - It follows the `simpified LCOE` given by NREL. 
+ It follows the `simplified LCOE` given by NREL. Arguments: agent: The agent of interest @@ -542,7 +542,7 @@ def lifetime_levelized_cost_of_energy( ): """Levelized cost of energy (LCOE) of technologies over their lifetime. - It follows the `simpified LCOE` given by NREL. + It follows the `simplified LCOE` given by NREL. Arguments: agent: The agent of interest diff --git a/src/muse/outputs/cache.py b/src/muse/outputs/cache.py index 291798175..f657f09a3 100644 --- a/src/muse/outputs/cache.py +++ b/src/muse/outputs/cache.py @@ -15,7 +15,7 @@ cache_quantity(quantity_name=some_data) If the quantity has been set as something to cache, the data will be stored and, -eventually, save to disk after - possibly - agregating the data and removing those +eventually, save to disk after - possibly - aggregating the data and removing those entries corresponding to non-convergent investment attempts. This process of cleaning and aggregation is quantity specific. @@ -86,7 +86,7 @@ def cache_quantity( must be set, or directly called with any number of keyword arguments. In the former case, the matching between quantities and values to cached is done by the function 'match_quantities'. When used in combination with other decorators, care must be - taken to decide the order in which they are applied to make sure the approrpriate + taken to decide the order in which they are applied to make sure the appropriate output is cached. Note that if the quantity has NOT been selected to be cached when configuring the diff --git a/src/muse/production.py b/src/muse/production.py index 62710316f..109647cd5 100644 --- a/src/muse/production.py +++ b/src/muse/production.py @@ -1,7 +1,7 @@ """Various ways and means to compute production. Production is the amount of commodities produced by an asset. However, depending on the -context, it could be computed several ways. For instace, it can be obtained straight +context, it could be computed several ways. 
For instance, it can be obtained straight from the capacity of the asset. Or it can be obtained by matching for the same commodities with a set of assets. @@ -102,7 +102,7 @@ def maximum_production( ) -> xr.DataArray: """Production when running at full capacity. - *Full capacity* is limited by the utilitization factor. For more details, see + *Full capacity* is limited by the utilization factor. For more details, see :py:func:`muse.quantities.maximum_production`. """ from muse.quantities import maximum_production @@ -168,7 +168,7 @@ def costed_production( The assets are ranked according to their cost. The cost can be provided as an xarray, a callable creating an xarray, or as "alcoe". The asset with least cost are allowed to service the demand first, up to the maximum production. By default, the - mininum service is applied first. + minimum service is applied first. """ from muse.commodities import CommodityUsage, check_usage, is_pollutant diff --git a/src/muse/quantities.py b/src/muse/quantities.py index 843dbf7cc..dda878eae 100644 --- a/src/muse/quantities.py +++ b/src/muse/quantities.py @@ -439,7 +439,7 @@ def maximum_production(technologies: xr.Dataset, capacity: xr.DataArray, **filte shape is matched to `capacity` using `muse.utilities.broadcast_techs`. filters: keyword arguments are used to filter down the capacity and technologies. Filters not relevant to the quantities of interest, i.e. - filters that are not a dimension of `capacity` or `techologies`, are + filters that are not a dimension of `capacity` or `technologies`, are silently ignored. Return: `capacity * fixed_outputs * utilization_factor`, whittled down according to the @@ -511,7 +511,7 @@ def capacity_in_use( None, then no reduction is performed. filters: keyword arguments are used to filter down the capacity and technologies. Filters not relevant to the quantities of interest, i.e. 
- filters that are not a dimension of `capacity` or `techologies`, are + filters that are not a dimension of `capacity` or `technologies`, are silently ignored. Return: Capacity-in-use for each technology, whittled down by the filters. @@ -583,7 +583,7 @@ def costed_production( ) -> xr.DataArray: """Computes production from ranked assets. The assets are ranked according to their cost. The asset with least cost are allowed - to service the demand first, up to the maximum production. By default, the mininum + to service the demand first, up to the maximum production. By default, the minimum service is applied first. """ diff --git a/src/muse/readers/csv.py b/src/muse/readers/csv.py index b53348d40..f30f899d6 100644 --- a/src/muse/readers/csv.py +++ b/src/muse/readers/csv.py @@ -156,7 +156,7 @@ def read_technodata_timeslices(filename: Union[Text, Path]) -> xr.Dataset: def read_io_technodata(filename: Union[Text, Path]) -> xr.Dataset: - """Reads process inputs or ouputs. + """Reads process inputs or outputs. There are four axes: (technology, region, year, commodity) """ @@ -452,7 +452,7 @@ def read_timeslice_shares( ) -> xr.Dataset: """Reads sliceshare information into a xr.Dataset. - Additionaly, this function will try and recover the timeslice multi- index from a + Additionally, this function will try and recover the timeslice multi- index from a import file "Timeslices{sector}.csv" in the same directory as the timeslice shares. Pass `None` if this behaviour is not required. 
""" @@ -713,7 +713,7 @@ def read_regression_parameters(path: Union[Text, Path]) -> xr.Dataset: getLogger(__name__).info(f"Reading regression parameters from {path}.") table = pd.read_csv(path, float_precision="high", low_memory=False) - # Normalize clumn names + # Normalize column names table.columns.name = "commodity" table = table.rename( columns={ diff --git a/src/muse/readers/toml.py b/src/muse/readers/toml.py index 1ac7fc526..f3273981a 100644 --- a/src/muse/readers/toml.py +++ b/src/muse/readers/toml.py @@ -117,7 +117,7 @@ def format_paths( ): """Format paths passed to settings. - A setting is recongnized as a path if it's name ends in `_path`, `_file`, or `_dir`, + A setting is recognized as a path if it's name ends in `_path`, `_file`, or `_dir`, or if the associated value is text object and ends with `.csv, as well as settings called `path`. @@ -216,7 +216,7 @@ def format_paths( } def format(path: Text) -> Text: - if path.lower() in ("optional", "reqired"): + if path.lower() in ("optional", "required"): return path return format_path(path, **patterns) # type: ignore @@ -412,7 +412,7 @@ def read_ts_multiindex( The timeslices are read from ``timeslice_levels``. The levels (keyword) and slice (list of values) correspond to the level, slices and slice aggregates - defined inthe the ``timeslices`` section. + defined in the the ``timeslices`` section. >>> toml = \"\"\" ... ["timeslices"] @@ -505,12 +505,12 @@ def read_timeslices( settings: TOML dictionary. It should contain a ``timeslice_levels`` section. Otherwise, the timeslices will default to the global (finest) timeslices. timeslice: Finest timeslices. Defaults to the global in - :py:mod:`~muse.timeslices`. If using the default, then this funtion + :py:mod:`~muse.timeslices`. If using the default, then this function should be called *after* the timeslice module has been setup with a call to :py:func:`~muse.timeslice.setup_module`. transforms: Transforms from desired timeslices to the finest timeslice. 
Defaults to the global in :py:mod:`~muse.timeslices`. If using the default, - then this funtion should be called *after* the timeslice module has been + then this function should be called *after* the timeslice module has been setup with a call to :py:func:`~muse.timeslice.setup_module`. Returns: A xr.Dataset with the timeslice coordinates. diff --git a/src/muse/registration.py b/src/muse/registration.py index 4b62f7268..540626b22 100644 --- a/src/muse/registration.py +++ b/src/muse/registration.py @@ -55,7 +55,7 @@ def registrator( this function) will emit a standardized log-call. Example: - At it's simplest, creating a registrator and registrating happens by + At its simplest, creating a registrator and registering happens by first declaring a registry. >>> REGISTRY = {} diff --git a/src/muse/regressions.py b/src/muse/regressions.py index a60e59ee2..ef7ff756a 100644 --- a/src/muse/regressions.py +++ b/src/muse/regressions.py @@ -60,12 +60,12 @@ class Regression(Callable): """ Maps from input names to coefficient names Maps the coefficients names in the class to their names in the input data - tables. This class attribute must be overriden. + tables. This class attribute must be overridden. """ __regression__ = "" """ Name of the regression function. - This class attribute must be overriden. + This class attribute must be overridden. """ def __init__(self, interpolation: Text = "linear", base_year: int = 2010, **kwargs): @@ -250,7 +250,7 @@ def regression_functor( mappings: a dictionary mapping from the functions expected coefficients (e.g. a, b, c) to the name in the input csv data tables (.e.g. constant, GDPexp, GDPscale). - name: name by which the function is refered to in the input data table. + name: name by which the function is referred to in the input data table. 
""" from logging import getLogger diff --git a/src/muse/sectors/__init__.py b/src/muse/sectors/__init__.py index 499e11bae..94370517b 100644 --- a/src/muse/sectors/__init__.py +++ b/src/muse/sectors/__init__.py @@ -23,7 +23,7 @@ - :meth:`AbstractSector.factory`: Creates a sector from input data - :meth:`AbstractSector.next`: A function which takes a market (demand, supply, prices) and returns a market. What happens within could be anything, though it will - likely constists of dispatch and investment. + likely consists of dispatch and investment. New sectors can be registered with the MUSE input files using :func:`muse.sectors.register.register_sector`. diff --git a/src/muse/sectors/legacy_sector.py b/src/muse/sectors/legacy_sector.py index 2a859ba59..421dcc33f 100644 --- a/src/muse/sectors/legacy_sector.py +++ b/src/muse/sectors/legacy_sector.py @@ -257,20 +257,20 @@ def runprocessmodule(self, consumption, supplycost, supply, t): self.mode, ] - inouts = {"output_dir": self.output_dir, "sectors_dir": self.sectors_dir} + inputs = {"output_dir": self.output_dir, "sectors_dir": self.sectors_dir} if self.name == "Power": if self.mode == "Calibration": params += [self.market_iterative] - result = self.old_sector.power_calibration(*params, **inouts) + result = self.old_sector.power_calibration(*params, **inputs) self.mode = "Iteration" else: self.mode = "Iteration" params += [self.old_sector.instance, self.market_iterative, self.excess] - result = self.old_sector.runprocessmodule(*params, **inouts) + result = self.old_sector.runprocessmodule(*params, **inputs) else: params += [self.market_iterative, self.excess] - result = self.old_sector.runprocessmodule(*params, **inouts) + result = self.old_sector.runprocessmodule(*params, **inputs) self.old_sector.report(result, t[1], self.output_dir) @@ -427,7 +427,7 @@ def xarray_to_ndarray( def commodities_idx(sector, comm: Text) -> Sequence: - """Gets the indeces of the commodities involved in the processes of the + """Gets the 
indices of the commodities involved in the processes of the sector. Arguments: diff --git a/src/muse/sectors/sector.py b/src/muse/sectors/sector.py index e8be5df80..cb544ed62 100644 --- a/src/muse/sectors/sector.py +++ b/src/muse/sectors/sector.py @@ -148,7 +148,7 @@ def __init__( self.outputs: Callable = ( cast(Callable, ofactory()) if outputs is None else outputs ) - """A function for outputing data for post-mortem analysis.""" + """A function for outputting data for post-mortem analysis.""" self.supply_prod = ( supply_prod if supply_prod is not None else maximum_production ) diff --git a/src/muse/sectors/subsector.py b/src/muse/sectors/subsector.py index f0ba40571..dd088e044 100644 --- a/src/muse/sectors/subsector.py +++ b/src/muse/sectors/subsector.py @@ -46,7 +46,7 @@ def __init__( self.forecast = forecast self.name = name self.expand_market_prices = expand_market_prices - """Wether to expand prices to include destination region. + """Whether to expand prices to include destination region. If ``True``, the input market prices are expanded of the missing "dst_region" dimension by setting them to the maximum between the source and destination diff --git a/src/muse/timeslices.py b/src/muse/timeslices.py index f470f38fc..bae01ea11 100644 --- a/src/muse/timeslices.py +++ b/src/muse/timeslices.py @@ -150,7 +150,7 @@ def aggregate_transforms( settings: Optional[Union[Mapping, Text]] = None, timeslice: Optional[DataArray] = None, ) -> Dict[Tuple, ndarray]: - """Creates dictionay of transforms for aggregate levels. + """Creates dictionary of transforms for aggregate levels. The transforms are used to create the projectors towards the finest timeslice. diff --git a/src/muse/utilities.py b/src/muse/utilities.py index 1561a7de9..f1f1278b5 100644 --- a/src/muse/utilities.py +++ b/src/muse/utilities.py @@ -60,7 +60,7 @@ def reduce_assets( ) -> Union[xr.DataArray, xr.Dataset]: r"""Combine assets along given asset dimension. 
- This method simplifies combining assets accross multiple agents, or combining assets + This method simplifies combining assets across multiple agents, or combining assets across a given dimension. By default, it will sum together assets from the same region which have the same technology and the same installation date. In other words, assets are identified by the technology, installation year and region. The @@ -193,10 +193,10 @@ def broadcast_techs( capacity, are often flattened out with coordinates 'region', 'installed', and 'technology' represented in a single 'asset' dimension. This latter representation is sparse if not all combinations of 'region', 'installed', - and 'technology' are present, whereas the former represention makes it + and 'technology' are present, whereas the former representation makes it easier to select a subset of the same. - This function broadcast the first represention to the shape and coordinates + This function broadcast the first representation to the shape and coordinates of the second. Arguments: @@ -370,7 +370,7 @@ def lexical_comparison( no turning to integer.) Result: - An array of tuples which can subsquently be compared lexicographically. + An array of tuples which can subsequently be compared lexicographically. """ if order is None: order = [u for u in binsize.data_vars] @@ -466,7 +466,7 @@ def nametuple_to_dict(nametup: Union[Mapping, NamedTuple]) -> Mapping: def future_propagation( data: xr.DataArray, future: xr.DataArray, - threshhold: float = 1e-12, + threshold: float = 1e-12, dim: Text = "year", ) -> xr.DataArray: """Propagates values into the future. @@ -491,10 +491,10 @@ def future_propagation( ... 
) This function propagates into ``data`` values from ``future``, but only if those - values differed for the current year beyond a given threshhold: + values differed for the current year beyond a given threshold: >>> from muse.utilities import future_propagation - >>> future_propagation(data, future, threshhold=0.1) + >>> future_propagation(data, future, threshold=0.1) array([[ 0. , 1.2, 1.2, 1.2], [-5. , -4. , -3. , -2. ]]) @@ -502,20 +502,20 @@ def future_propagation( * year (year) ... 2020 2025 2030 2035 * fuel (fuel) >> future_propagation(data, future.sel(fuel="gas", drop=True), threshhold=0.1) + >>> future_propagation(data, future.sel(fuel="gas", drop=True), threshold=0.1) array([[ 0. , 1.2, 1.2, 1.2], [-5. , 1.2, 1.2, 1.2]]) Coordinates: * year (year) ... 2020 2025 2030 2035 * fuel (fuel) >> future_propagation(data, future.sel(fuel="coal", drop=True), threshhold=0.1) + >>> future_propagation(data, future.sel(fuel="coal", drop=True), threshold=0.1) array([[ 0. , -3.95, -3.95, -3.95], [-5. , -4. , -3. , -2. 
]]) @@ -535,7 +535,7 @@ def future_propagation( year = future[dim].values return data.where( np.logical_or( - data.year < year, np.abs(data.loc[{dim: year}] - future) < threshhold + data.year < year, np.abs(data.loc[{dim: year}] - future) < threshold ), future, ) diff --git a/tests/conftest.py b/tests/conftest.py index 6e3ceedbb..77a6c3edd 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -123,14 +123,14 @@ def pytest_collection_modifyitems(config, items): try: __import__("SGIModelData") except ImportError: - skip_sgi_data = mark.skip(reason="Test reqires private data") + skip_sgi_data = mark.skip(reason="Test requires private data") for item in items: if "sgidata" in item.keywords: item.add_marker(skip_sgi_data) try: __import__("muse_legacy") except ImportError: - skip_legacy = mark.skip(reason="Test reqires legacy code") + skip_legacy = mark.skip(reason="Test requires legacy code") for item in items: if "legacy" in item.keywords: item.add_marker(skip_legacy) diff --git a/tests/test_aggregoutput.py b/tests/test_aggregoutput.py index 7771d7584..b0e85b3d8 100644 --- a/tests/test_aggregoutput.py +++ b/tests/test_aggregoutput.py @@ -3,7 +3,7 @@ def test_aggregate_sector(): - """Test for aggregate_sector function check colum titles, number of + """Test for aggregate_sector function check column titles, number of agents/region/technologies and assets capacities.""" from pandas import DataFrame, concat @@ -72,8 +72,8 @@ def test_aggregate_sectors(): def test_aggregate_sector_manyregions(): - """Test for aggregate_sector function with two regions check colum titles, number of - agents/region/technologies and assets capacities.""" + """Test for aggregate_sector function with two regions check column titles, number + of agents/region/technologies and assets capacities.""" from muse.outputs.mca import _aggregate_sectors from pandas import DataFrame, concat diff --git a/tests/test_decisions.py b/tests/test_decisions.py index 9a024b854..fae36c25b 100644 --- 
a/tests/test_decisions.py +++ b/tests/test_decisions.py @@ -143,7 +143,7 @@ def test_single_objectives(objectives): assert actual.values == approx(-objectives.b.values) -# when developping/debugging, these few lines help setup the input for the +# when developing/debugging, these few lines help setup the input for the # different tests if __name__ == "main": # fmt: off diff --git a/tests/test_demand_share.py b/tests/test_demand_share.py index 3686dd8bf..642c68798 100644 --- a/tests/test_demand_share.py +++ b/tests/test_demand_share.py @@ -370,7 +370,7 @@ class Agent: assert set(result.dims) == set(market.consumption.dims) - {"year"} assert result.values == approx(0) - # Then try too litte capacity + # Then try too little capacity agents = [ Agent(0.5 * usa_stock.squeeze("region")), Agent(0.5 * asia_stock.squeeze("region")), diff --git a/tests/test_mca.py b/tests/test_mca.py index 8f69177c6..b82fac82c 100644 --- a/tests/test_mca.py +++ b/tests/test_mca.py @@ -28,7 +28,7 @@ def test_check_equilibrium(market: Dataset): def test_check_demand_fulfillment(market): - """Test for the demand fulfilment function of the MCA.""" + """Test for the demand fulfillment function of the MCA.""" from muse.mca import check_demand_fulfillment tolerance_unmet_demand = -0.1 diff --git a/tests/test_outputs.py b/tests/test_outputs.py index 8f76a7864..4c743a99b 100644 --- a/tests/test_outputs.py +++ b/tests/test_outputs.py @@ -572,7 +572,7 @@ def test_aggregate_cache(): from muse.outputs.cache import _aggregate_cache from pandas.testing import assert_frame_equal - quantity = "heigth" + quantity = "height" a = xr.DataArray(np.ones((3, 4, 5)), name=quantity) b = a.copy() @@ -605,7 +605,7 @@ def test_consolidate_quantity(newcapa_agent, retro_agent): sector = SimpleNamespace(name="IT", agents=[newcapa_agent, retro_agent]) agents = extract_agents_internal(sector) - quantity = "heigth" + quantity = "height" a = xr.DataArray( np.ones((3, 4, 5)), dims=("agent", "replacement", "asset"),