Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions src/input.rs
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,8 @@ pub mod agent;
pub use agent::read_agents;
pub mod commodity;
pub use commodity::read_commodities;
pub mod process;
pub use process::read_processes;
pub mod region;
pub use region::read_regions;

Expand Down
96 changes: 96 additions & 0 deletions src/input/process.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,96 @@
//! Code for reading process-related information from CSV files.
use crate::commodity::Commodity;
use crate::input::*;
use crate::process::Process;
use crate::time_slice::TimeSliceInfo;
use anyhow::Result;
use serde::Deserialize;
use std::collections::{HashMap, HashSet};
use std::ops::RangeInclusive;
use std::path::Path;
use std::rc::Rc;
pub mod availability;
use availability::read_process_availabilities;
pub mod flow;
use flow::read_process_flows;
pub mod pac;
use pac::read_process_pacs;
pub mod parameter;
use parameter::read_process_parameters;
pub mod region;
use region::read_process_regions;

/// Name of the CSV file listing processes and their descriptions
const PROCESSES_FILE_NAME: &str = "processes.csv";

/// Implement the `HasID` trait for the given type, returning its
/// `process_id` field as the ID.
macro_rules! define_process_id_getter {
    ($t:ty) => {
        impl HasID for $t {
            fn get_id(&self) -> &str {
                &self.process_id
            }
        }
    };
}
// Re-export so the submodules below (availability, flow, …) can invoke it
use define_process_id_getter;

/// Represents a row of the processes CSV file
#[derive(PartialEq, Debug, Deserialize)]
struct ProcessDescription {
    /// Unique identifier for the process
    id: Rc<str>,
    /// Human-readable description of the process
    description: String,
}
define_id_getter! {ProcessDescription}

/// Read process information from the specified CSV files.
///
/// # Arguments
///
/// * `model_dir` - Folder containing model configuration files
/// * `commodities` - Commodities for the model
/// * `region_ids` - All possible region IDs
/// * `time_slice_info` - Information about seasons and times of day
/// * `year_range` - The possible range of milestone years
///
/// # Returns
///
/// This function returns a map of processes, with the IDs as keys.
pub fn read_processes(
    model_dir: &Path,
    commodities: &HashMap<Rc<str>, Rc<Commodity>>,
    region_ids: &HashSet<Rc<str>>,
    time_slice_info: &TimeSliceInfo,
    year_range: &RangeInclusive<u32>,
) -> Result<HashMap<Rc<str>, Rc<Process>>> {
    let file_path = model_dir.join(PROCESSES_FILE_NAME);
    let mut descriptions = read_csv_id_file::<ProcessDescription>(&file_path)?;
    let process_ids: HashSet<Rc<str>> = descriptions.keys().cloned().collect();

    // Read the per-process auxiliary tables, each keyed by process ID
    let mut availabilities = read_process_availabilities(model_dir, &process_ids, time_slice_info)?;
    let mut flows = read_process_flows(model_dir, &process_ids, commodities)?;
    let mut pacs = read_process_pacs(model_dir, &process_ids, commodities, &flows)?;
    let mut parameters = read_process_parameters(model_dir, &process_ids, year_range)?;
    let mut regions = read_process_regions(model_dir, &process_ids, region_ids)?;

    // Assemble one Process per ID, moving data out of the per-table maps
    let mut processes = HashMap::with_capacity(process_ids.len());
    for id in process_ids {
        // Always present: process_ids was built from descriptions' keys
        let desc = descriptions.remove(&id).unwrap();

        // The readers above have already checked these exist for each process
        let parameter = parameters.remove(&id).unwrap();
        let process_regions = regions.remove(&id).unwrap();

        let process = Process {
            id: desc.id,
            description: desc.description,
            availabilities: availabilities.remove(&id).unwrap_or_default(),
            flows: flows.remove(&id).unwrap_or_default(),
            pacs: pacs.remove(&id).unwrap_or_default(),
            parameter,
            regions: process_regions,
        };

        processes.insert(id, process.into());
    }

    Ok(processes)
}
66 changes: 66 additions & 0 deletions src/input/process/availability.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,66 @@
//! Code for reading process availabilities CSV file
use super::define_process_id_getter;
use crate::input::*;
use crate::process::{LimitType, ProcessAvailability};
use crate::time_slice::TimeSliceInfo;
use anyhow::{ensure, Context, Result};
use itertools::Itertools;
use serde::Deserialize;
use std::collections::{HashMap, HashSet};
use std::path::Path;
use std::rc::Rc;

/// Name of the CSV file holding per-time-slice process availabilities
const PROCESS_AVAILABILITIES_FILE_NAME: &str = "process_availabilities.csv";

// `ProcessAvailability` is identified by its `process_id` field
define_process_id_getter! {ProcessAvailability}

/// Represents a row of the process availabilities CSV file
#[derive(PartialEq, Debug, Deserialize)]
struct ProcessAvailabilityRaw {
    /// ID of the process this availability applies to
    process_id: String,
    /// The kind of limit that `value` represents
    limit_type: LimitType,
    /// Time-slice selection string; resolved later via `TimeSliceInfo::get_selection`
    time_slice: String,
    /// Availability proportion; the custom deserialiser rejects zero values
    #[serde(deserialize_with = "deserialise_proportion_nonzero")]
    value: f64,
}

/// Read the availability of each process over time slices
pub fn read_process_availabilities(
    model_dir: &Path,
    process_ids: &HashSet<Rc<str>>,
    time_slice_info: &TimeSliceInfo,
) -> Result<HashMap<Rc<str>, Vec<ProcessAvailability>>> {
    let path = model_dir.join(PROCESS_AVAILABILITIES_FILE_NAME);
    let records = read_csv(&path)?;

    // Attach the file path to any error raised while processing the records
    read_process_availabilities_from_iter(records, process_ids, time_slice_info)
        .with_context(|| input_err_msg(&path))
}

/// Convert raw availability records into `ProcessAvailability` entries grouped
/// by process ID, validating process IDs and time-slice selections.
fn read_process_availabilities_from_iter<I>(
    iter: I,
    process_ids: &HashSet<Rc<str>>,
    time_slice_info: &TimeSliceInfo,
) -> Result<HashMap<Rc<str>, Vec<ProcessAvailability>>>
where
    I: Iterator<Item = ProcessAvailabilityRaw>,
{
    let availabilities = iter
        .map(|raw| -> Result<_> {
            // Resolve the raw time-slice string into a concrete selection
            let time_slice = time_slice_info.get_selection(&raw.time_slice)?;

            Ok(ProcessAvailability {
                process_id: raw.process_id,
                limit_type: raw.limit_type,
                time_slice,
                value: raw.value,
            })
        })
        .process_results(|iter| iter.into_id_map(process_ids))??;

    // into_id_map only admits known process IDs, so reaching the full count
    // means every process appeared at least once
    ensure!(
        availabilities.len() >= process_ids.len(),
        "Every process must have at least one availability period"
    );

    Ok(availabilities)
}
193 changes: 193 additions & 0 deletions src/input/process/flow.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,193 @@
//! Code for reading process flows file
use super::define_process_id_getter;
use crate::commodity::Commodity;
use crate::input::*;
use crate::process::{FlowType, ProcessFlow};
use anyhow::{Context, Result};
use itertools::Itertools;
use serde::Deserialize;
use std::collections::{HashMap, HashSet};
use std::path::Path;
use std::rc::Rc;

/// Name of the CSV file describing commodity flows for each process
const PROCESS_FLOWS_FILE_NAME: &str = "process_flows.csv";

// `ProcessFlow` is identified by its `process_id` field
define_process_id_getter! {ProcessFlow}

/// Represents a row of the process flows CSV file
#[derive(PartialEq, Debug, Deserialize)]
struct ProcessFlowRaw {
    /// ID of the process this flow belongs to
    process_id: String,
    /// ID of the commodity; validated against the known commodities when read
    commodity_id: String,
    /// Flow quantity
    flow: f64,
    /// Kind of flow; falls back to `FlowType::default()` when the column is absent
    #[serde(default)]
    flow_type: FlowType,
    /// Optional flow cost; treated as 0.0 when omitted
    flow_cost: Option<f64>,
}
define_process_id_getter! {ProcessFlowRaw}

/// Read process flows from a CSV file
pub fn read_process_flows(
    model_dir: &Path,
    process_ids: &HashSet<Rc<str>>,
    commodities: &HashMap<Rc<str>, Rc<Commodity>>,
) -> Result<HashMap<Rc<str>, Vec<ProcessFlow>>> {
    let path = model_dir.join(PROCESS_FLOWS_FILE_NAME);
    let records = read_csv(&path)?;

    // Attach the file path to any error raised while processing the records
    read_process_flows_from_iter(records, process_ids, commodities)
        .with_context(|| input_err_msg(&path))
}

/// Read 'ProcessFlowRaw' records from an iterator and convert them into 'ProcessFlow' records.
fn read_process_flows_from_iter<I>(
    iter: I,
    process_ids: &HashSet<Rc<str>>,
    commodities: &HashMap<Rc<str>, Rc<Commodity>>,
) -> Result<HashMap<Rc<str>, Vec<ProcessFlow>>>
where
    I: Iterator<Item = ProcessFlowRaw>,
{
    let flows = iter.map(|raw| -> Result<ProcessFlow> {
        // Look up the commodity; an unknown ID is a fatal input error
        let commodity = commodities
            .get(raw.commodity_id.as_str())
            .with_context(|| format!("{} is not a valid commodity ID", &raw.commodity_id))?;
        let commodity = Rc::clone(commodity);

        // A missing flow cost is interpreted as zero
        Ok(ProcessFlow {
            process_id: raw.process_id,
            commodity,
            flow: raw.flow,
            flow_type: raw.flow_type,
            flow_cost: raw.flow_cost.unwrap_or(0.0),
        })
    });

    // Group by (validated) process ID
    flows.process_results(|iter| iter.into_id_map(process_ids))?
}

#[cfg(test)]
mod test {
    use super::*;
    use crate::commodity::{CommodityCostMap, CommodityType};
    use crate::time_slice::TimeSliceLevel;

    /// Build a commodity map containing "commodity1" and "commodity2"
    fn make_commodities() -> HashMap<Rc<str>, Rc<Commodity>> {
        ["commodity1", "commodity2"]
            .into_iter()
            .map(|id| {
                let commodity = Commodity {
                    id: id.into(),
                    description: "Some description".into(),
                    kind: CommodityType::InputCommodity,
                    time_slice_level: TimeSliceLevel::Annual,
                    costs: CommodityCostMap::new(),
                    demand_by_region: HashMap::new(),
                };

                (Rc::clone(&commodity.id), commodity.into())
            })
            .collect()
    }

    /// Build a raw flow record with unit flow and a flow cost of 1.0
    fn raw_flow(process_id: &str, commodity_id: &str) -> ProcessFlowRaw {
        ProcessFlowRaw {
            process_id: process_id.into(),
            commodity_id: commodity_id.into(),
            flow: 1.0,
            flow_type: FlowType::Fixed,
            flow_cost: Some(1.0),
        }
    }

    /// Build the `ProcessFlow` we expect `raw_flow(...)` to convert into
    fn expected_flow(
        process_id: &str,
        commodities: &HashMap<Rc<str>, Rc<Commodity>>,
        commodity_id: &str,
    ) -> ProcessFlow {
        ProcessFlow {
            process_id: process_id.into(),
            commodity: commodities.get(commodity_id).unwrap().clone(),
            flow: 1.0,
            flow_type: FlowType::Fixed,
            flow_cost: 1.0,
        }
    }

    #[test]
    fn test_read_process_flows_from_iter_good() {
        let process_ids = ["id1".into(), "id2".into()].into_iter().collect();
        let commodities = make_commodities();

        let flows_raw = [
            raw_flow("id1", "commodity1"),
            raw_flow("id1", "commodity2"),
            raw_flow("id2", "commodity1"),
        ];

        let expected = HashMap::from([
            (
                "id1".into(),
                vec![
                    expected_flow("id1", &commodities, "commodity1"),
                    expected_flow("id1", &commodities, "commodity2"),
                ],
            ),
            (
                "id2".into(),
                vec![expected_flow("id2", &commodities, "commodity1")],
            ),
        ]);

        let actual =
            read_process_flows_from_iter(flows_raw.into_iter(), &process_ids, &commodities)
                .unwrap();
        assert_eq!(expected, actual);
    }

    #[test]
    fn test_read_process_flows_from_iter_bad_commodity_id() {
        // "commodity3" is not in the commodity map, so conversion must fail
        let process_ids = ["id1".into(), "id2".into()].into_iter().collect();
        let commodities = make_commodities();

        let flows_raw = [
            raw_flow("id1", "commodity1"),
            raw_flow("id1", "commodity3"),
        ];

        assert!(
            read_process_flows_from_iter(flows_raw.into_iter(), &process_ids, &commodities)
                .is_err()
        );
    }
}
Loading
Loading