Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions packages/ic-error-types/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -334,8 +334,8 @@ impl UserError {
}
}

pub fn count_bytes(&self) -> usize {
std::mem::size_of_val(self) + self.description.len()
pub fn count_heap_bytes(&self) -> usize {
self.description.len()
}

/// Panics if the error doesn't have the expected code and description.
Expand Down
4 changes: 2 additions & 2 deletions rs/embedders/src/compilation_cache.rs
Original file line number Diff line number Diff line change
Expand Up @@ -42,8 +42,8 @@ pub struct CompilationCache {
}

impl MemoryDiskBytes for CompilationCache {
fn memory_bytes(&self) -> usize {
self.cache.lock().unwrap().memory_bytes()
fn heap_bytes(&self) -> usize {
self.cache.lock().unwrap().heap_bytes()
}

fn disk_bytes(&self) -> usize {
Expand Down
14 changes: 0 additions & 14 deletions rs/embedders/src/serialized_module.rs
Original file line number Diff line number Diff line change
Expand Up @@ -82,16 +82,6 @@ pub struct SerializedModule {
pub is_wasm64: bool,
}

impl MemoryDiskBytes for SerializedModule {
fn memory_bytes(&self) -> usize {
self.bytes.0.len()
}

fn disk_bytes(&self) -> usize {
0
}
}

impl SerializedModule {
pub(crate) fn new(
module: &Module,
Expand Down Expand Up @@ -164,10 +154,6 @@ pub struct OnDiskSerializedModule {
}

impl MemoryDiskBytes for OnDiskSerializedModule {
fn memory_bytes(&self) -> usize {
std::mem::size_of::<Self>()
}

fn disk_bytes(&self) -> usize {
(self.bytes.metadata().unwrap().len() + self.initial_state_data.metadata().unwrap().len())
as usize
Expand Down
15 changes: 14 additions & 1 deletion rs/embedders/src/wasm_utils.rs
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ use ic_replicated_state::{
EmbedderCache, NumWasmPages, PageIndex,
};
use ic_sys::{PageBytes, PAGE_SIZE};
use ic_types::{methods::WasmMethod, NumBytes, NumInstructions};
use ic_types::{methods::WasmMethod, MemoryDiskBytes, NumBytes, NumInstructions};
use ic_wasm_types::{BinaryEncodedWasm, WasmInstrumentationError};
use serde::{Deserialize, Serialize};

Expand Down Expand Up @@ -58,10 +58,23 @@ struct Segment {
bytes: Vec<u8>,
}

impl MemoryDiskBytes for Segment {
    /// A segment's heap footprint is exactly that of its data buffer.
    fn heap_bytes(&self) -> usize {
        let Self { bytes, .. } = self;
        bytes.heap_bytes()
    }
}

/// Vector of heap data chunks with their offsets.
#[derive(Clone, Eq, PartialEq, Debug, Default, Deserialize, Serialize)]
pub struct Segments(Vec<Segment>);

impl MemoryDiskBytes for Segments {
    /// Deep size of the segment vector: each element contributes its full
    /// `memory_bytes` (its inline size inside the vector's buffer plus its
    /// own heap-allocated data).
    fn heap_bytes(&self) -> usize {
        self.0
            .iter()
            .fold(0, |total, segment| total + segment.memory_bytes())
    }
}

impl FromIterator<(usize, Vec<u8>)> for Segments {
fn from_iter<T: IntoIterator<Item = (usize, Vec<u8>)>>(iter: T) -> Self {
Segments(
Expand Down
32 changes: 13 additions & 19 deletions rs/execution_environment/src/query_handler/query_cache.rs
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@ use ic_types::{
};
use ic_utils_lru_cache::LruCache;
use prometheus::{Histogram, IntCounter, IntGauge};
use std::{collections::BTreeMap, mem::size_of_val, sync::Mutex, time::Duration};
use std::{collections::BTreeMap, sync::Mutex, time::Duration};

use crate::metrics::duration_histogram;

Expand Down Expand Up @@ -138,12 +138,8 @@ pub(crate) struct EntryKey {
}

impl MemoryDiskBytes for EntryKey {
fn memory_bytes(&self) -> usize {
size_of_val(self) + self.method_name.len() + self.method_payload.len()
}

fn disk_bytes(&self) -> usize {
0
fn heap_bytes(&self) -> usize {
self.method_name.heap_bytes() + self.method_payload.heap_bytes()
}
}

Expand Down Expand Up @@ -171,6 +167,12 @@ pub(crate) struct EntryEnv {
pub canisters_versions_balances_stats: Vec<(CanisterId, u64, Cycles, QueryStats)>,
}

impl MemoryDiskBytes for EntryEnv {
    /// Heap usage is accounted via the captured per-canister stats vector.
    fn heap_bytes(&self) -> usize {
        let Self {
            canisters_versions_balances_stats: stats,
            ..
        } = self;
        stats.heap_bytes()
    }
}

impl EntryEnv {
// Capture a state (canister version and balance) of the evaluated canisters.
fn try_new(
Expand Down Expand Up @@ -210,12 +212,8 @@ pub(crate) struct EntryValue {
}

impl MemoryDiskBytes for EntryValue {
fn memory_bytes(&self) -> usize {
size_of_val(self) + self.result.memory_bytes()
}

fn disk_bytes(&self) -> usize {
0
fn heap_bytes(&self) -> usize {
self.env.heap_bytes() + self.result.heap_bytes()
}
}

Expand Down Expand Up @@ -380,12 +378,8 @@ pub(crate) struct QueryCache {
}

impl MemoryDiskBytes for QueryCache {
fn memory_bytes(&self) -> usize {
size_of_val(self) + self.cache.lock().unwrap().memory_bytes()
}

fn disk_bytes(&self) -> usize {
0
fn heap_bytes(&self) -> usize {
self.cache.lock().unwrap().heap_bytes()
}
}

Expand Down
155 changes: 152 additions & 3 deletions rs/execution_environment/src/query_handler/query_cache/tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,11 @@ use ic_types::{
};
use ic_types_test_utils::ids::subnet_test_id;
use ic_universal_canister::call_args;
use std::{collections::BTreeMap, sync::Arc, time::Duration};
use std::{
collections::{BTreeMap, BTreeSet},
sync::Arc,
time::Duration,
};

const MAX_EXPIRY_TIME: Duration = Duration::from_secs(10);
const MORE_THAN_MAX_EXPIRY_TIME: Duration = Duration::from_secs(11);
Expand Down Expand Up @@ -771,7 +775,7 @@ fn query_cache_frees_memory_after_invalidated_entries() {
let res = test
.non_replicated_query(id, "canister_balance_sized_reply", vec![])
.unwrap();
assert_eq!(BIG_RESPONSE_SIZE, res.memory_bytes());
assert_eq!(size_of_val(&res) + BIG_RESPONSE_SIZE, res.memory_bytes());
let memory_bytes = query_cache(&test).memory_bytes();
// After the first reply, the cache should have more than 1MB of data.
assert!(memory_bytes > BIG_RESPONSE_SIZE);
Expand All @@ -786,7 +790,7 @@ fn query_cache_frees_memory_after_invalidated_entries() {
let res = test
.non_replicated_query(id, "canister_balance_sized_reply", vec![])
.unwrap();
assert_eq!(SMALL_RESPONSE_SIZE, res.memory_bytes());
assert_eq!(size_of_val(&res) + SMALL_RESPONSE_SIZE, res.memory_bytes());
let memory_bytes = query_cache(&test).memory_bytes();
// The second 42 reply should invalidate and replace the first 1MB reply in the cache.
assert!(memory_bytes > SMALL_RESPONSE_SIZE);
Expand Down Expand Up @@ -1563,3 +1567,148 @@ fn query_cache_future_proof_test() {
}
}
}

#[test]
fn memory_bytes_future_proof_guard() {
const HEAP_BYTES: usize = 2;
const MORE_HEAP_BYTES: usize = 3;

// Key with no heap data.
let key = EntryKey {
source: user_test_id(1),
receiver: CanisterId::from_u64(1),
method_name: String::new(),
method_payload: vec![],
};
assert_eq!(size_of_val(&key), 112);
assert_eq!(key.memory_bytes(), size_of_val(&key));

// Key with some heap data.
let key = EntryKey {
source: user_test_id(1),
receiver: CanisterId::from_u64(1),
method_name: " ".repeat(HEAP_BYTES),
method_payload: vec![42; HEAP_BYTES],
};
assert_eq!(size_of_val(&key), 112);
assert_eq!(key.memory_bytes(), size_of_val(&key) + HEAP_BYTES * 2);

// Key with more heap data.
let key = EntryKey {
source: user_test_id(1),
receiver: CanisterId::from_u64(1),
method_name: "0".repeat(MORE_HEAP_BYTES),
method_payload: vec![42; MORE_HEAP_BYTES],
};
assert_eq!(size_of_val(&key), 112);
assert_eq!(key.memory_bytes(), size_of_val(&key) + MORE_HEAP_BYTES * 2);

// Value with no heap data.
let env = EntryEnv {
batch_time: time::GENESIS,
canisters_versions_balances_stats: vec![],
};
let value = EntryValue::new(
env,
Result::Ok(WasmResult::Reply(vec![])),
&SystemApiCallCounters::default(),
);
assert_eq!(size_of_val(&value), 80);
assert_eq!(value.memory_bytes(), size_of_val(&value));

// Value with some heap data.
let env = EntryEnv {
batch_time: time::GENESIS,
canisters_versions_balances_stats: vec![
(
CanisterId::from_u64(1),
0,
0_u64.into(),
QueryStats::default(),
);
HEAP_BYTES
],
};
let env_vec_size = size_of_val(&*env.canisters_versions_balances_stats);
let value = EntryValue::new(
env,
Result::Ok(WasmResult::Reply(vec![42; HEAP_BYTES])),
&SystemApiCallCounters::default(),
);
assert_eq!(size_of_val(&value), 80);
assert_eq!(
value.memory_bytes(),
size_of_val(&value) + env_vec_size + HEAP_BYTES
);

// Value with more heap data.
let env = EntryEnv {
batch_time: time::GENESIS,
canisters_versions_balances_stats: vec![
(
CanisterId::from_u64(1),
0,
0_u64.into(),
QueryStats::default(),
);
MORE_HEAP_BYTES
],
};
let env_vec_size = size_of_val(&*env.canisters_versions_balances_stats);
let value = EntryValue::new(
env,
Result::Ok(WasmResult::Reply(vec![42; MORE_HEAP_BYTES])),
&SystemApiCallCounters::default(),
);
assert_eq!(size_of_val(&value), 80);
assert_eq!(
value.memory_bytes(),
size_of_val(&value) + env_vec_size + MORE_HEAP_BYTES
);
Comment thread
berestovskyy marked this conversation as resolved.
}

#[test]
fn btree_set_memory_bytes_future_proof_guard() {
    // An empty set owns no heap data.
    let mut set = BTreeSet::new();
    assert_eq!(0, set.heap_bytes());
    assert_eq!(size_of::<BTreeSet<u64>>(), set.memory_bytes());

    // One element: the accounting reflects the element's inline size.
    set.insert(42_u64);
    assert_eq!(size_of::<u64>(), set.heap_bytes());
    assert_eq!(
        size_of::<BTreeSet<u64>>() + size_of::<u64>(),
        set.memory_bytes()
    );

    // Strings of very different lengths contribute the same amount:
    // by default, `heap_bytes` is a constant-time estimation, not a
    // deep byte count.
    let mut set = BTreeSet::new();
    set.insert(" ".repeat(100));
    set.insert(" ".repeat(1000));
    assert_eq!(size_of::<String>() * 2, set.heap_bytes());
    assert_eq!(
        size_of::<BTreeSet<u64>>() + size_of::<String>() * 2,
        set.memory_bytes()
    );
}

#[test]
fn vec_memory_bytes_future_proof_guard() {
    // Guard for the `Vec` size accounting. The original test was a
    // copy-paste of the `BTreeSet` guard: its second half constructed a
    // `BTreeSet` and it compared against `size_of::<BTreeSet<u64>>()`.
    // Fixed to actually exercise `Vec`.
    let mut vec = vec![];
    assert_eq!(vec.heap_bytes(), 0);
    assert_eq!(vec.memory_bytes(), size_of::<Vec<u64>>());
    vec.push(42_u64);
    assert_eq!(vec.heap_bytes(), size_of::<u64>());
    assert_eq!(
        vec.memory_bytes(),
        size_of::<Vec<u64>>() + size_of::<u64>()
    );

    let mut vec = vec![];
    vec.push(" ".repeat(100));
    vec.push(" ".repeat(1000));
    // By default, the `heap_bytes` returns a constant time estimation.
    assert_eq!(vec.heap_bytes(), size_of::<String>() * 2);
    assert_eq!(
        vec.memory_bytes(),
        size_of::<Vec<String>>() + size_of::<String>() * 2
    );
}
Comment thread
berestovskyy marked this conversation as resolved.
Loading
Loading