From c60512f5bfecea022e07700aa18fa8d5ee72c55c Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Fri, 6 Sep 2024 12:06:14 -0700 Subject: [PATCH 1/2] Warn against `clippy::cast_possible_truncation` in Wasmtime This commit explicitly enables the `clippy::cast_possible_truncation` lint in Clippy for just the `wasmtime::runtime` module. This does not enable it for the entire workspace since it's a very noisy lint and in general has a low signal value. For the domain that `wasmtime::runtime` is working in, however, this is a much more useful lint. We in general want to be very careful about casting between `usize`, `u32`, and `u64` and the purpose of this module-targeted lint is to help with just that. I was inspired to do this after reading over #9206: especially when refactoring code and changing types, I think it would be useful to have flagged any locations where truncation may now happen but previously was not occurring. The failure mode for this lint is that panics might be introduced where truncation is explicitly intended. Most of the time though this isn't actually desired so the more practical consequence of this lint is to probably slow down wasmtime ever so slightly and bloat it ever so slightly by having a few more checks in a few places. This is likely best addressed in a more comprehensive manner, however, rather than specifically for just this one case. This problem isn't unique to just casts, but to many other forms of `.unwrap()` for example. 
--- crates/wasmtime/src/runtime.rs | 28 +++++++++++++++++++ .../src/runtime/component/func/typed.rs | 10 +++---- .../src/runtime/component/resource_table.rs | 2 +- .../wasmtime/src/runtime/component/values.rs | 12 ++++---- crates/wasmtime/src/runtime/coredump.rs | 18 ++++++------ crates/wasmtime/src/runtime/debug.rs | 16 +++++++---- crates/wasmtime/src/runtime/instantiate.rs | 7 +++-- crates/wasmtime/src/runtime/trap.rs | 6 ++-- crates/wasmtime/src/runtime/vm/component.rs | 2 ++ crates/wasmtime/src/runtime/vm/cow.rs | 16 +++++++---- crates/wasmtime/src/runtime/vm/gc/enabled.rs | 12 ++++++++ .../src/runtime/vm/gc/enabled/arrayref.rs | 9 +++--- .../src/runtime/vm/gc/enabled/free_list.rs | 4 +-- .../src/runtime/vm/gc/enabled/structref.rs | 9 +++--- crates/wasmtime/src/runtime/vm/instance.rs | 8 ++++-- .../instance/allocator/pooling/memory_pool.rs | 3 +- .../allocator/pooling/unix_stack_pool.rs | 3 +- crates/wasmtime/src/runtime/vm/libcalls.rs | 1 + crates/wasmtime/src/runtime/vm/memory.rs | 3 +- .../src/runtime/vm/sys/unix/signals.rs | 2 ++ 20 files changed, 118 insertions(+), 53 deletions(-) diff --git a/crates/wasmtime/src/runtime.rs b/crates/wasmtime/src/runtime.rs index 0d8eaca2da2c..04cd87d3a53a 100644 --- a/crates/wasmtime/src/runtime.rs +++ b/crates/wasmtime/src/runtime.rs @@ -1,3 +1,31 @@ +// Wasmtime's runtime has lots of fiddly bits where we're doing operations like +// casting between wasm i32/i64 and host `usize` values. There's also in general +// just lots of pieces of low-level manipulation of memory and internals of VM +// runtime state. To help keep all the integer casts correct be a bit more +// strict than the default settings to help weed out bugs ahead of time. +// +// This inevitably leads to wordier code than might otherwise be used because, +// for example, `u64 as usize` is warned against and will be an error on CI. 
+// This happens pretty frequently and needs to be replaced with `val.try_into()` +// or `usize::try_from(val)` where the error is handled. In some cases the +// correct thing to do is to `.unwrap()` the error to indicate a fatal mistake, +// but in some cases the correct thing is to propagate the error. +// +// Some niche cases that explicitly want truncation are recommended to have a +// function along the lines of +// +// #[allow(clippy::cast_possible_truncation)] +// fn truncate_i32_to_i8(a: i32) -> i8 { a as i8 } +// +// as this explicitly indicates the intent of truncation is desired. Other +// locations should use fallible conversions. +// +// If performance is absolutely critical then it's recommended to use `#[allow]` +// with a comment indicating why performance is critical as well as a short +// explanation of why truncation shouldn't be happening at runtime. This +// situation should be pretty rare though. +#![warn(clippy::cast_possible_truncation)] + #[macro_use] pub(crate) mod func; diff --git a/crates/wasmtime/src/runtime/component/func/typed.rs b/crates/wasmtime/src/runtime/component/func/typed.rs index ec1df7b3afc1..a2ab99c55ba8 100644 --- a/crates/wasmtime/src/runtime/component/func/typed.rs +++ b/crates/wasmtime/src/runtime/component/func/typed.rs @@ -837,7 +837,7 @@ macro_rules! 
integers { unsafe impl Lift for $primitive { #[inline] - #[allow(trivial_numeric_casts)] + #[allow(trivial_numeric_casts, clippy::cast_possible_truncation)] fn lift(_cx: &mut LiftContext<'_>, ty: InterfaceType, src: &Self::Lower) -> Result { debug_assert!(matches!(ty, InterfaceType::$ty)); Ok(src.$get() as $primitive) @@ -1110,8 +1110,8 @@ unsafe impl Lower for str { debug_assert!(offset % (Self::ALIGN32 as usize) == 0); let (ptr, len) = lower_string(cx, self)?; // FIXME: needs memory64 handling - *cx.get(offset + 0) = (ptr as i32).to_le_bytes(); - *cx.get(offset + 4) = (len as i32).to_le_bytes(); + *cx.get(offset + 0) = u32::try_from(ptr).unwrap().to_le_bytes(); + *cx.get(offset + 4) = u32::try_from(len).unwrap().to_le_bytes(); Ok(()) } } @@ -1457,8 +1457,8 @@ where }; debug_assert!(offset % (Self::ALIGN32 as usize) == 0); let (ptr, len) = lower_list(cx, elem, self)?; - *cx.get(offset + 0) = (ptr as i32).to_le_bytes(); - *cx.get(offset + 4) = (len as i32).to_le_bytes(); + *cx.get(offset + 0) = u32::try_from(ptr).unwrap().to_le_bytes(); + *cx.get(offset + 4) = u32::try_from(len).unwrap().to_le_bytes(); Ok(()) } } diff --git a/crates/wasmtime/src/runtime/component/resource_table.rs b/crates/wasmtime/src/runtime/component/resource_table.rs index 22a72fb3fee2..2314a970413a 100644 --- a/crates/wasmtime/src/runtime/component/resource_table.rs +++ b/crates/wasmtime/src/runtime/component/resource_table.rs @@ -161,7 +161,7 @@ impl ResourceTable { fn push_(&mut self, e: TableEntry) -> Result { if let Some(free) = self.pop_free_list() { self.entries[free] = Entry::Occupied { entry: e }; - Ok(free as u32) + Ok(free.try_into().unwrap()) } else { let ix = self .entries diff --git a/crates/wasmtime/src/runtime/component/values.rs b/crates/wasmtime/src/runtime/component/values.rs index 0221ea50f847..a57e783a6e94 100644 --- a/crates/wasmtime/src/runtime/component/values.rs +++ b/crates/wasmtime/src/runtime/component/values.rs @@ -187,11 +187,11 @@ impl Val { let u32_count = 
cx.types.canonical_abi(&ty).flat_count(usize::MAX).unwrap(); let ty = &cx.types[i]; let mut flags = Vec::new(); - for i in 0..u32_count { + for i in 0..u32::try_from(u32_count).unwrap() { push_flags( ty, &mut flags, - (i as u32) * 32, + i * 32, u32::lift(cx, InterfaceType::U32, next(src))?, ); } @@ -480,8 +480,8 @@ impl Val { let ty = &cx.types[ty]; let (ptr, len) = lower_list(cx, ty.element, values)?; // FIXME: needs memory64 handling - *cx.get(offset + 0) = (ptr as i32).to_le_bytes(); - *cx.get(offset + 4) = (len as i32).to_le_bytes(); + *cx.get(offset + 0) = u32::try_from(ptr).unwrap().to_le_bytes(); + *cx.get(offset + 4) = u32::try_from(len).unwrap().to_le_bytes(); Ok(()) } (InterfaceType::List(_), _) => unexpected(ty, self), @@ -947,7 +947,7 @@ fn get_enum_discriminant(ty: &TypeEnum, n: &str) -> Result { ty.names .get_index_of(n) .ok_or_else(|| anyhow::anyhow!("enum variant name `{n}` is not valid")) - .map(|i| i as u32) + .map(|i| i.try_into().unwrap()) } fn get_variant_discriminant<'a>( @@ -958,7 +958,7 @@ fn get_variant_discriminant<'a>( .cases .get_full(name) .ok_or_else(|| anyhow::anyhow!("unknown variant case: `{name}`"))?; - Ok((i as u32, ty)) + Ok((i.try_into().unwrap(), ty)) } fn next<'a>(src: &mut Iter<'a, ValRaw>) -> &'a ValRaw { diff --git a/crates/wasmtime/src/runtime/coredump.rs b/crates/wasmtime/src/runtime/coredump.rs index 0d20a2067aad..fe6fa4377e6e 100644 --- a/crates/wasmtime/src/runtime/coredump.rs +++ b/crates/wasmtime/src/runtime/coredump.rs @@ -137,16 +137,18 @@ impl WasmCoreDump { // to balance these conflicting desires, we break the memory up // into reasonably-sized chunks and then trim runs of zeroes // from the start and end of each chunk. 
- const CHUNK_SIZE: u32 = 4096; - for (i, chunk) in mem - .data(&store) - .chunks_exact(CHUNK_SIZE as usize) - .enumerate() - { + const CHUNK_SIZE: usize = 4096; + for (i, chunk) in mem.data(&store).chunks_exact(CHUNK_SIZE).enumerate() { if let Some(start) = chunk.iter().position(|byte| *byte != 0) { let end = chunk.iter().rposition(|byte| *byte != 0).unwrap() + 1; - let offset = (i as u32) * CHUNK_SIZE + (start as u32); - let offset = wasm_encoder::ConstExpr::i32_const(offset as i32); + let offset = i * CHUNK_SIZE + start; + let offset = if ty.is_64() { + let offset = u64::try_from(offset).unwrap(); + wasm_encoder::ConstExpr::i64_const(offset as i64) + } else { + let offset = u32::try_from(offset).unwrap(); + wasm_encoder::ConstExpr::i32_const(offset as i32) + }; data.active(memory_idx, &offset, chunk[start..end].iter().copied()); } } diff --git a/crates/wasmtime/src/runtime/debug.rs b/crates/wasmtime/src/runtime/debug.rs index b3a5d94ddb64..6636e19b56e0 100644 --- a/crates/wasmtime/src/runtime/debug.rs +++ b/crates/wasmtime/src/runtime/debug.rs @@ -63,8 +63,11 @@ fn relocate_dwarf_sections(bytes: &mut [u8], code_region: (*const u8, usize)) -> } for (offset, value) in relocations { - let (loc, _) = object::from_bytes_mut::>(&mut bytes[offset as usize..]) - .map_err(|()| anyhow!("invalid dwarf relocations"))?; + let (loc, _) = offset + .try_into() + .ok() + .and_then(|offset| object::from_bytes_mut::>(&mut bytes[offset..]).ok()) + .ok_or_else(|| anyhow!("invalid dwarf relocations"))?; loc.set(NE, value); } Ok(()) @@ -126,7 +129,8 @@ fn convert_object_elf_to_loadable_file( let text_range = match sections.section_by_name(e, b".text") { Some((i, text)) => { let range = text.file_range(e); - let off = header.e_shoff.get(e) as usize + i.0 * header.e_shentsize.get(e) as usize; + let e_shoff = usize::try_from(header.e_shoff.get(e)).unwrap(); + let off = e_shoff + i.0 * header.e_shentsize.get(e) as usize; let section: &mut SectionHeader64 = object::from_bytes_mut(&mut 
bytes[off..]).unwrap().0; @@ -160,6 +164,8 @@ fn convert_object_elf_to_loadable_file( let header: &mut FileHeader64 = object::from_bytes_mut(bytes).unwrap().0; header.e_type.set(e, ET_DYN); header.e_phoff.set(e, ph_off as u64); - header.e_phentsize.set(e, e_phentsize as u16); - header.e_phnum.set(e, e_phnum as u16); + header + .e_phentsize + .set(e, u16::try_from(e_phentsize).unwrap()); + header.e_phnum.set(e, u16::try_from(e_phnum).unwrap()); } diff --git a/crates/wasmtime/src/runtime/instantiate.rs b/crates/wasmtime/src/runtime/instantiate.rs index a8abc8147c50..e0e7ae492141 100644 --- a/crates/wasmtime/src/runtime/instantiate.rs +++ b/crates/wasmtime/src/runtime/instantiate.rs @@ -274,9 +274,12 @@ impl CompiledModule { .meta .dwarf .binary_search_by_key(&(id as u8), |(id, _)| *id) - .map(|i| { + .ok() + .and_then(|i| { let (_, range) = &self.meta.dwarf[i]; - &self.code_memory().dwarf()[range.start as usize..range.end as usize] + let start = range.start.try_into().ok()?; + let end = range.end.try_into().ok()?; + self.code_memory().dwarf().get(start..end) }) .unwrap_or(&[]); Ok(EndianSlice::new(data, gimli::LittleEndian)) diff --git a/crates/wasmtime/src/runtime/trap.rs b/crates/wasmtime/src/runtime/trap.rs index 7f159a7e18c0..649c09152f00 100644 --- a/crates/wasmtime/src/runtime/trap.rs +++ b/crates/wasmtime/src/runtime/trap.rs @@ -4,9 +4,7 @@ use crate::prelude::*; use crate::store::StoreOpaque; use crate::{AsContext, Module}; use core::fmt; -use wasmtime_environ::{ - demangle_function_name, demangle_function_name_or_index, EntityRef, FilePos, -}; +use wasmtime_environ::{demangle_function_name, demangle_function_name_or_index, FilePos}; /// Representation of a WebAssembly trap and what caused it to occur. 
/// @@ -451,7 +449,7 @@ impl FrameInfo { text_offset, ); let index = compiled_module.module().func_index(index); - let func_index = index.index() as u32; + let func_index = index.as_u32(); let func_name = compiled_module.func_name(index).map(|s| s.to_string()); // In debug mode for now assert that we found a mapping for `pc` within diff --git a/crates/wasmtime/src/runtime/vm/component.rs b/crates/wasmtime/src/runtime/vm/component.rs index a9847355c49b..96c7e4db4173 100644 --- a/crates/wasmtime/src/runtime/vm/component.rs +++ b/crates/wasmtime/src/runtime/vm/component.rs @@ -23,6 +23,8 @@ use sptr::Strict; use wasmtime_environ::component::*; use wasmtime_environ::{HostPtr, PrimaryMap, VMSharedTypeIndex}; +#[allow(clippy::cast_possible_truncation)] // it's intended this is truncated on + // 32-bit platforms const INVALID_PTR: usize = 0xdead_dead_beef_beef_u64 as usize; mod libcalls; diff --git a/crates/wasmtime/src/runtime/vm/cow.rs b/crates/wasmtime/src/runtime/vm/cow.rs index ded449f8e340..a2d9f1afb77d 100644 --- a/crates/wasmtime/src/runtime/vm/cow.rs +++ b/crates/wasmtime/src/runtime/vm/cow.rs @@ -72,10 +72,13 @@ impl MemoryImage { data: &[u8], mmap: Option<&MmapVec>, ) -> Result> { + let assert_page_aligned = |val: usize| { + assert_eq!(val % (page_size as usize), 0); + }; // Sanity-check that various parameters are page-aligned. 
let len = data.len(); assert_eq!(offset % u64::from(page_size), 0); - assert_eq!((len as u32) % page_size, 0); + assert_page_aligned(len); let linear_memory_offset = match usize::try_from(offset) { Ok(offset) => offset, Err(_) => return Ok(None), @@ -101,10 +104,10 @@ impl MemoryImage { let data_start = data.as_ptr() as usize; let data_end = data_start + data.len(); assert!(start <= data_start && data_end <= end); - assert_eq!((start as u32) % page_size, 0); - assert_eq!((data_start as u32) % page_size, 0); - assert_eq!((data_end as u32) % page_size, 0); - assert_eq!((mmap.original_offset() as u32) % page_size, 0); + assert_page_aligned(start); + assert_page_aligned(data_start); + assert_page_aligned(data_end); + assert_page_aligned(mmap.original_offset()); #[cfg(feature = "std")] if let Some(file) = mmap.original_file() { @@ -167,7 +170,8 @@ impl ModuleMemoryImages { _ => return Ok(None), }; let mut memories = PrimaryMap::with_capacity(map.len()); - let page_size = crate::runtime::vm::host_page_size() as u32; + let page_size = crate::runtime::vm::host_page_size(); + let page_size = u32::try_from(page_size).unwrap(); for (memory_index, init) in map { // mmap-based-initialization only works for defined memories with a // known starting point of all zeros, so bail out if the mmeory is diff --git a/crates/wasmtime/src/runtime/vm/gc/enabled.rs b/crates/wasmtime/src/runtime/vm/gc/enabled.rs index ca8ab8859a87..6708a509cc0a 100644 --- a/crates/wasmtime/src/runtime/vm/gc/enabled.rs +++ b/crates/wasmtime/src/runtime/vm/gc/enabled.rs @@ -27,3 +27,15 @@ const DEFAULT_GC_HEAP_CAPACITY: usize = 1 << 19; /// The default GC heap capacity for miri: 64KiB. #[cfg(miri)] const DEFAULT_GC_HEAP_CAPACITY: usize = 1 << 16; + +// Explicit methods with `#[allow]` to clearly indicate that truncation is +// desired when used. 
+#[allow(clippy::cast_possible_truncation)] +fn truncate_i32_to_i16(a: i32) -> i16 { + a as i16 +} + +#[allow(clippy::cast_possible_truncation)] +fn truncate_i32_to_i8(a: i32) -> i8 { + a as i8 +} diff --git a/crates/wasmtime/src/runtime/vm/gc/enabled/arrayref.rs b/crates/wasmtime/src/runtime/vm/gc/enabled/arrayref.rs index d6d192226e00..6385b5e0ec37 100644 --- a/crates/wasmtime/src/runtime/vm/gc/enabled/arrayref.rs +++ b/crates/wasmtime/src/runtime/vm/gc/enabled/arrayref.rs @@ -1,3 +1,4 @@ +use super::{truncate_i32_to_i16, truncate_i32_to_i8}; use crate::{ prelude::*, runtime::vm::{GcHeap, GcStore, VMGcRef}, @@ -193,8 +194,8 @@ impl VMArrayRef { let offset = layout.elem_offset(index, ty.byte_size_in_gc_heap()); let mut data = store.unwrap_gc_store_mut().gc_object_data(self.as_gc_ref()); match val { - Val::I32(i) if ty.is_i8() => data.write_i8(offset, i as i8), - Val::I32(i) if ty.is_i16() => data.write_i16(offset, i as i16), + Val::I32(i) if ty.is_i8() => data.write_i8(offset, truncate_i32_to_i8(i)), + Val::I32(i) if ty.is_i16() => data.write_i16(offset, truncate_i32_to_i16(i)), Val::I32(i) => data.write_i32(offset, i), Val::I64(i) => data.write_i64(offset, i), Val::F32(f) => data.write_u32(offset, f), @@ -279,11 +280,11 @@ impl VMArrayRef { Val::I32(i) if ty.is_i8() => store .gc_store_mut()? .gc_object_data(self.as_gc_ref()) - .write_i8(offset, i as i8), + .write_i8(offset, truncate_i32_to_i8(i)), Val::I32(i) if ty.is_i16() => store .gc_store_mut()? .gc_object_data(self.as_gc_ref()) - .write_i16(offset, i as i16), + .write_i16(offset, truncate_i32_to_i16(i)), Val::I32(i) => store .gc_store_mut()? 
.gc_object_data(self.as_gc_ref()) diff --git a/crates/wasmtime/src/runtime/vm/gc/enabled/free_list.rs b/crates/wasmtime/src/runtime/vm/gc/enabled/free_list.rs index 8e62253896ba..85e766823d77 100644 --- a/crates/wasmtime/src/runtime/vm/gc/enabled/free_list.rs +++ b/crates/wasmtime/src/runtime/vm/gc/enabled/free_list.rs @@ -14,8 +14,8 @@ pub(crate) struct FreeList { /// Our minimum and maximum supported alignment. Every allocation is aligned to /// this. -const ALIGN_USIZE: usize = 8; -const ALIGN_U32: u32 = ALIGN_USIZE as u32; +const ALIGN_U32: u32 = 8; +const ALIGN_USIZE: usize = ALIGN_U32 as usize; /// Our minimum allocation size. const MIN_BLOCK_SIZE: u32 = 24; diff --git a/crates/wasmtime/src/runtime/vm/gc/enabled/structref.rs b/crates/wasmtime/src/runtime/vm/gc/enabled/structref.rs index d0bd18d43b28..db773b7f9cda 100644 --- a/crates/wasmtime/src/runtime/vm/gc/enabled/structref.rs +++ b/crates/wasmtime/src/runtime/vm/gc/enabled/structref.rs @@ -1,3 +1,4 @@ +use super::{truncate_i32_to_i16, truncate_i32_to_i8}; use crate::{ prelude::*, runtime::vm::{GcHeap, GcStore, VMGcRef}, @@ -187,8 +188,8 @@ impl VMStructRef { let offset = layout.fields[field]; let mut data = store.gc_store_mut()?.gc_object_data(self.as_gc_ref()); match val { - Val::I32(i) if ty.is_i8() => data.write_i8(offset, i as i8), - Val::I32(i) if ty.is_i16() => data.write_i16(offset, i as i16), + Val::I32(i) if ty.is_i8() => data.write_i8(offset, truncate_i32_to_i8(i)), + Val::I32(i) if ty.is_i16() => data.write_i16(offset, truncate_i32_to_i16(i)), Val::I32(i) => data.write_i32(offset, i), Val::I64(i) => data.write_i64(offset, i), Val::F32(f) => data.write_u32(offset, f), @@ -273,11 +274,11 @@ impl VMStructRef { Val::I32(i) if ty.is_i8() => store .gc_store_mut()? .gc_object_data(self.as_gc_ref()) - .write_i8(offset, i as i8), + .write_i8(offset, truncate_i32_to_i8(i)), Val::I32(i) if ty.is_i16() => store .gc_store_mut()? 
.gc_object_data(self.as_gc_ref()) - .write_i16(offset, i as i16), + .write_i16(offset, truncate_i32_to_i16(i)), Val::I32(i) => store .gc_store_mut()? .gc_object_data(self.as_gc_ref()) diff --git a/crates/wasmtime/src/runtime/vm/instance.rs b/crates/wasmtime/src/runtime/vm/instance.rs index fe3fbe6a2430..bdf52e3d2432 100644 --- a/crates/wasmtime/src/runtime/vm/instance.rs +++ b/crates/wasmtime/src/runtime/vm/instance.rs @@ -944,6 +944,7 @@ impl Instance { let src = self.validate_inbounds(src_mem.current_length(), src, len)?; let dst = self.validate_inbounds(dst_mem.current_length(), dst, len)?; + let len = usize::try_from(len).unwrap(); // Bounds and casts are checked above, by this point we know that // everything is safe. @@ -952,7 +953,7 @@ impl Instance { let src = src_mem.base.add(src); // FIXME audit whether this is safe in the presence of shared memory // (https://github.com/bytecodealliance/wasmtime/issues/4203). - ptr::copy(src, dst, len as usize); + ptr::copy(src, dst, len); } Ok(()) @@ -967,7 +968,7 @@ impl Instance { if end > max { Err(oob()) } else { - Ok(ptr as usize) + Ok(ptr.try_into().unwrap()) } } @@ -985,6 +986,7 @@ impl Instance { ) -> Result<(), Trap> { let memory = self.get_memory(memory_index); let dst = self.validate_inbounds(memory.current_length(), dst, len)?; + let len = usize::try_from(len).unwrap(); // Bounds and casts are checked above, by this point we know that // everything is safe. @@ -992,7 +994,7 @@ impl Instance { let dst = memory.base.add(dst); // FIXME audit whether this is safe in the presence of shared memory // (https://github.com/bytecodealliance/wasmtime/issues/4203). 
- ptr::write_bytes(dst, val, len as usize); + ptr::write_bytes(dst, val, len); } Ok(()) diff --git a/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/memory_pool.rs b/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/memory_pool.rs index 2e5dd793b94b..33cc098dc85d 100644 --- a/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/memory_pool.rs +++ b/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/memory_pool.rs @@ -378,7 +378,8 @@ impl MemoryPool { // the process to continue, because we never perform a // mmap that would leave an open space for someone // else to come in and map something. - slot.instantiate(initial_size as usize, image, memory_plan)?; + let initial_size = usize::try_from(initial_size).unwrap(); + slot.instantiate(initial_size, image, memory_plan)?; Memory::new_static( memory_plan, diff --git a/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/unix_stack_pool.rs b/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/unix_stack_pool.rs index 2bfe64612216..76dcdd99940d 100644 --- a/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/unix_stack_pool.rs +++ b/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/unix_stack_pool.rs @@ -205,8 +205,9 @@ impl StackPool { let index = (start_of_stack - base) / self.stack_size; assert!(index < self.max_stacks); + let index = u32::try_from(index).unwrap(); - self.index_allocator.free(SlotId(index as u32)); + self.index_allocator.free(SlotId(index)); } } diff --git a/crates/wasmtime/src/runtime/vm/libcalls.rs b/crates/wasmtime/src/runtime/vm/libcalls.rs index 207048809099..c976e28f5bf1 100644 --- a/crates/wasmtime/src/runtime/vm/libcalls.rs +++ b/crates/wasmtime/src/runtime/vm/libcalls.rs @@ -346,6 +346,7 @@ fn memory_fill( len: u64, ) -> Result<(), Trap> { let memory_index = MemoryIndex::from_u32(memory_index); + #[allow(clippy::cast_possible_truncation)] instance.memory_fill(memory_index, dst, val as u8, len) } diff --git 
a/crates/wasmtime/src/runtime/vm/memory.rs b/crates/wasmtime/src/runtime/vm/memory.rs index d36ea1dc6eed..fe84be375e14 100644 --- a/crates/wasmtime/src/runtime/vm/memory.rs +++ b/crates/wasmtime/src/runtime/vm/memory.rs @@ -811,5 +811,6 @@ pub fn validate_atomic_addr( return Err(Trap::MemoryOutOfBounds); } - Ok(def.base.wrapping_add(addr as usize)) + let addr = usize::try_from(addr).unwrap(); + Ok(def.base.wrapping_add(addr)) } diff --git a/crates/wasmtime/src/runtime/vm/sys/unix/signals.rs b/crates/wasmtime/src/runtime/vm/sys/unix/signals.rs index 5b8d2b44543b..0736c52e02bf 100644 --- a/crates/wasmtime/src/runtime/vm/sys/unix/signals.rs +++ b/crates/wasmtime/src/runtime/vm/sys/unix/signals.rs @@ -248,6 +248,8 @@ unsafe extern "C" fn trap_handler( } } +#[allow(clippy::cast_possible_truncation)] // too fiddly to handle and wouldn't + // help much anyway unsafe fn get_trap_registers(cx: *mut libc::c_void, _signum: libc::c_int) -> TrapRegisters { cfg_if::cfg_if! { if #[cfg(all(any(target_os = "linux", target_os = "android"), target_arch = "x86_64"))] { From f29ce229028ca4b10a98eb3c19ac2d0afe5efe2e Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Fri, 6 Sep 2024 12:49:43 -0700 Subject: [PATCH 2/2] Fix some casts in tests --- .../allocator/pooling/index_allocator.rs | 4 +- crates/wasmtime/src/runtime/vm/vmcontext.rs | 43 +++++++++---------- 2 files changed, 23 insertions(+), 24 deletions(-) diff --git a/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/index_allocator.rs b/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/index_allocator.rs index 0127bff01fe9..2b6cbb4dcc82 100644 --- a/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/index_allocator.rs +++ b/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/index_allocator.rs @@ -491,9 +491,9 @@ mod test { fn test_next_available_allocation_strategy() { for size in 0..20 { let state = ModuleAffinityIndexAllocator::new(size, 0); - assert_eq!(state.num_empty_slots() as u32, size); + 
assert_eq!(state.num_empty_slots(), usize::try_from(size).unwrap()); for i in 0..size { - assert_eq!(state.num_empty_slots() as u32, size - i); + assert_eq!(state.num_empty_slots(), usize::try_from(size - i).unwrap()); assert_eq!(state.alloc(None).unwrap().index(), i as usize); } assert!(state.alloc(None).is_none()); diff --git a/crates/wasmtime/src/runtime/vm/vmcontext.rs b/crates/wasmtime/src/runtime/vm/vmcontext.rs index 77c2aa91d9c3..5e96bb13c461 100644 --- a/crates/wasmtime/src/runtime/vm/vmcontext.rs +++ b/crates/wasmtime/src/runtime/vm/vmcontext.rs @@ -79,12 +79,12 @@ mod test_vmfunction_import { use super::VMFunctionImport; use core::mem::offset_of; use std::mem::size_of; - use wasmtime_environ::{Module, VMOffsets}; + use wasmtime_environ::{HostPtr, Module, VMOffsets}; #[test] fn check_vmfunction_import_offsets() { let module = Module::new(); - let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module); + let offsets = VMOffsets::new(HostPtr, &module); assert_eq!( size_of::(), usize::from(offsets.size_of_vmfunction_import()) @@ -144,12 +144,12 @@ mod test_vmtable_import { use super::VMTableImport; use core::mem::offset_of; use std::mem::size_of; - use wasmtime_environ::{Module, VMOffsets}; + use wasmtime_environ::{HostPtr, Module, VMOffsets}; #[test] fn check_vmtable_import_offsets() { let module = Module::new(); - let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module); + let offsets = VMOffsets::new(HostPtr, &module); assert_eq!( size_of::(), usize::from(offsets.size_of_vmtable_import()) @@ -190,12 +190,12 @@ mod test_vmmemory_import { use super::VMMemoryImport; use core::mem::offset_of; use std::mem::size_of; - use wasmtime_environ::{Module, VMOffsets}; + use wasmtime_environ::{HostPtr, Module, VMOffsets}; #[test] fn check_vmmemory_import_offsets() { let module = Module::new(); - let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module); + let offsets = VMOffsets::new(HostPtr, &module); assert_eq!( size_of::(), 
usize::from(offsets.size_of_vmmemory_import()) @@ -234,12 +234,12 @@ mod test_vmglobal_import { use super::VMGlobalImport; use core::mem::offset_of; use std::mem::size_of; - use wasmtime_environ::{Module, VMOffsets}; + use wasmtime_environ::{HostPtr, Module, VMOffsets}; #[test] fn check_vmglobal_import_offsets() { let module = Module::new(); - let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module); + let offsets = VMOffsets::new(HostPtr, &module); assert_eq!( size_of::(), usize::from(offsets.size_of_vmglobal_import()) @@ -297,12 +297,12 @@ mod test_vmmemory_definition { use super::VMMemoryDefinition; use core::mem::offset_of; use std::mem::size_of; - use wasmtime_environ::{Module, PtrSize, VMOffsets}; + use wasmtime_environ::{HostPtr, Module, PtrSize, VMOffsets}; #[test] fn check_vmmemory_definition_offsets() { let module = Module::new(); - let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module); + let offsets = VMOffsets::new(HostPtr, &module); assert_eq!( size_of::(), usize::from(offsets.ptr.size_of_vmmemory_definition()) @@ -341,12 +341,12 @@ mod test_vmtable_definition { use super::VMTableDefinition; use core::mem::offset_of; use std::mem::size_of; - use wasmtime_environ::{Module, VMOffsets}; + use wasmtime_environ::{HostPtr, Module, VMOffsets}; #[test] fn check_vmtable_definition_offsets() { let module = Module::new(); - let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module); + let offsets = VMOffsets::new(HostPtr, &module); assert_eq!( size_of::(), usize::from(offsets.size_of_vmtable_definition()) @@ -377,7 +377,7 @@ pub struct VMGlobalDefinition { mod test_vmglobal_definition { use super::VMGlobalDefinition; use std::mem::{align_of, size_of}; - use wasmtime_environ::{Module, PtrSize, VMOffsets}; + use wasmtime_environ::{HostPtr, Module, PtrSize, VMOffsets}; #[test] fn check_vmglobal_definition_alignment() { @@ -391,7 +391,7 @@ mod test_vmglobal_definition { #[test] fn check_vmglobal_definition_offsets() { let module = 
Module::new(); - let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module); + let offsets = VMOffsets::new(HostPtr, &module); assert_eq!( size_of::(), usize::from(offsets.ptr.size_of_vmglobal_definition()) @@ -401,7 +401,7 @@ mod test_vmglobal_definition { #[test] fn check_vmglobal_begins_aligned() { let module = Module::new(); - let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module); + let offsets = VMOffsets::new(HostPtr, &module); assert_eq!(offsets.vmctx_globals_begin() % 16, 0); } @@ -608,12 +608,12 @@ impl VMGlobalDefinition { mod test_vmshared_type_index { use super::VMSharedTypeIndex; use std::mem::size_of; - use wasmtime_environ::{Module, VMOffsets}; + use wasmtime_environ::{HostPtr, Module, VMOffsets}; #[test] fn check_vmshared_type_index() { let module = Module::new(); - let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module); + let offsets = VMOffsets::new(HostPtr, &module); assert_eq!( size_of::(), usize::from(offsets.size_of_vmshared_type_index()) @@ -673,12 +673,12 @@ mod test_vm_func_ref { use super::VMFuncRef; use core::mem::offset_of; use std::mem::size_of; - use wasmtime_environ::{Module, PtrSize, VMOffsets}; + use wasmtime_environ::{HostPtr, Module, PtrSize, VMOffsets}; #[test] fn check_vm_func_ref_offsets() { let module = Module::new(); - let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module); + let offsets = VMOffsets::new(HostPtr, &module); assert_eq!( size_of::(), usize::from(offsets.ptr.size_of_vm_func_ref()) @@ -837,13 +837,12 @@ impl Default for VMRuntimeLimits { mod test_vmruntime_limits { use super::VMRuntimeLimits; use core::mem::offset_of; - use std::mem::size_of; - use wasmtime_environ::{Module, PtrSize, VMOffsets}; + use wasmtime_environ::{HostPtr, Module, PtrSize, VMOffsets}; #[test] fn field_offsets() { let module = Module::new(); - let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module); + let offsets = VMOffsets::new(HostPtr, &module); assert_eq!( offset_of!(VMRuntimeLimits, 
stack_limit), usize::from(offsets.ptr.vmruntime_limits_stack_limit())