Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
28 changes: 28 additions & 0 deletions crates/wasmtime/src/runtime.rs
Original file line number Diff line number Diff line change
@@ -1,3 +1,31 @@
// Wasmtime's runtime has lots of fiddly bits where we're doing operations like
// casting between wasm i32/i64 and host `usize` values. There's also in general
// just lots of pieces of low-level manipulation of memory and internals of VM
// runtime state. To help keep all the integer casts correct, we are a bit more
// strict than the default settings to help weed out bugs ahead of time.
//
// This inevitably leads to wordier code than might otherwise be used because,
// for example, `u64 as usize` is warned against and will be an error on CI.
// This happens pretty frequently and needs to be replaced with `val.try_into()`
// or `usize::try_from(val)` where the error is handled. In some cases the
// correct thing to do is to `.unwrap()` the error to indicate a fatal mistake,
// but in some cases the correct thing is to propagate the error.
//
// Some niche cases that explicitly want truncation are recommended to have a
// function along the lines of
//
// #[allow(clippy::cast_possible_truncation)]
// fn truncate_i32_to_i8(a: i32) -> i8 { a as i8 }
//
// as this explicitly indicates the intent of truncation is desired. Other
// locations should use fallible conversions.
//
// If performance is absolutely critical then it's recommended to use `#[allow]`
// with a comment indicating why performance is critical as well as a short
// explanation of why truncation shouldn't be happening at runtime. This
// situation should be pretty rare though.
#![warn(clippy::cast_possible_truncation)]

#[macro_use]
pub(crate) mod func;

Expand Down
10 changes: 5 additions & 5 deletions crates/wasmtime/src/runtime/component/func/typed.rs
Original file line number Diff line number Diff line change
Expand Up @@ -837,7 +837,7 @@ macro_rules! integers {

unsafe impl Lift for $primitive {
#[inline]
#[allow(trivial_numeric_casts)]
#[allow(trivial_numeric_casts, clippy::cast_possible_truncation)]
fn lift(_cx: &mut LiftContext<'_>, ty: InterfaceType, src: &Self::Lower) -> Result<Self> {
debug_assert!(matches!(ty, InterfaceType::$ty));
Ok(src.$get() as $primitive)
Expand Down Expand Up @@ -1110,8 +1110,8 @@ unsafe impl Lower for str {
debug_assert!(offset % (Self::ALIGN32 as usize) == 0);
let (ptr, len) = lower_string(cx, self)?;
// FIXME: needs memory64 handling
*cx.get(offset + 0) = (ptr as i32).to_le_bytes();
*cx.get(offset + 4) = (len as i32).to_le_bytes();
*cx.get(offset + 0) = u32::try_from(ptr).unwrap().to_le_bytes();
*cx.get(offset + 4) = u32::try_from(len).unwrap().to_le_bytes();
Ok(())
}
}
Expand Down Expand Up @@ -1457,8 +1457,8 @@ where
};
debug_assert!(offset % (Self::ALIGN32 as usize) == 0);
let (ptr, len) = lower_list(cx, elem, self)?;
*cx.get(offset + 0) = (ptr as i32).to_le_bytes();
*cx.get(offset + 4) = (len as i32).to_le_bytes();
*cx.get(offset + 0) = u32::try_from(ptr).unwrap().to_le_bytes();
*cx.get(offset + 4) = u32::try_from(len).unwrap().to_le_bytes();
Ok(())
}
}
Expand Down
2 changes: 1 addition & 1 deletion crates/wasmtime/src/runtime/component/resource_table.rs
Original file line number Diff line number Diff line change
Expand Up @@ -161,7 +161,7 @@ impl ResourceTable {
fn push_(&mut self, e: TableEntry) -> Result<u32, ResourceTableError> {
if let Some(free) = self.pop_free_list() {
self.entries[free] = Entry::Occupied { entry: e };
Ok(free as u32)
Ok(free.try_into().unwrap())
} else {
let ix = self
.entries
Expand Down
12 changes: 6 additions & 6 deletions crates/wasmtime/src/runtime/component/values.rs
Original file line number Diff line number Diff line change
Expand Up @@ -187,11 +187,11 @@ impl Val {
let u32_count = cx.types.canonical_abi(&ty).flat_count(usize::MAX).unwrap();
let ty = &cx.types[i];
let mut flags = Vec::new();
for i in 0..u32_count {
for i in 0..u32::try_from(u32_count).unwrap() {
push_flags(
ty,
&mut flags,
(i as u32) * 32,
i * 32,
u32::lift(cx, InterfaceType::U32, next(src))?,
);
}
Expand Down Expand Up @@ -480,8 +480,8 @@ impl Val {
let ty = &cx.types[ty];
let (ptr, len) = lower_list(cx, ty.element, values)?;
// FIXME: needs memory64 handling
*cx.get(offset + 0) = (ptr as i32).to_le_bytes();
*cx.get(offset + 4) = (len as i32).to_le_bytes();
*cx.get(offset + 0) = u32::try_from(ptr).unwrap().to_le_bytes();
*cx.get(offset + 4) = u32::try_from(len).unwrap().to_le_bytes();
Ok(())
}
(InterfaceType::List(_), _) => unexpected(ty, self),
Expand Down Expand Up @@ -947,7 +947,7 @@ fn get_enum_discriminant(ty: &TypeEnum, n: &str) -> Result<u32> {
ty.names
.get_index_of(n)
.ok_or_else(|| anyhow::anyhow!("enum variant name `{n}` is not valid"))
.map(|i| i as u32)
.map(|i| i.try_into().unwrap())
}

fn get_variant_discriminant<'a>(
Expand All @@ -958,7 +958,7 @@ fn get_variant_discriminant<'a>(
.cases
.get_full(name)
.ok_or_else(|| anyhow::anyhow!("unknown variant case: `{name}`"))?;
Ok((i as u32, ty))
Ok((i.try_into().unwrap(), ty))
}

fn next<'a>(src: &mut Iter<'a, ValRaw>) -> &'a ValRaw {
Expand Down
18 changes: 10 additions & 8 deletions crates/wasmtime/src/runtime/coredump.rs
Original file line number Diff line number Diff line change
Expand Up @@ -137,16 +137,18 @@ impl WasmCoreDump {
// to balance these conflicting desires, we break the memory up
// into reasonably-sized chunks and then trim runs of zeroes
// from the start and end of each chunk.
const CHUNK_SIZE: u32 = 4096;
for (i, chunk) in mem
.data(&store)
.chunks_exact(CHUNK_SIZE as usize)
.enumerate()
{
const CHUNK_SIZE: usize = 4096;
for (i, chunk) in mem.data(&store).chunks_exact(CHUNK_SIZE).enumerate() {
if let Some(start) = chunk.iter().position(|byte| *byte != 0) {
let end = chunk.iter().rposition(|byte| *byte != 0).unwrap() + 1;
let offset = (i as u32) * CHUNK_SIZE + (start as u32);
let offset = wasm_encoder::ConstExpr::i32_const(offset as i32);
let offset = i * CHUNK_SIZE + start;
let offset = if ty.is_64() {
let offset = u64::try_from(offset).unwrap();
wasm_encoder::ConstExpr::i64_const(offset as i64)
} else {
let offset = u32::try_from(offset).unwrap();
wasm_encoder::ConstExpr::i32_const(offset as i32)
};
data.active(memory_idx, &offset, chunk[start..end].iter().copied());
}
}
Expand Down
16 changes: 11 additions & 5 deletions crates/wasmtime/src/runtime/debug.rs
Original file line number Diff line number Diff line change
Expand Up @@ -63,8 +63,11 @@ fn relocate_dwarf_sections(bytes: &mut [u8], code_region: (*const u8, usize)) ->
}

for (offset, value) in relocations {
let (loc, _) = object::from_bytes_mut::<U64Bytes<NE>>(&mut bytes[offset as usize..])
.map_err(|()| anyhow!("invalid dwarf relocations"))?;
let (loc, _) = offset
.try_into()
.ok()
.and_then(|offset| object::from_bytes_mut::<U64Bytes<NE>>(&mut bytes[offset..]).ok())
.ok_or_else(|| anyhow!("invalid dwarf relocations"))?;
Comment on lines +67 to +70
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Does this indirect through Option because the error types are different between line 67 and 69? Otherwise, wouldn't a combination of and_then and unwrap_or_else work here?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yeah this had to do with different error types across the two functions and I figured options was the easiest way to go here. This code is on the older side in Wasmtime and is probably ripe for some improvement.

loc.set(NE, value);
}
Ok(())
Expand Down Expand Up @@ -126,7 +129,8 @@ fn convert_object_elf_to_loadable_file<E: Endian>(
let text_range = match sections.section_by_name(e, b".text") {
Some((i, text)) => {
let range = text.file_range(e);
let off = header.e_shoff.get(e) as usize + i.0 * header.e_shentsize.get(e) as usize;
let e_shoff = usize::try_from(header.e_shoff.get(e)).unwrap();
let off = e_shoff + i.0 * header.e_shentsize.get(e) as usize;

let section: &mut SectionHeader64<E> =
object::from_bytes_mut(&mut bytes[off..]).unwrap().0;
Expand Down Expand Up @@ -160,6 +164,8 @@ fn convert_object_elf_to_loadable_file<E: Endian>(
let header: &mut FileHeader64<E> = object::from_bytes_mut(bytes).unwrap().0;
header.e_type.set(e, ET_DYN);
header.e_phoff.set(e, ph_off as u64);
header.e_phentsize.set(e, e_phentsize as u16);
header.e_phnum.set(e, e_phnum as u16);
header
.e_phentsize
.set(e, u16::try_from(e_phentsize).unwrap());
header.e_phnum.set(e, u16::try_from(e_phnum).unwrap());
}
7 changes: 5 additions & 2 deletions crates/wasmtime/src/runtime/instantiate.rs
Original file line number Diff line number Diff line change
Expand Up @@ -274,9 +274,12 @@ impl CompiledModule {
.meta
.dwarf
.binary_search_by_key(&(id as u8), |(id, _)| *id)
.map(|i| {
.ok()
.and_then(|i| {
let (_, range) = &self.meta.dwarf[i];
&self.code_memory().dwarf()[range.start as usize..range.end as usize]
let start = range.start.try_into().ok()?;
let end = range.end.try_into().ok()?;
self.code_memory().dwarf().get(start..end)
})
.unwrap_or(&[]);
Ok(EndianSlice::new(data, gimli::LittleEndian))
Expand Down
6 changes: 2 additions & 4 deletions crates/wasmtime/src/runtime/trap.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,9 +4,7 @@ use crate::prelude::*;
use crate::store::StoreOpaque;
use crate::{AsContext, Module};
use core::fmt;
use wasmtime_environ::{
demangle_function_name, demangle_function_name_or_index, EntityRef, FilePos,
};
use wasmtime_environ::{demangle_function_name, demangle_function_name_or_index, FilePos};

/// Representation of a WebAssembly trap and what caused it to occur.
///
Expand Down Expand Up @@ -451,7 +449,7 @@ impl FrameInfo {
text_offset,
);
let index = compiled_module.module().func_index(index);
let func_index = index.index() as u32;
let func_index = index.as_u32();
let func_name = compiled_module.func_name(index).map(|s| s.to_string());

// In debug mode for now assert that we found a mapping for `pc` within
Expand Down
2 changes: 2 additions & 0 deletions crates/wasmtime/src/runtime/vm/component.rs
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,8 @@ use sptr::Strict;
use wasmtime_environ::component::*;
use wasmtime_environ::{HostPtr, PrimaryMap, VMSharedTypeIndex};

#[allow(clippy::cast_possible_truncation)] // it's intended this is truncated on
// 32-bit platforms
const INVALID_PTR: usize = 0xdead_dead_beef_beef_u64 as usize;

mod libcalls;
Expand Down
16 changes: 10 additions & 6 deletions crates/wasmtime/src/runtime/vm/cow.rs
Original file line number Diff line number Diff line change
Expand Up @@ -72,10 +72,13 @@ impl MemoryImage {
data: &[u8],
mmap: Option<&MmapVec>,
) -> Result<Option<MemoryImage>> {
let assert_page_aligned = |val: usize| {
assert_eq!(val % (page_size as usize), 0);
};
// Sanity-check that various parameters are page-aligned.
let len = data.len();
assert_eq!(offset % u64::from(page_size), 0);
assert_eq!((len as u32) % page_size, 0);
assert_page_aligned(len);
let linear_memory_offset = match usize::try_from(offset) {
Ok(offset) => offset,
Err(_) => return Ok(None),
Expand All @@ -101,10 +104,10 @@ impl MemoryImage {
let data_start = data.as_ptr() as usize;
let data_end = data_start + data.len();
assert!(start <= data_start && data_end <= end);
assert_eq!((start as u32) % page_size, 0);
assert_eq!((data_start as u32) % page_size, 0);
assert_eq!((data_end as u32) % page_size, 0);
assert_eq!((mmap.original_offset() as u32) % page_size, 0);
assert_page_aligned(start);
assert_page_aligned(data_start);
assert_page_aligned(data_end);
assert_page_aligned(mmap.original_offset());

#[cfg(feature = "std")]
if let Some(file) = mmap.original_file() {
Expand Down Expand Up @@ -167,7 +170,8 @@ impl ModuleMemoryImages {
_ => return Ok(None),
};
let mut memories = PrimaryMap::with_capacity(map.len());
let page_size = crate::runtime::vm::host_page_size() as u32;
let page_size = crate::runtime::vm::host_page_size();
let page_size = u32::try_from(page_size).unwrap();
for (memory_index, init) in map {
// mmap-based-initialization only works for defined memories with a
// known starting point of all zeros, so bail out if the memory is
Expand Down
12 changes: 12 additions & 0 deletions crates/wasmtime/src/runtime/vm/gc/enabled.rs
Original file line number Diff line number Diff line change
Expand Up @@ -27,3 +27,15 @@ const DEFAULT_GC_HEAP_CAPACITY: usize = 1 << 19;
/// The default GC heap capacity for miri: 64KiB.
#[cfg(miri)]
const DEFAULT_GC_HEAP_CAPACITY: usize = 1 << 16;

// Explicit methods with `#[allow]` to clearly indicate that truncation is
// desired when used.
/// Deliberately truncate an `i32` down to its low 16 bits.
///
/// Truncation is the entire point of this helper, so the
/// `cast_possible_truncation` lint is explicitly allowed here; call sites
/// use this function to document that intent rather than a bare `as` cast.
#[allow(clippy::cast_possible_truncation)]
fn truncate_i32_to_i16(a: i32) -> i16 {
    // Keep the low 16 bits, then reinterpret them as a signed value —
    // identical to `a as i16`.
    (a as u16) as i16
}

/// Deliberately truncate an `i32` down to its low 8 bits.
///
/// Truncation is the entire point of this helper, so the
/// `cast_possible_truncation` lint is explicitly allowed here; call sites
/// use this function to document that intent rather than a bare `as` cast.
#[allow(clippy::cast_possible_truncation)]
fn truncate_i32_to_i8(a: i32) -> i8 {
    // Keep the low 8 bits, then reinterpret them as a signed value —
    // identical to `a as i8`.
    (a as u8) as i8
}
9 changes: 5 additions & 4 deletions crates/wasmtime/src/runtime/vm/gc/enabled/arrayref.rs
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
use super::{truncate_i32_to_i16, truncate_i32_to_i8};
use crate::{
prelude::*,
runtime::vm::{GcHeap, GcStore, VMGcRef},
Expand Down Expand Up @@ -193,8 +194,8 @@ impl VMArrayRef {
let offset = layout.elem_offset(index, ty.byte_size_in_gc_heap());
let mut data = store.unwrap_gc_store_mut().gc_object_data(self.as_gc_ref());
match val {
Val::I32(i) if ty.is_i8() => data.write_i8(offset, i as i8),
Val::I32(i) if ty.is_i16() => data.write_i16(offset, i as i16),
Val::I32(i) if ty.is_i8() => data.write_i8(offset, truncate_i32_to_i8(i)),
Val::I32(i) if ty.is_i16() => data.write_i16(offset, truncate_i32_to_i16(i)),
Val::I32(i) => data.write_i32(offset, i),
Val::I64(i) => data.write_i64(offset, i),
Val::F32(f) => data.write_u32(offset, f),
Expand Down Expand Up @@ -279,11 +280,11 @@ impl VMArrayRef {
Val::I32(i) if ty.is_i8() => store
.gc_store_mut()?
.gc_object_data(self.as_gc_ref())
.write_i8(offset, i as i8),
.write_i8(offset, truncate_i32_to_i8(i)),
Val::I32(i) if ty.is_i16() => store
.gc_store_mut()?
.gc_object_data(self.as_gc_ref())
.write_i16(offset, i as i16),
.write_i16(offset, truncate_i32_to_i16(i)),
Val::I32(i) => store
.gc_store_mut()?
.gc_object_data(self.as_gc_ref())
Expand Down
4 changes: 2 additions & 2 deletions crates/wasmtime/src/runtime/vm/gc/enabled/free_list.rs
Original file line number Diff line number Diff line change
Expand Up @@ -14,8 +14,8 @@ pub(crate) struct FreeList {

/// Our minimum and maximum supported alignment. Every allocation is aligned to
/// this.
const ALIGN_USIZE: usize = 8;
const ALIGN_U32: u32 = ALIGN_USIZE as u32;
const ALIGN_U32: u32 = 8;
const ALIGN_USIZE: usize = ALIGN_U32 as usize;

/// Our minimum allocation size.
const MIN_BLOCK_SIZE: u32 = 24;
Expand Down
9 changes: 5 additions & 4 deletions crates/wasmtime/src/runtime/vm/gc/enabled/structref.rs
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
use super::{truncate_i32_to_i16, truncate_i32_to_i8};
use crate::{
prelude::*,
runtime::vm::{GcHeap, GcStore, VMGcRef},
Expand Down Expand Up @@ -187,8 +188,8 @@ impl VMStructRef {
let offset = layout.fields[field];
let mut data = store.gc_store_mut()?.gc_object_data(self.as_gc_ref());
match val {
Val::I32(i) if ty.is_i8() => data.write_i8(offset, i as i8),
Val::I32(i) if ty.is_i16() => data.write_i16(offset, i as i16),
Val::I32(i) if ty.is_i8() => data.write_i8(offset, truncate_i32_to_i8(i)),
Val::I32(i) if ty.is_i16() => data.write_i16(offset, truncate_i32_to_i16(i)),
Val::I32(i) => data.write_i32(offset, i),
Val::I64(i) => data.write_i64(offset, i),
Val::F32(f) => data.write_u32(offset, f),
Expand Down Expand Up @@ -273,11 +274,11 @@ impl VMStructRef {
Val::I32(i) if ty.is_i8() => store
.gc_store_mut()?
.gc_object_data(self.as_gc_ref())
.write_i8(offset, i as i8),
.write_i8(offset, truncate_i32_to_i8(i)),
Val::I32(i) if ty.is_i16() => store
.gc_store_mut()?
.gc_object_data(self.as_gc_ref())
.write_i16(offset, i as i16),
.write_i16(offset, truncate_i32_to_i16(i)),
Val::I32(i) => store
.gc_store_mut()?
.gc_object_data(self.as_gc_ref())
Expand Down
8 changes: 5 additions & 3 deletions crates/wasmtime/src/runtime/vm/instance.rs
Original file line number Diff line number Diff line change
Expand Up @@ -944,6 +944,7 @@ impl Instance {

let src = self.validate_inbounds(src_mem.current_length(), src, len)?;
let dst = self.validate_inbounds(dst_mem.current_length(), dst, len)?;
let len = usize::try_from(len).unwrap();

// Bounds and casts are checked above, by this point we know that
// everything is safe.
Expand All @@ -952,7 +953,7 @@ impl Instance {
let src = src_mem.base.add(src);
// FIXME audit whether this is safe in the presence of shared memory
// (https://github.com/bytecodealliance/wasmtime/issues/4203).
ptr::copy(src, dst, len as usize);
ptr::copy(src, dst, len);
}

Ok(())
Expand All @@ -967,7 +968,7 @@ impl Instance {
if end > max {
Err(oob())
} else {
Ok(ptr as usize)
Ok(ptr.try_into().unwrap())
}
}

Expand All @@ -985,14 +986,15 @@ impl Instance {
) -> Result<(), Trap> {
let memory = self.get_memory(memory_index);
let dst = self.validate_inbounds(memory.current_length(), dst, len)?;
let len = usize::try_from(len).unwrap();

// Bounds and casts are checked above, by this point we know that
// everything is safe.
unsafe {
let dst = memory.base.add(dst);
// FIXME audit whether this is safe in the presence of shared memory
// (https://github.com/bytecodealliance/wasmtime/issues/4203).
ptr::write_bytes(dst, val, len as usize);
ptr::write_bytes(dst, val, len);
}

Ok(())
Expand Down
Loading