From 35722f158106fc2edd924bb82b4a1f7d135f32c7 Mon Sep 17 00:00:00 2001 From: Nick Fitzgerald Date: Thu, 27 Feb 2025 15:34:12 -0800 Subject: [PATCH] Rename `VMRuntimeLimits` to `VMStoreContext` Way back in time, this struct originally contained the stack and fuel limits. Then it also got the epoch deadline. Then it also got the exit FP/PC and entry FP. Now it is just the place where we put per-store mutable data that is accessed by JIT code and must be shared between all `VMContext`s. So it is time to rename it. This commit is purely mechanical and just renames the type and various methods and variables that use/access it. --- crates/cranelift/src/compiler.rs | 32 +++++------ crates/cranelift/src/compiler/component.rs | 6 +- crates/cranelift/src/func_environ.rs | 57 +++++++++---------- crates/environ/src/builtin.rs | 2 +- .../src/component/vmcomponent_offsets.rs | 14 ++--- crates/environ/src/vmoffsets.rs | 44 +++++++------- crates/wasmtime/src/runtime/func.rs | 6 +- crates/wasmtime/src/runtime/store.rs | 36 +++++++----- crates/wasmtime/src/runtime/vm.rs | 2 +- crates/wasmtime/src/runtime/vm/component.rs | 4 +- crates/wasmtime/src/runtime/vm/instance.rs | 10 ++-- .../wasmtime/src/runtime/vm/traphandlers.rs | 46 +++++++-------- .../src/runtime/vm/traphandlers/backtrace.rs | 39 +++++++------ .../vm/traphandlers/coredump_disabled.rs | 4 +- .../vm/traphandlers/coredump_enabled.rs | 9 +-- crates/wasmtime/src/runtime/vm/vmcontext.rs | 55 ++++++++++-------- tests/all/traps.rs | 2 +- winch/codegen/src/codegen/mod.rs | 18 +++--- winch/codegen/src/isa/x64/masm.rs | 6 +- 19 files changed, 207 insertions(+), 185 deletions(-) diff --git a/crates/cranelift/src/compiler.rs b/crates/cranelift/src/compiler.rs index 33ce1c17ac3f..1b9e0fb167af 100644 --- a/crates/cranelift/src/compiler.rs +++ b/crates/cranelift/src/compiler.rs @@ -224,7 +224,7 @@ impl wasmtime_environ::Compiler for Compiler { // The way that stack overflow is handled here is by adding a prologue // check to all 
functions for how much native stack is remaining. The // `VMContext` pointer is the first argument to all functions, and the - // first field of this structure is `*const VMRuntimeLimits` and the + // first field of this structure is `*const VMStoreContext` and the // first field of that is the stack limit. Note that the stack limit in // this case means "if the stack pointer goes below this, trap". Each // function which consumes stack space or isn't a leaf function starts @@ -255,7 +255,7 @@ impl wasmtime_environ::Compiler for Compiler { }); let stack_limit = context.func.create_global_value(ir::GlobalValueData::Load { base: interrupts_ptr, - offset: i32::from(func_env.offsets.ptr.vmruntime_limits_stack_limit()).into(), + offset: i32::from(func_env.offsets.ptr.vmstore_context_stack_limit()).into(), global_type: isa.pointer_type(), flags: MemFlags::trusted(), }); @@ -393,13 +393,13 @@ impl wasmtime_environ::Compiler for Compiler { wasmtime_environ::VMCONTEXT_MAGIC, ); let ptr = isa.pointer_bytes(); - let limits = builder.ins().load( + let vm_store_context = builder.ins().load( pointer_type, MemFlags::trusted(), caller_vmctx, - i32::from(ptr.vmcontext_runtime_limits()), + i32::from(ptr.vmcontext_store_context()), ); - save_last_wasm_exit_fp_and_pc(&mut builder, pointer_type, &ptr, limits); + save_last_wasm_exit_fp_and_pc(&mut builder, pointer_type, &ptr, vm_store_context); // Spill all wasm arguments to the stack in `ValRaw` slots. let (args_base, args_len) = @@ -592,13 +592,13 @@ impl wasmtime_environ::Compiler for Compiler { // additionally perform the "routine of the exit trampoline" of saving // fp/pc/etc. 
debug_assert_vmctx_kind(isa, &mut builder, vmctx, wasmtime_environ::VMCONTEXT_MAGIC); - let limits = builder.ins().load( + let vm_store_context = builder.ins().load( pointer_type, MemFlags::trusted(), vmctx, - ptr_size.vmcontext_runtime_limits(), + ptr_size.vmcontext_store_context(), ); - save_last_wasm_exit_fp_and_pc(&mut builder, pointer_type, &ptr_size, limits); + save_last_wasm_exit_fp_and_pc(&mut builder, pointer_type, &ptr_size, vm_store_context); // Now it's time to delegate to the actual builtin. Forward all our own // arguments to the libcall itself. @@ -1157,15 +1157,15 @@ fn save_last_wasm_entry_fp( builder: &mut FunctionBuilder, pointer_type: ir::Type, ptr_size: &impl PtrSize, - vm_runtime_limits_offset: u32, + vm_store_context_offset: u32, vmctx: Value, ) { - // First we need to get the `VMRuntimeLimits`. - let limits = builder.ins().load( + // First we need to get the `VMStoreContext`. + let vm_store_context = builder.ins().load( pointer_type, MemFlags::trusted(), vmctx, - i32::try_from(vm_runtime_limits_offset).unwrap(), + i32::try_from(vm_store_context_offset).unwrap(), ); // Then store our current stack pointer into the appropriate slot. @@ -1173,8 +1173,8 @@ fn save_last_wasm_entry_fp( builder.ins().store( MemFlags::trusted(), fp, - limits, - ptr_size.vmruntime_limits_last_wasm_entry_fp(), + vm_store_context, + ptr_size.vmstore_context_last_wasm_entry_fp(), ); } @@ -1201,7 +1201,7 @@ fn save_last_wasm_exit_fp_and_pc( MemFlags::trusted(), wasm_fp, limits, - ptr.vmruntime_limits_last_wasm_exit_fp(), + ptr.vmstore_context_last_wasm_exit_fp(), ); // Finally save the Wasm return address to the limits. 
let wasm_pc = builder.ins().get_return_address(pointer_type); @@ -1209,6 +1209,6 @@ fn save_last_wasm_exit_fp_and_pc( MemFlags::trusted(), wasm_pc, limits, - ptr.vmruntime_limits_last_wasm_exit_pc(), + ptr.vmstore_context_last_wasm_exit_pc(), ); } diff --git a/crates/cranelift/src/compiler/component.rs b/crates/cranelift/src/compiler/component.rs index 796b029280a2..a88d26d22c4c 100644 --- a/crates/cranelift/src/compiler/component.rs +++ b/crates/cranelift/src/compiler/component.rs @@ -856,17 +856,17 @@ impl ComponentCompiler for Compiler { wasmtime_environ::component::VMCOMPONENT_MAGIC, ); if let Abi::Wasm = abi { - let limits = c.builder.ins().load( + let vm_store_context = c.builder.ins().load( pointer_type, MemFlags::trusted(), vmctx, - i32::try_from(c.offsets.limits()).unwrap(), + i32::try_from(c.offsets.vm_store_context()).unwrap(), ); super::save_last_wasm_exit_fp_and_pc( &mut c.builder, pointer_type, &c.offsets.ptr, - limits, + vm_store_context, ); } diff --git a/crates/cranelift/src/func_environ.rs b/crates/cranelift/src/func_environ.rs index 67ed54a28a34..d1f5ba1e97a5 100644 --- a/crates/cranelift/src/func_environ.rs +++ b/crates/cranelift/src/func_environ.rs @@ -130,17 +130,17 @@ pub struct FuncEnvironment<'module_environment> { /// A function-local variable which stores the cached value of the amount of /// fuel remaining to execute. If used this is modified frequently so it's /// stored locally as a variable instead of always referenced from the field - /// in `*const VMRuntimeLimits` + /// in `*const VMStoreContext` fuel_var: cranelift_frontend::Variable, /// A function-local variable which caches the value of `*const - /// VMRuntimeLimits` for this function's vmctx argument. This pointer is stored + /// VMStoreContext` for this function's vmctx argument. This pointer is stored /// in the vmctx itself, but never changes for the lifetime of the function, /// so if we load it up front we can continue to use it throughout. 
- vmruntime_limits_ptr: ir::Value, + vmstore_context_ptr: ir::Value, /// A cached epoch deadline value, when performing epoch-based - /// interruption. Loaded from `VMRuntimeLimits` and reloaded after + /// interruption. Loaded from `VMStoreContext` and reloaded after /// any yield. epoch_deadline_var: cranelift_frontend::Variable, @@ -199,7 +199,7 @@ impl<'module_environment> FuncEnvironment<'module_environment> { fuel_var: Variable::new(0), epoch_deadline_var: Variable::new(0), epoch_ptr_var: Variable::new(0), - vmruntime_limits_ptr: ir::Value::reserved_value(), + vmstore_context_ptr: ir::Value::reserved_value(), // Start with at least one fuel being consumed because even empty // functions should consume at least some fuel. @@ -304,8 +304,8 @@ impl<'module_environment> FuncEnvironment<'module_environment> { } } - fn declare_vmruntime_limits_ptr(&mut self, builder: &mut FunctionBuilder<'_>) { - // We load the `*const VMRuntimeLimits` value stored within vmctx at the + fn declare_vmstore_context_ptr(&mut self, builder: &mut FunctionBuilder<'_>) { + // We load the `*const VMStoreContext` value stored within vmctx at the // head of the function and reuse the same value across the entire // function. This is possible since we know that the pointer never // changes for the lifetime of the function. 
@@ -313,8 +313,8 @@ impl<'module_environment> FuncEnvironment<'module_environment> { let vmctx = self.vmctx(builder.func); let base = builder.ins().global_value(pointer_type, vmctx); let offset = i32::from(self.offsets.ptr.vmctx_runtime_limits()); - debug_assert!(self.vmruntime_limits_ptr.is_reserved_value()); - self.vmruntime_limits_ptr = + debug_assert!(self.vmstore_context_ptr.is_reserved_value()); + self.vmstore_context_ptr = builder .ins() .load(pointer_type, ir::MemFlags::trusted(), base, offset); @@ -324,7 +324,7 @@ impl<'module_environment> FuncEnvironment<'module_environment> { // On function entry we load the amount of fuel into a function-local // `self.fuel_var` to make fuel modifications fast locally. This cache // is then periodically flushed to the Store-defined location in - // `VMRuntimeLimits` later. + // `VMStoreContext` later. builder.declare_var(self.fuel_var, ir::types::I64); self.fuel_load_into_var(builder); self.fuel_check(builder); @@ -372,13 +372,13 @@ impl<'module_environment> FuncEnvironment<'module_environment> { match op { // Exiting a function (via a return or unreachable) or otherwise // entering a different function (via a call) means that we need to - // update the fuel consumption in `VMRuntimeLimits` because we're + // update the fuel consumption in `VMStoreContext` because we're // about to move control out of this function itself and the fuel // may need to be read. // // Before this we need to update the fuel counter from our own cost // leading up to this function call, and then we can store - // `self.fuel_var` into `VMRuntimeLimits`. + // `self.fuel_var` into `VMStoreContext`. Operator::Unreachable | Operator::Return | Operator::CallIndirect { .. 
} @@ -463,7 +463,7 @@ impl<'module_environment> FuncEnvironment<'module_environment> { builder.def_var(self.fuel_var, fuel); } - /// Loads the fuel consumption value from `VMRuntimeLimits` into `self.fuel_var` + /// Loads the fuel consumption value from `VMStoreContext` into `self.fuel_var` fn fuel_load_into_var(&mut self, builder: &mut FunctionBuilder<'_>) { let (addr, offset) = self.fuel_addr_offset(); let fuel = builder @@ -473,7 +473,7 @@ impl<'module_environment> FuncEnvironment<'module_environment> { } /// Stores the fuel consumption value from `self.fuel_var` into - /// `VMRuntimeLimits`. + /// `VMStoreContext`. fn fuel_save_from_var(&mut self, builder: &mut FunctionBuilder<'_>) { let (addr, offset) = self.fuel_addr_offset(); let fuel_consumed = builder.use_var(self.fuel_var); @@ -483,12 +483,12 @@ impl<'module_environment> FuncEnvironment<'module_environment> { } /// Returns the `(address, offset)` of the fuel consumption within - /// `VMRuntimeLimits`, used to perform loads/stores later. + /// `VMStoreContext`, used to perform loads/stores later. fn fuel_addr_offset(&mut self) -> (ir::Value, ir::immediates::Offset32) { - debug_assert!(!self.vmruntime_limits_ptr.is_reserved_value()); + debug_assert!(!self.vmstore_context_ptr.is_reserved_value()); ( - self.vmruntime_limits_ptr, - i32::from(self.offsets.ptr.vmruntime_limits_fuel_consumed()).into(), + self.vmstore_context_ptr, + i32::from(self.offsets.ptr.vmstore_context_fuel_consumed()).into(), ) } @@ -672,15 +672,12 @@ impl<'module_environment> FuncEnvironment<'module_environment> { // We keep the deadline cached in a register to speed the checks // in the common case (between epoch ticks) but we want to do a // precise check here by reloading the cache first. 
- let deadline = - builder.ins().load( - ir::types::I64, - ir::MemFlags::trusted(), - self.vmruntime_limits_ptr, - ir::immediates::Offset32::new( - self.offsets.ptr.vmruntime_limits_epoch_deadline() as i32 - ), - ); + let deadline = builder.ins().load( + ir::types::I64, + ir::MemFlags::trusted(), + self.vmstore_context_ptr, + ir::immediates::Offset32::new(self.offsets.ptr.vmstore_context_epoch_deadline() as i32), + ); builder.def_var(self.epoch_deadline_var, deadline); self.epoch_check_cached(builder, cur_epoch_value, continuation_block); @@ -3088,10 +3085,10 @@ impl FuncEnvironment<'_> { self.conditionally_trap(builder, overflow, ir::TrapCode::STACK_OVERFLOW); } - // If the `vmruntime_limits_ptr` variable will get used then we initialize - // it here. + // If the `vmstore_context_ptr` variable will get used then we + // initialize it here. if self.tunables.consume_fuel || self.tunables.epoch_interruption { - self.declare_vmruntime_limits_ptr(builder); + self.declare_vmstore_context_ptr(builder); } // Additionally we initialize `fuel_var` if it will get used. if self.tunables.consume_fuel { diff --git a/crates/environ/src/builtin.rs b/crates/environ/src/builtin.rs index 1b9d322b366e..a446829af7d8 100644 --- a/crates/environ/src/builtin.rs +++ b/crates/environ/src/builtin.rs @@ -116,7 +116,7 @@ macro_rules! foreach_builtin_function { // Wasm code, so that it doesn't need to make a libcall to go from // id to `VMFuncRef`. That will be a little tricky: it will also // require updating the pointer to the slab in the `VMContext` (or - // `VMRuntimeLimits` or wherever we put it) when the slab is + // `VMStoreContext` or wherever we put it) when the slab is // resized. 
#[cfg(feature = "gc")] get_interned_func_ref( diff --git a/crates/environ/src/component/vmcomponent_offsets.rs b/crates/environ/src/component/vmcomponent_offsets.rs index 429a186cc689..9dbc0ee1a513 100644 --- a/crates/environ/src/component/vmcomponent_offsets.rs +++ b/crates/environ/src/component/vmcomponent_offsets.rs @@ -3,7 +3,7 @@ // struct VMComponentContext { // magic: u32, // builtins: &'static VMComponentBuiltins, -// limits: *const VMRuntimeLimits, +// limits: *const VMStoreContext, // flags: [VMGlobalDefinition; component.num_runtime_component_instances], // trampoline_func_refs: [VMFuncRef; component.num_trampolines], // lowerings: [VMLowering; component.num_lowerings], @@ -61,7 +61,7 @@ pub struct VMComponentOffsets

{ // precalculated offsets of various member fields magic: u32, builtins: u32, - limits: u32, + vm_store_context: u32, flags: u32, trampoline_func_refs: u32, lowerings: u32, @@ -98,7 +98,7 @@ impl VMComponentOffsets

{ num_resources: component.num_resources, magic: 0, builtins: 0, - limits: 0, + vm_store_context: 0, flags: 0, trampoline_func_refs: 0, lowerings: 0, @@ -138,7 +138,7 @@ impl VMComponentOffsets

{ size(magic) = 4u32, align(u32::from(ret.ptr.size())), size(builtins) = ret.ptr.size(), - size(limits) = ret.ptr.size(), + size(vm_store_context) = ret.ptr.size(), align(16), size(flags) = cmul(ret.num_runtime_component_instances, ret.ptr.size_of_vmglobal_definition()), align(u32::from(ret.ptr.size())), @@ -186,10 +186,10 @@ impl VMComponentOffsets

{ self.flags + index.as_u32() * u32::from(self.ptr.size_of_vmglobal_definition()) } - /// The offset of the `limits` field. + /// The offset of the `vm_store_context` field. #[inline] - pub fn limits(&self) -> u32 { - self.limits + pub fn vm_store_context(&self) -> u32 { + self.vm_store_context } /// The offset of the `trampoline_func_refs` field. diff --git a/crates/environ/src/vmoffsets.rs b/crates/environ/src/vmoffsets.rs index bbcf233843ca..9923a62643c3 100644 --- a/crates/environ/src/vmoffsets.rs +++ b/crates/environ/src/vmoffsets.rs @@ -8,7 +8,7 @@ // // these fields is a compile-time constant when using `HostPtr`. // magic: u32, // _padding: u32, // (On 64-bit systems) -// runtime_limits: *const VMRuntimeLimits, +// vm_store_context: *const VMStoreContext, // builtin_functions: *mut VMBuiltinFunctionsArray, // callee: *mut VMFunctionBody, // epoch_ptr: *mut AtomicU64, @@ -109,7 +109,7 @@ pub trait PtrSize { fn size(&self) -> u8; - /// The offset of the `VMContext::runtime_limits` field - fn vmcontext_runtime_limits(&self) -> u8 { + /// The offset of the `VMContext::vm_store_context` field + fn vmcontext_store_context(&self) -> u8 { u8::try_from(align( u32::try_from(core::mem::size_of::()).unwrap(), u32::from(self.size()), @@ -119,7 +119,7 @@ pub trait PtrSize { /// The offset of the `VMContext::builtin_functions` field fn vmcontext_builtin_functions(&self) -> u8 { - self.vmcontext_runtime_limits() + self.size() + self.vmcontext_store_context() + self.size() } /// The offset of the `array_call` field.
@@ -165,39 +165,39 @@ pub trait PtrSize { 4 } - // Offsets within `VMRuntimeLimits` + // Offsets within `VMStoreContext` - /// Return the offset of the `fuel_consumed` field of `VMRuntimeLimits` + /// Return the offset of the `fuel_consumed` field of `VMStoreContext` #[inline] - fn vmruntime_limits_fuel_consumed(&self) -> u8 { + fn vmstore_context_fuel_consumed(&self) -> u8 { 0 } - /// Return the offset of the `epoch_deadline` field of `VMRuntimeLimits` + /// Return the offset of the `epoch_deadline` field of `VMStoreContext` #[inline] - fn vmruntime_limits_epoch_deadline(&self) -> u8 { - self.vmruntime_limits_fuel_consumed() + 8 + fn vmstore_context_epoch_deadline(&self) -> u8 { + self.vmstore_context_fuel_consumed() + 8 } - /// Return the offset of the `stack_limit` field of `VMRuntimeLimits` + /// Return the offset of the `stack_limit` field of `VMStoreContext` #[inline] - fn vmruntime_limits_stack_limit(&self) -> u8 { - self.vmruntime_limits_epoch_deadline() + 8 + fn vmstore_context_stack_limit(&self) -> u8 { + self.vmstore_context_epoch_deadline() + 8 } - /// Return the offset of the `last_wasm_exit_fp` field of `VMRuntimeLimits`. - fn vmruntime_limits_last_wasm_exit_fp(&self) -> u8 { - self.vmruntime_limits_stack_limit() + self.size() + /// Return the offset of the `last_wasm_exit_fp` field of `VMStoreContext`. + fn vmstore_context_last_wasm_exit_fp(&self) -> u8 { + self.vmstore_context_stack_limit() + self.size() } - /// Return the offset of the `last_wasm_exit_pc` field of `VMRuntimeLimits`. - fn vmruntime_limits_last_wasm_exit_pc(&self) -> u8 { - self.vmruntime_limits_last_wasm_exit_fp() + self.size() + /// Return the offset of the `last_wasm_exit_pc` field of `VMStoreContext`. + fn vmstore_context_last_wasm_exit_pc(&self) -> u8 { + self.vmstore_context_last_wasm_exit_fp() + self.size() } - /// Return the offset of the `last_wasm_entry_fp` field of `VMRuntimeLimits`. 
- fn vmruntime_limits_last_wasm_entry_fp(&self) -> u8 { - self.vmruntime_limits_last_wasm_exit_pc() + self.size() + /// Return the offset of the `last_wasm_entry_fp` field of `VMStoreContext`. + fn vmstore_context_last_wasm_entry_fp(&self) -> u8 { + self.vmstore_context_last_wasm_exit_pc() + self.size() } // Offsets within `VMMemoryDefinition` @@ -246,7 +246,7 @@ pub trait PtrSize { 0 } - /// Return the offset to the `VMRuntimeLimits` structure + /// Return the offset to the `VMStoreContext` structure #[inline] fn vmctx_runtime_limits(&self) -> u8 { self.vmctx_magic() + self.size() diff --git a/crates/wasmtime/src/runtime/func.rs b/crates/wasmtime/src/runtime/func.rs index aae22ceb29fe..029197840a91 100644 --- a/crates/wasmtime/src/runtime/func.rs +++ b/crates/wasmtime/src/runtime/func.rs @@ -1632,7 +1632,7 @@ fn enter_wasm(store: &mut StoreContextMut<'_, T>) -> Option { // For asynchronous stores then each call happens on a separate native // stack. This means that the previous stack limit is no longer relevant // because we're on a separate stack. 
- if unsafe { *store.0.runtime_limits().stack_limit.get() } != usize::MAX + if unsafe { *store.0.vm_store_context().stack_limit.get() } != usize::MAX && !store.0.async_support() { return None; @@ -1676,7 +1676,7 @@ fn enter_wasm(store: &mut StoreContextMut<'_, T>) -> Option { let wasm_stack_limit = stack_pointer - store.engine().config().max_wasm_stack; let prev_stack = unsafe { mem::replace( - &mut *store.0.runtime_limits().stack_limit.get(), + &mut *store.0.vm_store_context().stack_limit.get(), wasm_stack_limit, ) }; @@ -1693,7 +1693,7 @@ fn exit_wasm(store: &mut StoreContextMut<'_, T>, prev_stack: Option) { }; unsafe { - *store.0.runtime_limits().stack_limit.get() = prev_stack; + *store.0.vm_store_context().stack_limit.get() = prev_stack; } } diff --git a/crates/wasmtime/src/runtime/store.rs b/crates/wasmtime/src/runtime/store.rs index 8a4a87780925..634bea850234 100644 --- a/crates/wasmtime/src/runtime/store.rs +++ b/crates/wasmtime/src/runtime/store.rs @@ -86,7 +86,7 @@ use crate::runtime::vm::GcRootsList; use crate::runtime::vm::{ ExportGlobal, GcStore, InstanceAllocationRequest, InstanceAllocator, InstanceHandle, Interpreter, InterpreterRef, ModuleRuntimeInfo, OnDemandInstanceAllocator, SignalHandler, - StoreBox, StorePtr, Unwind, VMContext, VMFuncRef, VMGcRef, VMRuntimeLimits, + StoreBox, StorePtr, Unwind, VMContext, VMFuncRef, VMGcRef, VMStoreContext, }; use crate::trampoline::VMHostGlobalContext; use crate::RootSet; @@ -305,7 +305,7 @@ pub struct StoreOpaque { _marker: marker::PhantomPinned, engine: Engine, - runtime_limits: VMRuntimeLimits, + vm_store_context: VMStoreContext, instances: Vec, #[cfg(feature = "component-model")] num_component_instances: usize, @@ -523,7 +523,7 @@ impl Store { inner: StoreOpaque { _marker: marker::PhantomPinned, engine: engine.clone(), - runtime_limits: Default::default(), + vm_store_context: Default::default(), instances: Vec::new(), #[cfg(feature = "component-model")] num_component_instances: 0, @@ -1404,8 +1404,8 @@ impl 
StoreOpaque { } #[inline] - pub fn runtime_limits(&self) -> &VMRuntimeLimits { - &self.runtime_limits + pub fn vm_store_context(&self) -> &VMStoreContext { + &self.vm_store_context } #[inline(never)] @@ -1625,12 +1625,12 @@ impl StoreOpaque { self.engine().tunables().consume_fuel, "fuel is not configured in this store" ); - let injected_fuel = unsafe { *self.runtime_limits.fuel_consumed.get() }; + let injected_fuel = unsafe { *self.vm_store_context.fuel_consumed.get() }; Ok(get_fuel(injected_fuel, self.fuel_reserve)) } fn refuel(&mut self) -> bool { - let injected_fuel = unsafe { &mut *self.runtime_limits.fuel_consumed.get() }; + let injected_fuel = unsafe { &mut *self.vm_store_context.fuel_consumed.get() }; refuel( injected_fuel, &mut self.fuel_reserve, @@ -1643,7 +1643,7 @@ impl StoreOpaque { self.engine().tunables().consume_fuel, "fuel is not configured in this store" ); - let injected_fuel = unsafe { &mut *self.runtime_limits.fuel_consumed.get() }; + let injected_fuel = unsafe { &mut *self.vm_store_context.fuel_consumed.get() }; set_fuel( injected_fuel, &mut self.fuel_reserve, @@ -1678,8 +1678,8 @@ impl StoreOpaque { } #[inline] - pub fn vmruntime_limits(&self) -> NonNull { - NonNull::from(&self.runtime_limits) + pub fn vm_store_context_ptr(&self) -> NonNull { + NonNull::from(&self.vm_store_context) } #[inline] @@ -2083,13 +2083,18 @@ impl StoreInner { // Set a new deadline based on the "epoch deadline delta". // // Safety: this is safe because the epoch deadline in the - // `VMRuntimeLimits` is accessed only here and by Wasm guest code + // `VMStoreContext` is accessed only here and by Wasm guest code // running in this store, and we have a `&mut self` here. // // Also, note that when this update is performed while Wasm is // on the stack, the Wasm will reload the new value once we // return into it. 
- let epoch_deadline = unsafe { self.vmruntime_limits().as_mut().epoch_deadline.get_mut() }; + let epoch_deadline = unsafe { + self.vm_store_context_ptr() + .as_mut() + .epoch_deadline + .get_mut() + }; *epoch_deadline = self.engine().current_epoch() + delta; } @@ -2110,7 +2115,12 @@ impl StoreInner { // Safety: this is safe because, as above, it is only invoked // from within `new_epoch` which is called from guest Wasm // code, which will have an exclusive borrow on the Store. - let epoch_deadline = unsafe { self.vmruntime_limits().as_mut().epoch_deadline.get_mut() }; + let epoch_deadline = unsafe { + self.vm_store_context_ptr() + .as_mut() + .epoch_deadline + .get_mut() + }; *epoch_deadline } } diff --git a/crates/wasmtime/src/runtime/vm.rs b/crates/wasmtime/src/runtime/vm.rs index 5392571d2278..0607b9f5566d 100644 --- a/crates/wasmtime/src/runtime/vm.rs +++ b/crates/wasmtime/src/runtime/vm.rs @@ -93,7 +93,7 @@ pub use crate::runtime::vm::unwind::*; pub use crate::runtime::vm::vmcontext::{ VMArrayCallFunction, VMArrayCallHostFuncContext, VMContext, VMFuncRef, VMFunctionBody, VMFunctionImport, VMGlobalDefinition, VMGlobalImport, VMMemoryDefinition, VMMemoryImport, - VMOpaqueContext, VMRuntimeLimits, VMTableImport, VMTagImport, VMWasmCallFunction, ValRaw, + VMOpaqueContext, VMStoreContext, VMTableImport, VMTagImport, VMWasmCallFunction, ValRaw, }; pub use send_sync_ptr::SendSyncPtr; diff --git a/crates/wasmtime/src/runtime/vm/component.rs b/crates/wasmtime/src/runtime/vm/component.rs index 830c9ddb82c0..2d4aef8938d3 100644 --- a/crates/wasmtime/src/runtime/vm/component.rs +++ b/crates/wasmtime/src/runtime/vm/component.rs @@ -468,8 +468,8 @@ impl ComponentInstance { *self.vmctx_plus_offset_mut(self.offsets.magic()) = VMCOMPONENT_MAGIC; *self.vmctx_plus_offset_mut(self.offsets.builtins()) = VmPtr::from(NonNull::from(&libcalls::VMComponentBuiltins::INIT)); - *self.vmctx_plus_offset_mut(self.offsets.limits()) = - VmPtr::from(self.store.0.as_ref().vmruntime_limits()); + 
*self.vmctx_plus_offset_mut(self.offsets.vm_store_context()) = + VmPtr::from(self.store.0.as_ref().vm_store_context_ptr()); for i in 0..self.offsets.num_runtime_component_instances { let i = RuntimeComponentInstanceIndex::from_u32(i); diff --git a/crates/wasmtime/src/runtime/vm/instance.rs b/crates/wasmtime/src/runtime/vm/instance.rs index bbaff01573ad..a1f4590a1e7a 100644 --- a/crates/wasmtime/src/runtime/vm/instance.rs +++ b/crates/wasmtime/src/runtime/vm/instance.rs @@ -8,7 +8,7 @@ use crate::runtime::vm::memory::{Memory, RuntimeMemoryCreator}; use crate::runtime::vm::table::{Table, TableElement, TableElementType}; use crate::runtime::vm::vmcontext::{ VMBuiltinFunctionsArray, VMContext, VMFuncRef, VMFunctionImport, VMGlobalDefinition, - VMGlobalImport, VMMemoryDefinition, VMMemoryImport, VMOpaqueContext, VMRuntimeLimits, + VMGlobalImport, VMMemoryDefinition, VMMemoryImport, VMOpaqueContext, VMStoreContext, VMTableDefinition, VMTableImport, VMTagDefinition, VMTagImport, }; use crate::runtime::vm::{ @@ -582,7 +582,7 @@ impl Instance { - /// Return a pointer to the interrupts structure + /// Return a pointer to this instance's `VMStoreContext` pointer #[inline] - pub fn runtime_limits(&mut self) -> NonNull>> { + pub fn vm_store_context(&mut self) -> NonNull>> { unsafe { self.vmctx_plus_offset_mut(self.offsets().ptr.vmctx_runtime_limits()) } } @@ -611,14 +611,14 @@ impl Instance { self.store = store.map(VMStoreRawPtr); if let Some(mut store) = store { let store = store.as_mut(); - self.runtime_limits() - .write(Some(store.vmruntime_limits().into())); + self.vm_store_context() + .write(Some(store.vm_store_context_ptr().into())); #[cfg(target_has_atomic = "64")] self.epoch_ptr() .write(Some(NonNull::from(store.engine().epoch_counter()).into())); self.set_gc_heap(store.gc_store_mut().ok()); } else { - self.runtime_limits().write(None); + self.vm_store_context().write(None); #[cfg(target_has_atomic = "64")] self.epoch_ptr().write(None); self.set_gc_heap(None); diff --git a/crates/wasmtime/src/runtime/vm/traphandlers.rs
b/crates/wasmtime/src/runtime/vm/traphandlers.rs index 23162d846359..62da746f404c 100644 --- a/crates/wasmtime/src/runtime/vm/traphandlers.rs +++ b/crates/wasmtime/src/runtime/vm/traphandlers.rs @@ -19,7 +19,7 @@ use crate::prelude::*; use crate::runtime::module::lookup_code; use crate::runtime::store::{ExecutorRef, StoreOpaque}; use crate::runtime::vm::sys::traphandlers; -use crate::runtime::vm::{Instance, InterpreterRef, VMContext, VMRuntimeLimits}; +use crate::runtime::vm::{Instance, InterpreterRef, VMContext, VMStoreContext}; use crate::{StoreContextMut, WasmBacktrace}; use core::cell::Cell; use core::ops::Range; @@ -426,20 +426,20 @@ mod call_thread_state { #[cfg(feature = "coredump")] pub(super) capture_coredump: bool, - pub(crate) limits: NonNull, + pub(crate) vm_store_context: NonNull, pub(crate) unwinder: &'static dyn Unwind, pub(super) prev: Cell, #[cfg(all(has_native_signals, unix))] pub(crate) async_guard_range: Range<*mut u8>, - // The values of `VMRuntimeLimits::last_wasm_{exit_{pc,fp},entry_sp}` - // for the *previous* `CallThreadState` for this same store/limits. Our - // *current* last wasm PC/FP/SP are saved in `self.limits`. We save a - // copy of the old registers here because the `VMRuntimeLimits` + // The values of `VMStoreContext::last_wasm_{exit_{pc,fp},entry_sp}` for + // the *previous* `CallThreadState` for this same store/limits. Our + // *current* last wasm PC/FP/SP are saved in `self.vm_store_context`. We + // save a copy of the old registers here because the `VMStoreContext` // typically doesn't change across nested calls into Wasm (i.e. they are - // typically calls back into the same store and `self.limits == - // self.prev.limits`) and we must to maintain the list of + // typically calls back into the same store and `self.vm_store_context + // == self.prev.vm_store_context`) and we must maintain the list of // contiguous-Wasm-frames stack regions for backtracing purposes.
old_last_wasm_exit_fp: Cell, old_last_wasm_exit_pc: Cell, @@ -453,10 +453,10 @@ mod call_thread_state { debug_assert!(self.unwind.replace(None).is_none()); unsafe { - let limits = self.limits.as_ref(); - *limits.last_wasm_exit_fp.get() = self.old_last_wasm_exit_fp.get(); - *limits.last_wasm_exit_pc.get() = self.old_last_wasm_exit_pc.get(); - *limits.last_wasm_entry_fp.get() = self.old_last_wasm_entry_fp.get(); + let cx = self.vm_store_context.as_ref(); + *cx.last_wasm_exit_fp.get() = self.old_last_wasm_exit_fp.get(); + *cx.last_wasm_exit_pc.get() = self.old_last_wasm_exit_pc.get(); + *cx.last_wasm_entry_fp.get() = self.old_last_wasm_entry_fp.get(); } } } @@ -466,8 +466,8 @@ mod call_thread_state { #[inline] pub(super) fn new(store: &mut StoreOpaque, caller: NonNull) -> CallThreadState { - let limits = unsafe { - Instance::from_vmctx(caller, |i| i.runtime_limits()) + let vm_store_context = unsafe { + Instance::from_vmctx(caller, |i| i.vm_store_context()) .read() .unwrap() .as_non_null() @@ -486,18 +486,18 @@ mod call_thread_state { capture_backtrace: store.engine().config().wasm_backtrace, #[cfg(feature = "coredump")] capture_coredump: store.engine().config().coredump_on_trap, - limits, + vm_store_context, #[cfg(all(has_native_signals, unix))] async_guard_range: store.async_guard_range(), prev: Cell::new(ptr::null()), old_last_wasm_exit_fp: Cell::new(unsafe { - *limits.as_ref().last_wasm_exit_fp.get() + *vm_store_context.as_ref().last_wasm_exit_fp.get() }), old_last_wasm_exit_pc: Cell::new(unsafe { - *limits.as_ref().last_wasm_exit_pc.get() + *vm_store_context.as_ref().last_wasm_exit_pc.get() }), old_last_wasm_entry_fp: Cell::new(unsafe { - *limits.as_ref().last_wasm_entry_fp.get() + *vm_store_context.as_ref().last_wasm_entry_fp.get() }), } } @@ -598,8 +598,8 @@ impl CallThreadState { (None, None) } UnwindReason::Trap(_) => ( - self.capture_backtrace(self.limits.as_ptr(), None), - self.capture_coredump(self.limits.as_ptr(), None), + 
self.capture_backtrace(self.vm_store_context.as_ptr(), None), + self.capture_coredump(self.vm_store_context.as_ptr(), None), ), }; self.unwind.set(Some((reason, backtrace, coredump))); @@ -624,7 +624,7 @@ impl CallThreadState { fn capture_backtrace( &self, - limits: *const VMRuntimeLimits, + limits: *const VMStoreContext, trap_pc_and_fp: Option<(usize, usize)>, ) -> Option { if !self.capture_backtrace { @@ -717,8 +717,8 @@ impl CallThreadState { faulting_addr: Option, trap: wasmtime_environ::Trap, ) { - let backtrace = self.capture_backtrace(self.limits.as_ptr(), Some((pc, fp))); - let coredump = self.capture_coredump(self.limits.as_ptr(), Some((pc, fp))); + let backtrace = self.capture_backtrace(self.vm_store_context.as_ptr(), Some((pc, fp))); + let coredump = self.capture_coredump(self.vm_store_context.as_ptr(), Some((pc, fp))); self.unwind.set(Some(( UnwindReason::Trap(TrapReason::Jit { pc, diff --git a/crates/wasmtime/src/runtime/vm/traphandlers/backtrace.rs b/crates/wasmtime/src/runtime/vm/traphandlers/backtrace.rs index 276f8cca04c8..77fbfe58b534 100644 --- a/crates/wasmtime/src/runtime/vm/traphandlers/backtrace.rs +++ b/crates/wasmtime/src/runtime/vm/traphandlers/backtrace.rs @@ -11,7 +11,7 @@ //! pointer (FP) and program counter (PC) each time we call into Wasm and Wasm //! calls into the host via trampolines (see //! `crates/wasmtime/src/runtime/vm/trampolines`). The most recent entry is -//! stored in `VMRuntimeLimits` and older entries are saved in +//! stored in `VMStoreContext` and older entries are saved in //! `CallThreadState`. This lets us identify ranges of contiguous Wasm frames on //! the stack. //! @@ -25,7 +25,7 @@ use crate::prelude::*; use crate::runtime::store::StoreOpaque; use crate::runtime::vm::{ traphandlers::{tls, CallThreadState}, - Unwind, VMRuntimeLimits, + Unwind, VMStoreContext, }; use core::ops::ControlFlow; @@ -65,10 +65,12 @@ impl Backtrace { /// Capture the current Wasm stack in a backtrace. 
pub fn new(store: &StoreOpaque) -> Backtrace { - let limits = store.runtime_limits(); + let vm_store_context = store.vm_store_context(); let unwind = store.unwinder(); tls::with(|state| match state { - Some(state) => unsafe { Self::new_with_trap_state(limits, unwind, state, None) }, + Some(state) => unsafe { + Self::new_with_trap_state(vm_store_context, unwind, state, None) + }, None => Backtrace(vec![]), }) } @@ -77,15 +79,15 @@ impl Backtrace { /// /// If Wasm hit a trap, and we calling this from the trap handler, then the /// Wasm exit trampoline didn't run, and we use the provided PC and FP - /// instead of looking them up in `VMRuntimeLimits`. + /// instead of looking them up in `VMStoreContext`. pub(crate) unsafe fn new_with_trap_state( - limits: *const VMRuntimeLimits, + vm_store_context: *const VMStoreContext, unwind: &dyn Unwind, state: &CallThreadState, trap_pc_and_fp: Option<(usize, usize)>, ) -> Backtrace { let mut frames = vec![]; - Self::trace_with_trap_state(limits, unwind, state, trap_pc_and_fp, |frame| { + Self::trace_with_trap_state(vm_store_context, unwind, state, trap_pc_and_fp, |frame| { frames.push(frame); ControlFlow::Continue(()) }); @@ -95,10 +97,12 @@ impl Backtrace { /// Walk the current Wasm stack, calling `f` for each frame we walk. #[cfg(feature = "gc")] pub fn trace(store: &StoreOpaque, f: impl FnMut(Frame) -> ControlFlow<()>) { - let limits = store.runtime_limits(); + let vm_store_context = store.vm_store_context(); let unwind = store.unwinder(); tls::with(|state| match state { - Some(state) => unsafe { Self::trace_with_trap_state(limits, unwind, state, None, f) }, + Some(state) => unsafe { + Self::trace_with_trap_state(vm_store_context, unwind, state, None, f) + }, None => {} }); } @@ -107,9 +111,9 @@ impl Backtrace { /// /// If Wasm hit a trap, and we calling this from the trap handler, then the /// Wasm exit trampoline didn't run, and we use the provided PC and FP - /// instead of looking them up in `VMRuntimeLimits`. 
+ /// instead of looking them up in `VMStoreContext`. pub(crate) unsafe fn trace_with_trap_state( - limits: *const VMRuntimeLimits, + vm_store_context: *const VMStoreContext, unwind: &dyn Unwind, state: &CallThreadState, trap_pc_and_fp: Option<(usize, usize)>, @@ -122,14 +126,17 @@ impl Backtrace { // trampoline did not get a chance to save the last Wasm PC and FP, // and we need to use the plumbed-through values instead. Some((pc, fp)) => { - assert!(core::ptr::eq(limits, state.limits.as_ptr())); + assert!(core::ptr::eq( + vm_store_context, + state.vm_store_context.as_ptr() + )); (pc, fp) } // Either there is no Wasm currently on the stack, or we exited Wasm // through the Wasm-to-host trampoline. None => { - let pc = *(*limits).last_wasm_exit_pc.get(); - let fp = *(*limits).last_wasm_exit_fp.get(); + let pc = *(*vm_store_context).last_wasm_exit_pc.get(); + let fp = *(*vm_store_context).last_wasm_exit_fp.get(); (pc, fp) } }; @@ -137,12 +144,12 @@ impl Backtrace { let activations = core::iter::once(( last_wasm_exit_pc, last_wasm_exit_fp, - *(*limits).last_wasm_entry_fp.get(), + *(*vm_store_context).last_wasm_entry_fp.get(), )) .chain( state .iter() - .filter(|state| core::ptr::eq(limits, state.limits.as_ptr())) + .filter(|state| core::ptr::eq(vm_store_context, state.vm_store_context.as_ptr())) .map(|state| { ( state.old_last_wasm_exit_pc(), diff --git a/crates/wasmtime/src/runtime/vm/traphandlers/coredump_disabled.rs b/crates/wasmtime/src/runtime/vm/traphandlers/coredump_disabled.rs index d4dc86a1d9bc..8726e85685ba 100644 --- a/crates/wasmtime/src/runtime/vm/traphandlers/coredump_disabled.rs +++ b/crates/wasmtime/src/runtime/vm/traphandlers/coredump_disabled.rs @@ -1,5 +1,5 @@ use crate::runtime::vm::traphandlers::CallThreadState; -use crate::runtime::vm::VMRuntimeLimits; +use crate::runtime::vm::VMStoreContext; /// A WebAssembly Coredump #[derive(Debug)] @@ -8,7 +8,7 @@ pub enum CoreDumpStack {} impl CallThreadState { pub(super) fn capture_coredump( &self, - 
_limits: *const VMRuntimeLimits, + _ctx: *const VMStoreContext, _trap_pc_and_fp: Option<(usize, usize)>, ) -> Option { None diff --git a/crates/wasmtime/src/runtime/vm/traphandlers/coredump_enabled.rs b/crates/wasmtime/src/runtime/vm/traphandlers/coredump_enabled.rs index ee598e1d0ca2..64287324b675 100644 --- a/crates/wasmtime/src/runtime/vm/traphandlers/coredump_enabled.rs +++ b/crates/wasmtime/src/runtime/vm/traphandlers/coredump_enabled.rs @@ -1,6 +1,6 @@ use super::CallThreadState; use crate::prelude::*; -use crate::runtime::vm::{Backtrace, VMRuntimeLimits}; +use crate::runtime::vm::{Backtrace, VMStoreContext}; use wasm_encoder::CoreDumpValue; /// A WebAssembly Coredump @@ -25,14 +25,15 @@ pub struct CoreDumpStack { impl CallThreadState { pub(super) fn capture_coredump( &self, - limits: *const VMRuntimeLimits, + vm_store_context: *const VMStoreContext, trap_pc_and_fp: Option<(usize, usize)>, ) -> Option { if !self.capture_coredump { return None; } - let bt = - unsafe { Backtrace::new_with_trap_state(limits, self.unwinder, self, trap_pc_and_fp) }; + let bt = unsafe { + Backtrace::new_with_trap_state(vm_store_context, self.unwinder, self, trap_pc_and_fp) + }; Some(CoreDumpStack { bt, diff --git a/crates/wasmtime/src/runtime/vm/vmcontext.rs b/crates/wasmtime/src/runtime/vm/vmcontext.rs index fd4e0a11b52b..9af40e983f5f 100644 --- a/crates/wasmtime/src/runtime/vm/vmcontext.rs +++ b/crates/wasmtime/src/runtime/vm/vmcontext.rs @@ -975,10 +975,17 @@ const _: () = { ) }; -/// Structure used to control interrupting wasm code. +/// Structure that holds all mutable context that is shared across all instances +/// in a store, for example data related to fuel or epochs. +/// +/// `VMStoreContext`s are one-to-one with `wasmtime::Store`s, the same way that +/// `VMContext`s are one-to-one with `wasmtime::Instance`s. 
And the same way +/// that multiple `wasmtime::Instance`s may be associated with the same +/// `wasmtime::Store`, multiple `VMContext`s hold a pointer to the same +/// `VMStoreContext` when they are associated with the same `wasmtime::Store`. #[derive(Debug)] #[repr(C)] -pub struct VMRuntimeLimits { +pub struct VMStoreContext { // NB: 64-bit integer fields are located first with pointer-sized fields // trailing afterwards. That makes the offsets in this structure easier to // calculate on 32-bit platforms as we don't have to worry about the @@ -1046,19 +1053,19 @@ pub struct VMRuntimeLimits { pub last_wasm_entry_fp: UnsafeCell, } -// The `VMRuntimeLimits` type is a pod-type with no destructor, and we don't +// The `VMStoreContext` type is a pod-type with no destructor, and we don't // access any fields from other threads, so add in these trait impls which are // otherwise not available due to the `fuel_consumed` and `epoch_deadline` -// variables in `VMRuntimeLimits`. -unsafe impl Send for VMRuntimeLimits {} -unsafe impl Sync for VMRuntimeLimits {} +// variables in `VMStoreContext`. +unsafe impl Send for VMStoreContext {} +unsafe impl Sync for VMStoreContext {} // SAFETY: the above structure is repr(C) and only contains `VmSafe` fields. 
-unsafe impl VmSafe for VMRuntimeLimits {} +unsafe impl VmSafe for VMStoreContext {} -impl Default for VMRuntimeLimits { - fn default() -> VMRuntimeLimits { - VMRuntimeLimits { +impl Default for VMStoreContext { + fn default() -> VMStoreContext { + VMStoreContext { stack_limit: UnsafeCell::new(usize::max_value()), fuel_consumed: UnsafeCell::new(0), epoch_deadline: UnsafeCell::new(0), @@ -1070,8 +1077,8 @@ impl Default for VMRuntimeLimits { } #[cfg(test)] -mod test_vmruntime_limits { - use super::VMRuntimeLimits; +mod test_vmstore_context { + use super::VMStoreContext; use core::mem::offset_of; use wasmtime_environ::{HostPtr, Module, PtrSize, VMOffsets}; @@ -1080,28 +1087,28 @@ mod test_vmruntime_limits { let module = Module::new(); let offsets = VMOffsets::new(HostPtr, &module); assert_eq!( - offset_of!(VMRuntimeLimits, stack_limit), - usize::from(offsets.ptr.vmruntime_limits_stack_limit()) + offset_of!(VMStoreContext, stack_limit), + usize::from(offsets.ptr.vmstore_context_stack_limit()) ); assert_eq!( - offset_of!(VMRuntimeLimits, fuel_consumed), - usize::from(offsets.ptr.vmruntime_limits_fuel_consumed()) + offset_of!(VMStoreContext, fuel_consumed), + usize::from(offsets.ptr.vmstore_context_fuel_consumed()) ); assert_eq!( - offset_of!(VMRuntimeLimits, epoch_deadline), - usize::from(offsets.ptr.vmruntime_limits_epoch_deadline()) + offset_of!(VMStoreContext, epoch_deadline), + usize::from(offsets.ptr.vmstore_context_epoch_deadline()) ); assert_eq!( - offset_of!(VMRuntimeLimits, last_wasm_exit_fp), - usize::from(offsets.ptr.vmruntime_limits_last_wasm_exit_fp()) + offset_of!(VMStoreContext, last_wasm_exit_fp), + usize::from(offsets.ptr.vmstore_context_last_wasm_exit_fp()) ); assert_eq!( - offset_of!(VMRuntimeLimits, last_wasm_exit_pc), - usize::from(offsets.ptr.vmruntime_limits_last_wasm_exit_pc()) + offset_of!(VMStoreContext, last_wasm_exit_pc), + usize::from(offsets.ptr.vmstore_context_last_wasm_exit_pc()) ); assert_eq!( - offset_of!(VMRuntimeLimits, 
last_wasm_entry_fp), - usize::from(offsets.ptr.vmruntime_limits_last_wasm_entry_fp()) + offset_of!(VMStoreContext, last_wasm_entry_fp), + usize::from(offsets.ptr.vmstore_context_last_wasm_entry_fp()) ); } } diff --git a/tests/all/traps.rs b/tests/all/traps.rs index 36bae7729cad..cab0bb6bb5ad 100644 --- a/tests/all/traps.rs +++ b/tests/all/traps.rs @@ -1515,7 +1515,7 @@ fn dont_see_stale_stack_walking_registers() -> Result<()> { (export "get_trap" (func $host_get_trap)) ;; We enter and exit Wasm, which saves registers in the - ;; `VMRuntimeLimits`. Later, when we call a re-exported host + ;; `VMStoreContext`. Later, when we call a re-exported host ;; function, we should not accidentally reuse those saved ;; registers. (start $start) diff --git a/winch/codegen/src/codegen/mod.rs b/winch/codegen/src/codegen/mod.rs index f3fdb20fc1c0..f990d27f3b93 100644 --- a/winch/codegen/src/codegen/mod.rs +++ b/winch/codegen/src/codegen/mod.rs @@ -1093,7 +1093,7 @@ where /// Checks if fuel consumption is enabled and emits a series of instructions /// that check the current fuel usage by performing a zero-comparison with - /// the number of units stored in `VMRuntimeLimits`. + /// the number of units stored in `VMStoreContext`. pub fn maybe_emit_fuel_check(&mut self) -> Result<()> { if !self.tunables.consume_fuel { return Ok(()); @@ -1141,10 +1141,10 @@ where } /// Emits a series of instructions that load the `fuel_consumed` field from - /// `VMRuntimeLimits`. + /// `VMStoreContext`. 
fn emit_load_fuel_consumed(&mut self, fuel_reg: Reg) -> Result<()> { let limits_offset = self.env.vmoffsets.ptr.vmctx_runtime_limits(); - let fuel_offset = self.env.vmoffsets.ptr.vmruntime_limits_fuel_consumed(); + let fuel_offset = self.env.vmoffsets.ptr.vmstore_context_fuel_consumed(); self.masm.load_ptr( self.masm.address_at_vmctx(u32::from(limits_offset))?, writable!(fuel_reg), @@ -1222,7 +1222,7 @@ where ) -> Result<()> { let epoch_ptr_offset = self.env.vmoffsets.ptr.vmctx_epoch_ptr(); let runtime_limits_offset = self.env.vmoffsets.ptr.vmctx_runtime_limits(); - let epoch_deadline_offset = self.env.vmoffsets.ptr.vmruntime_limits_epoch_deadline(); + let epoch_deadline_offset = self.env.vmoffsets.ptr.vmstore_context_epoch_deadline(); // Load the current epoch value into `epoch_counter_var`. self.masm.load_ptr( @@ -1238,7 +1238,7 @@ where OperandSize::S64, )?; - // Load the `VMRuntimeLimits`. + // Load the `VMStoreContext`. self.masm.load_ptr( self.masm .address_at_vmctx(u32::from(runtime_limits_offset))?, @@ -1254,7 +1254,7 @@ where ) } - /// Increments the fuel consumed in `VMRuntimeLimits` by flushing + /// Increments the fuel consumed in `VMStoreContext` by flushing /// `self.fuel_consumed` to memory. fn emit_fuel_increment(&mut self) -> Result<()> { let fuel_at_point = std::mem::replace(&mut self.fuel_consumed, 0); @@ -1263,10 +1263,10 @@ where } let limits_offset = self.env.vmoffsets.ptr.vmctx_runtime_limits(); - let fuel_offset = self.env.vmoffsets.ptr.vmruntime_limits_fuel_consumed(); + let fuel_offset = self.env.vmoffsets.ptr.vmstore_context_fuel_consumed(); let limits_reg = self.context.any_gpr(self.masm)?; - // Load `VMRuntimeLimits` into the `limits_reg` reg. + // Load `VMStoreContext` into the `limits_reg` reg. self.masm.load_ptr( self.masm.address_at_vmctx(u32::from(limits_offset))?, writable!(limits_reg), @@ -1289,7 +1289,7 @@ where OperandSize::S64, )?; - // Store the updated fuel consumed to `VMRuntimeLimits`. 
+ // Store the updated fuel consumed to `VMStoreContext`. self.masm.store( scratch!(M).into(), self.masm diff --git a/winch/codegen/src/isa/x64/masm.rs b/winch/codegen/src/isa/x64/masm.rs index 0466e5bf3f45..e3465121734c 100644 --- a/winch/codegen/src/isa/x64/masm.rs +++ b/winch/codegen/src/isa/x64/masm.rs @@ -92,7 +92,7 @@ pub(crate) struct MacroAssembler { asm: Assembler, /// ISA flags. flags: x64_settings::Flags, - /// Shared flags. + /// Shared flags. shared_flags: settings::Flags, /// The target pointer size. ptr_size: OperandSize, @@ -126,12 +126,12 @@ impl Masm for MacroAssembler { let scratch = regs::scratch(); self.load_ptr( - self.address_at_reg(vmctx, ptr_size.vmcontext_runtime_limits().into())?, + self.address_at_reg(vmctx, ptr_size.vmcontext_store_context().into())?, writable!(scratch), )?; self.load_ptr( - Address::offset(scratch, ptr_size.vmruntime_limits_stack_limit().into()), + Address::offset(scratch, ptr_size.vmstore_context_stack_limit().into()), writable!(scratch), )?;