diff --git a/Cargo.lock b/Cargo.lock index 2e9714a7a..32a550f40 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -799,6 +799,7 @@ dependencies = [ "litebox_common_linux", "modular-bitfield", "num_enum", + "thiserror", ] [[package]] @@ -930,6 +931,8 @@ name = "litebox_runner_lvbs" version = "0.1.0" dependencies = [ "litebox", + "litebox_common_linux", + "litebox_common_optee", "litebox_platform_lvbs", "litebox_platform_multiplex", "litebox_shim_optee", diff --git a/dev_tests/src/ratchet.rs b/dev_tests/src/ratchet.rs index 4841ab019..21b3725fc 100644 --- a/dev_tests/src/ratchet.rs +++ b/dev_tests/src/ratchet.rs @@ -35,7 +35,7 @@ fn ratchet_globals() -> Result<()> { ("litebox_runner_lvbs/", 3), ("litebox_runner_snp/", 1), ("litebox_shim_linux/", 1), - ("litebox_shim_optee/", 6), + ("litebox_shim_optee/", 7), ], |file| { Ok(file @@ -65,6 +65,7 @@ fn ratchet_maybe_uninit() -> Result<()> { ("litebox_platform_linux_userland/", 3), ("litebox_platform_lvbs/", 5), ("litebox_shim_linux/", 5), + ("litebox_shim_optee/", 1), ], |file| { Ok(file diff --git a/litebox/src/mm/linux.rs b/litebox/src/mm/linux.rs index fa56b151e..c19408d0e 100644 --- a/litebox/src/mm/linux.rs +++ b/litebox/src/mm/linux.rs @@ -213,6 +213,7 @@ impl core::ops::Add for NonZeroPageSize { } /// A non-zero address that is `ALIGN`-aligned. +#[derive(Clone, Copy)] pub struct NonZeroAddress(usize); impl NonZeroAddress { diff --git a/litebox/src/platform/mod.rs b/litebox/src/platform/mod.rs index b05783629..c12d42958 100644 --- a/litebox/src/platform/mod.rs +++ b/litebox/src/platform/mod.rs @@ -7,6 +7,7 @@ pub mod common_providers; pub mod page_mgmt; pub mod trivial_providers; +pub mod vmap; #[cfg(test)] pub(crate) mod mock; diff --git a/litebox/src/platform/vmap.rs b/litebox/src/platform/vmap.rs new file mode 100644 index 000000000..a5e676846 --- /dev/null +++ b/litebox/src/platform/vmap.rs @@ -0,0 +1,177 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +use crate::platform::page_mgmt::MemoryRegionPermissions; +use thiserror::Error; + +/// A provider to map and unmap physical pages with virtually contiguous addresses. +/// +/// `ALIGN`: The page frame size. +/// +/// This provider is written to implement `litebox_shim_optee::ptr::PhysMutPtr` and +/// `litebox_shim_optee::ptr::PhysConstPtr`. It can benefit other modules which need +/// Linux kernel's `vmap()` and `vunmap()` functionalities (e.g., HVCI/HEKI, drivers). +pub trait VmapProvider { + /// Data structure for an array of physical page addresses which are virtually contiguous. + type PhysPageAddrArray; + + /// Data structure to maintain the mapping information returned by `vmap()`. + type PhysPageMapInfo; + + /// Map the given `PhysPageAddrArray` into virtually contiguous addresses with the given + /// [`PhysPageMapPermissions`] while returning [`PhysPageMapInfo`]. This function + /// expects that it can access and update the page table using `&self`. + /// + /// This function is analogous to Linux kernel's `vmap()`. + /// + /// # Safety + /// + /// The caller should ensure that `pages` are not in active use by other entities + /// (especially, there should be no read/write or write/write conflicts). + /// Unfortunately, LiteBox itself cannot fully guarantee this and it needs some helps + /// from the caller, hypervisor, or hardware. + /// Multiple LiteBox threads might concurrently call this function with overlapping + /// physical pages, so the implementation should safely handle such cases. 
+ unsafe fn vmap( + &self, + pages: Self::PhysPageAddrArray, + perms: PhysPageMapPermissions, + ) -> Result; + + /// Unmap the previously mapped virtually contiguous addresses ([`PhysPageMapInfo`]). + /// Use `&self` to access and update the page table. + /// + /// This function is analogous to Linux kernel's `vunmap()`. + /// + /// # Safety + /// + /// The caller should ensure that the virtual addresses in `vmap_info` are not in active + /// use by other entities. + unsafe fn vunmap(&self, vmap_info: Self::PhysPageMapInfo) -> Result<(), PhysPointerError>; + + /// Validate that the given physical pages do not belong to LiteBox-owned memory. + /// Use `&self` to get the memory layout of the platform (i.e., the physical memory + /// range assigned to LiteBox). + /// + /// This function is a no-op if there is no other world or VM sharing the physical memory. + /// + /// Returns `Ok(())` if valid. If the pages are not valid, returns `Err(PhysPointerError)`. + fn validate(&self, pages: Self::PhysPageAddrArray) -> Result<(), PhysPointerError>; + + /// Protect the given physical pages to ensure concurrent read or exclusive write access: + /// - Read protection: prevent others from writing to the pages. + /// - Read/write protection: prevent others from reading or writing to the pages. + /// - No protection: allow others to read and write the pages. + /// + /// This function can be implemented using EPT/NPT, TZASC, PMP, or some other hardware mechanisms. + /// It is a no-op if there is no other world or VM sharing the physical memory. + /// + /// Returns `Ok(())` if it successfully protects the pages. If it fails, returns + /// `Err(PhysPointerError)`. + /// + /// # Safety + /// + /// Since this function is expected to use hypercalls or other privileged hardware features, + /// the caller must ensure that it is safe to perform such operations at the time of the call. + /// Also, the caller should unprotect the pages when they are no longer needed to be protected. + unsafe fn protect( + &self, + pages: Self::PhysPageAddrArray, + perms: PhysPageMapPermissions, + ) -> Result<(), PhysPointerError>; +} + +/// Data structure representing a physical address with page alignment. +/// +/// Currently, this is an alias to `crate::mm::linux::NonZeroAddress`. This might change if +/// we selectively conduct sanity checks based on whether an address is virtual or physical +/// (e.g., whether a virtual address is canonical, whether a physical address is tagged with +/// a valid key ID, etc.). +pub type PhysPageAddr = crate::mm::linux::NonZeroAddress; + +/// Data structure to maintain the mapping information returned by `vmap()`. +/// +/// `base` is the virtual address of the mapped region which is page aligned. +/// `size` is the size of the mapped region in bytes. +#[derive(Clone)] +pub struct PhysPageMapInfo { + pub base: *mut u8, + pub size: usize, +} + +bitflags::bitflags! { + /// Physical page map permissions which is a restricted version of + /// [`litebox::platform::page_mgmt::MemoryRegionPermissions`]. + /// + /// This module only supports READ and WRITE permissions. Both EXECUTE and SHARED + /// permissions are explicitly prohibited. 
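As an aside for reviewers, a minimal sketch (not part of this change) of the call pattern the `VmapProvider` trait above is aiming for. It assumes the trait and `PhysPageAddr` are generic over the page size `ALIGN`, that the provider's `PhysPageAddrArray` is `Box<[PhysPageAddr<ALIGN>]>`, and that its `PhysPageMapInfo` is the `PhysPageMapInfo` struct defined in this file; those choices mirror the userland and LVBS implementations later in this diff.

```rust
// Illustrative sketch only; the ALIGN parameterization and associated types are assumptions.
fn read_first_byte<const ALIGN: usize, P>(
    platform: &P,
    pages: Box<[PhysPageAddr<ALIGN>]>,
) -> Result<u8, PhysPointerError>
where
    P: VmapProvider<
        ALIGN,
        PhysPageAddrArray = Box<[PhysPageAddr<ALIGN>]>,
        PhysPageMapInfo = PhysPageMapInfo,
    >,
{
    // Reject pages that overlap LiteBox-owned memory before touching them.
    platform.validate(pages.clone())?;
    // SAFETY: the caller guarantees these pages are not in active use elsewhere.
    let info = unsafe { platform.vmap(pages, PhysPageMapPermissions::READ) }?;
    // Read through the new, virtually contiguous mapping.
    let byte = unsafe { core::ptr::read(info.base) };
    // SAFETY: nothing references this virtual range after the read above.
    unsafe { platform.vunmap(info) }?;
    Ok(byte)
}
```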
+ #[non_exhaustive] + #[derive(Clone, Copy, Debug, PartialEq, Eq)] + pub struct PhysPageMapPermissions: u8 { + /// Readable + const READ = 1 << 0; + /// Writable + const WRITE = 1 << 1; + const _ = !0; + } +} + +impl From for PhysPageMapPermissions { + fn from(perms: MemoryRegionPermissions) -> Self { + let mut phys_perms = PhysPageMapPermissions::empty(); + if perms.contains(MemoryRegionPermissions::READ) { + phys_perms |= PhysPageMapPermissions::READ; + } + if perms.contains(MemoryRegionPermissions::WRITE) { + phys_perms |= PhysPageMapPermissions::WRITE; + } + phys_perms + } +} + +impl From for MemoryRegionPermissions { + fn from(perms: PhysPageMapPermissions) -> Self { + let mut mem_perms = MemoryRegionPermissions::empty(); + if perms.contains(PhysPageMapPermissions::READ) { + mem_perms |= MemoryRegionPermissions::READ; + } + if perms.contains(PhysPageMapPermissions::WRITE) { + mem_perms |= MemoryRegionPermissions::WRITE; + } + mem_perms + } +} + +/// Possible errors for physical pointer access with `VmapProvider` +#[non_exhaustive] +#[derive(Error, Debug)] +pub enum PhysPointerError { + #[error("Physical address {0:#x} is invalid to access")] + InvalidPhysicalAddress(usize), + #[error("Physical address {0:#x} is not aligned to {1} bytes")] + UnalignedPhysicalAddress(usize, usize), + #[error("Offset {0:#x} is not aligned to {1} bytes")] + UnalignedOffset(usize, usize), + #[error("Base offset {0:#x} is greater than or equal to alignment ({1} bytes)")] + InvalidBaseOffset(usize, usize), + #[error( + "The total size of the given pages ({0} bytes) is insufficient for the requested type ({1} bytes)" + )] + InsufficientPhysicalPages(usize, usize), + #[error("Index {0} is out of bounds (count: {1})")] + IndexOutOfBounds(usize, usize), + #[error("Physical address {0:#x} is already mapped")] + AlreadyMapped(usize), + #[error("Physical address {0:#x} is unmapped")] + Unmapped(usize), + #[error("No mapping information available")] + NoMappingInfo, + #[error("Overflow occurred during calculation")] + Overflow, + #[error("Non-contiguous physical pages in the array")] + NonContiguousPages, + #[error("The operation is unsupported on this platform")] + UnsupportedOperation, + #[error("Unsupported permissions: {0:#x}")] + UnsupportedPermissions(u8), +} diff --git a/litebox_common_optee/Cargo.toml b/litebox_common_optee/Cargo.toml index 5b88e7c9f..901997b43 100644 --- a/litebox_common_optee/Cargo.toml +++ b/litebox_common_optee/Cargo.toml @@ -9,6 +9,7 @@ litebox = { path = "../litebox/", version = "0.1.0" } litebox_common_linux = { path = "../litebox_common_linux/", version = "0.1.0" } modular-bitfield = { version = "0.12.0", default-features = false } num_enum = { version = "0.7.3", default-features = false } +thiserror = { version = "2.0.6", default-features = false } [lints] workspace = true diff --git a/litebox_common_optee/src/lib.rs b/litebox_common_optee/src/lib.rs index b8a4c8f8f..a4826c6cc 100644 --- a/litebox_common_optee/src/lib.rs +++ b/litebox_common_optee/src/lib.rs @@ -6,7 +6,7 @@ extern crate alloc; use alloc::boxed::Box; -use litebox::platform::RawConstPointer as _; +use litebox::platform::{RawConstPointer as _, vmap::PhysPointerError}; use litebox_common_linux::{PtRegs, errno::Errno}; use modular_bitfield::prelude::*; use modular_bitfield::specifiers::{B4, B8, B48, B54}; @@ -441,35 +441,15 @@ impl UteeParams { /// Each parameter for TA invocation with copied content/buffer for safer operations. 
/// This is our representation of `utee_params` and not for directly /// interacting with OP-TEE TAs and clients (which expect pointers/references). -/// `out_address(es)`: VTL0 physical address(es) to write output data to. They are virtually -/// contiguous but may not be physically contiguous. #[derive(Clone)] pub enum UteeParamOwned { None, - ValueInput { - value_a: u64, - value_b: u64, - }, - ValueOutput { - out_address: Option, - }, - ValueInout { - value_a: u64, - value_b: u64, - out_address: Option, - }, - MemrefInput { - data: Box<[u8]>, - }, - MemrefOutput { - buffer_size: usize, - out_addresses: Option>, - }, - MemrefInout { - data: Box<[u8]>, - buffer_size: usize, - out_addresses: Option>, - }, + ValueInput { value_a: u64, value_b: u64 }, + ValueOutput {}, + ValueInout { value_a: u64, value_b: u64 }, + MemrefInput { data: Box<[u8]> }, + MemrefOutput { buffer_size: usize }, + MemrefInout { data: Box<[u8]>, buffer_size: usize }, } impl UteeParamOwned { @@ -522,6 +502,31 @@ pub struct TeeUuid { pub time_hi_and_version: u16, pub clock_seq_and_node: [u8; 8], } +impl TeeUuid { + #[allow(clippy::missing_panics_doc)] + pub fn from_bytes(data: [u8; 16]) -> Self { + let time_low = u32::from_le_bytes(data[0..4].try_into().unwrap()); + let time_mid = u16::from_le_bytes(data[4..6].try_into().unwrap()); + let time_hi_and_version = u16::from_le_bytes(data[6..8].try_into().unwrap()); + let mut clock_seq_and_node = [0u8; 8]; + clock_seq_and_node.copy_from_slice(&data[8..16]); + Self { + time_low, + time_mid, + time_hi_and_version, + clock_seq_and_node, + } + } + + pub fn from_u32_array(data: [u32; 4]) -> Self { + let mut buffer = [0u8; 16]; + buffer[0..4].copy_from_slice(&data[0].to_le_bytes()); + buffer[4..8].copy_from_slice(&data[1].to_le_bytes()); + buffer[8..12].copy_from_slice(&data[2].to_le_bytes()); + buffer[12..16].copy_from_slice(&data[3].to_le_bytes()); + Self::from_bytes(buffer) + } +} /// `TEE_Identity` from `optee_os/lib/libutee/include/tee_api_types.h`. 
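A small worked example (illustrative, not part of the change) of the byte order that `from_u32_array`/`from_bytes` assume: the four words are serialized little-endian and re-parsed field by field. This matters because `decode_ta_request` later rebuilds a TA UUID from the two value parameters of an OpenSession message; the words below are the OP-TEE OS UUID constants that appear elsewhere in this diff.

```rust
let uuid = TeeUuid::from_u32_array([0x4861_78e0, 0xe7f8_11e3, 0xbc5e_0002, 0xa5d5_c51b]);
assert_eq!(uuid.time_low, 0x4861_78e0);       // word 0, unchanged
assert_eq!(uuid.time_mid, 0x11e3);            // low half of word 1
assert_eq!(uuid.time_hi_and_version, 0xe7f8); // high half of word 1
assert_eq!(uuid.clock_seq_and_node[..4], [0x02, 0x00, 0x5e, 0xbc]); // word 2, little-endian
```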
#[derive(Clone, Copy, PartialEq)] @@ -1108,7 +1113,18 @@ pub enum OpteeMessageCommand { UnregisterShm = OPTEE_MSG_CMD_UNREGISTER_SHM, DoBottomHalf = OPTEE_MSG_CMD_DO_BOTTOM_HALF, StopAsyncNotif = OPTEE_MSG_CMD_STOP_ASYNC_NOTIF, - Unknown = 0xffff_ffff, +} + +impl TryFrom for UteeEntryFunc { + type Error = OpteeSmcReturn; + fn try_from(cmd: OpteeMessageCommand) -> Result { + match cmd { + OpteeMessageCommand::OpenSession => Ok(UteeEntryFunc::OpenSession), + OpteeMessageCommand::CloseSession => Ok(UteeEntryFunc::CloseSession), + OpteeMessageCommand::InvokeCommand => Ok(UteeEntryFunc::InvokeCommand), + _ => Err(OpteeSmcReturn::EBadCmd), + } + } } /// Temporary reference memory parameter @@ -1116,11 +1132,11 @@ pub enum OpteeMessageCommand { #[repr(C)] pub struct OpteeMsgParamTmem { /// Physical address of the buffer - buf_ptr: u64, + pub buf_ptr: u64, /// Size of the buffer - size: u64, + pub size: u64, /// Temporary shared memory reference or identifier - shm_ref: u64, + pub shm_ref: u64, } /// Registered memory reference parameter @@ -1128,11 +1144,11 @@ pub struct OpteeMsgParamTmem { #[repr(C)] pub struct OpteeMsgParamRmem { /// Offset into shared memory reference - offs: u64, + pub offs: u64, /// Size of the buffer - size: u64, + pub size: u64, /// Shared memory reference or identifier - shm_ref: u64, + pub shm_ref: u64, } /// FF-A memory reference parameter @@ -1140,15 +1156,15 @@ pub struct OpteeMsgParamRmem { #[repr(C)] pub struct OpteeMsgParamFmem { /// Lower bits of offset into shared memory reference - offs_low: u32, + pub offs_low: u32, /// Higher bits of offset into shared memory reference - offs_high: u32, + pub offs_high: u32, /// Internal offset into the first page of shared memory reference - internal_offs: u16, + pub internal_offs: u16, /// Size of the buffer - size: u64, + pub size: u64, /// Global identifier of the shared memory - global_id: u64, + pub global_id: u64, } /// Opaque value parameter @@ -1156,9 +1172,9 @@ pub struct OpteeMsgParamFmem { #[derive(Debug, Clone, Copy)] #[repr(C)] pub struct OpteeMsgParamValue { - a: u64, - b: u64, - c: u64, + pub a: u64, + pub b: u64, + pub c: u64, } /// Parameter used together with `OpteeMsgArg` @@ -1224,6 +1240,54 @@ impl OpteeMsgParam { pub fn attr_type(&self) -> OpteeMsgAttrType { OpteeMsgAttrType::try_from(self.attr.typ()).unwrap_or(OpteeMsgAttrType::None) } + pub fn get_param_tmem(&self) -> Option { + if matches!( + self.attr.typ(), + OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + | OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT + | OPTEE_MSG_ATTR_TYPE_TMEM_INOUT + ) { + Some(unsafe { self.u.tmem }) + } else { + None + } + } + pub fn get_param_rmem(&self) -> Option { + if matches!( + self.attr.typ(), + OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + | OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT + | OPTEE_MSG_ATTR_TYPE_RMEM_INOUT + ) { + Some(unsafe { self.u.rmem }) + } else { + None + } + } + pub fn get_param_fmem(&self) -> Option { + if matches!( + self.attr.typ(), + OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + | OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT + | OPTEE_MSG_ATTR_TYPE_RMEM_INOUT + ) { + Some(unsafe { self.u.fmem }) + } else { + None + } + } + pub fn get_param_value(&self) -> Option { + if matches!( + self.attr.typ(), + OPTEE_MSG_ATTR_TYPE_VALUE_INPUT + | OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT + | OPTEE_MSG_ATTR_TYPE_VALUE_INOUT + ) { + Some(unsafe { self.u.value }) + } else { + None + } + } } /// `optee_msg_arg` from `optee_os/core/include/optee_msg.h` @@ -1233,30 +1297,94 @@ impl OpteeMsgParam { #[repr(C)] pub struct OpteeMsgArg { /// OP-TEE message command. This is a superset of `UteeEntryFunc`. 
- cmd: OpteeMessageCommand, + pub cmd: OpteeMessageCommand, /// TA function ID which is used if `cmd == InvokeCommand`. Note that the meaning of `cmd` and `func` /// is swapped compared to TAs. - func: u32, + pub func: u32, /// Session ID. This is "IN" parameter most of the time except for `cmd == OpenSession` where /// the secure world generates and returns a session ID. - session: u32, + pub session: u32, /// Cancellation ID. This is a unique value to identify this request. - cancel_id: u32, + pub cancel_id: u32, pad: u32, /// Return value from the secure world - ret: u32, + pub ret: u32, /// Origin of the return value - ret_origin: TeeOrigin, + pub ret_origin: TeeOrigin, /// Number of parameters contained in `params` - num_params: u32, + pub num_params: u32, /// Parameters to be passed to the secure world. If `cmd == OpenSession`, the first two params contain /// a TA UUID and they are not delivered to the TA. /// Note that, originally, the length of this array is variable. We fix it to `TEE_NUM_PARAMS + 2` to /// simplify the implementation (our OP-TEE Shim supports up to four parameters as well). - params: [OpteeMsgParam; TEE_NUM_PARAMS + 2], + pub params: [OpteeMsgParam; TEE_NUM_PARAMS + 2], } -/// OP-TEE SMC call arguments. +impl OpteeMsgArg { + /// Validate the message argument structure. + pub fn validate(&self) -> Result<(), OpteeSmcReturn> { + let _ = + OpteeMessageCommand::try_from(self.cmd as u32).map_err(|_| OpteeSmcReturn::EBadCmd)?; + if self.cmd == OpteeMessageCommand::OpenSession && self.num_params < 2 { + return Err(OpteeSmcReturn::EBadCmd); + } + if self.num_params as usize > self.params.len() { + Err(OpteeSmcReturn::EBadCmd) + } else { + Ok(()) + } + } + pub fn get_param_tmem(&self, index: usize) -> Result { + if index >= self.num_params as usize { + Err(OpteeSmcReturn::ENotAvail) + } else { + Ok(self.params[index] + .get_param_tmem() + .ok_or(OpteeSmcReturn::EBadCmd)?) + } + } + pub fn get_param_rmem(&self, index: usize) -> Result { + if index >= self.num_params as usize { + Err(OpteeSmcReturn::ENotAvail) + } else { + Ok(self.params[index] + .get_param_rmem() + .ok_or(OpteeSmcReturn::EBadCmd)?) + } + } + pub fn get_param_fmem(&self, index: usize) -> Result { + if index >= self.num_params as usize { + Err(OpteeSmcReturn::ENotAvail) + } else { + Ok(self.params[index] + .get_param_fmem() + .ok_or(OpteeSmcReturn::EBadCmd)?) + } + } + pub fn get_param_value(&self, index: usize) -> Result { + if index >= self.num_params as usize { + Err(OpteeSmcReturn::ENotAvail) + } else { + Ok(self.params[index] + .get_param_value() + .ok_or(OpteeSmcReturn::EBadCmd)?) + } + } + pub fn set_param_value( + &mut self, + index: usize, + value: OpteeMsgParamValue, + ) -> Result<(), OpteeSmcReturn> { + if index >= self.num_params as usize { + Err(OpteeSmcReturn::ENotAvail) + } else { + self.params[index].u.value = value; + Ok(()) + } + } +} + +/// A memory page to exchange OP-TEE SMC call arguments. /// OP-TEE assumes that the underlying architecture is Arm with TrustZone and /// thus it uses Secure Monitor Call (SMC) calling convention (SMCCC). /// Since we currently rely on the existing OP-TEE driver which assumes SMCCC, we translate it into @@ -1264,9 +1392,28 @@ pub struct OpteeMsgArg { /// Specifically, OP-TEE SMC call uses up to nine CPU registers to pass arguments. /// However, since VTL call only supports up to four parameters, we allocate a VTL0 memory page and /// exchange all arguments through that memory page. 
+/// TODO: Since this is LVBS-specific structure to facilitate the translation between VTL call convention, +/// we might want to move it to the `litebox_platform_lvbs` crate later. #[repr(align(4096))] #[derive(Clone, Copy)] #[repr(C)] +pub struct OpteeSmcArgsPage { + pub args: [usize; Self::NUM_OPTEE_SMC_ARGS], +} +impl OpteeSmcArgsPage { + const NUM_OPTEE_SMC_ARGS: usize = 9; +} + +impl From<&OpteeSmcArgsPage> for OpteeSmcArgs { + fn from(page: &OpteeSmcArgsPage) -> Self { + let mut smc = OpteeSmcArgs::default(); + smc.args.copy_from_slice(&page.args); + smc + } +} + +/// OP-TEE SMC call arguments. +#[derive(Clone, Copy, Default)] pub struct OpteeSmcArgs { args: [usize; Self::NUM_OPTEE_SMC_ARGS], } @@ -1274,35 +1421,29 @@ pub struct OpteeSmcArgs { impl OpteeSmcArgs { const NUM_OPTEE_SMC_ARGS: usize = 9; - pub fn arg_index(&self, index: usize) -> Option { - if index < Self::NUM_OPTEE_SMC_ARGS { - Some(self.args[index]) - } else { - None - } - } - /// Get the function ID of an OP-TEE SMC call - pub fn func_id(&self) -> Result { - OpteeSmcFunction::try_from(self.args[0] & OpteeSmcFunction::MASK).map_err(|_| Errno::EINVAL) + pub fn func_id(&self) -> Result { + OpteeSmcFunction::try_from(self.args[0] & OpteeSmcFunction::MASK) + .map_err(|_| OpteeSmcReturn::EBadCmd) } /// Get the physical address of `OpteeMsgArg`. The secure world is expected to map and copy /// this structure. - pub fn optee_msg_arg_phys_addr(&self) -> Result { + pub fn optee_msg_arg_phys_addr(&self) -> Result { // To avoid potential sign extension and overflow issues, OP-TEE stores the low and // high 32 bits of a 64-bit address in `args[2]` and `args[1]`, respectively. if self.args[1] & 0xffff_ffff_0000_0000 == 0 && self.args[2] & 0xffff_ffff_0000_0000 == 0 { let addr = (self.args[1] << 32) | self.args[2]; - Ok(addr) + Ok(addr as u64) } else { - Err(Errno::EINVAL) + Err(OpteeSmcReturn::EBadAddr) } } } /// `OPTEE_SMC_FUNCID_*` from `core/arch/arm/include/sm/optee_smc.h` /// TODO: Add stuffs based on the OP-TEE driver that LVBS is using. +const OPTEE_SMC_FUNCID_GET_OS_UUID: usize = 0x0; const OPTEE_SMC_FUNCID_GET_OS_REVISION: usize = 0x1; const OPTEE_SMC_FUNCID_CALL_WITH_ARG: usize = 0x4; const OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES: usize = 0x9; @@ -1316,6 +1457,7 @@ const OPTEE_SMC_FUNCID_CALLS_REVISION: usize = 0xff03; #[derive(PartialEq, TryFromPrimitive)] #[repr(usize)] pub enum OpteeSmcFunction { + GetOsUuid = OPTEE_SMC_FUNCID_GET_OS_UUID, GetOsRevision = OPTEE_SMC_FUNCID_GET_OS_REVISION, CallWithArg = OPTEE_SMC_FUNCID_CALL_WITH_ARG, ExchangeCapabilities = OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES, @@ -1332,64 +1474,94 @@ impl OpteeSmcFunction { /// OP-TEE SMC call result. /// OP-TEE SMC call uses CPU registers to pass input and output values. -/// Thus, this structure is technically equivalent to `OpteeSmcArgs`, but we separate them for clarity. -#[repr(align(4096))] -#[derive(Clone, Copy)] -#[repr(C)] -pub struct OpteeSmcResult { - args: [usize; Self::NUM_OPTEE_SMC_ARGS], -} - -impl OpteeSmcResult { - const NUM_OPTEE_SMC_ARGS: usize = 9; - - pub fn return_status(&mut self, status: OpteeSmcReturn) { - self.args[0] = status as usize; - } - - pub fn exchange_capabilities( - &mut self, +/// Thus, we convert this into `OpteeSmcArgs` later. 
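A worked example (illustrative only) of the address encoding that `optee_msg_arg_phys_addr()` expects: the driver passes the 64-bit physical address of `OpteeMsgArg` split across `args[1]` (high 32 bits) and `args[2]` (low 32 bits), and each register must have its upper half clear. The function ID is written as a numeric literal here because the `OPTEE_SMC_FUNCID_*` constants are private to this module.

```rust
let mut page = OpteeSmcArgsPage { args: [0; 9] };
page.args[0] = 0x4;            // OPTEE_SMC_FUNCID_CALL_WITH_ARG
page.args[1] = 0x0000_0001;    // high half of the address 0x1_2345_6000
page.args[2] = 0x2345_6000;    // low half
let smc = OpteeSmcArgs::from(&page);
// smc.func_id() yields OpteeSmcFunction::CallWithArg, and
// smc.optee_msg_arg_phys_addr() reassembles Ok(0x1_2345_6000).
// If either register had bits set above bit 31, the result would be Err(EBadAddr).
```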
+#[non_exhaustive] +pub enum OpteeSmcResult<'a> { + Generic { + status: OpteeSmcReturn, + }, + ExchangeCapabilities { status: OpteeSmcReturn, capabilities: OpteeSecureWorldCapabilities, max_notif_value: usize, data: usize, - ) { - self.return_status(status); - self.args[1] = capabilities.bits(); - self.args[2] = max_notif_value; - self.args[3] = data; - } - - /// # Panics - /// panics if any element of `data` cannot be converted to `usize`. - pub fn uuid(&mut self, data: [u32; 4]) { - // OP-TEE doesn't use the high 32 bit of each argument to avoid sign extension and overflow issues. - self.args[0] = usize::try_from(data[0]).unwrap(); - self.args[1] = usize::try_from(data[1]).unwrap(); - self.args[2] = usize::try_from(data[2]).unwrap(); - self.args[3] = usize::try_from(data[3]).unwrap(); - } - - pub fn revision(&mut self, major: usize, minor: usize) { - self.args[0] = major; - self.args[1] = minor; - } - - pub fn os_revision(&mut self, major: usize, minor: usize, build_id: usize) { - self.args[0] = major; - self.args[1] = minor; - self.args[2] = build_id; - } - - pub fn disable_shm_cache( - &mut self, + }, + Uuid { + data: &'a [u32; 4], + }, + Revision { + major: usize, + minor: usize, + }, + OsRevision { + major: usize, + minor: usize, + build_id: usize, + }, + DisableShmCache { status: OpteeSmcReturn, shm_upper32: usize, shm_lower32: usize, - ) { - self.args[0] = status as usize; - self.args[1] = shm_upper32; - self.args[2] = shm_lower32; + }, +} + +impl From> for OpteeSmcArgs { + fn from(value: OpteeSmcResult) -> Self { + match value { + OpteeSmcResult::Generic { status } => { + let mut smc = OpteeSmcArgs::default(); + smc.args[0] = status as usize; + smc + } + OpteeSmcResult::ExchangeCapabilities { + status, + capabilities, + max_notif_value, + data, + } => { + let mut smc = OpteeSmcArgs::default(); + smc.args[0] = status as usize; + smc.args[1] = capabilities.bits(); + smc.args[2] = max_notif_value; + smc.args[3] = data; + smc + } + OpteeSmcResult::Uuid { data } => { + let mut smc = OpteeSmcArgs::default(); + for (i, arg) in smc.args.iter_mut().enumerate().take(4) { + *arg = data[i] as usize; + } + smc + } + OpteeSmcResult::Revision { major, minor } => { + let mut smc = OpteeSmcArgs::default(); + smc.args[0] = major; + smc.args[1] = minor; + smc + } + OpteeSmcResult::OsRevision { + major, + minor, + build_id, + } => { + let mut smc = OpteeSmcArgs::default(); + smc.args[0] = major; + smc.args[1] = minor; + smc.args[2] = build_id; + smc + } + OpteeSmcResult::DisableShmCache { + status, + shm_upper32, + shm_lower32, + } => { + let mut smc = OpteeSmcArgs::default(); + smc.args[0] = status as usize; + smc.args[1] = shm_upper32; + smc.args[2] = shm_lower32; + smc + } + } } } @@ -1430,3 +1602,28 @@ pub enum OpteeSmcReturn { ENotAvail = OPTEE_SMC_RETURN_ENOTAVAIL, UnknownFunction = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION, } + +impl From for OpteeSmcReturn { + fn from(err: PhysPointerError) -> Self { + match err { + PhysPointerError::AlreadyMapped(_) => OpteeSmcReturn::EBusy, + PhysPointerError::NoMappingInfo => OpteeSmcReturn::ENomem, + _ => OpteeSmcReturn::EBadAddr, + } + } +} + +impl From for litebox_common_linux::errno::Errno { + fn from(ret: OpteeSmcReturn) -> Self { + match ret { + OpteeSmcReturn::EBusy | OpteeSmcReturn::EThreadLimit => { + litebox_common_linux::errno::Errno::EBUSY + } + OpteeSmcReturn::EResume => litebox_common_linux::errno::Errno::EAGAIN, + OpteeSmcReturn::EBadAddr => litebox_common_linux::errno::Errno::EFAULT, + OpteeSmcReturn::ENomem => 
litebox_common_linux::errno::Errno::ENOMEM, + OpteeSmcReturn::ENotAvail => litebox_common_linux::errno::Errno::ENOENT, + _ => litebox_common_linux::errno::Errno::EINVAL, + } + } +} diff --git a/litebox_platform_linux_userland/src/lib.rs b/litebox_platform_linux_userland/src/lib.rs index 20ee472de..0abb52655 100644 --- a/litebox_platform_linux_userland/src/lib.rs +++ b/litebox_platform_linux_userland/src/lib.rs @@ -12,6 +12,9 @@ use std::time::Duration; use litebox::fs::OFlags; use litebox::platform::UnblockedOrTimedOut; use litebox::platform::page_mgmt::{FixedAddressBehavior, MemoryRegionPermissions}; +use litebox::platform::vmap::{ + PhysPageAddr, PhysPageMapInfo, PhysPageMapPermissions, PhysPointerError, VmapProvider, +}; use litebox::platform::{ImmediatelyWokenUp, RawConstPointer as _}; use litebox::shim::ContinueOperation; use litebox::utils::{ReinterpretSignedExt, ReinterpretUnsignedExt as _, TruncateExt}; @@ -2143,6 +2146,41 @@ impl litebox::platform::CrngProvider for LinuxUserland { } } +/// Dummy `VmapProvider`. +/// +/// In general, userland platforms do not support `vmap` and `vunmap` (which are kernel functions). +/// We might need to emulate these functions' behaviors using virtual addresses for development or +/// testing, or use a kernel module to provide this functionality (if needed). +impl VmapProvider for LinuxUserland { + type PhysPageAddrArray = alloc::boxed::Box<[PhysPageAddr]>; + + type PhysPageMapInfo = PhysPageMapInfo; + + unsafe fn vmap( + &self, + _pages: Self::PhysPageAddrArray, + _perms: PhysPageMapPermissions, + ) -> Result { + Err(PhysPointerError::UnsupportedOperation) + } + + unsafe fn vunmap(&self, _vmap_info: Self::PhysPageMapInfo) -> Result<(), PhysPointerError> { + Err(PhysPointerError::UnsupportedOperation) + } + + fn validate(&self, _pages: Self::PhysPageAddrArray) -> Result<(), PhysPointerError> { + Err(PhysPointerError::UnsupportedOperation) + } + + unsafe fn protect( + &self, + _pages: Self::PhysPageAddrArray, + _perms: PhysPageMapPermissions, + ) -> Result<(), PhysPointerError> { + Err(PhysPointerError::UnsupportedOperation) + } +} + #[cfg(test)] mod tests { use core::sync::atomic::AtomicU32; diff --git a/litebox_platform_lvbs/src/host/per_cpu_variables.rs b/litebox_platform_lvbs/src/host/per_cpu_variables.rs index 1c2fa28c5..bc695639f 100644 --- a/litebox_platform_lvbs/src/host/per_cpu_variables.rs +++ b/litebox_platform_lvbs/src/host/per_cpu_variables.rs @@ -110,7 +110,11 @@ impl PerCpuVariables { } /// Save the extended states of each core (VTL0 or VTL1). - pub(crate) fn save_extended_states(&self, vtl: u8) { + /// + /// # Panics + /// + /// Panics if XSAVE areas are not allocated or if an invalid VTL value is provided. + pub fn save_extended_states(&self, vtl: u8) { if self.vtl0_xsave_area_addr.is_null() || self.vtl1_xsave_area_addr.is_null() { panic!("XSAVE areas are not allocated"); } else { @@ -132,7 +136,11 @@ impl PerCpuVariables { } /// Restore the extended states of each core (VTL0 or VTL1). - pub(crate) fn restore_extended_states(&self, vtl: u8) { + /// + /// # Panics + /// + /// Panics if XSAVE areas are not allocated or if an invalid VTL value is provided. 
+ pub fn restore_extended_states(&self, vtl: u8) { if self.vtl0_xsave_area_addr.is_null() || self.vtl1_xsave_area_addr.is_null() { panic!("XSAVE areas are not allocated"); } else { diff --git a/litebox_platform_lvbs/src/lib.rs b/litebox_platform_lvbs/src/lib.rs index 7498447da..ac326e994 100644 --- a/litebox_platform_lvbs/src/lib.rs +++ b/litebox_platform_lvbs/src/lib.rs @@ -14,6 +14,9 @@ use core::{ sync::atomic::{AtomicU32, AtomicU64}, }; use litebox::platform::page_mgmt::DeallocationError; +use litebox::platform::vmap::{ + PhysPageAddr, PhysPageMapInfo, PhysPageMapPermissions, PhysPointerError, VmapProvider, +}; use litebox::platform::{ DebugLogProvider, IPInterfaceProvider, ImmediatelyWokenUp, PageManagementProvider, Punchthrough, RawMutexProvider, StdioProvider, TimeProvider, UnblockedOrTimedOut, @@ -755,6 +758,57 @@ impl StdioProvider for LinuxKernel { } } +/// Checks whether the given physical addresses are contiguous with respect to ALIGN. +/// +/// Note: This is a temporary check to let `VmapProvider` work with this platform +/// which does not yet support virtually contiguous mapping of non-contiguous physical pages +/// (for now, it maps physical pages with a fixed offset). +fn check_contiguity( + addrs: &[PhysPageAddr], +) -> Result<(), PhysPointerError> { + for window in addrs.windows(2) { + let first = window[0].as_usize(); + let second = window[1].as_usize(); + if second != first.checked_add(ALIGN).ok_or(PhysPointerError::Overflow)? { + return Err(PhysPointerError::NonContiguousPages); + } + } + Ok(()) +} + +impl VmapProvider for LinuxKernel { + type PhysPageAddrArray = alloc::boxed::Box<[PhysPageAddr]>; + + type PhysPageMapInfo = PhysPageMapInfo; + + unsafe fn vmap( + &self, + pages: Self::PhysPageAddrArray, + _perms: PhysPageMapPermissions, + ) -> Result { + // TODO: Remove this check once this platform supports virtually contiguous + // non-contiguous physical page mapping. + check_contiguity(&pages)?; + todo!("use map_vtl0_phys_range()") + } + + unsafe fn vunmap(&self, _vmap_info: Self::PhysPageMapInfo) -> Result<(), PhysPointerError> { + todo!("use unmap_vtl0_pages()") + } + + fn validate(&self, _pages: Self::PhysPageAddrArray) -> Result<(), PhysPointerError> { + todo!("use vtl1_phys_frame_range to validate") + } + + unsafe fn protect( + &self, + _pages: Self::PhysPageAddrArray, + _perms: PhysPageMapPermissions, + ) -> Result<(), PhysPointerError> { + todo!("use hypercall to protect/unprotect physical pages") + } +} + // NOTE: The below code is a naive workaround to let LVBS code to access the platform. // Rather than doing this, we should implement LVBS interface/provider for the platform. diff --git a/litebox_platform_lvbs/src/mshv/mod.rs b/litebox_platform_lvbs/src/mshv/mod.rs index b95c15b4d..f89e85bc3 100644 --- a/litebox_platform_lvbs/src/mshv/mod.rs +++ b/litebox_platform_lvbs/src/mshv/mod.rs @@ -7,7 +7,6 @@ mod hvcall_vp; mod mem_integrity; pub(crate) mod vsm; mod vsm_intercept; -mod vsm_optee_smc; pub mod vtl1_mem_layout; pub mod vtl_switch; diff --git a/litebox_platform_lvbs/src/mshv/vsm_optee_smc.rs b/litebox_platform_lvbs/src/mshv/vsm_optee_smc.rs deleted file mode 100644 index 0ac94059b..000000000 --- a/litebox_platform_lvbs/src/mshv/vsm_optee_smc.rs +++ /dev/null @@ -1,30 +0,0 @@ -//! 
VSM OP-TEE SMC functions - -use crate::{ - debug_serial_println, host::per_cpu_variables::with_per_cpu_variables_mut, mshv::HV_VTL_SECURE, -}; -use litebox_common_linux::errno::Errno; -use litebox_common_optee::OpteeSmcArgs; -use x86_64::PhysAddr; - -pub(crate) fn optee_smc_dispatch(optee_smc_args_pfn: u64) -> i64 { - if let Ok(optee_smc_args_page_addr) = PhysAddr::try_new(optee_smc_args_pfn << 12) - && let Some(mut _optee_smc_args) = unsafe { - crate::platform_low().copy_from_vtl0_phys::(optee_smc_args_page_addr) - } - { - // Since we do not know whether an OP-TEE TA uses extended states, we conservatively - // save and restore extended states before and after running any OP-TEE TA. - with_per_cpu_variables_mut(|per_cpu_variables| { - per_cpu_variables.save_extended_states(HV_VTL_SECURE); - }); - // TODO: Implement OP-TEE SMC for TA command invocation here. - debug_serial_println!("VSM function call for OP-TEE message"); - with_per_cpu_variables_mut(|per_cpu_variables| { - per_cpu_variables.restore_extended_states(HV_VTL_SECURE); - }); - 0 - } else { - Errno::EINVAL.as_neg().into() - } -} diff --git a/litebox_platform_lvbs/src/mshv/vtl_switch.rs b/litebox_platform_lvbs/src/mshv/vtl_switch.rs index 2855160b4..dd6aad7d6 100644 --- a/litebox_platform_lvbs/src/mshv/vtl_switch.rs +++ b/litebox_platform_lvbs/src/mshv/vtl_switch.rs @@ -9,7 +9,7 @@ use crate::{ HV_REGISTER_VSM_CODEPAGE_OFFSETS, HV_VTL_NORMAL, HV_VTL_SECURE, HvRegisterVsmCodePageOffsets, NUM_VTLCALL_PARAMS, VTL_ENTRY_REASON_INTERRUPT, VTL_ENTRY_REASON_LOWER_VTL_CALL, VsmFunction, hvcall_vp::hvcall_get_vp_registers, - vsm::vsm_dispatch, vsm_intercept::vsm_handle_intercept, vsm_optee_smc, + vsm::vsm_dispatch, vsm_intercept::vsm_handle_intercept, }, }; use core::arch::{asm, naked_asm}; @@ -313,7 +313,10 @@ fn vtlcall_dispatch(params: &[u64; NUM_VTLCALL_PARAMS]) -> i64 { .unwrap_or(VsmFunction::Unknown); match func_id { VsmFunction::Unknown => Errno::EINVAL.as_neg().into(), - VsmFunction::OpteeMessage => vsm_optee_smc::optee_smc_dispatch(params[1]), + VsmFunction::OpteeMessage => { + // TODO: invoke the OP-TEE upcall once it is merged. 
+ 0 + } _ => vsm_dispatch(func_id, ¶ms[1..]), } } diff --git a/litebox_runner_lvbs/Cargo.toml b/litebox_runner_lvbs/Cargo.toml index 30a916484..e35ac8fe4 100644 --- a/litebox_runner_lvbs/Cargo.toml +++ b/litebox_runner_lvbs/Cargo.toml @@ -7,9 +7,10 @@ edition = "2024" litebox = { version = "0.1.0", path = "../litebox" } litebox_platform_lvbs = { version = "0.1.0", path = "../litebox_platform_lvbs", default-features = false, features = ["interrupt"] } litebox_platform_multiplex = { version = "0.1.0", path = "../litebox_platform_multiplex", default-features = false, features = ["platform_lvbs"] } +litebox_common_optee = { path = "../litebox_common_optee/", version = "0.1.0" } +litebox_common_linux = { path = "../litebox_common_linux/", version = "0.1.0" } litebox_shim_optee = { path = "../litebox_shim_optee/", version = "0.1.0" } - [target.'cfg(target_arch = "x86_64")'.dependencies] x86_64 = { version = "0.15.2", default-features = false, features = ["instructions"] } diff --git a/litebox_runner_lvbs/src/lib.rs b/litebox_runner_lvbs/src/lib.rs index ca136b3f0..83edc1e4f 100644 --- a/litebox_runner_lvbs/src/lib.rs +++ b/litebox_runner_lvbs/src/lib.rs @@ -1,22 +1,38 @@ #![no_std] use core::panic::PanicInfo; +use litebox::{mm::linux::PAGE_SIZE, utils::TruncateExt}; +use litebox_common_optee::{ + LdelfArg, OpteeMessageCommand, OpteeMsgArg, OpteeSmcArgs, OpteeSmcReturn, TeeIdentity, + TeeLogin, TeeUuid, UteeEntryFunc, UteeParamOwned, UteeParams, +}; use litebox_platform_lvbs::{ arch::{gdt, get_core_id, instrs::hlt_loop, interrupts}, debug_serial_println, - host::{bootparam::get_vtl1_memory_info, per_cpu_variables::allocate_per_cpu_variables}, + host::{ + bootparam::get_vtl1_memory_info, + per_cpu_variables::{allocate_per_cpu_variables, with_per_cpu_variables_mut}, + }, mm::MemoryProvider, mshv::{ - hvcall, + HV_VTL_SECURE, hvcall, vtl_switch::vtl_switch_loop_entry, vtl1_mem_layout::{ - PAGE_SIZE, VTL1_INIT_HEAP_SIZE, VTL1_INIT_HEAP_START_PAGE, VTL1_PML4E_PAGE, + VTL1_INIT_HEAP_SIZE, VTL1_INIT_HEAP_START_PAGE, VTL1_PML4E_PAGE, VTL1_PRE_POPULATED_MEMORY_SIZE, get_heap_start_address, }, }, serial_println, }; use litebox_platform_multiplex::Platform; +use litebox_shim_optee::{NormalWorldConstPtr, NormalWorldMutPtr}; +use litebox_shim_optee::{ + loader::ElfLoadInfo, + msg_handler::{ + decode_ta_request, handle_optee_msg_arg, handle_optee_smc_args, + prepare_for_return_to_normal_world, + }, +}; /// # Panics /// @@ -61,6 +77,7 @@ pub fn init() -> Option<&'static Platform> { let pml4_table_addr = vtl1_start + u64::try_from(PAGE_SIZE * VTL1_PML4E_PAGE).unwrap(); let platform = Platform::new(pml4_table_addr, vtl1_start, vtl1_end); ret = Some(platform); + litebox_platform_multiplex::set_platform(platform); // Add the rest of the VTL1 memory to the global allocator once they are mapped to the kernel page table. let mem_fill_start = mem_fill_start + mem_fill_size; @@ -99,6 +116,148 @@ pub fn run(platform: Option<&'static Platform>) -> ! { vtl_switch_loop_entry(platform) } +/// A tentative entry point function for OP-TEE message handler upcall/callback. +/// +/// This entry point function is intended to be called from the LVBS platform which is unware of +/// OP-TEE semantics. Thus, we align this function's signature with other VSM/HVCI functions (i.e., +/// up to three u64 arguments and returning Result). 
+#[expect(dead_code)] +fn optee_smc_handler_upcall_entry( + smc_args_addr: u64, +) -> Result { + let smc_args_addr: usize = smc_args_addr.truncate(); + match optee_msg_handler(smc_args_addr) { + Ok(smc_arg) => { + let mut smc_args_ptr = + NormalWorldMutPtr::::with_usize(smc_args_addr) + .map_err(|_| litebox_common_linux::errno::Errno::EINVAL)?; + unsafe { smc_args_ptr.write_at_offset(0, smc_arg) } + .map_err(|_| litebox_common_linux::errno::Errno::EFAULT)?; + Ok(0) + } + Err(smc_ret) => Err(smc_ret.into()), + } +} + +fn optee_msg_handler(smc_args_addr: usize) -> Result { + let mut smc_args_ptr = + NormalWorldConstPtr::::with_usize(smc_args_addr)?; + let mut smc_args = unsafe { smc_args_ptr.read_at_offset(0) }?; + let msg_arg_phys_addr = smc_args.optee_msg_arg_phys_addr()?; + let smc_handled = handle_optee_smc_args(&mut smc_args)?; + if let Some(mut msg_arg) = smc_handled.msg_to_handle { + match msg_arg.cmd { + OpteeMessageCommand::OpenSession + | OpteeMessageCommand::InvokeCommand + | OpteeMessageCommand::CloseSession => { + let Ok(ta_req_info) = decode_ta_request(&msg_arg) else { + return Err(OpteeSmcReturn::EBadCmd); + }; + + let params = [const { UteeParamOwned::None }; UteeParamOwned::TEE_NUM_PARAMS]; + if ta_req_info.entry_func == UteeEntryFunc::OpenSession { + let _litebox = litebox_shim_optee::init_session( + &TeeUuid::default(), + &TeeIdentity { + login: TeeLogin::User, + uuid: TeeUuid::default(), + }, + Some(TA_BINARY), // TODO: replace this with UUID-based TA loading + ); + + let ldelf_info = litebox_shim_optee::loader::load_elf_buffer(LDELF_BINARY) + .expect("Failed to load ldelf"); + let Some(ldelf_arg_address) = ldelf_info.ldelf_arg_address else { + panic!("ldelf_arg_address not found"); + }; + let ldelf_arg = LdelfArg::new(); // TODO: set TA UUID + + let stack = litebox_shim_optee::loader::init_ldelf_stack( + Some(ldelf_info.stack_base), + &ldelf_arg, + ) + .expect("Failed to initialize stack for ldelf"); + let mut _pt_regs = + litebox_shim_optee::loader::prepare_ldelf_registers(&ldelf_info, &stack); + // TODO: run_thread + + // Note: `ldelf` allocates stack (returned via `stack_ptr`) but we don't use it here. + // Need to revisit this to see whether the stack is large enough for our use cases (e.g., + // copy owned data through stack to minimize TOCTTOU threats). + let ldelf_arg_out = unsafe { &*(ldelf_arg_address as *const LdelfArg) }; + let entry_func: usize = ldelf_arg_out.entry_func.truncate(); + + litebox_shim_optee::set_ta_loaded(); + + litebox_shim_optee::loader::allocate_guest_tls(None) + .expect("Failed to allocate TLS"); + + // TODO: maintain this ta load info in a global data structure + let ta_info = ElfLoadInfo { + entry_point: entry_func, + stack_base: ldelf_info.stack_base, + params_address: ldelf_info.params_address, + ldelf_arg_address: None, + }; + + // In OP-TEE TA, each command invocation is like (re)starting the TA with a new stack with + // loaded binary and heap. In that sense, we can create (and destroy) a stack + // for each command freely. + let stack = litebox_shim_optee::loader::init_stack( + Some(ta_info.stack_base), + params.as_slice(), + ) + .expect("Failed to initialize stack with parameters"); + let mut _pt_regs = litebox_shim_optee::loader::prepare_registers( + &ta_info, + &stack, + litebox_shim_optee::get_session_id(), + ta_req_info.entry_func as u32, + None, + ); + + // Since we do not know whether an OP-TEE TA uses extended states, we conservatively + // save and restore extended states before and after invoking the upcall handler. 
+ with_per_cpu_variables_mut(|per_cpu_variables| { + per_cpu_variables.save_extended_states(HV_VTL_SECURE); + }); + + // TODO: run_thread + + with_per_cpu_variables_mut(|per_cpu_variables| { + per_cpu_variables.restore_extended_states(HV_VTL_SECURE); + }); + + // SAFETY + // We assume that `ta_info.params_address` is a valid pointer to `UteeParams`. + let ta_params = unsafe { *(ta_info.params_address as *const UteeParams) }; + + prepare_for_return_to_normal_world(&ta_params, &ta_req_info, &mut msg_arg)?; + + // Overwrite `msg_arg` back to normal world memory to return value outputs (`ValueOutput` or `VapueInout`). + let mut ptr = NormalWorldMutPtr::::with_usize( + msg_arg_phys_addr.truncate(), + )?; + unsafe { ptr.write_at_offset(0, msg_arg) }?; + } else { + // retrieve `ta_info` from global data structure + todo!() + } + Ok(smc_handled.result.into()) + } + _ => { + handle_optee_msg_arg(&msg_arg)?; + Ok(smc_handled.result.into()) + } + } + } else { + Ok(smc_handled.result.into()) + } +} + +const TA_BINARY: &[u8] = &[0u8; 0]; +const LDELF_BINARY: &[u8] = &[0u8; 0]; + #[panic_handler] fn panic(info: &PanicInfo) -> ! { serial_println!("{}", info); diff --git a/litebox_runner_optee_on_linux_userland/src/tests.rs b/litebox_runner_optee_on_linux_userland/src/tests.rs index 8438610d2..404251696 100644 --- a/litebox_runner_optee_on_linux_userland/src/tests.rs +++ b/litebox_runner_optee_on_linux_userland/src/tests.rs @@ -259,20 +259,16 @@ impl TaCommandParamsBase64 { value_a: *value_a, value_b: *value_b, }, - TaCommandParamsBase64::ValueOutput {} => { - UteeParamOwned::ValueOutput { out_address: None } - } + TaCommandParamsBase64::ValueOutput {} => UteeParamOwned::ValueOutput {}, TaCommandParamsBase64::ValueInout { value_a, value_b } => UteeParamOwned::ValueInout { value_a: *value_a, value_b: *value_b, - out_address: None, }, TaCommandParamsBase64::MemrefInput { data_base64 } => UteeParamOwned::MemrefInput { data: Self::decode_base64(data_base64).into_boxed_slice(), }, TaCommandParamsBase64::MemrefOutput { buffer_size } => UteeParamOwned::MemrefOutput { buffer_size: usize::try_from(*buffer_size).unwrap(), - out_addresses: None, }, TaCommandParamsBase64::MemrefInout { data_base64, @@ -287,7 +283,6 @@ impl TaCommandParamsBase64 { UteeParamOwned::MemrefInout { data: decoded_data.into_boxed_slice(), buffer_size, - out_addresses: None, } } } diff --git a/litebox_shim_optee/src/lib.rs b/litebox_shim_optee/src/lib.rs index 760ad5170..3a721a1c6 100644 --- a/litebox_shim_optee/src/lib.rs +++ b/litebox_shim_optee/src/lib.rs @@ -31,6 +31,9 @@ use litebox_platform_multiplex::Platform; pub mod loader; pub(crate) mod syscalls; +pub mod msg_handler; +pub mod ptr; + const MAX_KERNEL_BUF_SIZE: usize = 0x80_000; /// Initialize the shim to run a task with the given parameters. 
@@ -841,3 +844,6 @@ pub fn session_id_pool<'a>() -> &'a SessionIdPool { static SESSION_ID_POOL: OnceBox = OnceBox::new(); SESSION_ID_POOL.get_or_init(|| alloc::boxed::Box::new(SessionIdPool::new())) } + +pub type NormalWorldConstPtr = crate::ptr::PhysConstPtr; +pub type NormalWorldMutPtr = crate::ptr::PhysMutPtr; diff --git a/litebox_shim_optee/src/loader/ta_stack.rs b/litebox_shim_optee/src/loader/ta_stack.rs index d77743eb8..299919510 100644 --- a/litebox_shim_optee/src/loader/ta_stack.rs +++ b/litebox_shim_optee/src/loader/ta_stack.rs @@ -213,30 +213,19 @@ impl TaStack { UteeParamOwned::ValueInput { value_a, value_b } => { self.push_param_values(TeeParamType::ValueInput, Some((*value_a, *value_b)))?; } - UteeParamOwned::ValueOutput { out_address: _ } => { + UteeParamOwned::ValueOutput {} => { self.push_param_values(TeeParamType::ValueOutput, None)?; } - UteeParamOwned::ValueInout { - value_a, - value_b, - out_address: _, - } => { + UteeParamOwned::ValueInout { value_a, value_b } => { self.push_param_values(TeeParamType::ValueInout, Some((*value_a, *value_b)))?; } UteeParamOwned::MemrefInput { data } => { self.push_param_memref(TeeParamType::MemrefInput, Some(data), data.len())?; } - UteeParamOwned::MemrefInout { - data, - buffer_size, - out_addresses: _, - } => { + UteeParamOwned::MemrefInout { data, buffer_size } => { self.push_param_memref(TeeParamType::MemrefInout, Some(data), *buffer_size)?; } - UteeParamOwned::MemrefOutput { - buffer_size, - out_addresses: _, - } => { + UteeParamOwned::MemrefOutput { buffer_size } => { self.push_param_memref(TeeParamType::MemrefOutput, None, *buffer_size)?; } UteeParamOwned::None => self.push_param_none()?, diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs new file mode 100644 index 000000000..2b6eaeed5 --- /dev/null +++ b/litebox_shim_optee/src/msg_handler.rs @@ -0,0 +1,635 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +//! OP-TEE's message passing is a bit complex because it involves with multiple actors +//! (normal world: client app and driver; secure world: OP-TEE OS and TAs), +//! consists multiple layers, and relies on shared memory references (i.e., no serialization). +//! +//! Since the normal world is out of LiteBox's scope, the OP-TEE shim starts with handling +//! an OP-TEE SMC call from the normal-world OP-TEE driver which consists of +//! up to nine register values. By checking the SMC function ID, the shim determines whether +//! it is for passing an OP-TEE message or a pure SMC function call (e.g., get OP-TEE OS +//! version). If it is for passing an OP-TEE message/command, the shim accesses a normal world +//! physical address containing `OpteeMsgArg` structure (the address is contained in +//! the SMC call arguments). This `OpteeMsgArg` structure may contain references to normal +//! world physical addresses to exchange a large amount of data. Also, like the OP-TEE +//! SMC call, a certain OP-TEE message/command does not involve with any TA (e.g., register +//! shared memory). 
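To make the flow in the module comment above concrete, here is a sketch (illustrative only; the LVBS runner in this PR does the real wiring, including loading and running the TA, and the generic page-size parameters are elided) of how the three entry points below are meant to compose. Note that `handle_optee_msg_arg` signals a TA-bound command by returning `Err(OpteeSmcReturn::Ok)`, which the caller then feeds to `decode_ta_request`.

```rust
fn serve(smc: &mut OpteeSmcArgs) -> Result<OpteeSmcArgs, OpteeSmcReturn> {
    // Step 1: interpret the nine SMC registers (version queries, capabilities, or a message).
    let handled = handle_optee_smc_args(smc)?;
    if let Some(msg) = &handled.msg_to_handle {
        match handle_optee_msg_arg(msg) {
            // Step 2: commands such as shared-memory (un)registration are handled entirely here.
            Ok(()) => {}
            // Step 3: OpenSession/InvokeCommand/CloseSession go to the TA path.
            Err(OpteeSmcReturn::Ok) => {
                let _ta_request = decode_ta_request(msg)?;
                // ...load/run the TA, then prepare_for_return_to_normal_world()...
            }
            Err(other) => return Err(other),
        }
    }
    Ok(handled.result.into())
}
```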
+use crate::{NormalWorldConstPtr, NormalWorldMutPtr}; +use alloc::{boxed::Box, vec::Vec}; +use hashbrown::HashMap; +use litebox::mm::linux::PAGE_SIZE; +use litebox::platform::RawConstPointer; +use litebox::platform::vmap::{PhysPageAddr, PhysPointerError}; +use litebox::utils::TruncateExt; +use litebox_common_optee::{ + OpteeMessageCommand, OpteeMsgArg, OpteeMsgAttrType, OpteeMsgParamRmem, OpteeMsgParamTmem, + OpteeMsgParamValue, OpteeSecureWorldCapabilities, OpteeSmcArgs, OpteeSmcFunction, + OpteeSmcResult, OpteeSmcReturn, TeeParamType, TeeUuid, UteeEntryFunc, UteeParamOwned, + UteeParams, +}; +use once_cell::race::OnceBox; + +// OP-TEE version and build info (2.0) +// TODO: Consider replacing it with our own version info +const OPTEE_MSG_REVISION_MAJOR: usize = 2; +const OPTEE_MSG_REVISION_MINOR: usize = 0; +const OPTEE_MSG_BUILD_ID: usize = 0; + +// This UID is from OP-TEE OS +// TODO: Consider replacing it with our own UID +const OPTEE_MSG_UID_0: u32 = 0x384f_b3e0; +const OPTEE_MSG_UID_1: u32 = 0xe7f8_11e3; +const OPTEE_MSG_UID_2: u32 = 0xaf63_0002; +const OPTEE_MSG_UID_3: u32 = 0xa5d5_c51b; + +// This is the UUID of OP-TEE Trusted OS +// TODO: Consider replacing it with our own UUID +const OPTEE_MSG_OS_OPTEE_UUID_0: u32 = 0x4861_78e0; +const OPTEE_MSG_OS_OPTEE_UUID_1: u32 = 0xe7f8_11e3; +const OPTEE_MSG_OS_OPTEE_UUID_2: u32 = 0xbc5e_0002; +const OPTEE_MSG_OS_OPTEE_UUID_3: u32 = 0xa5d5_c51b; + +// We do not support notification for now +const MAX_NOTIF_VALUE: usize = 0; +const NUM_RPC_PARMS: usize = 4; + +#[inline] +fn page_align_down(address: u64) -> u64 { + address & !(PAGE_SIZE as u64 - 1) +} + +#[inline] +fn page_align_up(len: u64) -> u64 { + len.next_multiple_of(PAGE_SIZE as u64) +} + +/// The result of handling an OP-TEE SMC call along with an extracted OP-TEE message argument to handle. +pub struct OpteeSmcHandled<'a> { + pub result: OpteeSmcResult<'a>, + pub msg_to_handle: Option, +} + +/// This function handles `OpteeSmcArgs` passed from the normal world (VTL0) via an OP-TEE SMC call. +/// It returns an `OpteeSmcResult` representing the result of the SMC call and +/// an optional `OpteeMsgArg` if the SMC call involves with an OP-TEE message which should be handled by +/// `handle_optee_msg_arg` or `decode_ta_request`. +/// +/// # Panics +/// +/// Panics if the normal world physical address in `smc` cannot be converted to `usize`. 
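A small worked example (not part of the change) of the two alignment helpers above, using the encoding the RegisterShm path below relies on: `tmem.buf_ptr` carries both the page address of the first `ShmRefPagesData` and the byte offset of the data within the first shared page. Numbers assume a 4 KiB `PAGE_SIZE` and a 0x2000-byte shared buffer.

```rust
let buf_ptr: u64 = 0x8000_0123;                  // as delivered in OpteeMsgParamTmem
let pages_data_addr = page_align_down(buf_ptr);  // 0x8000_0000: page holding ShmRefPagesData
let page_offset = buf_ptr - pages_data_addr;     // 0x123: offset into the first shared page
let aligned_size = page_align_up(page_offset + 0x2000); // 0x3000: whole pages to cover the buffer
```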
+pub fn handle_optee_smc_args( + smc: &mut OpteeSmcArgs, +) -> Result, OpteeSmcReturn> { + let func_id = smc.func_id()?; + match func_id { + OpteeSmcFunction::CallWithArg + | OpteeSmcFunction::CallWithRpcArg + | OpteeSmcFunction::CallWithRegdArg => { + let msg_arg_addr = smc.optee_msg_arg_phys_addr()?; + let msg_arg_addr = usize::try_from(msg_arg_addr).unwrap(); + let mut ptr = NormalWorldConstPtr::::with_usize(msg_arg_addr) + .map_err(|_| OpteeSmcReturn::EBadAddr)?; + let msg_arg = unsafe { ptr.read_at_offset(0) }.map_err(|_| OpteeSmcReturn::EBadAddr)?; + Ok(OpteeSmcHandled { + result: OpteeSmcResult::Generic { + status: OpteeSmcReturn::Ok, + }, + msg_to_handle: Some(*msg_arg), + }) + } + OpteeSmcFunction::ExchangeCapabilities => { + // TODO: update the below when we support more features + let default_cap = OpteeSecureWorldCapabilities::DYNAMIC_SHM + | OpteeSecureWorldCapabilities::MEMREF_NULL + | OpteeSecureWorldCapabilities::RPC_ARG; + Ok(OpteeSmcHandled { + result: OpteeSmcResult::ExchangeCapabilities { + status: OpteeSmcReturn::Ok, + capabilities: default_cap, + max_notif_value: MAX_NOTIF_VALUE, + data: NUM_RPC_PARMS, + }, + msg_to_handle: None, + }) + } + OpteeSmcFunction::DisableShmCache => { + // Currently, we do not support this feature. + Ok(OpteeSmcHandled { + result: OpteeSmcResult::DisableShmCache { + status: OpteeSmcReturn::ENotAvail, + shm_upper32: 0, + shm_lower32: 0, + }, + msg_to_handle: None, + }) + } + OpteeSmcFunction::GetOsUuid => Ok(OpteeSmcHandled { + result: OpteeSmcResult::Uuid { + data: &[ + OPTEE_MSG_OS_OPTEE_UUID_0, + OPTEE_MSG_OS_OPTEE_UUID_1, + OPTEE_MSG_OS_OPTEE_UUID_2, + OPTEE_MSG_OS_OPTEE_UUID_3, + ], + }, + msg_to_handle: None, + }), + OpteeSmcFunction::CallsUid => Ok(OpteeSmcHandled { + result: OpteeSmcResult::Uuid { + data: &[ + OPTEE_MSG_UID_0, + OPTEE_MSG_UID_1, + OPTEE_MSG_UID_2, + OPTEE_MSG_UID_3, + ], + }, + msg_to_handle: None, + }), + OpteeSmcFunction::GetOsRevision => Ok(OpteeSmcHandled { + result: OpteeSmcResult::OsRevision { + major: OPTEE_MSG_REVISION_MAJOR, + minor: OPTEE_MSG_REVISION_MINOR, + build_id: OPTEE_MSG_BUILD_ID, + }, + msg_to_handle: None, + }), + OpteeSmcFunction::CallsRevision => Ok(OpteeSmcHandled { + result: OpteeSmcResult::Revision { + major: OPTEE_MSG_REVISION_MAJOR, + minor: OPTEE_MSG_REVISION_MINOR, + }, + msg_to_handle: None, + }), + _ => Err(OpteeSmcReturn::UnknownFunction), + } +} + +/// This function handles an OP-TEE message contained in `OpteeMsgArg`. +/// Currently, it only handles shared memory registration and unregistration. +/// If an OP-TEE message involves with a TA request, it simply returns +/// `Err(OpteeSmcReturn::Ok)` while expecting that the caller will handle +/// the message with `decode_ta_request`. 
+pub fn handle_optee_msg_arg(msg_arg: &OpteeMsgArg) -> Result<(), OpteeSmcReturn> { + msg_arg.validate()?; + match msg_arg.cmd { + OpteeMessageCommand::RegisterShm => { + let tmem = msg_arg.get_param_tmem(0)?; + if tmem.buf_ptr == 0 || tmem.size == 0 || tmem.shm_ref == 0 { + return Err(OpteeSmcReturn::EBadAddr); + } + // `tmem.buf_ptr` encodes two different information: + // - The physical page address of the first `ShmRefPagesData` + // - The page offset of the first shared memory page (`pages_list[0]`) + let shm_ref_pages_data_phys_addr = page_align_down(tmem.buf_ptr); + let page_offset = tmem.buf_ptr - shm_ref_pages_data_phys_addr; + let aligned_size = page_align_up(page_offset + tmem.size); + shm_ref_map().register_shm( + shm_ref_pages_data_phys_addr, + page_offset, + aligned_size, + tmem.shm_ref, + )?; + } + OpteeMessageCommand::UnregisterShm => { + let tmem = msg_arg.get_param_tmem(0)?; + if tmem.shm_ref == 0 { + return Err(OpteeSmcReturn::EBadAddr); + } + shm_ref_map() + .remove(tmem.shm_ref) + .ok_or(OpteeSmcReturn::EBadAddr)?; + } + OpteeMessageCommand::OpenSession + | OpteeMessageCommand::InvokeCommand + | OpteeMessageCommand::CloseSession => return Err(OpteeSmcReturn::Ok), + _ => { + todo!("Unimplemented OpteeMessageCommand: {:?}", msg_arg.cmd); + } + } + Ok(()) +} + +/// TA request information extracted from an OP-TEE message. +/// +/// In addition to standard TA information (i.e., TA UUID, session ID, command ID, +/// and parameters), it contains shared memory information (`out_shm_info`) to +/// write back output data to the normal world once the TA execution is done. +pub struct TaRequestInfo { + pub uuid: Option, + pub session: u32, + pub entry_func: UteeEntryFunc, + pub cmd_id: u32, + pub params: [UteeParamOwned; UteeParamOwned::TEE_NUM_PARAMS], + pub out_shm_info: [Option>; UteeParamOwned::TEE_NUM_PARAMS], +} + +/// This function decodes a TA request contained in `OpteeMsgArg`. +/// +/// It copies the entire parameter data from the normal world shared memory into LiteBox's memory +/// to create `UteeParamOwned` structures to avoid potential data corruption during TA execution. +/// +/// # Panics +/// +/// Panics if any conversion from `u64` to `usize` fails. OP-TEE shim doesn't support a 32-bit environment. 
+pub fn decode_ta_request( + msg_arg: &OpteeMsgArg, +) -> Result, OpteeSmcReturn> { + let ta_entry_func: UteeEntryFunc = msg_arg.cmd.try_into()?; + let (ta_uuid, skip): (Option, usize) = if ta_entry_func == UteeEntryFunc::OpenSession { + // If it is an OpenSession request, extract the TA UUID from the first two parameters + let mut data = [0u32; 4]; + data[0] = (msg_arg.get_param_value(0)?.a).truncate(); + data[1] = (msg_arg.get_param_value(0)?.b).truncate(); + data[2] = (msg_arg.get_param_value(1)?.a).truncate(); + data[3] = (msg_arg.get_param_value(1)?.b).truncate(); + // Skip the first two parameters as they convey the TA UUID + (Some(TeeUuid::from_u32_array(data)), 2) + } else { + (None, 0) + }; + + let mut ta_req_info = TaRequestInfo { + uuid: ta_uuid, + session: msg_arg.session, + entry_func: ta_entry_func, + cmd_id: msg_arg.func, + params: [const { UteeParamOwned::None }; UteeParamOwned::TEE_NUM_PARAMS], + out_shm_info: [const { None }; UteeParamOwned::TEE_NUM_PARAMS], + }; + + let num_params = msg_arg.num_params as usize; + for (i, param) in msg_arg + .params + .iter() + .take(num_params) + .skip(skip) + .enumerate() + { + ta_req_info.params[i] = match param.attr_type() { + OpteeMsgAttrType::None => UteeParamOwned::None, + OpteeMsgAttrType::ValueInput => { + let value = param.get_param_value().ok_or(OpteeSmcReturn::EBadCmd)?; + UteeParamOwned::ValueInput { + value_a: value.a, + value_b: value.b, + } + } + OpteeMsgAttrType::ValueOutput => UteeParamOwned::ValueOutput {}, + OpteeMsgAttrType::ValueInout => { + let value = param.get_param_value().ok_or(OpteeSmcReturn::EBadCmd)?; + UteeParamOwned::ValueInout { + value_a: value.a, + value_b: value.b, + } + } + OpteeMsgAttrType::TmemInput => { + let tmem = param.get_param_tmem().ok_or(OpteeSmcReturn::EBadCmd)?; + let shm_info = get_shm_info_from_optee_msg_param_tmem(tmem)?; + let data_size = tmem.size.truncate(); + build_memref_input(&shm_info, data_size)? + } + OpteeMsgAttrType::RmemInput => { + let rmem = param.get_param_rmem().ok_or(OpteeSmcReturn::EBadCmd)?; + let shm_info = get_shm_info_from_optee_msg_param_rmem(rmem)?; + let data_size = rmem.size.truncate(); + build_memref_input(&shm_info, data_size)? + } + OpteeMsgAttrType::TmemOutput => { + let tmem = param.get_param_tmem().ok_or(OpteeSmcReturn::EBadCmd)?; + let shm_info = get_shm_info_from_optee_msg_param_tmem(tmem)?; + let buffer_size = tmem.size.truncate(); + + ta_req_info.out_shm_info[i] = Some(shm_info); + UteeParamOwned::MemrefOutput { buffer_size } + } + OpteeMsgAttrType::RmemOutput => { + let rmem = param.get_param_rmem().ok_or(OpteeSmcReturn::EBadCmd)?; + let shm_info = get_shm_info_from_optee_msg_param_rmem(rmem)?; + let buffer_size = rmem.size.truncate(); + + ta_req_info.out_shm_info[i] = Some(shm_info); + UteeParamOwned::MemrefOutput { buffer_size } + } + OpteeMsgAttrType::TmemInout => { + let tmem = param.get_param_tmem().ok_or(OpteeSmcReturn::EBadCmd)?; + let shm_info = get_shm_info_from_optee_msg_param_tmem(tmem)?; + let buffer_size = tmem.size.truncate(); + + ta_req_info.out_shm_info[i] = Some(shm_info.clone()); + build_memref_inout(&shm_info, buffer_size)? + } + OpteeMsgAttrType::RmemInout => { + let rmem = param.get_param_rmem().ok_or(OpteeSmcReturn::EBadCmd)?; + let shm_info = get_shm_info_from_optee_msg_param_rmem(rmem)?; + let buffer_size = rmem.size.truncate(); + + ta_req_info.out_shm_info[i] = Some(shm_info.clone()); + build_memref_inout(&shm_info, buffer_size)? 
+ } + _ => return Err(OpteeSmcReturn::EBadCmd), + }; + } + + Ok(ta_req_info) +} + +#[inline] +fn build_memref_input( + shm_info: &ShmInfo, + data_size: usize, +) -> Result { + let mut data = alloc::vec![0u8; data_size]; + read_data_from_shm(shm_info, &mut data)?; + Ok(UteeParamOwned::MemrefInput { data: data.into() }) +} + +#[inline] +fn build_memref_inout( + shm_info: &ShmInfo, + buffer_size: usize, +) -> Result { + let mut buffer = alloc::vec![0u8; buffer_size]; + read_data_from_shm(shm_info, &mut buffer)?; + Ok(UteeParamOwned::MemrefInout { + data: buffer.into(), + buffer_size, + }) +} + +/// This function prepares for returning from OP-TEE secure world to the normal world. +/// +/// It writes back TA execution outputs associated with shared memory references and updates +/// the `OpteeMsgArg` structure to return value-based outputs. +/// `ta_params` is a reference to `UteeParams` structure that stores TA's output within its memory. +/// `ta_req_info` refers to the decoded TA request information including the normal world +/// shared memory addresses to write back output data. +pub fn prepare_for_return_to_normal_world( + ta_params: &UteeParams, + ta_req_info: &TaRequestInfo, + msg_arg: &mut OpteeMsgArg, +) -> Result<(), OpteeSmcReturn> { + for index in 0..UteeParams::TEE_NUM_PARAMS { + let param_type = ta_params + .get_type(index) + .map_err(|_| OpteeSmcReturn::EBadAddr)?; + match param_type { + TeeParamType::ValueOutput | TeeParamType::ValueInout => { + if let Ok(Some((value_a, value_b))) = ta_params.get_values(index) { + msg_arg.set_param_value( + index, + OpteeMsgParamValue { + a: value_a, + b: value_b, + c: 0, + }, + )?; + } + } + TeeParamType::MemrefOutput | TeeParamType::MemrefInout => { + if let Ok(Some((addr, len))) = ta_params.get_values(index) { + // SAFETY + // `addr` is expected to be a valid address of a TA and `addr + len` does not + // exceed the TA's memory region. + let ptr = crate::UserConstPtr::::from_usize(addr.truncate()); + let slice = unsafe { ptr.to_cow_slice(len.truncate()) } + .ok_or(OpteeSmcReturn::EBadAddr)?; + + if slice.is_empty() { + continue; + } + if let Some(out_shm_info) = &ta_req_info.out_shm_info[index] { + write_data_to_shm(out_shm_info, slice.as_ref())?; + } + } + } + _ => {} + } + } + Ok(()) +} + +/// A scatter-gather list of OP-TEE physical page addresses in the normal world (VTL0) to +/// share with the secure world (VTL1). Each [`ShmRefPagesData`] occupies one memory page +/// where `pages_list` contains a list of physical page addresses and `next_page_data` +/// contains the physical address of the next [`ShmRefPagesData`] if any. Entries of `pages_list` +/// and `next_page_data` contain zero if the list ends. These physical page addresses are +/// virtually contiguous in the normal world. All these address values must be page aligned. +/// +/// `pages_data` from [Linux](https://elixir.bootlin.com/linux/v6.18.2/source/drivers/tee/optee/smc_abi.c#L409) +#[derive(Clone, Copy)] +#[repr(C)] +struct ShmRefPagesData { + pub pages_list: [u64; Self::PAGELIST_ENTRIES_PER_PAGE], + pub next_page_data: u64, +} +impl ShmRefPagesData { + const PAGELIST_ENTRIES_PER_PAGE: usize = + PAGE_SIZE / core::mem::size_of::() - core::mem::size_of::(); +} + +/// Data structure to maintain the information of OP-TEE shared memory in VTL0 referenced by `shm_ref`. +/// `page_addrs` contains an array of physical page addresses. +/// `page_offset` indicates the page offset of the first page (i.e., `pages[0]`) which should be +/// smaller than `ALIGN`. 
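// Editor's note: a small sketch of the addressing convention described above, assuming
// ALIGN = 4096. Byte `i` of the region described by (`page_addrs`, `page_offset`) lives
// in page `(page_offset + i) / ALIGN` at in-page offset `(page_offset + i) % ALIGN`.
// The helper and values below are illustrative only.
fn example_locate(page_offset: usize, i: usize) -> (usize, usize) {
    const EXAMPLE_ALIGN: usize = 4096;
    let byte = page_offset + i;
    (byte / EXAMPLE_ALIGN, byte % EXAMPLE_ALIGN)
}
// example_locate(0x234, 0)      == (0, 0x234)  -> first page, offset 0x234
// example_locate(0x234, 0x1000) == (1, 0x234)  -> second page, offset 0x234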
+#[derive(Clone)] +pub struct ShmInfo { + page_addrs: Box<[PhysPageAddr]>, + page_offset: usize, +} + +impl ShmInfo { + pub fn new( + page_addrs: Box<[PhysPageAddr]>, + page_offset: usize, + ) -> Result { + if page_offset >= ALIGN { + return Err(OpteeSmcReturn::EBadAddr); + } + Ok(Self { + page_addrs, + page_offset, + }) + } +} + +/// Conversion from `ShmInfo` to `NormalWorldConstPtr` and `NormalWorldMutPtr`. +/// +/// OP-TEE shared memory regions are untyped, so we use `u8` as the base type. +impl TryFrom> for NormalWorldConstPtr { + type Error = PhysPointerError; + + fn try_from(shm_info: ShmInfo) -> Result { + NormalWorldConstPtr::new(&shm_info.page_addrs, shm_info.page_offset) + } +} + +impl TryFrom> for NormalWorldMutPtr { + type Error = PhysPointerError; + + fn try_from(shm_info: ShmInfo) -> Result { + NormalWorldMutPtr::new(&shm_info.page_addrs, shm_info.page_offset) + } +} + +/// Maintain the information of OP-TEE shared memory in VTL0 referenced by `shm_ref`. +/// This data structure is for registering shared memory regions before they are +/// used during OP-TEE calls with parameters referencing shared memory. +/// Any normal memory references without this registration will be rejected. +struct ShmRefMap { + inner: spin::mutex::SpinMutex>>, +} + +impl ShmRefMap { + pub fn new() -> Self { + Self { + inner: spin::mutex::SpinMutex::new(HashMap::new()), + } + } + + pub fn insert(&self, shm_ref: u64, info: ShmInfo) -> Result<(), OpteeSmcReturn> { + let mut guard = self.inner.lock(); + if guard.contains_key(&shm_ref) { + Err(OpteeSmcReturn::ENotAvail) + } else { + let _ = guard.insert(shm_ref, info); + Ok(()) + } + } + + pub fn remove(&self, shm_ref: u64) -> Option> { + let mut guard = self.inner.lock(); + guard.remove(&shm_ref) + } + + pub fn get(&self, shm_ref: u64) -> Option> { + let guard = self.inner.lock(); + guard.get(&shm_ref).cloned() + } + + /// This function registers shared memory information that the normal world (VTL0) provides. + /// Specifically, it walks through a linked list of [`ShmRefPagesData`] structures referenced by + /// `shm_ref_pages_data_phys_addr` to create a slice of the shared physical page addresses + /// and registers the slice with `shm_ref` as its identifier. `page_offset` indicates + /// the page offset of the first page (i.e., `pages_list[0]` of the first [`ShmRefPagesData`]). + /// `aligned_size` indicates the page-aligned size of the shared memory region to register. 
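// Editor's note: with 4 KiB pages and the Linux `pages_data` layout referenced above,
// each `ShmRefPagesData` is assumed to hold PAGE_SIZE / size_of::<u64>() - 1 = 511 page
// addresses plus one link to the next list page, so e.g. a 4 MiB registration
// (1024 pages) spans three linked list pages (511 + 511 + 2). A hypothetical
// registration for the `buf_ptr` decomposition sketched earlier might look like this
// (values illustrative only):
fn example_register() -> Result<(), OpteeSmcReturn> {
    shm_ref_map().register_shm(
        0x8000_1000, // physical address of the first `ShmRefPagesData`
        0x234,       // page offset of the data within `pages_list[0]`
        0x3000,      // page-aligned size of the shared region (three 4 KiB pages)
        42,          // `shm_ref` identifier chosen by the normal world
    )
}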
+    pub fn register_shm(
+        &self,
+        shm_ref_pages_data_phys_addr: u64,
+        page_offset: u64,
+        aligned_size: u64,
+        shm_ref: u64,
+    ) -> Result<(), OpteeSmcReturn> {
+        if page_offset >= ALIGN as u64 || aligned_size == 0 {
+            return Err(OpteeSmcReturn::EBadAddr);
+        }
+        let num_pages = usize::try_from(aligned_size).unwrap() / ALIGN;
+        let mut pages = Vec::with_capacity(num_pages);
+        let mut cur_addr = usize::try_from(shm_ref_pages_data_phys_addr).unwrap();
+        loop {
+            let mut cur_ptr = NormalWorldConstPtr::<ShmRefPagesData>::with_usize(cur_addr)
+                .map_err(|_| OpteeSmcReturn::EBadAddr)?;
+            let pages_data =
+                unsafe { cur_ptr.read_at_offset(0) }.map_err(|_| OpteeSmcReturn::EBadAddr)?;
+            for page in &pages_data.pages_list {
+                if *page == 0 || pages.len() == num_pages {
+                    break;
+                } else {
+                    pages.push(
+                        PhysPageAddr::new(usize::try_from(*page).unwrap())
+                            .ok_or(OpteeSmcReturn::EBadAddr)?,
+                    );
+                }
+            }
+            if pages_data.next_page_data == 0 || pages.len() == num_pages {
+                break;
+            } else {
+                cur_addr = usize::try_from(pages_data.next_page_data).unwrap();
+            }
+        }
+
+        self.insert(
+            shm_ref,
+            ShmInfo::new(
+                pages.into_boxed_slice(),
+                usize::try_from(page_offset).unwrap(),
+            )?,
+        )?;
+        Ok(())
+    }
+}
+
+fn shm_ref_map() -> &'static ShmRefMap<PAGE_SIZE> {
+    static SHM_REF_MAP: OnceBox<ShmRefMap<PAGE_SIZE>> = OnceBox::new();
+    SHM_REF_MAP.get_or_init(|| Box::new(ShmRefMap::new()))
+}
+
+/// Get the normal world shared memory information (physical addresses and page offset) from `OpteeMsgParamTmem`.
+///
+/// Note that we use this function for handling TA requests, and in this context
+/// `OpteeMsgParamTmem` and `OpteeMsgParamRmem` are equivalent because every shared memory
+/// reference accessible by TAs must be registered in advance.
+/// `OpteeMsgParamTmem` is needed when we register shared memory regions (rmem is not allowed for this purpose).
+fn get_shm_info_from_optee_msg_param_tmem(
+    tmem: OpteeMsgParamTmem,
+) -> Result<ShmInfo<PAGE_SIZE>, OpteeSmcReturn> {
+    let rmem = OpteeMsgParamRmem {
+        offs: tmem.buf_ptr,
+        size: tmem.size,
+        shm_ref: tmem.shm_ref,
+    };
+    get_shm_info_from_optee_msg_param_rmem(rmem)
+}
+
+/// Get the normal world shared memory information (physical addresses and page offset) from `OpteeMsgParamRmem`.
+///
+/// `rmem.offs` must be an offset within the shared memory region previously registered with `rmem.shm_ref`,
+/// and `rmem.offs + rmem.size` must not exceed the size of the registered shared memory region.
+fn get_shm_info_from_optee_msg_param_rmem(
+    rmem: OpteeMsgParamRmem,
+) -> Result<ShmInfo<PAGE_SIZE>, OpteeSmcReturn> {
+    let Some(shm_info) = shm_ref_map().get(rmem.shm_ref) else {
+        return Err(OpteeSmcReturn::ENotAvail);
+    };
+    let page_offset = shm_info.page_offset;
+    let start = page_offset
+        .checked_add(rmem.offs.truncate())
+        .ok_or(OpteeSmcReturn::EBadAddr)?;
+    let end = start
+        .checked_add(rmem.size.truncate())
+        .ok_or(OpteeSmcReturn::EBadAddr)?;
+    let start_page_index = start / PAGE_SIZE;
+    let end_page_index = end.div_ceil(PAGE_SIZE);
+    if start_page_index >= shm_info.page_addrs.len() || end_page_index > shm_info.page_addrs.len() {
+        return Err(OpteeSmcReturn::EBadAddr);
+    }
+    let page_addrs = shm_info.page_addrs[start_page_index..end_page_index].to_vec();
+    ShmInfo::new(page_addrs.into_boxed_slice(), page_offset)
+}
+
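// Editor's note: a worked example of the sub-range computation above, assuming
// PAGE_SIZE = 4096. For a region registered with page_offset = 0x234 and an rmem
// parameter with offs = 0x2000 and size = 0x1800:
//
//     start            = 0x234 + 0x2000        = 0x2234
//     end              = 0x2234 + 0x1800       = 0x3a34
//     start_page_index = 0x2234 / 4096         = 2
//     end_page_index   = ceil(0x3a34 / 4096)   = 4
//
// so pages 2 and 3 of the registered region back this parameter, and the returned
// `ShmInfo` keeps the original page offset of the registered region.

+/// Read data from the normal world shared memory pages whose physical addresses are given in
+/// `shm_info` into `buffer`. The size of `buffer` indicates the number of bytes to read.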
+fn read_data_from_shm(
+    shm_info: &ShmInfo<PAGE_SIZE>,
+    buffer: &mut [u8],
+) -> Result<(), OpteeSmcReturn> {
+    let mut ptr: NormalWorldConstPtr<u8> = shm_info.clone().try_into()?;
+    unsafe {
+        ptr.read_slice_at_offset(0, buffer)?;
+    }
+    Ok(())
+}
+
+/// Write data in `buffer` to the normal world shared memory pages whose physical addresses are given
+/// in `shm_info`. The size of `buffer` indicates the number of bytes to write.
+fn write_data_to_shm(
+    shm_info: &ShmInfo<PAGE_SIZE>,
+    buffer: &[u8],
+) -> Result<(), OpteeSmcReturn> {
+    let mut ptr: NormalWorldMutPtr<u8> = shm_info.clone().try_into()?;
+    unsafe {
+        ptr.write_slice_at_offset(0, buffer)?;
+    }
+    Ok(())
+}
diff --git a/litebox_shim_optee/src/ptr.rs b/litebox_shim_optee/src/ptr.rs
new file mode 100644
index 000000000..0625804bb
--- /dev/null
+++ b/litebox_shim_optee/src/ptr.rs
@@ -0,0 +1,570 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+//! Physical Pointer Abstraction with On-demand Mapping
+//!
+//! This module adds support for accessing physical addresses (e.g., VTL0 or
+//! normal-world physical memory) from LiteBox with on-demand mapping.
+//! In the context of LVBS and OP-TEE, accessing physical memory is necessary
+//! because VTL0 and VTL1 as well as normal world and secure world do not share
+//! the same virtual address space, but they still have to share data through memory.
+//! VTL1 and secure world receive physical addresses from VTL0 and normal world,
+//! respectively, and they need to read from or write to those addresses.
+//!
+//! To simplify all of this, we could persistently map the entire VTL0/normal-world
+//! physical memory into VTL1/secure-world address space at once and just access it
+//! through the corresponding virtual addresses. However, this module does not take
+//! this approach due to scalability (e.g., how to deal with a system with terabytes of
+//! physical memory?) and security concerns (e.g., data corruption or information
+//! leakage due to concurrent or persistent access).
+//!
+//! Instead, the approach this module takes is to map the required physical memory
+//! regions on demand when accessing them while using a LiteBox-owned buffer to copy
+//! data to/from those regions. This way, this module can ensure that data must be
+//! copied into LiteBox-owned memory before being used while avoiding any unknown
+//! side effects due to persistent memory mapping.
+//!
+//! Considerations:
+//!
+//! Ideally, this module should be able to validate whether a given physical address
+//! is okay to access or even exists in the first place. For example, accessing
+//! LiteBox's own memory with this physical pointer abstraction must be prohibited to
+//! prevent the Boomerang attack and any other undefined memory access. Also, some
+//! device memory is mapped to certain physical address ranges and LiteBox should not
+//! touch them without in-depth knowledge. However, this is a bit tricky because, in
+//! many cases, LiteBox does not directly interact with the underlying hardware or
+//! BIOS/UEFI such that it does not have complete knowledge of the physical memory
+//! layout. In the case of LVBS, LiteBox obtains the physical memory information
+//! from VTL0 including the total physical memory size and the memory range assigned
+//! to VTL1/LiteBox. Thus, this module can at least confirm that a given physical address
+//! does not belong to VTL1's physical memory.
+//!
+//! This module should allow byte-level access while transparently handling page
+//! mapping and data access across page boundaries. This could become complicated
+//! when we consider multiple page sizes (e.g., 4 KiB, 2 MiB, 1 GiB). Also,
+//! unaligned access needs to be considered.
+//!
+//! In addition, oftentimes, this physical pointer abstraction is involved with
+//! a list of physical addresses (i.e., a scatter-gather list). For example, in
+//! the worst case, a two-byte value can span two non-contiguous physical
+//! pages (the last byte of the first page and the first byte of the second page).
+//! Thus, to improve performance, we may need to consider mapping multiple pages
+//! at once, copying data from/to them, and unmapping them later.
+//!
+//! When this module needs to access data across physical page boundaries, it assumes
+//! that those physical pages are virtually contiguous in VTL0 or normal-world address
+//! space. Otherwise, this module could end up accessing unrelated data. This is a
+//! best-effort assumption and ensuring it is the caller's responsibility (e.g., even
+//! if this module always requires a list of physical addresses, the caller might
+//! provide a wrong list by mistake or intentionally).
+
+// TODO: Since the below `PhysMutPtr` and `PhysConstPtr` are not OP-TEE specific,
+// we can move them to a different crate (e.g., `litebox`) if needed.
+
+use litebox::platform::vmap::{
+    PhysPageAddr, PhysPageMapInfo, PhysPageMapPermissions, PhysPointerError, VmapProvider,
+};
+use litebox_platform_multiplex::platform;
+
+#[inline]
+fn align_down(address: usize, align: usize) -> usize {
+    address & !(align - 1)
+}
+
+#[inline]
+fn align_up(len: usize, align: usize) -> usize {
+    len.next_multiple_of(align)
+}
+
+/// Represent a physical pointer to an object with on-demand mapping.
+/// - `pages`: An array of page-aligned physical addresses. Physical addresses in this array should be
+///   virtually contiguous.
+/// - `offset`: The offset within `pages[0]` where the object starts. It should be smaller than `ALIGN`.
+/// - `count`: The number of objects of type `T` that can be accessed from this pointer.
+/// - `map_info`: The mapping information of the currently mapped physical pages, if any.
+/// - `T`: The type of the object being pointed to. `pages` with respect to `offset` should cover enough
+///   memory for an object of type `T`.
+#[derive(Clone)]
+#[repr(C)]
+pub struct PhysMutPtr<T, const ALIGN: usize> {
+    pages: alloc::boxed::Box<[PhysPageAddr<ALIGN>]>,
+    offset: usize,
+    count: usize,
+    map_info: Option<PhysPageMapInfo<ALIGN>>,
+    _type: core::marker::PhantomData<T>,
+}
+
+impl<T, const ALIGN: usize> PhysMutPtr<T, ALIGN> {
+    /// Create a new `PhysMutPtr` from the given physical page array and offset.
+    ///
+    /// All addresses in `pages` should be valid and aligned to `ALIGN`, and `offset` should be
+    /// smaller than `ALIGN`. Also, `pages` should contain enough pages to cover at least one
+    /// object of type `T` starting from `offset`. If these conditions are not met, this function
+    /// returns `Err(PhysPointerError)`.
+    pub fn new(pages: &[PhysPageAddr<ALIGN>], offset: usize) -> Result<Self, PhysPointerError> {
+        if offset >= ALIGN {
+            return Err(PhysPointerError::InvalidBaseOffset(offset, ALIGN));
+        }
+        let size = if pages.is_empty() {
+            0
+        } else {
+            pages
+                .len()
+                .checked_mul(ALIGN)
+                .ok_or(PhysPointerError::Overflow)?
+ - offset + }; + if size < core::mem::size_of::() { + return Err(PhysPointerError::InsufficientPhysicalPages( + size, + core::mem::size_of::(), + )); + } + platform().validate(pages.into())?; + Ok(Self { + pages: pages.into(), + offset, + count: size / core::mem::size_of::(), + map_info: None, + _type: core::marker::PhantomData, + }) + } + + /// Create a new `PhysMutPtr` from the given contiguous physical address and length. + /// + /// This is a shortcut for + /// `PhysMutPtr::new([align_down(pa), align_down(pa) + ALIGN, ..., align_up(pa + bytes) - ALIGN], pa % ALIGN)`. + /// This function assumes that `pa`, ..., `pa+bytes` are both physically and virtually contiguous. If not, + /// later accesses through `PhysMutPtr` may read/write data in a wrong order. + pub fn with_contiguous_pages(pa: usize, bytes: usize) -> Result { + if bytes < core::mem::size_of::() { + return Err(PhysPointerError::InsufficientPhysicalPages( + bytes, + core::mem::size_of::(), + )); + } + let start_page = align_down(pa, ALIGN); + let end_page = align_up( + pa.checked_add(bytes).ok_or(PhysPointerError::Overflow)?, + ALIGN, + ); + let mut pages = alloc::vec::Vec::with_capacity((end_page - start_page) / ALIGN); + let mut current_page = start_page; + while current_page < end_page { + pages.push( + PhysPageAddr::::new(current_page) + .ok_or(PhysPointerError::InvalidPhysicalAddress(current_page))?, + ); + current_page += ALIGN; + } + Self::new(&pages, pa - start_page) + } + + /// Create a new `PhysMutPtr` from the given physical address for a single object. + /// + /// This is a shortcut for `PhysMutPtr::with_contiguous_pages(pa, size_of::())`. + /// + /// Note: This module doesn't provide `as_usize` because LiteBox should not dereference physical addresses directly. + pub fn with_usize(pa: usize) -> Result { + Self::with_contiguous_pages(pa, core::mem::size_of::()) + } + + /// Read the value at the given offset from the physical pointer. + /// + /// # Safety + /// + /// The caller should be aware that the given physical address might be concurrently written by + /// other entities (e.g., the normal world kernel) if there is no extra security mechanism + /// in place (e.g., by the hypervisor or hardware). That is, it might read corrupt data. + pub unsafe fn read_at_offset( + &mut self, + count: usize, + ) -> Result, PhysPointerError> { + if count >= self.count { + return Err(PhysPointerError::IndexOutOfBounds(count, self.count)); + } + let skip = self + .offset + .checked_add( + count + .checked_mul(core::mem::size_of::()) + .ok_or(PhysPointerError::Overflow)?, + ) + .ok_or(PhysPointerError::Overflow)?; + let start = skip / ALIGN; + let end = (skip + core::mem::size_of::()).div_ceil(ALIGN); + unsafe { + self.map_range(start, end, PhysPageMapPermissions::READ)?; + } + // Don't forget to call unmap() before returning to the caller + let Some(src) = self.base_ptr() else { + unsafe { + self.unmap()?; + } + return Err(PhysPointerError::NoMappingInfo); + }; + let src = src.wrapping_add(count); + let val = { + let mut buffer = core::mem::MaybeUninit::::uninit(); + if (src as usize).is_multiple_of(core::mem::align_of::()) { + unsafe { + core::ptr::copy_nonoverlapping(src, buffer.as_mut_ptr(), 1); + } + } else { + unsafe { + core::ptr::copy_nonoverlapping( + src.cast::(), + buffer.as_mut_ptr().cast::(), + core::mem::size_of::(), + ); + } + } + unsafe { buffer.assume_init() } + }; + unsafe { + self.unmap()?; + } + Ok(alloc::boxed::Box::new(val)) + } + + /// Read a slice of values at the given offset from the physical pointer. 
+ /// + /// # Safety + /// + /// The caller should be aware that the given physical address might be concurrently written by + /// other entities (e.g., the normal world kernel) if there is no extra security mechanism + /// in place (e.g., by the hypervisor or hardware). That is, it might read corrupt data. + pub unsafe fn read_slice_at_offset( + &mut self, + count: usize, + values: &mut [T], + ) -> Result<(), PhysPointerError> { + if count + .checked_add(values.len()) + .is_none_or(|end| end > self.count) + { + return Err(PhysPointerError::IndexOutOfBounds(count, self.count)); + } + let skip = self + .offset + .checked_add( + count + .checked_mul(core::mem::size_of::()) + .ok_or(PhysPointerError::Overflow)?, + ) + .ok_or(PhysPointerError::Overflow)?; + let start = skip / ALIGN; + let end = (skip + core::mem::size_of_val(values)).div_ceil(ALIGN); + unsafe { + self.map_range(start, end, PhysPageMapPermissions::READ)?; + } + // Don't forget to call unmap() before returning to the caller + let Some(src) = self.base_ptr() else { + unsafe { + self.unmap()?; + } + return Err(PhysPointerError::NoMappingInfo); + }; + let src = src.wrapping_add(count); + if (src as usize).is_multiple_of(core::mem::align_of::()) { + unsafe { + core::ptr::copy_nonoverlapping(src, values.as_mut_ptr(), values.len()); + } + } else { + unsafe { + core::ptr::copy_nonoverlapping( + src.cast::(), + values.as_mut_ptr().cast::(), + core::mem::size_of_val(values), + ); + } + } + unsafe { + self.unmap()?; + } + Ok(()) + } + + /// Write the value at the given offset to the physical pointer. + /// + /// # Safety + /// + /// The caller should be aware that the given physical address might be concurrently written by + /// other entities (e.g., the normal world kernel) if there is no extra security mechanism + /// in place (e.g., by the hypervisor or hardware). That is, data it writes might be overwritten. + pub unsafe fn write_at_offset( + &mut self, + count: usize, + value: T, + ) -> Result<(), PhysPointerError> { + if count >= self.count { + return Err(PhysPointerError::IndexOutOfBounds(count, self.count)); + } + let skip = self + .offset + .checked_add( + count + .checked_mul(core::mem::size_of::()) + .ok_or(PhysPointerError::Overflow)?, + ) + .ok_or(PhysPointerError::Overflow)?; + let start = skip / ALIGN; + let end = (skip + core::mem::size_of::()).div_ceil(ALIGN); + unsafe { + self.map_range( + start, + end, + PhysPageMapPermissions::READ | PhysPageMapPermissions::WRITE, + )?; + } + // Don't forget to call unmap() before returning to the caller + let Some(dst) = self.base_ptr() else { + unsafe { + self.unmap()?; + } + return Err(PhysPointerError::NoMappingInfo); + }; + let dst = dst.wrapping_add(count); + if (dst as usize).is_multiple_of(core::mem::align_of::()) { + unsafe { core::ptr::write(dst, value) }; + } else { + unsafe { core::ptr::write_unaligned(dst, value) }; + } + unsafe { + self.unmap()?; + } + Ok(()) + } + + /// Write a slice of values at the given offset to the physical pointer. + /// + /// # Safety + /// + /// The caller should be aware that the given physical address might be concurrently written by + /// other entities (e.g., the normal world kernel) if there is no extra security mechanism + /// in place (e.g., by the hypervisor or hardware). That is, data it writes might be overwritten. 
+ pub unsafe fn write_slice_at_offset( + &mut self, + count: usize, + values: &[T], + ) -> Result<(), PhysPointerError> { + if count + .checked_add(values.len()) + .is_none_or(|end| end > self.count) + { + return Err(PhysPointerError::IndexOutOfBounds(count, self.count)); + } + let skip = self + .offset + .checked_add( + count + .checked_mul(core::mem::size_of::()) + .ok_or(PhysPointerError::Overflow)?, + ) + .ok_or(PhysPointerError::Overflow)?; + let start = skip / ALIGN; + let end = (skip + core::mem::size_of_val(values)).div_ceil(ALIGN); + unsafe { + self.map_range( + start, + end, + PhysPageMapPermissions::READ | PhysPageMapPermissions::WRITE, + )?; + } + // Don't forget to call unmap() before returning to the caller + let Some(dst) = self.base_ptr() else { + unsafe { + self.unmap()?; + } + return Err(PhysPointerError::NoMappingInfo); + }; + let dst = dst.wrapping_add(count); + if (dst as usize).is_multiple_of(core::mem::align_of::()) { + unsafe { + core::ptr::copy_nonoverlapping(values.as_ptr(), dst, values.len()); + } + } else { + unsafe { + core::ptr::copy_nonoverlapping( + values.as_ptr().cast::(), + dst.cast::(), + core::mem::size_of_val(values), + ); + } + } + unsafe { + self.unmap()?; + } + Ok(()) + } + + /// Map the physical pages from `start` to `end` indexes. + /// + /// # Safety + /// + /// This function assumes that the underlying platform safely handles concurrent mapping/unmapping + /// requests for the same physical pages. + unsafe fn map_range( + &mut self, + start: usize, + end: usize, + perms: PhysPageMapPermissions, + ) -> Result<(), PhysPointerError> { + if start >= end || end > self.pages.len() { + return Err(PhysPointerError::IndexOutOfBounds(end, self.pages.len())); + } + let accept_perms = PhysPageMapPermissions::READ | PhysPageMapPermissions::WRITE; + if perms.bits() & !accept_perms.bits() != 0 { + return Err(PhysPointerError::UnsupportedPermissions(perms.bits())); + } + if self.map_info.is_none() { + let sub_pages = &self.pages[start..end]; + unsafe { + platform().vmap(sub_pages.into(), perms).map(|info| { + self.map_info = Some(info); + })?; + } + Ok(()) + } else { + Err(PhysPointerError::AlreadyMapped( + self.pages.first().map_or(0, |p| p.as_usize()), + )) + } + } + + /// Unmap the physical pages if mapped. + /// + /// # Safety + /// + /// This function assumes that the underlying platform safely handles concurrent mapping/unmapping + /// requests for the same physical pages. + unsafe fn unmap(&mut self) -> Result<(), PhysPointerError> { + if let Some(map_info) = self.map_info.take() { + unsafe { + platform().vunmap(map_info)?; + } + self.map_info = None; + Ok(()) + } else { + Err(PhysPointerError::Unmapped( + self.pages.first().map_or(0, |p| p.as_usize()), + )) + } + } + + /// Get the base virtual pointer if mapped. + #[inline] + fn base_ptr(&self) -> Option<*mut T> { + let Some(map_info) = &self.map_info else { + return None; + }; + Some(map_info.base.wrapping_add(self.offset).cast::()) + } +} + +impl Drop for PhysMutPtr { + fn drop(&mut self) { + let _ = unsafe { self.unmap() }; + } +} + +impl core::fmt::Debug for PhysMutPtr { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("PhysMutPtr") + .field("pages[0]", &self.pages.first().map_or(0, |p| p.as_usize())) + .field("offset", &self.offset) + .finish_non_exhaustive() + } +} + +/// Represent a physical pointer to a read-only object. This wraps around [`PhysMutPtr`] and +/// exposes only read access. 
+#[derive(Clone)] +#[repr(C)] +pub struct PhysConstPtr { + inner: PhysMutPtr, +} + +impl PhysConstPtr { + /// Create a new `PhysConstPtr` from the given physical page array and offset. + /// + /// All addresses in `pages` should be valid and aligned to `ALIGN`, and `offset` should be smaller + /// than `ALIGN`. Also, `pages` should contain enough pages to cover at least one object of + /// type `T` starting from `offset`. If these conditions are not met, this function returns + /// `Err(PhysPointerError)`. + pub fn new(pages: &[PhysPageAddr], offset: usize) -> Result { + Ok(Self { + inner: PhysMutPtr::new(pages, offset)?, + }) + } + + /// Create a new `PhysConstPtr` from the given contiguous physical address and length. + /// + /// This is a shortcut for + /// `PhysConstPtr::new([align_down(pa), align_down(pa) + ALIGN, ..., align_up(pa + bytes) - ALIGN], pa % ALIGN)`. + /// This function assumes that `pa`, ..., `pa+bytes` are both physically and virtually contiguous. If not, + /// later accesses through `PhysConstPtr` may read data in a wrong order. + pub fn with_contiguous_pages(pa: usize, bytes: usize) -> Result { + Ok(Self { + inner: PhysMutPtr::with_contiguous_pages(pa, bytes)?, + }) + } + + /// Create a new `PhysConstPtr` from the given physical address for a single object. + /// + /// This is a shortcut for `PhysConstPtr::with_contiguous_pages(pa, size_of::())`. + /// + /// Note: This module doesn't provide `as_usize` because LiteBox should not dereference physical addresses directly. + pub fn with_usize(pa: usize) -> Result { + Ok(Self { + inner: PhysMutPtr::with_usize(pa)?, + }) + } + + /// Read the value at the given offset from the physical pointer. + /// + /// # Safety + /// + /// The caller should be aware that the given physical address might be concurrently written by + /// other entities (e.g., the normal world kernel) if there is no extra security mechanism + /// in place (e.g., by the hypervisor or hardware). That is, it might read corrupt data. + pub unsafe fn read_at_offset( + &mut self, + count: usize, + ) -> Result, PhysPointerError> { + unsafe { self.inner.read_at_offset(count) } + } + + /// Read a slice of values at the given offset from the physical pointer. + /// + /// # Safety + /// + /// The caller should be aware that the given physical address might be concurrently written by + /// other entities (e.g., the normal world kernel) if there is no extra security mechanism + /// in place (e.g., by the hypervisor or hardware). That is, it might read corrupt data. + pub unsafe fn read_slice_at_offset( + &mut self, + count: usize, + values: &mut [T], + ) -> Result<(), PhysPointerError> { + unsafe { self.inner.read_slice_at_offset(count, values) } + } +} + +impl Drop for PhysConstPtr { + fn drop(&mut self) { + let _ = unsafe { self.inner.unmap() }; + } +} + +impl core::fmt::Debug for PhysConstPtr { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("PhysConstPtr") + .field( + "pages[0]", + &self.inner.pages.first().map_or(0, |p| p.as_usize()), + ) + .field("offset", &self.inner.offset) + .finish_non_exhaustive() + } +}
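// Editor's note: an end-to-end usage sketch of the pointer types above, assuming a
// 4 KiB page size and that the types are generic over the element type and page size
// (`PhysConstPtr<T, ALIGN>` / `PhysMutPtr<T, ALIGN>`), as the stripped generics in this
// patch suggest. `Header`, `example`, and the physical address are hypothetical; in
// practice the address comes from a normal-world (VTL0) request. Each access maps the
// backing pages on demand, copies through a LiteBox-owned buffer, and unmaps again.
use litebox_shim_optee::ptr::{PhysConstPtr, PhysMutPtr};

#[repr(C)]
#[derive(Clone, Copy)]
struct Header {
    magic: u32,
    len: u32,
}

fn example(pa: usize) -> Result<(), litebox::platform::vmap::PhysPointerError> {
    // Read a Header that may straddle a page boundary; the constructor validates
    // that the backing pages do not belong to LiteBox-owned memory.
    let mut rd = PhysConstPtr::<Header, 4096>::with_usize(pa)?;
    let hdr = unsafe { rd.read_at_offset(0)? };
    // Write an acknowledgment word right after the header (offset chosen for illustration).
    let mut wr = PhysMutPtr::<u32, 4096>::with_contiguous_pages(pa + 8, 4)?;
    unsafe { wr.write_at_offset(0, hdr.magic)? };
    Ok(())
}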