diff --git a/Cargo.lock b/Cargo.lock index 93b174c06..d17118788 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -881,6 +881,7 @@ dependencies = [ "litebox_common_linux", "modular-bitfield", "num_enum", + "thiserror", ] [[package]] diff --git a/dev_tests/src/ratchet.rs b/dev_tests/src/ratchet.rs index 894dcc311..49c76bc60 100644 --- a/dev_tests/src/ratchet.rs +++ b/dev_tests/src/ratchet.rs @@ -39,7 +39,7 @@ fn ratchet_globals() -> Result<()> { ("litebox_runner_lvbs/", 3), ("litebox_runner_snp/", 1), ("litebox_shim_linux/", 1), - ("litebox_shim_optee/", 1), + ("litebox_shim_optee/", 2), ], |file| { Ok(file @@ -69,6 +69,7 @@ fn ratchet_maybe_uninit() -> Result<()> { ("litebox_platform_linux_userland/", 3), ("litebox_platform_lvbs/", 5), ("litebox_shim_linux/", 5), + ("litebox_shim_optee/", 1), ], |file| { Ok(file diff --git a/litebox/src/mm/linux.rs b/litebox/src/mm/linux.rs index 188e9a3b6..aa1348f30 100644 --- a/litebox/src/mm/linux.rs +++ b/litebox/src/mm/linux.rs @@ -216,6 +216,7 @@ impl core::ops::Add for NonZeroPageSize { } /// A non-zero address that is `ALIGN`-aligned. +#[derive(Clone, Copy)] pub struct NonZeroAddress(usize); impl NonZeroAddress { diff --git a/litebox_common_linux/src/lib.rs b/litebox_common_linux/src/lib.rs index 42cd6f271..172edacf8 100644 --- a/litebox_common_linux/src/lib.rs +++ b/litebox_common_linux/src/lib.rs @@ -21,6 +21,7 @@ pub mod errno; pub mod loader; pub mod mm; pub mod signal; +pub mod vmap; extern crate alloc; diff --git a/litebox_common_linux/src/vmap.rs b/litebox_common_linux/src/vmap.rs new file mode 100644 index 000000000..e2c6e3d65 --- /dev/null +++ b/litebox_common_linux/src/vmap.rs @@ -0,0 +1,175 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +use litebox::platform::page_mgmt::MemoryRegionPermissions; +use thiserror::Error; + +/// A provider to map and unmap physical pages with virtually contiguous addresses. +/// +/// `ALIGN`: The page frame size. +/// +/// This provider exists to service `litebox_shim_optee::ptr::PhysMutPtr` and +/// `litebox_shim_optee::ptr::PhysConstPtr`. It can benefit other modules which need +/// Linux kernel's `vmap()` and `vunmap()` functionalities (e.g., HVCI/HEKI, drivers). +pub trait VmapManager { + /// Map the given `PhysPageAddrArray` into virtually contiguous addresses with the given + /// [`PhysPageMapPermissions`] while returning [`PhysPageMapInfo`]. + /// + /// This function is analogous to Linux kernel's `vmap()`. + /// + /// # Safety + /// + /// The caller should ensure that `pages` are not in active use by other entities + /// (especially, there should be no read/write or write/write conflicts). + /// Unfortunately, LiteBox itself cannot fully guarantee this and it needs some helps + /// from the caller, hypervisor, or hardware. + /// Multiple LiteBox threads might concurrently call this function with overlapping + /// physical pages, so the implementation should safely handle such cases. + unsafe fn vmap( + &self, + _pages: &PhysPageAddrArray, + _perms: PhysPageMapPermissions, + ) -> Result, PhysPointerError> { + Err(PhysPointerError::UnsupportedOperation) + } + + /// Unmap the previously mapped virtually contiguous addresses ([`PhysPageMapInfo`]). + /// + /// This function is analogous to Linux kernel's `vunmap()`. + /// + /// # Safety + /// + /// The caller should ensure that the virtual addresses in `vmap_info` are not in active + /// use by other entities. 
+ unsafe fn vunmap(&self, _vmap_info: PhysPageMapInfo) -> Result<(), PhysPointerError> { + Err(PhysPointerError::UnsupportedOperation) + } + + /// Validate that the given physical pages are not owned by LiteBox. + /// + /// Platform is expected to track which physical memory addresses are owned by LiteBox (e.g., VTL1 memory addresses). + /// + /// Returns `Ok(())` if the physical pages are not owned by LiteBox. Otherwise, returns `Err(PhysPointerError)`. + fn validate_unowned(&self, _pages: &PhysPageAddrArray) -> Result<(), PhysPointerError> { + Ok(()) + } + + /// Protect the given physical pages to ensure concurrent read or exclusive write access: + /// - Read protection: prevent others from writing to the pages. + /// - Read/write protection: prevent others from reading or writing to the pages. + /// - No protection: allow others to read and write the pages. + /// + /// This function can be implemented using EPT/NPT, TZASC, PMP, or some other hardware mechanisms. + /// If the platform does not support such protection, this function returns `Ok(())` without any action. + /// + /// Returns `Ok(())` if it successfully protects the pages. If it fails, returns + /// `Err(PhysPointerError)`. + /// + /// # Safety + /// + /// This function relies on hypercalls or other privileged hardware features and assumes those features + /// are safe to use. + /// The caller should unprotect the pages when they are no longer needed to access them. + unsafe fn protect( + &self, + _pages: &PhysPageAddrArray, + _perms: PhysPageMapPermissions, + ) -> Result<(), PhysPointerError> { + Ok(()) + } +} + +/// Data structure representing a physical address with page alignment. +/// +/// Currently, this is an alias to `crate::mm::linux::NonZeroAddress`. This might change if +/// we selectively conduct sanity checks based on whether an address is virtual or physical +/// (e.g., whether a virtual address is canonical, whether a physical address is tagged with +/// a valid key ID, etc.). +pub type PhysPageAddr = litebox::mm::linux::NonZeroAddress; + +/// Data structure for an array of physical page addresses which are virtually contiguous. +pub type PhysPageAddrArray = [PhysPageAddr]; + +/// Data structure to maintain the mapping information returned by `vmap()`. +#[derive(Clone)] +pub struct PhysPageMapInfo { + /// Virtual address of the mapped region which is page aligned. + pub base: *mut u8, + /// The size of the mapped region in bytes. + pub size: usize, +} + +bitflags::bitflags! { + /// Physical page map permissions which is a restricted version of + /// [`litebox::platform::page_mgmt::MemoryRegionPermissions`]. + /// + /// This module only supports READ and WRITE permissions. Both EXECUTE and SHARED + /// permissions are explicitly prohibited. 
+ #[derive(Clone, Copy, Debug, PartialEq, Eq)] + pub struct PhysPageMapPermissions: u8 { + /// Readable + const READ = 1 << 0; + /// Writable + const WRITE = 1 << 1; + } +} + +impl From for PhysPageMapPermissions { + fn from(perms: MemoryRegionPermissions) -> Self { + let mut phys_perms = PhysPageMapPermissions::empty(); + if perms.contains(MemoryRegionPermissions::READ) { + phys_perms |= PhysPageMapPermissions::READ; + } + if perms.contains(MemoryRegionPermissions::WRITE) { + phys_perms |= PhysPageMapPermissions::WRITE; + } + phys_perms + } +} + +impl From for MemoryRegionPermissions { + fn from(perms: PhysPageMapPermissions) -> Self { + let mut mem_perms = MemoryRegionPermissions::empty(); + if perms.contains(PhysPageMapPermissions::READ) { + mem_perms |= MemoryRegionPermissions::READ; + } + if perms.contains(PhysPageMapPermissions::WRITE) { + mem_perms |= MemoryRegionPermissions::WRITE; + } + mem_perms + } +} + +/// Possible errors for physical pointer access with `VmapManager` +#[non_exhaustive] +#[derive(Error, Debug)] +pub enum PhysPointerError { + #[error("Physical address {0:#x} is invalid to access")] + InvalidPhysicalAddress(usize), + #[error("Physical address {0:#x} is not aligned to {1} bytes")] + UnalignedPhysicalAddress(usize, usize), + #[error("Offset {0:#x} is not aligned to {1} bytes")] + UnalignedOffset(usize, usize), + #[error("Base offset {0:#x} is greater than or equal to alignment ({1} bytes)")] + InvalidBaseOffset(usize, usize), + #[error( + "The total size of the given pages ({0} bytes) is insufficient for the requested type ({1} bytes)" + )] + InsufficientPhysicalPages(usize, usize), + #[error("Index {0} is out of bounds (count: {1})")] + IndexOutOfBounds(usize, usize), + #[error("Physical address {0:#x} is already mapped")] + AlreadyMapped(usize), + #[error("Physical address {0:#x} is unmapped")] + Unmapped(usize), + #[error("No mapping information available")] + NoMappingInfo, + #[error("Overflow occurred during calculation")] + Overflow, + #[error("Non-contiguous physical pages in the array")] + NonContiguousPages, + #[error("The operation is unsupported on this platform")] + UnsupportedOperation, + #[error("Unsupported permissions: {0:#x}")] + UnsupportedPermissions(u8), +} diff --git a/litebox_common_optee/Cargo.toml b/litebox_common_optee/Cargo.toml index 5b88e7c9f..901997b43 100644 --- a/litebox_common_optee/Cargo.toml +++ b/litebox_common_optee/Cargo.toml @@ -9,6 +9,7 @@ litebox = { path = "../litebox/", version = "0.1.0" } litebox_common_linux = { path = "../litebox_common_linux/", version = "0.1.0" } modular-bitfield = { version = "0.12.0", default-features = false } num_enum = { version = "0.7.3", default-features = false } +thiserror = { version = "2.0.6", default-features = false } [lints] workspace = true diff --git a/litebox_common_optee/src/lib.rs b/litebox_common_optee/src/lib.rs index d4595db59..363851d4c 100644 --- a/litebox_common_optee/src/lib.rs +++ b/litebox_common_optee/src/lib.rs @@ -1114,47 +1114,67 @@ pub enum OpteeMessageCommand { UnregisterShm = OPTEE_MSG_CMD_UNREGISTER_SHM, DoBottomHalf = OPTEE_MSG_CMD_DO_BOTTOM_HALF, StopAsyncNotif = OPTEE_MSG_CMD_STOP_ASYNC_NOTIF, - Unknown = 0xffff_ffff, } -/// Temporary reference memory parameter +impl TryFrom for UteeEntryFunc { + type Error = OpteeSmcReturnCode; + fn try_from(cmd: OpteeMessageCommand) -> Result { + match cmd { + OpteeMessageCommand::OpenSession => Ok(UteeEntryFunc::OpenSession), + OpteeMessageCommand::CloseSession => Ok(UteeEntryFunc::CloseSession), + 
OpteeMessageCommand::InvokeCommand => Ok(UteeEntryFunc::InvokeCommand), + _ => Err(OpteeSmcReturnCode::EBadCmd), + } + } +} + +/// Temporary memory reference parameter +/// +/// `optee_msg_param_tmem` from `optee_os/core/include/optee_msg.h` #[derive(Clone, Copy, Debug)] #[repr(C)] pub struct OpteeMsgParamTmem { /// Physical address of the buffer - buf_ptr: u64, + pub buf_ptr: u64, /// Size of the buffer - size: u64, + pub size: u64, /// Temporary shared memory reference or identifier - shm_ref: u64, + pub shm_ref: u64, } /// Registered memory reference parameter +/// +/// `optee_msg_param_rmem` from `optee_os/core/include/optee_msg.h` #[derive(Clone, Copy)] #[repr(C)] pub struct OpteeMsgParamRmem { /// Offset into shared memory reference - offs: u64, + pub offs: u64, /// Size of the buffer - size: u64, + pub size: u64, /// Shared memory reference or identifier - shm_ref: u64, + pub shm_ref: u64, } /// FF-A memory reference parameter +/// +/// `optee_msg_param_fmem` from `optee_os/core/include/optee_msg.h` +/// +/// Note: LiteBox doesn't currently support FF-A shared memory, so this struct is +/// provided for completeness but is not used. #[derive(Clone, Copy)] #[repr(C)] pub struct OpteeMsgParamFmem { /// Lower bits of offset into shared memory reference - offs_low: u32, + pub offs_low: u32, /// Higher bits of offset into shared memory reference - offs_high: u32, + pub offs_high: u32, /// Internal offset into the first page of shared memory reference - internal_offs: u16, + pub internal_offs: u16, /// Size of the buffer - size: u64, + pub size: u64, /// Global identifier of the shared memory - global_id: u64, + pub global_id: u64, } /// Opaque value parameter @@ -1162,12 +1182,12 @@ pub struct OpteeMsgParamFmem { #[derive(Debug, Clone, Copy)] #[repr(C)] pub struct OpteeMsgParamValue { - a: u64, - b: u64, - c: u64, + pub a: u64, + pub b: u64, + pub c: u64, } -/// Parameter used together with `OpteeMsgArg` +/// Parameter used together with `OpteeMsgArgs` #[derive(Clone, Copy)] #[repr(C)] pub union OpteeMsgParamUnion { @@ -1230,6 +1250,54 @@ impl OpteeMsgParam { pub fn attr_type(&self) -> OpteeMsgAttrType { OpteeMsgAttrType::try_from(self.attr.typ()).unwrap_or(OpteeMsgAttrType::None) } + pub fn get_param_tmem(&self) -> Option { + if matches!( + self.attr.typ(), + OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + | OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT + | OPTEE_MSG_ATTR_TYPE_TMEM_INOUT + ) { + Some(unsafe { self.u.tmem }) + } else { + None + } + } + pub fn get_param_rmem(&self) -> Option { + if matches!( + self.attr.typ(), + OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + | OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT + | OPTEE_MSG_ATTR_TYPE_RMEM_INOUT + ) { + Some(unsafe { self.u.rmem }) + } else { + None + } + } + pub fn get_param_fmem(&self) -> Option { + if matches!( + self.attr.typ(), + OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + | OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT + | OPTEE_MSG_ATTR_TYPE_RMEM_INOUT + ) { + Some(unsafe { self.u.fmem }) + } else { + None + } + } + pub fn get_param_value(&self) -> Option { + if matches!( + self.attr.typ(), + OPTEE_MSG_ATTR_TYPE_VALUE_INPUT + | OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT + | OPTEE_MSG_ATTR_TYPE_VALUE_INOUT + ) { + Some(unsafe { self.u.value }) + } else { + None + } + } } /// `optee_msg_arg` from `optee_os/core/include/optee_msg.h` @@ -1237,32 +1305,84 @@ impl OpteeMsgParam { /// exchange messages. #[derive(Clone, Copy)] #[repr(C)] -pub struct OpteeMsgArg { +pub struct OpteeMsgArgs { /// OP-TEE message command. This is a superset of `UteeEntryFunc`. 
- cmd: OpteeMessageCommand, + pub cmd: OpteeMessageCommand, /// TA function ID which is used if `cmd == InvokeCommand`. Note that the meaning of `cmd` and `func` /// is swapped compared to TAs. - func: u32, + pub func: u32, /// Session ID. This is "IN" parameter most of the time except for `cmd == OpenSession` where /// the secure world generates and returns a session ID. - session: u32, + pub session: u32, /// Cancellation ID. This is a unique value to identify this request. - cancel_id: u32, + pub cancel_id: u32, pad: u32, /// Return value from the secure world - ret: u32, + pub ret: u32, /// Origin of the return value - ret_origin: TeeOrigin, + pub ret_origin: TeeOrigin, /// Number of parameters contained in `params` - num_params: u32, + pub num_params: u32, /// Parameters to be passed to the secure world. If `cmd == OpenSession`, the first two params contain /// a TA UUID and they are not delivered to the TA. /// Note that, originally, the length of this array is variable. We fix it to `TEE_NUM_PARAMS + 2` to /// simplify the implementation (our OP-TEE Shim supports up to four parameters as well). - params: [OpteeMsgParam; TEE_NUM_PARAMS + 2], + pub params: [OpteeMsgParam; TEE_NUM_PARAMS + 2], } -/// OP-TEE SMC call arguments. +impl OpteeMsgArgs { + /// Validate the message argument structure. + pub fn validate(&self) -> Result<(), OpteeSmcReturnCode> { + let _ = OpteeMessageCommand::try_from(self.cmd as u32) + .map_err(|_| OpteeSmcReturnCode::EBadCmd)?; + if self.cmd == OpteeMessageCommand::OpenSession && self.num_params < 2 { + return Err(OpteeSmcReturnCode::EBadCmd); + } + if self.num_params as usize > self.params.len() { + Err(OpteeSmcReturnCode::EBadCmd) + } else { + Ok(()) + } + } + pub fn get_param_tmem(&self, index: usize) -> Result { + if index >= self.num_params as usize { + Err(OpteeSmcReturnCode::ENotAvail) + } else { + Ok(self.params[index] + .get_param_tmem() + .ok_or(OpteeSmcReturnCode::EBadCmd)?) + } + } + pub fn get_param_rmem(&self, index: usize) -> Result { + if index >= self.num_params as usize { + Err(OpteeSmcReturnCode::ENotAvail) + } else { + Ok(self.params[index] + .get_param_rmem() + .ok_or(OpteeSmcReturnCode::EBadCmd)?) + } + } + pub fn get_param_fmem(&self, index: usize) -> Result { + if index >= self.num_params as usize { + Err(OpteeSmcReturnCode::ENotAvail) + } else { + Ok(self.params[index] + .get_param_fmem() + .ok_or(OpteeSmcReturnCode::EBadCmd)?) + } + } + pub fn get_param_value(&self, index: usize) -> Result { + if index >= self.num_params as usize { + Err(OpteeSmcReturnCode::ENotAvail) + } else { + Ok(self.params[index] + .get_param_value() + .ok_or(OpteeSmcReturnCode::EBadCmd)?) + } + } +} + +/// A memory page to exchange OP-TEE SMC call arguments. /// OP-TEE assumes that the underlying architecture is Arm with TrustZone and /// thus it uses Secure Monitor Call (SMC) calling convention (SMCCC). /// Since we currently rely on the existing OP-TEE driver which assumes SMCCC, we translate it into @@ -1270,9 +1390,30 @@ pub struct OpteeMsgArg { /// Specifically, OP-TEE SMC call uses up to nine CPU registers to pass arguments. /// However, since VTL call only supports up to four parameters, we allocate a VTL0 memory page and /// exchange all arguments through that memory page. +/// TODO: Since this is LVBS-specific structure to facilitate the translation between VTL call convention, +/// we might want to move it to the `litebox_platform_lvbs` crate later. 
+/// Also, we might need to document how to inteprete this structure by referencing `optee_smc.h` and +/// Arm's SMCCC. #[repr(align(4096))] #[derive(Clone, Copy)] #[repr(C)] +pub struct OpteeSmcArgsPage { + pub args: [usize; Self::NUM_OPTEE_SMC_ARGS], +} +impl OpteeSmcArgsPage { + const NUM_OPTEE_SMC_ARGS: usize = 9; +} + +impl From<&OpteeSmcArgsPage> for OpteeSmcArgs { + fn from(page: &OpteeSmcArgsPage) -> Self { + let mut smc = OpteeSmcArgs::default(); + smc.args.copy_from_slice(&page.args); + smc + } +} + +/// OP-TEE SMC call arguments. +#[derive(Clone, Copy, Default)] pub struct OpteeSmcArgs { args: [usize; Self::NUM_OPTEE_SMC_ARGS], } @@ -1280,35 +1421,29 @@ pub struct OpteeSmcArgs { impl OpteeSmcArgs { const NUM_OPTEE_SMC_ARGS: usize = 9; - pub fn arg_index(&self, index: usize) -> Option { - if index < Self::NUM_OPTEE_SMC_ARGS { - Some(self.args[index]) - } else { - None - } - } - /// Get the function ID of an OP-TEE SMC call - pub fn func_id(&self) -> Result { - OpteeSmcFunction::try_from(self.args[0] & OpteeSmcFunction::MASK).map_err(|_| Errno::EINVAL) + pub fn func_id(&self) -> Result { + OpteeSmcFunction::try_from(self.args[0] & OpteeSmcFunction::MASK) + .map_err(|_| OpteeSmcReturnCode::EBadCmd) } - /// Get the physical address of `OpteeMsgArg`. The secure world is expected to map and copy + /// Get the physical address of `OpteeMsgArgs`. The secure world is expected to map and copy /// this structure. - pub fn optee_msg_arg_phys_addr(&self) -> Result { + pub fn optee_msg_arg_phys_addr(&self) -> Result { // To avoid potential sign extension and overflow issues, OP-TEE stores the low and // high 32 bits of a 64-bit address in `args[2]` and `args[1]`, respectively. if self.args[1] & 0xffff_ffff_0000_0000 == 0 && self.args[2] & 0xffff_ffff_0000_0000 == 0 { let addr = (self.args[1] << 32) | self.args[2]; - Ok(addr) + Ok(addr as u64) } else { - Err(Errno::EINVAL) + Err(OpteeSmcReturnCode::EBadAddr) } } } /// `OPTEE_SMC_FUNCID_*` from `core/arch/arm/include/sm/optee_smc.h` /// TODO: Add stuffs based on the OP-TEE driver that LVBS is using. +const OPTEE_SMC_FUNCID_GET_OS_UUID: usize = 0x0; const OPTEE_SMC_FUNCID_GET_OS_REVISION: usize = 0x1; const OPTEE_SMC_FUNCID_CALL_WITH_ARG: usize = 0x4; const OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES: usize = 0x9; @@ -1322,6 +1457,7 @@ const OPTEE_SMC_FUNCID_CALLS_REVISION: usize = 0xff03; #[derive(PartialEq, TryFromPrimitive)] #[repr(usize)] pub enum OpteeSmcFunction { + GetOsUuid = OPTEE_SMC_FUNCID_GET_OS_UUID, GetOsRevision = OPTEE_SMC_FUNCID_GET_OS_REVISION, CallWithArg = OPTEE_SMC_FUNCID_CALL_WITH_ARG, ExchangeCapabilities = OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES, @@ -1338,64 +1474,102 @@ impl OpteeSmcFunction { /// OP-TEE SMC call result. /// OP-TEE SMC call uses CPU registers to pass input and output values. -/// Thus, this structure is technically equivalent to `OpteeSmcArgs`, but we separate them for clarity. -#[repr(align(4096))] -#[derive(Clone, Copy)] -#[repr(C)] -pub struct OpteeSmcResult { - args: [usize; Self::NUM_OPTEE_SMC_ARGS], -} - -impl OpteeSmcResult { - const NUM_OPTEE_SMC_ARGS: usize = 9; - - pub fn return_status(&mut self, status: OpteeSmcReturn) { - self.args[0] = status as usize; - } - - pub fn exchange_capabilities( - &mut self, - status: OpteeSmcReturn, +/// Thus, we convert this into `OpteeSmcArgs` later. 
+#[non_exhaustive] +pub enum OpteeSmcResult<'a> { + Generic { + status: OpteeSmcReturnCode, + }, + ExchangeCapabilities { + status: OpteeSmcReturnCode, capabilities: OpteeSecureWorldCapabilities, max_notif_value: usize, data: usize, - ) { - self.return_status(status); - self.args[1] = capabilities.bits(); - self.args[2] = max_notif_value; - self.args[3] = data; - } - - /// # Panics - /// panics if any element of `data` cannot be converted to `usize`. - pub fn uuid(&mut self, data: [u32; 4]) { - // OP-TEE doesn't use the high 32 bit of each argument to avoid sign extension and overflow issues. - self.args[0] = usize::try_from(data[0]).unwrap(); - self.args[1] = usize::try_from(data[1]).unwrap(); - self.args[2] = usize::try_from(data[2]).unwrap(); - self.args[3] = usize::try_from(data[3]).unwrap(); - } - - pub fn revision(&mut self, major: usize, minor: usize) { - self.args[0] = major; - self.args[1] = minor; - } - - pub fn os_revision(&mut self, major: usize, minor: usize, build_id: usize) { - self.args[0] = major; - self.args[1] = minor; - self.args[2] = build_id; - } - - pub fn disable_shm_cache( - &mut self, - status: OpteeSmcReturn, + }, + Uuid { + data: &'a [u32; 4], + }, + Revision { + major: usize, + minor: usize, + }, + OsRevision { + major: usize, + minor: usize, + build_id: usize, + }, + DisableShmCache { + status: OpteeSmcReturnCode, shm_upper32: usize, shm_lower32: usize, - ) { - self.args[0] = status as usize; - self.args[1] = shm_upper32; - self.args[2] = shm_lower32; + }, + CallWithArg { + msg_arg: Box, + }, +} + +impl From> for OpteeSmcArgs { + fn from(value: OpteeSmcResult) -> Self { + match value { + OpteeSmcResult::Generic { status } => { + let mut smc = OpteeSmcArgs::default(); + smc.args[0] = status as usize; + smc + } + OpteeSmcResult::ExchangeCapabilities { + status, + capabilities, + max_notif_value, + data, + } => { + let mut smc = OpteeSmcArgs::default(); + smc.args[0] = status as usize; + smc.args[1] = capabilities.bits(); + smc.args[2] = max_notif_value; + smc.args[3] = data; + smc + } + OpteeSmcResult::Uuid { data } => { + let mut smc = OpteeSmcArgs::default(); + for (i, arg) in smc.args.iter_mut().enumerate().take(4) { + *arg = data[i] as usize; + } + smc + } + OpteeSmcResult::Revision { major, minor } => { + let mut smc = OpteeSmcArgs::default(); + smc.args[0] = major; + smc.args[1] = minor; + smc + } + OpteeSmcResult::OsRevision { + major, + minor, + build_id, + } => { + let mut smc = OpteeSmcArgs::default(); + smc.args[0] = major; + smc.args[1] = minor; + smc.args[2] = build_id; + smc + } + OpteeSmcResult::DisableShmCache { + status, + shm_upper32, + shm_lower32, + } => { + let mut smc = OpteeSmcArgs::default(); + smc.args[0] = status as usize; + smc.args[1] = shm_upper32; + smc.args[2] = shm_lower32; + smc + } + OpteeSmcResult::CallWithArg { .. } => { + panic!( + "OpteeSmcResult::CallWithArg cannot be converted to OpteeSmcArgs directly. Handle the incorporated OpteeMsgArgs." 
+ ); + } + } } } @@ -1425,7 +1599,7 @@ const OPTEE_SMC_RETURN_UNKNOWN_FUNCTION: usize = 0xffff_ffff; #[non_exhaustive] #[derive(Copy, Clone, PartialEq, TryFromPrimitive)] #[repr(usize)] -pub enum OpteeSmcReturn { +pub enum OpteeSmcReturnCode { Ok = OPTEE_SMC_RETURN_OK, EThreadLimit = OPTEE_SMC_RETURN_ETHREAD_LIMIT, EBusy = OPTEE_SMC_RETURN_EBUSY, diff --git a/litebox_platform_linux_userland/src/lib.rs b/litebox_platform_linux_userland/src/lib.rs index bc7657fd8..cd1692b06 100644 --- a/litebox_platform_linux_userland/src/lib.rs +++ b/litebox_platform_linux_userland/src/lib.rs @@ -18,7 +18,9 @@ use litebox::platform::page_mgmt::{FixedAddressBehavior, MemoryRegionPermissions use litebox::platform::{ImmediatelyWokenUp, RawConstPointer as _}; use litebox::shim::ContinueOperation; use litebox::utils::{ReinterpretSignedExt, ReinterpretUnsignedExt as _, TruncateExt}; -use litebox_common_linux::{MRemapFlags, MapFlags, ProtFlags, PunchthroughSyscall}; +use litebox_common_linux::{ + MRemapFlags, MapFlags, ProtFlags, PunchthroughSyscall, vmap::VmapManager, +}; mod syscall_intercept; @@ -2187,6 +2189,13 @@ impl litebox::platform::CrngProvider for LinuxUserland { } } +/// Dummy `VmapManager`. +/// +/// In general, userland platforms do not support `vmap` and `vunmap` (which are kernel functions). +/// We might need to emulate these functions' behaviors using virtual addresses for development or +/// testing, or use a kernel module to provide this functionality (if needed). +impl VmapManager for LinuxUserland {} + #[cfg(test)] mod tests { use core::sync::atomic::AtomicU32; diff --git a/litebox_platform_lvbs/src/lib.rs b/litebox_platform_lvbs/src/lib.rs index e824d92a1..49c69bbc7 100644 --- a/litebox_platform_lvbs/src/lib.rs +++ b/litebox_platform_lvbs/src/lib.rs @@ -24,7 +24,14 @@ use litebox::platform::{ PunchthroughProvider, PunchthroughToken, RawMutex as _, RawPointerProvider, }; use litebox::{mm::linux::PageRange, platform::page_mgmt::FixedAddressBehavior}; -use litebox_common_linux::{PunchthroughSyscall, errno::Errno}; +use litebox_common_linux::{ + PunchthroughSyscall, + errno::Errno, + vmap::{ + PhysPageAddr, PhysPageAddrArray, PhysPageMapInfo, PhysPageMapPermissions, PhysPointerError, + VmapManager, + }, +}; use x86_64::structures::paging::{ PageOffset, PageSize, PageTableFlags, PhysFrame, Size4KiB, frame::PhysFrameRange, mapper::MapToError, @@ -760,6 +767,149 @@ impl litebox::platform::SystemInfoProvider for LinuxKernel< } } +/// Checks whether the given physical addresses are contiguous with respect to ALIGN. +/// +/// Note: This is a temporary check to let `VmapManager` work with this platform +/// which does not yet support virtually contiguous mapping of non-contiguous physical pages +/// (for now, it maps physical pages with a fixed offset). +#[cfg(feature = "optee_syscall")] +fn check_contiguity( + addrs: &[PhysPageAddr], +) -> Result<(), PhysPointerError> { + for window in addrs.windows(2) { + let first = window[0].as_usize(); + let second = window[1].as_usize(); + if second != first.checked_add(ALIGN).ok_or(PhysPointerError::Overflow)? { + return Err(PhysPointerError::NonContiguousPages); + } + } + Ok(()) +} + +#[cfg(feature = "optee_syscall")] +impl VmapManager for LinuxKernel { + unsafe fn vmap( + &self, + pages: &PhysPageAddrArray, + perms: PhysPageMapPermissions, + ) -> Result, PhysPointerError> { + // TODO: Remove this check once this platform supports virtually contiguous + // non-contiguous physical page mapping. 
+ check_contiguity(pages)?; + + if pages.is_empty() { + return Err(PhysPointerError::InvalidPhysicalAddress(0)); + } + let phys_start = x86_64::PhysAddr::new(pages[0].as_usize() as u64); + let phys_end = x86_64::PhysAddr::new( + pages + .last() + .unwrap() + .as_usize() + .checked_add(ALIGN) + .ok_or(PhysPointerError::Overflow)? as u64, + ); + let frame_range = if ALIGN == PAGE_SIZE { + PhysFrame::range( + PhysFrame::::containing_address(phys_start), + PhysFrame::::containing_address(phys_end), + ) + } else { + unimplemented!("ALIGN other than 4KiB is not supported yet") + }; + + let mut flags = PageTableFlags::PRESENT; + if perms.contains(PhysPageMapPermissions::WRITE) { + flags |= PageTableFlags::WRITABLE; + } + + if let Ok(page_addr) = self.page_table.map_phys_frame_range(frame_range, flags) { + Ok(PhysPageMapInfo { + base: page_addr, + size: pages.len() * ALIGN, + }) + } else { + Err(PhysPointerError::InvalidPhysicalAddress( + pages[0].as_usize(), + )) + } + } + + unsafe fn vunmap(&self, vmap_info: PhysPageMapInfo) -> Result<(), PhysPointerError> { + if ALIGN == PAGE_SIZE { + let Some(page_range) = PageRange::::new( + vmap_info.base as usize, + vmap_info.base.wrapping_add(vmap_info.size) as usize, + ) else { + return Err(PhysPointerError::UnalignedPhysicalAddress( + vmap_info.base as usize, + ALIGN, + )); + }; + unsafe { + self.page_table + .unmap_pages(page_range, false) + .map_err(|_| PhysPointerError::Unmapped(vmap_info.base as usize)) + } + } else { + unimplemented!("ALIGN other than 4KiB is not supported yet") + } + } + + fn validate_unowned(&self, pages: &PhysPageAddrArray) -> Result<(), PhysPointerError> { + if pages.is_empty() { + return Ok(()); + } + let start_address = self.vtl1_phys_frame_range.start.start_address().as_u64(); + let end_address = self.vtl1_phys_frame_range.end.start_address().as_u64(); + for page in pages { + let addr = page.as_usize() as u64; + // a physical page belonging to LiteBox (VTL1) should not be used for `vmap` + if addr >= start_address && addr < end_address { + return Err(PhysPointerError::InvalidPhysicalAddress(page.as_usize())); + } + } + Ok(()) + } + + unsafe fn protect( + &self, + pages: &PhysPageAddrArray, + perms: PhysPageMapPermissions, + ) -> Result<(), PhysPointerError> { + let phys_start = x86_64::PhysAddr::new(pages[0].as_usize() as u64); + let phys_end = x86_64::PhysAddr::new( + pages + .last() + .unwrap() + .as_usize() + .checked_add(ALIGN) + .ok_or(PhysPointerError::Overflow)? as u64, + ); + let frame_range = if ALIGN == PAGE_SIZE { + PhysFrame::range( + PhysFrame::::containing_address(phys_start), + PhysFrame::::containing_address(phys_end), + ) + } else { + unimplemented!("ALIGN other than 4KiB is not supported yet") + }; + + let mem_attr = if perms.contains(PhysPageMapPermissions::WRITE) { + // VTL1 wants to write data to the pages, preventing VTL0 from reading/executing the pages. + crate::mshv::heki::MemAttr::empty() + } else if perms.contains(PhysPageMapPermissions::READ) { + // VTL1 wants to read data from the pages, preventing VTL0 from writing to the pages. + crate::mshv::heki::MemAttr::MEM_ATTR_READ | crate::mshv::heki::MemAttr::MEM_ATTR_EXEC + } else { + // VTL1 no longer protects the pages. + crate::mshv::heki::MemAttr::all() + }; + crate::mshv::vsm::protect_physical_memory_range(frame_range, mem_attr) + .map_err(|_| PhysPointerError::UnsupportedPermissions(perms.bits())) + } +} + // NOTE: The below code is a naive workaround to let LVBS code to access the platform. 
// Rather than doing this, we should implement LVBS interface/provider for the platform. diff --git a/litebox_platform_lvbs/src/mshv/mod.rs b/litebox_platform_lvbs/src/mshv/mod.rs index 71f1a8835..e5ba8f013 100644 --- a/litebox_platform_lvbs/src/mshv/mod.rs +++ b/litebox_platform_lvbs/src/mshv/mod.rs @@ -3,7 +3,7 @@ //! Hyper-V-specific code -mod heki; +pub(crate) mod heki; pub mod hvcall; mod hvcall_mm; mod hvcall_vp; diff --git a/litebox_platform_lvbs/src/mshv/vsm.rs b/litebox_platform_lvbs/src/mshv/vsm.rs index 308ff3f01..07eed67c0 100644 --- a/litebox_platform_lvbs/src/mshv/vsm.rs +++ b/litebox_platform_lvbs/src/mshv/vsm.rs @@ -1320,7 +1320,7 @@ fn copy_heki_pages_from_vtl0(pa: u64, nranges: u64) -> Option> { /// `phys_frame_range` specifies the physical frame range to protect /// `mem_attr` specifies the memory attributes to be applied to the range #[inline] -fn protect_physical_memory_range( +pub(crate) fn protect_physical_memory_range( phys_frame_range: PhysFrameRange, mem_attr: MemAttr, ) -> Result<(), Errno> { diff --git a/litebox_shim_optee/src/lib.rs b/litebox_shim_optee/src/lib.rs index 6f8af863a..b846a3a1c 100644 --- a/litebox_shim_optee/src/lib.rs +++ b/litebox_shim_optee/src/lib.rs @@ -33,6 +33,9 @@ use litebox_platform_multiplex::Platform; pub mod loader; pub(crate) mod syscalls; +pub mod msg_handler; +pub mod ptr; + const MAX_KERNEL_BUF_SIZE: usize = 0x80_000; pub struct OpteeShimEntrypoints { @@ -1211,6 +1214,9 @@ impl Default for SessionIdPool { } } +pub type NormalWorldConstPtr = crate::ptr::PhysConstPtr; +pub type NormalWorldMutPtr = crate::ptr::PhysMutPtr; + #[cfg(test)] mod test_utils { use super::*; diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs new file mode 100644 index 000000000..158b16c3c --- /dev/null +++ b/litebox_shim_optee/src/msg_handler.rs @@ -0,0 +1,305 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +//! OP-TEE's message passing is a bit complex because it involves with multiple actors +//! (normal world: client app and driver; secure world: OP-TEE OS and TAs), +//! consists multiple layers, and relies on shared memory references (i.e., no serialization). +//! +//! Since the normal world is out of LiteBox's scope, the OP-TEE shim starts with handling +//! an OP-TEE SMC call from the normal-world OP-TEE driver which consists of +//! up to nine register values. By checking the SMC function ID, the shim determines whether +//! it is for passing an OP-TEE message or a pure SMC function call (e.g., get OP-TEE OS +//! version). If it is for passing an OP-TEE message/command, the shim accesses a normal world +//! physical address containing `OpteeMsgArgs` structure (the address is contained in +//! the SMC call arguments). This `OpteeMsgArgs` structure may contain references to normal +//! world physical addresses to exchange a large amount of data. Also, like the OP-TEE +//! SMC call, some OP-TEE messages/commands target OP-TEE shim not TAs (e.g., register +//! shared memory). 
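The flow just described can be summarized with a short sketch (editor's illustration, not part of the patch): a hypothetical platform-side `dispatch_smc` drives the handlers defined in this module; writing the updated `OpteeMsgArgs` back to VTL0 and the `handle_ta_request` path (still `todo!` in this patch) are elided.

```rust
use litebox_common_optee::{OpteeSmcArgs, OpteeSmcResult, OpteeSmcReturnCode};

// Hypothetical entry point: the platform hands over the nine SMC register values.
fn dispatch_smc(mut smc: OpteeSmcArgs) -> OpteeSmcArgs {
    match handle_optee_smc_args(&mut smc) {
        // The SMC carried an `OpteeMsgArgs`: shared-memory (un)registration is handled in
        // place; TA-bound commands come back as `Err(OpteeSmcReturnCode::Ok)` and would be
        // forwarded to `handle_ta_request`.
        Ok(OpteeSmcResult::CallWithArg { msg_arg }) => {
            let status = match handle_optee_msg_arg(&msg_arg) {
                Ok(()) => OpteeSmcReturnCode::Ok,
                Err(code) => code,
            };
            OpteeSmcResult::Generic { status }.into()
        }
        // Pure SMC functions (UID, revisions, capabilities, ...) already carry their result.
        Ok(result) => result.into(),
        Err(code) => OpteeSmcResult::Generic { status: code }.into(),
    }
}
```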
+use crate::NormalWorldConstPtr; +use alloc::{boxed::Box, vec::Vec}; +use hashbrown::HashMap; +use litebox::{mm::linux::PAGE_SIZE, utils::TruncateExt}; +use litebox_common_linux::vmap::PhysPageAddr; +use litebox_common_optee::{ + OpteeMessageCommand, OpteeMsgArgs, OpteeSecureWorldCapabilities, OpteeSmcArgs, + OpteeSmcFunction, OpteeSmcResult, OpteeSmcReturnCode, +}; +use once_cell::race::OnceBox; + +// OP-TEE version and build info (2.0) +// TODO: Consider replacing it with our own version info +const OPTEE_MSG_REVISION_MAJOR: usize = 2; +const OPTEE_MSG_REVISION_MINOR: usize = 0; +const OPTEE_MSG_BUILD_ID: usize = 0; + +// This UID is from OP-TEE OS +// TODO: Consider replacing it with our own UID +const OPTEE_MSG_UID_0: u32 = 0x384f_b3e0; +const OPTEE_MSG_UID_1: u32 = 0xe7f8_11e3; +const OPTEE_MSG_UID_2: u32 = 0xaf63_0002; +const OPTEE_MSG_UID_3: u32 = 0xa5d5_c51b; + +// This is the UUID of OP-TEE Trusted OS +// TODO: Consider replacing it with our own UUID +const OPTEE_MSG_OS_OPTEE_UUID_0: u32 = 0x4861_78e0; +const OPTEE_MSG_OS_OPTEE_UUID_1: u32 = 0xe7f8_11e3; +const OPTEE_MSG_OS_OPTEE_UUID_2: u32 = 0xbc5e_0002; +const OPTEE_MSG_OS_OPTEE_UUID_3: u32 = 0xa5d5_c51b; + +// We do not support notification for now +const MAX_NOTIF_VALUE: usize = 0; +const NUM_RPC_PARMS: usize = 4; + +#[inline] +fn page_align_down(address: u64) -> u64 { + address & !(PAGE_SIZE as u64 - 1) +} + +#[inline] +fn page_align_up(len: u64) -> u64 { + len.next_multiple_of(PAGE_SIZE as u64) +} + +/// This function handles `OpteeSmcArgs` passed from the normal world (VTL0) via an OP-TEE SMC call. +/// It returns an `OpteeSmcResult` representing the result of the SMC call or `OpteeMsgArgs` it contains +/// if the SMC call involves with an OP-TEE message which should be handled by +/// `handle_optee_msg_arg` or `handle_ta_request`. +pub fn handle_optee_smc_args( + smc: &mut OpteeSmcArgs, +) -> Result, OpteeSmcReturnCode> { + let func_id = smc.func_id()?; + match func_id { + OpteeSmcFunction::CallWithArg + | OpteeSmcFunction::CallWithRpcArg + | OpteeSmcFunction::CallWithRegdArg => { + let msg_arg_addr = smc.optee_msg_arg_phys_addr()?; + let msg_arg_addr: usize = msg_arg_addr.truncate(); + let mut ptr = NormalWorldConstPtr::::with_usize(msg_arg_addr) + .map_err(|_| OpteeSmcReturnCode::EBadAddr)?; + let msg_arg = + unsafe { ptr.read_at_offset(0) }.map_err(|_| OpteeSmcReturnCode::EBadAddr)?; + Ok(OpteeSmcResult::CallWithArg { + msg_arg: Box::new(*msg_arg), + }) + } + OpteeSmcFunction::ExchangeCapabilities => { + // TODO: update the below when we support more features + let default_cap = OpteeSecureWorldCapabilities::DYNAMIC_SHM + | OpteeSecureWorldCapabilities::MEMREF_NULL + | OpteeSecureWorldCapabilities::RPC_ARG; + Ok(OpteeSmcResult::ExchangeCapabilities { + status: OpteeSmcReturnCode::Ok, + capabilities: default_cap, + max_notif_value: MAX_NOTIF_VALUE, + data: NUM_RPC_PARMS, + }) + } + OpteeSmcFunction::DisableShmCache => { + // Currently, we do not support this feature. 
+ Ok(OpteeSmcResult::DisableShmCache { + status: OpteeSmcReturnCode::ENotAvail, + shm_upper32: 0, + shm_lower32: 0, + }) + } + OpteeSmcFunction::GetOsUuid => Ok(OpteeSmcResult::Uuid { + data: &[ + OPTEE_MSG_OS_OPTEE_UUID_0, + OPTEE_MSG_OS_OPTEE_UUID_1, + OPTEE_MSG_OS_OPTEE_UUID_2, + OPTEE_MSG_OS_OPTEE_UUID_3, + ], + }), + OpteeSmcFunction::CallsUid => Ok(OpteeSmcResult::Uuid { + data: &[ + OPTEE_MSG_UID_0, + OPTEE_MSG_UID_1, + OPTEE_MSG_UID_2, + OPTEE_MSG_UID_3, + ], + }), + OpteeSmcFunction::GetOsRevision => Ok(OpteeSmcResult::OsRevision { + major: OPTEE_MSG_REVISION_MAJOR, + minor: OPTEE_MSG_REVISION_MINOR, + build_id: OPTEE_MSG_BUILD_ID, + }), + OpteeSmcFunction::CallsRevision => Ok(OpteeSmcResult::Revision { + major: OPTEE_MSG_REVISION_MAJOR, + minor: OPTEE_MSG_REVISION_MINOR, + }), + _ => Err(OpteeSmcReturnCode::UnknownFunction), + } +} + +/// This function handles an OP-TEE message contained in `OpteeMsgArgs`. +/// Currently, it only handles shared memory registration and unregistration. +/// If an OP-TEE message involves with a TA request, it simply returns +/// `Err(OpteeSmcReturnCode::Ok)` while expecting that the caller will handle +/// the message with `handle_ta_request`. +pub fn handle_optee_msg_arg(msg_arg: &OpteeMsgArgs) -> Result<(), OpteeSmcReturnCode> { + msg_arg.validate()?; + match msg_arg.cmd { + OpteeMessageCommand::RegisterShm => { + let tmem = msg_arg.get_param_tmem(0)?; + if tmem.buf_ptr == 0 || tmem.size == 0 || tmem.shm_ref == 0 { + return Err(OpteeSmcReturnCode::EBadAddr); + } + // `tmem.buf_ptr` encodes two different information: + // - The physical page address of the first `ShmRefPagesData` + // - The page offset of the first shared memory page (`pages_list[0]`) + let shm_ref_pages_data_phys_addr = page_align_down(tmem.buf_ptr); + let page_offset = tmem.buf_ptr - shm_ref_pages_data_phys_addr; + let aligned_size = page_align_up(page_offset + tmem.size); + shm_ref_map().register_shm( + shm_ref_pages_data_phys_addr, + page_offset, + aligned_size, + tmem.shm_ref, + )?; + } + OpteeMessageCommand::UnregisterShm => { + let tmem = msg_arg.get_param_tmem(0)?; + if tmem.shm_ref == 0 { + return Err(OpteeSmcReturnCode::EBadAddr); + } + shm_ref_map() + .remove(tmem.shm_ref) + .ok_or(OpteeSmcReturnCode::EBadAddr)?; + } + OpteeMessageCommand::OpenSession + | OpteeMessageCommand::InvokeCommand + | OpteeMessageCommand::CloseSession => return Err(OpteeSmcReturnCode::Ok), + _ => { + todo!("Unimplemented OpteeMessageCommand: {:?}", msg_arg.cmd); + } + } + Ok(()) +} + +/// This function handles a TA request contained in `OpteeMsgArgs` +pub fn handle_ta_request(_msg_arg: &OpteeMsgArgs) -> Result { + todo!() +} + +/// A scatter-gather list of OP-TEE physical page addresses in the normal world (VTL0) to +/// share with the secure world (VTL1). Each [`ShmRefPagesData`] occupies one memory page +/// where `pages_list` contains a list of physical page addresses and `next_page_data` +/// contains the physical address of the next [`ShmRefPagesData`] if any. Entries of `pages_list` +/// and `next_page_data` contain zero if the list ends. These physical page addresses are +/// virtually contiguous in the normal world. All these address values must be page aligned. 
+/// +/// `pages_data` from [Linux](https://elixir.bootlin.com/linux/v6.18.2/source/drivers/tee/optee/smc_abi.c#L409) +#[derive(Clone, Copy)] +#[repr(C)] +struct ShmRefPagesData { + pub pages_list: [u64; Self::PAGELIST_ENTRIES_PER_PAGE], + pub next_page_data: u64, +} +impl ShmRefPagesData { + const PAGELIST_ENTRIES_PER_PAGE: usize = + PAGE_SIZE / core::mem::size_of::() - core::mem::size_of::(); +} + +/// Data structure to maintain the information of OP-TEE shared memory in VTL0 referenced by `shm_ref`. +/// `pages` contains an array of physical page addresses. +/// `page_offset` indicates the page offset of the first page (i.e., `pages[0]`) which should be +/// smaller than `ALIGN`. +#[expect(unused)] +#[derive(Clone)] +struct ShmRefInfo { + pub pages: Box<[PhysPageAddr]>, + pub page_offset: usize, +} + +/// Maintain the information of OP-TEE shared memory in VTL0 referenced by `shm_ref`. +/// This data structure is for registering shared memory regions before they are +/// used during OP-TEE calls with parameters referencing shared memory. +/// Any normal memory references without this registration will be rejected. +struct ShmRefMap { + inner: spin::mutex::SpinMutex>>, +} + +impl ShmRefMap { + pub fn new() -> Self { + Self { + inner: spin::mutex::SpinMutex::new(HashMap::new()), + } + } + + pub fn insert(&self, shm_ref: u64, info: ShmRefInfo) -> Result<(), OpteeSmcReturnCode> { + let mut guard = self.inner.lock(); + if guard.contains_key(&shm_ref) { + Err(OpteeSmcReturnCode::ENotAvail) + } else { + let _ = guard.insert(shm_ref, info); + Ok(()) + } + } + + pub fn remove(&self, shm_ref: u64) -> Option> { + let mut guard = self.inner.lock(); + guard.remove(&shm_ref) + } + + #[expect(unused)] + pub fn get(&self, shm_ref: u64) -> Option> { + let guard = self.inner.lock(); + guard.get(&shm_ref).cloned() + } + + /// This function registers shared memory information that the normal world (VTL0) provides. + /// Specifically, it walks through a linked list of [`ShmRefPagesData`] structures referenced by + /// `shm_ref_pages_data_phys_addr` to create a slice of the shared physical page addresses + /// and registers the slice with `shm_ref` as its identifier. `page_offset` indicates + /// the page offset of the first page (i.e., `pages_list[0]` of the first [`ShmRefPagesData`]). + /// `aligned_size` indicates the page-aligned size of the shared memory region to register. 
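A concrete instance of these parameters may help (editor's illustration with hypothetical values, assuming 4 KiB pages; it follows the decoding performed in `handle_optee_msg_arg` and uses this module's `page_align_down`/`page_align_up` helpers):

```rust
// Suppose VTL0 registers a 0x2000-byte buffer and the tmem parameter carries
// buf_ptr = 0x8000_0123 (scatter-gather list page address + in-page offset of the buffer).
fn shm_registration_example() {
    let buf_ptr: u64 = 0x8000_0123;
    let size: u64 = 0x2000;
    let shm_ref_pages_data_phys_addr = page_align_down(buf_ptr); // 0x8000_0000
    let page_offset = buf_ptr - shm_ref_pages_data_phys_addr; // 0x123
    let aligned_size = page_align_up(page_offset + size); // 0x3000, i.e. three 4 KiB pages
    assert_eq!(aligned_size as usize / PAGE_SIZE, 3);
}
```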
+ pub fn register_shm( + &self, + shm_ref_pages_data_phys_addr: u64, + page_offset: u64, + aligned_size: u64, + shm_ref: u64, + ) -> Result<(), OpteeSmcReturnCode> { + if page_offset >= ALIGN as u64 || aligned_size == 0 { + return Err(OpteeSmcReturnCode::EBadAddr); + } + let num_pages = usize::try_from(aligned_size).unwrap() / ALIGN; + let mut pages = Vec::with_capacity(num_pages); + let mut cur_addr = usize::try_from(shm_ref_pages_data_phys_addr).unwrap(); + loop { + let mut cur_ptr = NormalWorldConstPtr::::with_usize(cur_addr) + .map_err(|_| OpteeSmcReturnCode::EBadAddr)?; + let pages_data = + unsafe { cur_ptr.read_at_offset(0) }.map_err(|_| OpteeSmcReturnCode::EBadAddr)?; + for page in &pages_data.pages_list { + if *page == 0 || pages.len() == num_pages { + break; + } else { + pages.push( + PhysPageAddr::new(usize::try_from(*page).unwrap()) + .ok_or(OpteeSmcReturnCode::EBadAddr)?, + ); + } + } + if pages_data.next_page_data == 0 || pages.len() == num_pages { + break; + } else { + cur_addr = usize::try_from(pages_data.next_page_data).unwrap(); + } + } + + self.insert( + shm_ref, + ShmRefInfo { + pages: pages.into_boxed_slice(), + page_offset: usize::try_from(page_offset).unwrap(), + }, + )?; + Ok(()) + } +} + +fn shm_ref_map() -> &'static ShmRefMap { + static SHM_REF_MAP: OnceBox> = OnceBox::new(); + SHM_REF_MAP.get_or_init(|| Box::new(ShmRefMap::new())) +} diff --git a/litebox_shim_optee/src/ptr.rs b/litebox_shim_optee/src/ptr.rs new file mode 100644 index 000000000..a19973369 --- /dev/null +++ b/litebox_shim_optee/src/ptr.rs @@ -0,0 +1,570 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +//! Physical Pointer Abstraction with On-demand Mapping +//! +//! This module adds supports for accessing physical addresses (e.g., VTL0 or +//! normal-world physical memory) from LiteBox with on-demand mapping. +//! In the context of LVBS and OP-TEE, accessing physical memory is necessary +//! because VTL0 and VTL1 as well as normal world and secure world do not share +//! the same virtual address space, but they still have to share data through memory. +//! VTL1 and secure world receive physical addresses from VTL0 and normal world, +//! respectively, and they need to read from or write to those addresses. +//! +//! To simplify all these, we could persistently map the entire VTL0/normal-world +//! physical memory into VTL1/secure-world address space at once and just access them +//! through corresponding virtual addresses. However, this module does not take these +//! approaches due to scalability (e.g., how to deal with a system with terabytes of +//! physical memory?) and security concerns (e.g., data corruption or information +//! leakage due to concurrent or persistent access). +//! +//! Instead, the approach this module takes is to map the required physical memory +//! region on-demand when accessing them while using a LiteBox-owned buffer to copy +//! data to/from those regions. This way, this module can ensure that data must be +//! copied into LiteBox-owned memory before being used while avoiding any unknown +//! side effects due to persistent memory mapping. +//! +//! Considerations: +//! +//! Ideally, this module should be able to validate whether a given physical address +//! is okay to access or even exists in the first place. For example, accessing +//! LiteBox's own memory with this physical pointer abstraction must be prohibited to +//! prevent the Boomerang attack and any other undefined memory access. Also, some +//! 
device memory is mapped to certain physical address ranges and LiteBox should not +//! touch them without in-depth knowledge. However, this is a bit tricky because, in +//! many cases, LiteBox does not directly interact with the underlying hardware or +//! BIOS/UEFI such that it does not have complete knowledge of the physical memory +//! layout. In the case of LVBS, LiteBox obtains the physical memory information +//! from VTL0 including the total physical memory size and the memory range assigned +//! to VTL1/LiteBox. Thus, this module can at least confirm a given physical address +//! does not belong to VTL1's physical memory. +//! +//! This module should allow byte-level access while transparently handling page +//! mapping and data access across page boundaries. This could become complicated +//! when we consider multiple page sizes (e.g., 4 KiB, 2 MiB, 1 GiB). Also, +//! unaligned access is a matter to be considered. +//! +//! In addition, often times, this physical pointer abstraction is involved with +//! a list of physical addresses (i.e., scatter-gather list). For example, in +//! the worse case, a two-byte value can span across two non-contiguous physical +//! pages (the last byte of the first page and the first byte of the second page). +//! Thus, to enhance the performance, we may need to consider mapping multiple pages +//! at once, copy data from/to them, and unmap them later. +//! +//! When this module needs to access data across physical page boundaries, it assumes +//! that those physical pages are virtually contiguous in VTL0 or normal-world address +//! space. Otherwise, this module could end up with accessing misordered data. This is +//! best-effort assumption and ensuring this is the caller's responsibility (e.g., even +//! if this module always requires a list of physical addresses, the caller might +//! provide a wrong list by mistake or intentionally). + +// TODO: Since the below `PhysMutPtr` and `PhysConstPtr` are not OP-TEE specific, +// we can move them to a different crate (e.g., `litebox`) if needed. + +use litebox_common_linux::vmap::{ + PhysPageAddr, PhysPageMapInfo, PhysPageMapPermissions, PhysPointerError, VmapManager, +}; +use litebox_platform_multiplex::platform; + +#[inline] +fn align_down(address: usize, align: usize) -> usize { + address & !(align - 1) +} + +#[inline] +fn align_up(len: usize, align: usize) -> usize { + len.next_multiple_of(align) +} + +/// Represent a physical pointer to an object with on-demand mapping. +/// - `pages`: An array of page-aligned physical addresses. We expect physical addresses in this array are +/// virtually contiguous. +/// - `offset`: The offset within `pages[0]` where the object starts. It should be smaller than `ALIGN`. +/// - `count`: The number of objects of type `T` that can be accessed from this pointer. +/// - `map_info`: The mapping information of the currently mapped physical pages, if any. +/// - `T`: The type of the object being pointed to. `pages` with respect to `offset` should cover enough +/// memory for an object of type `T`. +#[derive(Clone)] +#[repr(C)] +pub struct PhysMutPtr { + pages: alloc::boxed::Box<[PhysPageAddr]>, + offset: usize, + count: usize, + map_info: Option>, + _type: core::marker::PhantomData, +} + +impl PhysMutPtr { + /// Create a new `PhysMutPtr` from the given physical page array and offset. + /// + /// All addresses in `pages` should be valid and aligned to `ALIGN`, and `offset` should be + /// smaller than `ALIGN`. 
Also, `pages` should contain enough pages to cover at least one + /// object of type `T` starting from `offset`. If these conditions are not met, this function + /// returns `Err(PhysPointerError)`. + pub fn new(pages: &[PhysPageAddr], offset: usize) -> Result { + if offset >= ALIGN { + return Err(PhysPointerError::InvalidBaseOffset(offset, ALIGN)); + } + let size = if pages.is_empty() { + 0 + } else { + pages + .len() + .checked_mul(ALIGN) + .ok_or(PhysPointerError::Overflow)? + - offset + }; + if size < core::mem::size_of::() { + return Err(PhysPointerError::InsufficientPhysicalPages( + size, + core::mem::size_of::(), + )); + } + platform().validate_unowned(pages)?; + Ok(Self { + pages: pages.into(), + offset, + count: size / core::mem::size_of::(), + map_info: None, + _type: core::marker::PhantomData, + }) + } + + /// Create a new `PhysMutPtr` from the given contiguous physical address and length. + /// + /// This is a shortcut for + /// `PhysMutPtr::new([align_down(pa), align_down(pa) + ALIGN, ..., align_up(pa + bytes) - ALIGN], pa % ALIGN)`. + /// This function assumes that `pa`, ..., `pa+bytes` are both physically and virtually contiguous. If not, + /// later accesses through `PhysMutPtr` may read/write data in a wrong order. + pub fn with_contiguous_pages(pa: usize, bytes: usize) -> Result { + if bytes < core::mem::size_of::() { + return Err(PhysPointerError::InsufficientPhysicalPages( + bytes, + core::mem::size_of::(), + )); + } + let start_page = align_down(pa, ALIGN); + let end_page = align_up( + pa.checked_add(bytes).ok_or(PhysPointerError::Overflow)?, + ALIGN, + ); + let mut pages = alloc::vec::Vec::with_capacity((end_page - start_page) / ALIGN); + let mut current_page = start_page; + while current_page < end_page { + pages.push( + PhysPageAddr::::new(current_page) + .ok_or(PhysPointerError::InvalidPhysicalAddress(current_page))?, + ); + current_page += ALIGN; + } + Self::new(&pages, pa - start_page) + } + + /// Create a new `PhysMutPtr` from the given physical address for a single object. + /// + /// This is a shortcut for `PhysMutPtr::with_contiguous_pages(pa, size_of::())`. + /// + /// Note: This module doesn't provide `as_usize` because LiteBox should not dereference physical addresses directly. + pub fn with_usize(pa: usize) -> Result { + Self::with_contiguous_pages(pa, core::mem::size_of::()) + } + + /// Read the value at the given offset from the physical pointer. + /// + /// # Safety + /// + /// The caller should be aware that the given physical address might be concurrently written by + /// other entities (e.g., the normal world kernel) if there is no extra security mechanism + /// in place (e.g., by the hypervisor or hardware). That is, it might read corrupt data. 
+ pub unsafe fn read_at_offset( + &mut self, + count: usize, + ) -> Result, PhysPointerError> { + if count >= self.count { + return Err(PhysPointerError::IndexOutOfBounds(count, self.count)); + } + let skip = self + .offset + .checked_add( + count + .checked_mul(core::mem::size_of::()) + .ok_or(PhysPointerError::Overflow)?, + ) + .ok_or(PhysPointerError::Overflow)?; + let start = skip / ALIGN; + let end = (skip + core::mem::size_of::()).div_ceil(ALIGN); + unsafe { + self.map_range(start, end, PhysPageMapPermissions::READ)?; + } + // Don't forget to call unmap() before returning to the caller + let Some(src) = self.base_ptr() else { + unsafe { + self.unmap()?; + } + return Err(PhysPointerError::NoMappingInfo); + }; + let src = src.wrapping_add(count); + let val = { + let mut buffer = core::mem::MaybeUninit::::uninit(); + if (src as usize).is_multiple_of(core::mem::align_of::()) { + unsafe { + core::ptr::copy_nonoverlapping(src, buffer.as_mut_ptr(), 1); + } + } else { + unsafe { + core::ptr::copy_nonoverlapping( + src.cast::(), + buffer.as_mut_ptr().cast::(), + core::mem::size_of::(), + ); + } + } + unsafe { buffer.assume_init() } + }; + unsafe { + self.unmap()?; + } + Ok(alloc::boxed::Box::new(val)) + } + + /// Read a slice of values at the given offset from the physical pointer. + /// + /// # Safety + /// + /// The caller should be aware that the given physical address might be concurrently written by + /// other entities (e.g., the normal world kernel) if there is no extra security mechanism + /// in place (e.g., by the hypervisor or hardware). That is, it might read corrupt data. + pub unsafe fn read_slice_at_offset( + &mut self, + count: usize, + values: &mut [T], + ) -> Result<(), PhysPointerError> { + if count + .checked_add(values.len()) + .is_none_or(|end| end > self.count) + { + return Err(PhysPointerError::IndexOutOfBounds(count, self.count)); + } + let skip = self + .offset + .checked_add( + count + .checked_mul(core::mem::size_of::()) + .ok_or(PhysPointerError::Overflow)?, + ) + .ok_or(PhysPointerError::Overflow)?; + let start = skip / ALIGN; + let end = (skip + core::mem::size_of_val(values)).div_ceil(ALIGN); + unsafe { + self.map_range(start, end, PhysPageMapPermissions::READ)?; + } + // Don't forget to call unmap() before returning to the caller + let Some(src) = self.base_ptr() else { + unsafe { + self.unmap()?; + } + return Err(PhysPointerError::NoMappingInfo); + }; + let src = src.wrapping_add(count); + if (src as usize).is_multiple_of(core::mem::align_of::()) { + unsafe { + core::ptr::copy_nonoverlapping(src, values.as_mut_ptr(), values.len()); + } + } else { + unsafe { + core::ptr::copy_nonoverlapping( + src.cast::(), + values.as_mut_ptr().cast::(), + core::mem::size_of_val(values), + ); + } + } + unsafe { + self.unmap()?; + } + Ok(()) + } + + /// Write the value at the given offset to the physical pointer. + /// + /// # Safety + /// + /// The caller should be aware that the given physical address might be concurrently written by + /// other entities (e.g., the normal world kernel) if there is no extra security mechanism + /// in place (e.g., by the hypervisor or hardware). That is, data it writes might be overwritten. 
+ pub unsafe fn write_at_offset( + &mut self, + count: usize, + value: T, + ) -> Result<(), PhysPointerError> { + if count >= self.count { + return Err(PhysPointerError::IndexOutOfBounds(count, self.count)); + } + let skip = self + .offset + .checked_add( + count + .checked_mul(core::mem::size_of::()) + .ok_or(PhysPointerError::Overflow)?, + ) + .ok_or(PhysPointerError::Overflow)?; + let start = skip / ALIGN; + let end = (skip + core::mem::size_of::()).div_ceil(ALIGN); + unsafe { + self.map_range( + start, + end, + PhysPageMapPermissions::READ | PhysPageMapPermissions::WRITE, + )?; + } + // Don't forget to call unmap() before returning to the caller + let Some(dst) = self.base_ptr() else { + unsafe { + self.unmap()?; + } + return Err(PhysPointerError::NoMappingInfo); + }; + let dst = dst.wrapping_add(count); + if (dst as usize).is_multiple_of(core::mem::align_of::()) { + unsafe { core::ptr::write(dst, value) }; + } else { + unsafe { core::ptr::write_unaligned(dst, value) }; + } + unsafe { + self.unmap()?; + } + Ok(()) + } + + /// Write a slice of values at the given offset to the physical pointer. + /// + /// # Safety + /// + /// The caller should be aware that the given physical address might be concurrently written by + /// other entities (e.g., the normal world kernel) if there is no extra security mechanism + /// in place (e.g., by the hypervisor or hardware). That is, data it writes might be overwritten. + pub unsafe fn write_slice_at_offset( + &mut self, + count: usize, + values: &[T], + ) -> Result<(), PhysPointerError> { + if count + .checked_add(values.len()) + .is_none_or(|end| end > self.count) + { + return Err(PhysPointerError::IndexOutOfBounds(count, self.count)); + } + let skip = self + .offset + .checked_add( + count + .checked_mul(core::mem::size_of::()) + .ok_or(PhysPointerError::Overflow)?, + ) + .ok_or(PhysPointerError::Overflow)?; + let start = skip / ALIGN; + let end = (skip + core::mem::size_of_val(values)).div_ceil(ALIGN); + unsafe { + self.map_range( + start, + end, + PhysPageMapPermissions::READ | PhysPageMapPermissions::WRITE, + )?; + } + // Don't forget to call unmap() before returning to the caller + let Some(dst) = self.base_ptr() else { + unsafe { + self.unmap()?; + } + return Err(PhysPointerError::NoMappingInfo); + }; + let dst = dst.wrapping_add(count); + if (dst as usize).is_multiple_of(core::mem::align_of::()) { + unsafe { + core::ptr::copy_nonoverlapping(values.as_ptr(), dst, values.len()); + } + } else { + unsafe { + core::ptr::copy_nonoverlapping( + values.as_ptr().cast::(), + dst.cast::(), + core::mem::size_of_val(values), + ); + } + } + unsafe { + self.unmap()?; + } + Ok(()) + } + + /// Map the physical pages from `start` to `end` indexes. + /// + /// # Safety + /// + /// This function assumes that the underlying platform safely handles concurrent mapping/unmapping + /// requests for the same physical pages. 
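For the page-index arithmetic that the accessors above feed into `map_range`, a small worked example (editor's illustration, assuming `ALIGN` = 4096):

```rust
fn page_span_example() {
    const ALIGN: usize = 4096;
    // A u64 that starts 6 bytes before the end of the first page spans two pages.
    let offset = 4090usize; // `self.offset` for element 0, so `skip == offset`
    let skip = offset;
    let start = skip / ALIGN; // 0
    let end = (skip + core::mem::size_of::<u64>()).div_ceil(ALIGN); // 2
    assert_eq!((start, end), (0, 2)); // both pages[0] and pages[1] are mapped
}
```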
+ unsafe fn map_range( + &mut self, + start: usize, + end: usize, + perms: PhysPageMapPermissions, + ) -> Result<(), PhysPointerError> { + if start >= end || end > self.pages.len() { + return Err(PhysPointerError::IndexOutOfBounds(end, self.pages.len())); + } + let accept_perms = PhysPageMapPermissions::READ | PhysPageMapPermissions::WRITE; + if perms.bits() & !accept_perms.bits() != 0 { + return Err(PhysPointerError::UnsupportedPermissions(perms.bits())); + } + if self.map_info.is_none() { + let sub_pages = &self.pages[start..end]; + unsafe { + platform().vmap(sub_pages, perms).map(|info| { + self.map_info = Some(info); + })?; + } + Ok(()) + } else { + Err(PhysPointerError::AlreadyMapped( + self.pages.first().map_or(0, |p| p.as_usize()), + )) + } + } + + /// Unmap the physical pages if mapped. + /// + /// # Safety + /// + /// This function assumes that the underlying platform safely handles concurrent mapping/unmapping + /// requests for the same physical pages. + unsafe fn unmap(&mut self) -> Result<(), PhysPointerError> { + if let Some(map_info) = self.map_info.take() { + unsafe { + platform().vunmap(map_info)?; + } + self.map_info = None; + Ok(()) + } else { + Err(PhysPointerError::Unmapped( + self.pages.first().map_or(0, |p| p.as_usize()), + )) + } + } + + /// Get the base virtual pointer if mapped. + #[inline] + fn base_ptr(&self) -> Option<*mut T> { + let Some(map_info) = &self.map_info else { + return None; + }; + Some(map_info.base.wrapping_add(self.offset).cast::()) + } +} + +impl Drop for PhysMutPtr { + fn drop(&mut self) { + let _ = unsafe { self.unmap() }; + } +} + +impl core::fmt::Debug for PhysMutPtr { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("PhysMutPtr") + .field("pages[0]", &self.pages.first().map_or(0, |p| p.as_usize())) + .field("offset", &self.offset) + .finish_non_exhaustive() + } +} + +/// Represent a physical pointer to a read-only object. This wraps around [`PhysMutPtr`] and +/// exposes only read access. +#[derive(Clone)] +#[repr(C)] +pub struct PhysConstPtr { + inner: PhysMutPtr, +} + +impl PhysConstPtr { + /// Create a new `PhysConstPtr` from the given physical page array and offset. + /// + /// All addresses in `pages` should be valid and aligned to `ALIGN`, and `offset` should be smaller + /// than `ALIGN`. Also, `pages` should contain enough pages to cover at least one object of + /// type `T` starting from `offset`. If these conditions are not met, this function returns + /// `Err(PhysPointerError)`. + pub fn new(pages: &[PhysPageAddr], offset: usize) -> Result { + Ok(Self { + inner: PhysMutPtr::new(pages, offset)?, + }) + } + + /// Create a new `PhysConstPtr` from the given contiguous physical address and length. + /// + /// This is a shortcut for + /// `PhysConstPtr::new([align_down(pa), align_down(pa) + ALIGN, ..., align_up(pa + bytes) - ALIGN], pa % ALIGN)`. + /// This function assumes that `pa`, ..., `pa+bytes` are both physically and virtually contiguous. If not, + /// later accesses through `PhysConstPtr` may read data in a wrong order. + pub fn with_contiguous_pages(pa: usize, bytes: usize) -> Result { + Ok(Self { + inner: PhysMutPtr::with_contiguous_pages(pa, bytes)?, + }) + } + + /// Create a new `PhysConstPtr` from the given physical address for a single object. + /// + /// This is a shortcut for `PhysConstPtr::with_contiguous_pages(pa, size_of::())`. + /// + /// Note: This module doesn't provide `as_usize` because LiteBox should not dereference physical addresses directly. 
+ pub fn with_usize(pa: usize) -> Result { + Ok(Self { + inner: PhysMutPtr::with_usize(pa)?, + }) + } + + /// Read the value at the given offset from the physical pointer. + /// + /// # Safety + /// + /// The caller should be aware that the given physical address might be concurrently written by + /// other entities (e.g., the normal world kernel) if there is no extra security mechanism + /// in place (e.g., by the hypervisor or hardware). That is, it might read corrupt data. + pub unsafe fn read_at_offset( + &mut self, + count: usize, + ) -> Result, PhysPointerError> { + unsafe { self.inner.read_at_offset(count) } + } + + /// Read a slice of values at the given offset from the physical pointer. + /// + /// # Safety + /// + /// The caller should be aware that the given physical address might be concurrently written by + /// other entities (e.g., the normal world kernel) if there is no extra security mechanism + /// in place (e.g., by the hypervisor or hardware). That is, it might read corrupt data. + pub unsafe fn read_slice_at_offset( + &mut self, + count: usize, + values: &mut [T], + ) -> Result<(), PhysPointerError> { + unsafe { self.inner.read_slice_at_offset(count, values) } + } +} + +impl Drop for PhysConstPtr { + fn drop(&mut self) { + let _ = unsafe { self.inner.unmap() }; + } +} + +impl core::fmt::Debug for PhysConstPtr { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("PhysConstPtr") + .field( + "pages[0]", + &self.inner.pages.first().map_or(0, |p| p.as_usize()), + ) + .field("offset", &self.inner.offset) + .finish_non_exhaustive() + } +}
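Taken together, a rough end-to-end sketch of how the shim is expected to use these pointers (editor's illustration; `roundtrip_msg_args` is hypothetical, error handling is simplified, and the real call sites live in `msg_handler`):

```rust
use litebox_common_optee::{OpteeMsgArgs, OpteeSmcReturnCode};
use litebox_shim_optee::{NormalWorldConstPtr, NormalWorldMutPtr};

fn roundtrip_msg_args(phys_addr: usize) -> Result<(), OpteeSmcReturnCode> {
    // Read: map the VTL0 pages read-only, copy into LiteBox-owned memory, unmap.
    let mut rd = NormalWorldConstPtr::<OpteeMsgArgs>::with_usize(phys_addr)
        .map_err(|_| OpteeSmcReturnCode::EBadAddr)?;
    // SAFETY: VTL0 may write these pages concurrently; only the copied snapshot is used.
    let mut msg = *unsafe { rd.read_at_offset(0) }.map_err(|_| OpteeSmcReturnCode::EBadAddr)?;

    // ... handle the message, then report the result back to the normal world ...
    msg.ret = 0; // assuming TEE_SUCCESS

    // Write: map the same pages writable, copy the updated struct back, unmap.
    let mut wr = NormalWorldMutPtr::<OpteeMsgArgs>::with_usize(phys_addr)
        .map_err(|_| OpteeSmcReturnCode::EBadAddr)?;
    // SAFETY: same caveat; VTL0 could overwrite what was just written.
    unsafe { wr.write_at_offset(0, msg) }.map_err(|_| OpteeSmcReturnCode::EBadAddr)?;
    Ok(())
}
```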