From 18d4a636267164e8fe76aacc09e88e30ded7c2e2 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Wed, 10 Dec 2025 23:00:53 +0000 Subject: [PATCH 01/45] msg_proto --- litebox_common_optee/src/lib.rs | 66 +++++++++++++++------------ litebox_shim_optee/src/lib.rs | 2 + litebox_shim_optee/src/msg_proto.rs | 71 +++++++++++++++++++++++++++++ 3 files changed, 111 insertions(+), 28 deletions(-) create mode 100644 litebox_shim_optee/src/msg_proto.rs diff --git a/litebox_common_optee/src/lib.rs b/litebox_common_optee/src/lib.rs index d4595db59..d0bfea66a 100644 --- a/litebox_common_optee/src/lib.rs +++ b/litebox_common_optee/src/lib.rs @@ -1340,7 +1340,7 @@ impl OpteeSmcFunction { /// OP-TEE SMC call uses CPU registers to pass input and output values. /// Thus, this structure is technically equivalent to `OpteeSmcArgs`, but we separate them for clarity. #[repr(align(4096))] -#[derive(Clone, Copy)] +#[derive(Clone, Copy, Default)] #[repr(C)] pub struct OpteeSmcResult { args: [usize; Self::NUM_OPTEE_SMC_ARGS], @@ -1349,53 +1349,63 @@ pub struct OpteeSmcResult { impl OpteeSmcResult { const NUM_OPTEE_SMC_ARGS: usize = 9; - pub fn return_status(&mut self, status: OpteeSmcReturn) { - self.args[0] = status as usize; + pub fn new(status: OpteeSmcReturn) -> Self { + let mut res = Self::default(); + res.args[0] = status as usize; + res } - pub fn exchange_capabilities( - &mut self, + pub fn new_exchange_capabilities( status: OpteeSmcReturn, capabilities: OpteeSecureWorldCapabilities, max_notif_value: usize, data: usize, - ) { - self.return_status(status); - self.args[1] = capabilities.bits(); - self.args[2] = max_notif_value; - self.args[3] = data; + ) -> Self { + let mut res = Self::default(); + res.args[0] = status as usize; + res.args[1] = capabilities.bits(); + res.args[2] = max_notif_value; + res.args[3] = data; + res } /// # Panics /// panics if any element of `data` cannot be converted to `usize`. - pub fn uuid(&mut self, data: [u32; 4]) { + pub fn new_uuid(data: &[u32; 4]) -> Self { + let mut res = Self::default(); // OP-TEE doesn't use the high 32 bit of each argument to avoid sign extension and overflow issues. 
- self.args[0] = usize::try_from(data[0]).unwrap(); - self.args[1] = usize::try_from(data[1]).unwrap(); - self.args[2] = usize::try_from(data[2]).unwrap(); - self.args[3] = usize::try_from(data[3]).unwrap(); + res.args[0] = usize::try_from(data[0]).unwrap(); + res.args[1] = usize::try_from(data[1]).unwrap(); + res.args[2] = usize::try_from(data[2]).unwrap(); + res.args[3] = usize::try_from(data[3]).unwrap(); + res } - pub fn revision(&mut self, major: usize, minor: usize) { - self.args[0] = major; - self.args[1] = minor; + pub fn new_revision(major: usize, minor: usize) -> Self { + let mut res = Self::default(); + res.args[0] = major; + res.args[1] = minor; + res } - pub fn os_revision(&mut self, major: usize, minor: usize, build_id: usize) { - self.args[0] = major; - self.args[1] = minor; - self.args[2] = build_id; + pub fn new_os_revision(major: usize, minor: usize, build_id: usize) -> Self { + let mut res = Self::default(); + res.args[0] = major; + res.args[1] = minor; + res.args[2] = build_id; + res } - pub fn disable_shm_cache( - &mut self, + pub fn new_disable_shm_cache( status: OpteeSmcReturn, shm_upper32: usize, shm_lower32: usize, - ) { - self.args[0] = status as usize; - self.args[1] = shm_upper32; - self.args[2] = shm_lower32; + ) -> Self { + let mut res = Self::default(); + res.args[0] = status as usize; + res.args[1] = shm_upper32; + res.args[2] = shm_lower32; + res } } diff --git a/litebox_shim_optee/src/lib.rs b/litebox_shim_optee/src/lib.rs index 6f8af863a..196ef8a2c 100644 --- a/litebox_shim_optee/src/lib.rs +++ b/litebox_shim_optee/src/lib.rs @@ -33,6 +33,8 @@ use litebox_platform_multiplex::Platform; pub mod loader; pub(crate) mod syscalls; +pub mod msg_proto; + const MAX_KERNEL_BUF_SIZE: usize = 0x80_000; pub struct OpteeShimEntrypoints { diff --git a/litebox_shim_optee/src/msg_proto.rs b/litebox_shim_optee/src/msg_proto.rs new file mode 100644 index 000000000..62b140864 --- /dev/null +++ b/litebox_shim_optee/src/msg_proto.rs @@ -0,0 +1,71 @@ +use litebox_common_linux::errno::Errno; +use litebox_common_optee::{ + OpteeMsgArg, OpteeSecureWorldCapabilities, OpteeSmcArgs, OpteeSmcFunction, OpteeSmcResult, + OpteeSmcReturn, +}; + +// TODO: Replace these with version and build info +const OPTEE_MSG_REVISION_MAJOR: usize = 2; +const OPTEE_MSG_REVISION_MINOR: usize = 0; +const OPTEE_MSG_BUILD_ID: usize = 0; + +// TODO: Replace this with an actual UID +const OPTEE_MSG_UID_0: u32 = 0x384f_b3e0; +const OPTEE_MSG_UID_1: u32 = 0xe7f8_11e3; +const OPTEE_MSG_UID_2: u32 = 0xaf63_0002; +const OPTEE_MSG_UID_3: u32 = 0xa5d5_c51b; + +// We do not support notification for now +const MAX_NOTIF_VALUE: usize = 0; +const NUM_RPC_PARMS: usize = 4; + +pub fn handle_optee_smc_args(smc: &mut OpteeSmcArgs) -> Result { + let func_id = smc.func_id()?; + + match func_id { + OpteeSmcFunction::CallWithArg + | OpteeSmcFunction::CallWithRpcArg + | OpteeSmcFunction::CallWithRegdArg => { + // TODO: handle the contained `OpteeMsgArg` and return appropriate result + Ok(OpteeSmcResult::new(OpteeSmcReturn::Ok)) + } + OpteeSmcFunction::ExchangeCapabilities => { + // TODO: update the below when we support more features + let default_cap = OpteeSecureWorldCapabilities::DYNAMIC_SHM + | OpteeSecureWorldCapabilities::MEMREF_NULL + | OpteeSecureWorldCapabilities::RPC_ARG; + Ok(OpteeSmcResult::new_exchange_capabilities( + OpteeSmcReturn::Ok, + default_cap, + MAX_NOTIF_VALUE, + NUM_RPC_PARMS, + )) + } + OpteeSmcFunction::DisableShmCache => { + // We do not support this feature + Ok(OpteeSmcResult::new_disable_shm_cache( 
+ OpteeSmcReturn::ENotAvail, + 0, + 0, + )) + } + OpteeSmcFunction::CallsUid => Ok(OpteeSmcResult::new_uuid(&[ + OPTEE_MSG_UID_0, + OPTEE_MSG_UID_1, + OPTEE_MSG_UID_2, + OPTEE_MSG_UID_3, + ])), + OpteeSmcFunction::GetOsRevision => Ok(OpteeSmcResult::new_os_revision( + OPTEE_MSG_REVISION_MAJOR, + OPTEE_MSG_REVISION_MINOR, + OPTEE_MSG_BUILD_ID, + )), + OpteeSmcFunction::CallsRevision => Ok(OpteeSmcResult::new_revision( + OPTEE_MSG_REVISION_MAJOR, + OPTEE_MSG_REVISION_MINOR, + )), + _ => Err(Errno::EINVAL), + } +} + +pub fn halde_optee_msg_arg(_msg: &OpteeMsgArg) {} From 2c4d5857344cf832c935eece96901cf6a0fc481c Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Thu, 11 Dec 2025 05:49:05 +0000 Subject: [PATCH 02/45] improve optee msg handlers --- litebox_common_optee/src/lib.rs | 79 ++++++--- litebox_shim_optee/src/lib.rs | 2 +- litebox_shim_optee/src/msg_handler.rs | 229 ++++++++++++++++++++++++++ litebox_shim_optee/src/msg_proto.rs | 71 -------- 4 files changed, 288 insertions(+), 93 deletions(-) create mode 100644 litebox_shim_optee/src/msg_handler.rs delete mode 100644 litebox_shim_optee/src/msg_proto.rs diff --git a/litebox_common_optee/src/lib.rs b/litebox_common_optee/src/lib.rs index d0bfea66a..fd0e595af 100644 --- a/litebox_common_optee/src/lib.rs +++ b/litebox_common_optee/src/lib.rs @@ -1122,11 +1122,11 @@ pub enum OpteeMessageCommand { #[repr(C)] pub struct OpteeMsgParamTmem { /// Physical address of the buffer - buf_ptr: u64, + pub buf_ptr: u64, /// Size of the buffer - size: u64, + pub size: u64, /// Temporary shared memory reference or identifier - shm_ref: u64, + pub shm_ref: u64, } /// Registered memory reference parameter @@ -1134,11 +1134,11 @@ pub struct OpteeMsgParamTmem { #[repr(C)] pub struct OpteeMsgParamRmem { /// Offset into shared memory reference - offs: u64, + pub offs: u64, /// Size of the buffer - size: u64, + pub size: u64, /// Shared memory reference or identifier - shm_ref: u64, + pub shm_ref: u64, } /// FF-A memory reference parameter @@ -1146,15 +1146,15 @@ pub struct OpteeMsgParamRmem { #[repr(C)] pub struct OpteeMsgParamFmem { /// Lower bits of offset into shared memory reference - offs_low: u32, + pub offs_low: u32, /// Higher bits of offset into shared memory reference - offs_high: u32, + pub offs_high: u32, /// Internal offset into the first page of shared memory reference - internal_offs: u16, + pub internal_offs: u16, /// Size of the buffer - size: u64, + pub size: u64, /// Global identifier of the shared memory - global_id: u64, + pub global_id: u64, } /// Opaque value parameter @@ -1162,9 +1162,9 @@ pub struct OpteeMsgParamFmem { #[derive(Debug, Clone, Copy)] #[repr(C)] pub struct OpteeMsgParamValue { - a: u64, - b: u64, - c: u64, + pub a: u64, + pub b: u64, + pub c: u64, } /// Parameter used together with `OpteeMsgArg` @@ -1239,7 +1239,7 @@ impl OpteeMsgParam { #[repr(C)] pub struct OpteeMsgArg { /// OP-TEE message command. This is a superset of `UteeEntryFunc`. - cmd: OpteeMessageCommand, + pub cmd: OpteeMessageCommand, /// TA function ID which is used if `cmd == InvokeCommand`. Note that the meaning of `cmd` and `func` /// is swapped compared to TAs. 
func: u32, @@ -1262,6 +1262,41 @@ pub struct OpteeMsgArg { params: [OpteeMsgParam; TEE_NUM_PARAMS + 2], } +impl OpteeMsgArg { + #[cfg(target_pointer_width = "64")] + pub fn get_param_tmem(&self, index: usize) -> Result { + if index >= self.params.len() || index >= self.num_params as usize { + Err(Errno::EINVAL) + } else { + Ok(unsafe { self.params[index].u.tmem }) + } + } + #[cfg(target_pointer_width = "64")] + pub fn get_param_rmem(&self, index: usize) -> Result { + if index >= self.params.len() || index >= self.num_params as usize { + Err(Errno::EINVAL) + } else { + Ok(unsafe { self.params[index].u.rmem }) + } + } + #[cfg(target_pointer_width = "64")] + pub fn get_param_fmem(&self, index: usize) -> Result { + if index >= self.params.len() || index >= self.num_params as usize { + Err(Errno::EINVAL) + } else { + Ok(unsafe { self.params[index].u.fmem }) + } + } + #[cfg(target_pointer_width = "64")] + pub fn get_param_value(&self, index: usize) -> Result { + if index >= self.params.len() || index >= self.num_params as usize { + Err(Errno::EINVAL) + } else { + Ok(unsafe { self.params[index].u.value }) + } + } +} + /// OP-TEE SMC call arguments. /// OP-TEE assumes that the underlying architecture is Arm with TrustZone and /// thus it uses Secure Monitor Call (SMC) calling convention (SMCCC). @@ -1295,12 +1330,13 @@ impl OpteeSmcArgs { /// Get the physical address of `OpteeMsgArg`. The secure world is expected to map and copy /// this structure. - pub fn optee_msg_arg_phys_addr(&self) -> Result { + #[cfg(target_pointer_width = "64")] + pub fn optee_msg_arg_phys_addr(&self) -> Result { // To avoid potential sign extension and overflow issues, OP-TEE stores the low and // high 32 bits of a 64-bit address in `args[2]` and `args[1]`, respectively. if self.args[1] & 0xffff_ffff_0000_0000 == 0 && self.args[2] & 0xffff_ffff_0000_0000 == 0 { let addr = (self.args[1] << 32) | self.args[2]; - Ok(addr) + Ok(addr as u64) } else { Err(Errno::EINVAL) } @@ -1371,13 +1407,14 @@ impl OpteeSmcResult { /// # Panics /// panics if any element of `data` cannot be converted to `usize`. + #[cfg(target_pointer_width = "64")] pub fn new_uuid(data: &[u32; 4]) -> Self { let mut res = Self::default(); // OP-TEE doesn't use the high 32 bit of each argument to avoid sign extension and overflow issues. 
- res.args[0] = usize::try_from(data[0]).unwrap(); - res.args[1] = usize::try_from(data[1]).unwrap(); - res.args[2] = usize::try_from(data[2]).unwrap(); - res.args[3] = usize::try_from(data[3]).unwrap(); + res.args[0] = data[0] as usize; + res.args[1] = data[1] as usize; + res.args[2] = data[2] as usize; + res.args[3] = data[3] as usize; res } diff --git a/litebox_shim_optee/src/lib.rs b/litebox_shim_optee/src/lib.rs index 196ef8a2c..0b290ddfc 100644 --- a/litebox_shim_optee/src/lib.rs +++ b/litebox_shim_optee/src/lib.rs @@ -33,7 +33,7 @@ use litebox_platform_multiplex::Platform; pub mod loader; pub(crate) mod syscalls; -pub mod msg_proto; +pub mod msg_handler; const MAX_KERNEL_BUF_SIZE: usize = 0x80_000; diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs new file mode 100644 index 000000000..a690228c4 --- /dev/null +++ b/litebox_shim_optee/src/msg_handler.rs @@ -0,0 +1,229 @@ +use alloc::{boxed::Box, vec::Vec}; +use hashbrown::HashMap; +use litebox::mm::linux::PAGE_SIZE; +use litebox_common_linux::errno::Errno; +use litebox_common_optee::{ + OpteeMessageCommand, OpteeMsgArg, OpteeSecureWorldCapabilities, OpteeSmcArgs, OpteeSmcFunction, + OpteeSmcResult, OpteeSmcReturn, +}; +use once_cell::race::OnceBox; + +// TODO: Replace these with version and build info +const OPTEE_MSG_REVISION_MAJOR: usize = 2; +const OPTEE_MSG_REVISION_MINOR: usize = 0; +const OPTEE_MSG_BUILD_ID: usize = 0; + +// TODO: Replace this with an actual UID +const OPTEE_MSG_UID_0: u32 = 0x384f_b3e0; +const OPTEE_MSG_UID_1: u32 = 0xe7f8_11e3; +const OPTEE_MSG_UID_2: u32 = 0xaf63_0002; +const OPTEE_MSG_UID_3: u32 = 0xa5d5_c51b; + +// We do not support notification for now +const MAX_NOTIF_VALUE: usize = 0; +const NUM_RPC_PARMS: usize = 4; + +#[inline] +#[cfg(target_pointer_width = "64")] +fn page_align_down(address: u64) -> u64 { + address & !(PAGE_SIZE as u64 - 1) +} + +#[inline] +#[cfg(target_pointer_width = "64")] +fn page_align_up(len: u64) -> u64 { + len.next_multiple_of(PAGE_SIZE as u64) +} + +// Placeholder for copying data from remote memory (e.g., VTL0 physical memory) +// TODO: Specify it in the litebox crate? +// TODO: Define a type for remote address +#[allow(clippy::unnecessary_wraps)] +fn copy_from_remote_memory(_remote_addr: u64) -> Result +where + T: Copy, +{ + // TODO: implement the actual remote copy + Ok(unsafe { core::mem::zeroed() }) +} + +// Placeholder for copying data to remote memory (e.g., VTL0 physical memory) +// TODO: Specify it in the litebox crate? 
+// TODO: Define a type for remote address +#[expect(unused)] +#[allow(clippy::unnecessary_wraps)] +fn copy_to_remote_memory(_remote_addr: u64, _data: &T) -> Result<(), Errno> +where + T: Copy, +{ + // TODO: implement the actual remote copy + Ok(()) +} + +pub fn handle_optee_smc_args(smc: &mut OpteeSmcArgs) -> Result { + let func_id = smc.func_id()?; + + match func_id { + OpteeSmcFunction::CallWithArg + | OpteeSmcFunction::CallWithRpcArg + | OpteeSmcFunction::CallWithRegdArg => { + let msg_arg_addr = smc.optee_msg_arg_phys_addr()?; + let msg_arg = copy_from_remote_memory::(msg_arg_addr)?; + handle_optee_msg_arg(&msg_arg).map(|()| OpteeSmcResult::new(OpteeSmcReturn::Ok)) + } + OpteeSmcFunction::ExchangeCapabilities => { + // TODO: update the below when we support more features + let default_cap = OpteeSecureWorldCapabilities::DYNAMIC_SHM + | OpteeSecureWorldCapabilities::MEMREF_NULL + | OpteeSecureWorldCapabilities::RPC_ARG; + Ok(OpteeSmcResult::new_exchange_capabilities( + OpteeSmcReturn::Ok, + default_cap, + MAX_NOTIF_VALUE, + NUM_RPC_PARMS, + )) + } + OpteeSmcFunction::DisableShmCache => { + // We do not support this feature + Ok(OpteeSmcResult::new_disable_shm_cache( + OpteeSmcReturn::ENotAvail, + 0, + 0, + )) + } + OpteeSmcFunction::CallsUid => Ok(OpteeSmcResult::new_uuid(&[ + OPTEE_MSG_UID_0, + OPTEE_MSG_UID_1, + OPTEE_MSG_UID_2, + OPTEE_MSG_UID_3, + ])), + OpteeSmcFunction::GetOsRevision => Ok(OpteeSmcResult::new_os_revision( + OPTEE_MSG_REVISION_MAJOR, + OPTEE_MSG_REVISION_MINOR, + OPTEE_MSG_BUILD_ID, + )), + OpteeSmcFunction::CallsRevision => Ok(OpteeSmcResult::new_revision( + OPTEE_MSG_REVISION_MAJOR, + OPTEE_MSG_REVISION_MINOR, + )), + _ => Err(Errno::EINVAL), + } +} + +pub fn handle_optee_msg_arg(msg_arg: &OpteeMsgArg) -> Result<(), Errno> { + match msg_arg.cmd { + OpteeMessageCommand::RegisterShm => { + if let Ok(tmem) = msg_arg.get_param_tmem(0) { + shm_ref_map().register_shm(tmem.buf_ptr, tmem.size, tmem.shm_ref)?; + } else { + return Err(Errno::EINVAL); + } + } + OpteeMessageCommand::UnregisterShm => { + if let Ok(tmem) = msg_arg.get_param_tmem(0) { + shm_ref_map().remove(tmem.shm_ref).ok_or(Errno::ENOENT)?; + } else { + return Err(Errno::EINVAL); + } + } + _ => {} + } + + Ok(()) +} + +#[derive(Clone)] +struct ShmRefInfo { + pub pages: Box<[u64]>, + pub page_offset: u64, +} + +#[derive(Clone, Copy)] +#[repr(C)] +struct ShmRefPagesData { + pub pages_list: [u64; PAGELIST_ENTRIES_PER_PAGE], + pub next_page_data: u64, +} +const PAGELIST_ENTRIES_PER_PAGE: usize = + PAGE_SIZE / core::mem::size_of::() - core::mem::size_of::(); + +/// Maintain the information of OP-TEE shared memory in VTL0 referenced by `shm_ref`. +/// This data structure is for registering shared memory regions before they are +/// used during OP-TEE calls with parameters referencing shared memory. +/// Any normal memory references without this registration will be rejected. 
+struct ShmRefMap { + inner: spin::mutex::SpinMutex>, +} + +impl ShmRefMap { + pub fn new() -> Self { + Self { + inner: spin::mutex::SpinMutex::new(HashMap::new()), + } + } + + pub fn insert(&self, shm_ref: u64, info: ShmRefInfo) -> Result<(), Errno> { + let mut guard = self.inner.lock(); + if guard.contains_key(&shm_ref) { + Err(Errno::EEXIST) + } else { + let _ = guard.insert(shm_ref, info); + Ok(()) + } + } + + pub fn remove(&self, shm_ref: u64) -> Option { + let mut guard = self.inner.lock(); + guard.remove(&shm_ref) + } + + #[expect(unused)] + pub fn get(&self, shm_ref: u64) -> Option { + let guard = self.inner.lock(); + guard.get(&shm_ref).cloned() + } + + pub fn register_shm(&self, phys_addr: u64, size: u64, shm_ref: u64) -> Result<(), Errno> { + let aligned_phys_addr = page_align_down(phys_addr); + let page_offset = phys_addr - aligned_phys_addr; + let aligned_size = page_align_up(page_offset + size); + let num_pages = usize::try_from(aligned_size).unwrap() / PAGE_SIZE; + let mut pages = Vec::with_capacity(num_pages); + + let mut cur_addr = aligned_phys_addr; + loop { + let Ok(pages_data) = copy_from_remote_memory::(cur_addr) else { + return Err(Errno::EFAULT); + }; + for page in &pages_data.pages_list { + if *page == 0 || pages.len() == num_pages { + break; + } else if !page.is_multiple_of(u64::try_from(PAGE_SIZE).unwrap()) { + return Err(Errno::EINVAL); + } else { + pages.push(*page); + } + } + if pages_data.next_page_data == 0 || pages.len() == num_pages { + break; + } else { + cur_addr = pages_data.next_page_data; + } + } + + self.insert( + shm_ref, + ShmRefInfo { + pages: pages.into_boxed_slice(), + page_offset, + }, + )?; + + Ok(()) + } +} + +fn shm_ref_map() -> &'static ShmRefMap { + static SHM_REF_MAP: OnceBox = OnceBox::new(); + SHM_REF_MAP.get_or_init(|| Box::new(ShmRefMap::new())) +} diff --git a/litebox_shim_optee/src/msg_proto.rs b/litebox_shim_optee/src/msg_proto.rs deleted file mode 100644 index 62b140864..000000000 --- a/litebox_shim_optee/src/msg_proto.rs +++ /dev/null @@ -1,71 +0,0 @@ -use litebox_common_linux::errno::Errno; -use litebox_common_optee::{ - OpteeMsgArg, OpteeSecureWorldCapabilities, OpteeSmcArgs, OpteeSmcFunction, OpteeSmcResult, - OpteeSmcReturn, -}; - -// TODO: Replace these with version and build info -const OPTEE_MSG_REVISION_MAJOR: usize = 2; -const OPTEE_MSG_REVISION_MINOR: usize = 0; -const OPTEE_MSG_BUILD_ID: usize = 0; - -// TODO: Replace this with an actual UID -const OPTEE_MSG_UID_0: u32 = 0x384f_b3e0; -const OPTEE_MSG_UID_1: u32 = 0xe7f8_11e3; -const OPTEE_MSG_UID_2: u32 = 0xaf63_0002; -const OPTEE_MSG_UID_3: u32 = 0xa5d5_c51b; - -// We do not support notification for now -const MAX_NOTIF_VALUE: usize = 0; -const NUM_RPC_PARMS: usize = 4; - -pub fn handle_optee_smc_args(smc: &mut OpteeSmcArgs) -> Result { - let func_id = smc.func_id()?; - - match func_id { - OpteeSmcFunction::CallWithArg - | OpteeSmcFunction::CallWithRpcArg - | OpteeSmcFunction::CallWithRegdArg => { - // TODO: handle the contained `OpteeMsgArg` and return appropriate result - Ok(OpteeSmcResult::new(OpteeSmcReturn::Ok)) - } - OpteeSmcFunction::ExchangeCapabilities => { - // TODO: update the below when we support more features - let default_cap = OpteeSecureWorldCapabilities::DYNAMIC_SHM - | OpteeSecureWorldCapabilities::MEMREF_NULL - | OpteeSecureWorldCapabilities::RPC_ARG; - Ok(OpteeSmcResult::new_exchange_capabilities( - OpteeSmcReturn::Ok, - default_cap, - MAX_NOTIF_VALUE, - NUM_RPC_PARMS, - )) - } - OpteeSmcFunction::DisableShmCache => { - // We do not support this 
feature - Ok(OpteeSmcResult::new_disable_shm_cache( - OpteeSmcReturn::ENotAvail, - 0, - 0, - )) - } - OpteeSmcFunction::CallsUid => Ok(OpteeSmcResult::new_uuid(&[ - OPTEE_MSG_UID_0, - OPTEE_MSG_UID_1, - OPTEE_MSG_UID_2, - OPTEE_MSG_UID_3, - ])), - OpteeSmcFunction::GetOsRevision => Ok(OpteeSmcResult::new_os_revision( - OPTEE_MSG_REVISION_MAJOR, - OPTEE_MSG_REVISION_MINOR, - OPTEE_MSG_BUILD_ID, - )), - OpteeSmcFunction::CallsRevision => Ok(OpteeSmcResult::new_revision( - OPTEE_MSG_REVISION_MAJOR, - OPTEE_MSG_REVISION_MINOR, - )), - _ => Err(Errno::EINVAL), - } -} - -pub fn halde_optee_msg_arg(_msg: &OpteeMsgArg) {} From 17da58fe68cd8075948e883f939f37061d60b06c Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Thu, 11 Dec 2025 17:25:54 +0000 Subject: [PATCH 03/45] improve abstraction --- litebox_common_optee/src/lib.rs | 172 +++++++++++++++----------- litebox_shim_optee/src/msg_handler.rs | 61 ++++----- 2 files changed, 134 insertions(+), 99 deletions(-) diff --git a/litebox_common_optee/src/lib.rs b/litebox_common_optee/src/lib.rs index fd0e595af..cb24df22b 100644 --- a/litebox_common_optee/src/lib.rs +++ b/litebox_common_optee/src/lib.rs @@ -1297,7 +1297,7 @@ impl OpteeMsgArg { } } -/// OP-TEE SMC call arguments. +/// A memory page to exchange OP-TEE SMC call arguments. /// OP-TEE assumes that the underlying architecture is Arm with TrustZone and /// thus it uses Secure Monitor Call (SMC) calling convention (SMCCC). /// Since we currently rely on the existing OP-TEE driver which assumes SMCCC, we translate it into @@ -1305,9 +1305,28 @@ impl OpteeMsgArg { /// Specifically, OP-TEE SMC call uses up to nine CPU registers to pass arguments. /// However, since VTL call only supports up to four parameters, we allocate a VTL0 memory page and /// exchange all arguments through that memory page. +/// TODO: Since this is LVBS-specific structure to facilitate the translation between VTL call convention, +/// we might want to move it to the `litebox_platform_lvbs` crate later. #[repr(align(4096))] #[derive(Clone, Copy)] #[repr(C)] +pub struct OpteeSmcArgsPage { + pub args: [usize; Self::NUM_OPTEE_SMC_ARGS], +} +impl OpteeSmcArgsPage { + const NUM_OPTEE_SMC_ARGS: usize = 9; +} + +impl From<&OpteeSmcArgsPage> for OpteeSmcArgs { + fn from(page: &OpteeSmcArgsPage) -> Self { + let mut smc = OpteeSmcArgs::default(); + smc.args.copy_from_slice(&page.args); + smc + } +} + +/// OP-TEE SMC call arguments. +#[derive(Clone, Copy, Default)] pub struct OpteeSmcArgs { args: [usize; Self::NUM_OPTEE_SMC_ARGS], } @@ -1315,14 +1334,6 @@ pub struct OpteeSmcArgs { impl OpteeSmcArgs { const NUM_OPTEE_SMC_ARGS: usize = 9; - pub fn arg_index(&self, index: usize) -> Option { - if index < Self::NUM_OPTEE_SMC_ARGS { - Some(self.args[index]) - } else { - None - } - } - /// Get the function ID of an OP-TEE SMC call pub fn func_id(&self) -> Result { OpteeSmcFunction::try_from(self.args[0] & OpteeSmcFunction::MASK).map_err(|_| Errno::EINVAL) @@ -1374,75 +1385,94 @@ impl OpteeSmcFunction { /// OP-TEE SMC call result. /// OP-TEE SMC call uses CPU registers to pass input and output values. -/// Thus, this structure is technically equivalent to `OpteeSmcArgs`, but we separate them for clarity. 
-#[repr(align(4096))] -#[derive(Clone, Copy, Default)] -#[repr(C)] -pub struct OpteeSmcResult { - args: [usize; Self::NUM_OPTEE_SMC_ARGS], -} - -impl OpteeSmcResult { - const NUM_OPTEE_SMC_ARGS: usize = 9; - - pub fn new(status: OpteeSmcReturn) -> Self { - let mut res = Self::default(); - res.args[0] = status as usize; - res - } - - pub fn new_exchange_capabilities( +/// Thus, we convert this into `OpteeSmcArgs` later. +#[non_exhaustive] +pub enum OpteeSmcResult<'a> { + Generic { + status: OpteeSmcReturn, + }, + ExchangeCapabilities { status: OpteeSmcReturn, capabilities: OpteeSecureWorldCapabilities, max_notif_value: usize, data: usize, - ) -> Self { - let mut res = Self::default(); - res.args[0] = status as usize; - res.args[1] = capabilities.bits(); - res.args[2] = max_notif_value; - res.args[3] = data; - res - } - - /// # Panics - /// panics if any element of `data` cannot be converted to `usize`. - #[cfg(target_pointer_width = "64")] - pub fn new_uuid(data: &[u32; 4]) -> Self { - let mut res = Self::default(); - // OP-TEE doesn't use the high 32 bit of each argument to avoid sign extension and overflow issues. - res.args[0] = data[0] as usize; - res.args[1] = data[1] as usize; - res.args[2] = data[2] as usize; - res.args[3] = data[3] as usize; - res - } - - pub fn new_revision(major: usize, minor: usize) -> Self { - let mut res = Self::default(); - res.args[0] = major; - res.args[1] = minor; - res - } - - pub fn new_os_revision(major: usize, minor: usize, build_id: usize) -> Self { - let mut res = Self::default(); - res.args[0] = major; - res.args[1] = minor; - res.args[2] = build_id; - res - } - - pub fn new_disable_shm_cache( + }, + Uuid { + data: &'a [u32; 4], + }, + Revision { + major: usize, + minor: usize, + }, + OsRevision { + major: usize, + minor: usize, + build_id: usize, + }, + DisableShmCache { status: OpteeSmcReturn, shm_upper32: usize, shm_lower32: usize, - ) -> Self { - let mut res = Self::default(); - res.args[0] = status as usize; - res.args[1] = shm_upper32; - res.args[2] = shm_lower32; - res + }, +} + +impl From> for OpteeSmcArgs { + fn from(value: OpteeSmcResult) -> Self { + match value { + OpteeSmcResult::Generic { status } => { + let mut smc = OpteeSmcArgs::default(); + smc.args[0] = status as usize; + smc + } + OpteeSmcResult::ExchangeCapabilities { + status, + capabilities, + max_notif_value, + data, + } => { + let mut smc = OpteeSmcArgs::default(); + smc.args[0] = status as usize; + smc.args[1] = capabilities.bits(); + smc.args[2] = max_notif_value; + smc.args[3] = data; + smc + } + OpteeSmcResult::Uuid { data } => { + let mut smc = OpteeSmcArgs::default(); + for (i, arg) in smc.args.iter_mut().enumerate().take(4) { + *arg = data[i] as usize; + } + smc + } + OpteeSmcResult::Revision { major, minor } => { + let mut smc = OpteeSmcArgs::default(); + smc.args[0] = major; + smc.args[1] = minor; + smc + } + OpteeSmcResult::OsRevision { + major, + minor, + build_id, + } => { + let mut smc = OpteeSmcArgs::default(); + smc.args[0] = major; + smc.args[1] = minor; + smc.args[2] = build_id; + smc + } + OpteeSmcResult::DisableShmCache { + status, + shm_upper32, + shm_lower32, + } => { + let mut smc = OpteeSmcArgs::default(); + smc.args[0] = status as usize; + smc.args[1] = shm_upper32; + smc.args[2] = shm_lower32; + smc + } + } } } diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index a690228c4..24b6a8dcf 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -60,7 +60,8 @@ where Ok(()) } 
-pub fn handle_optee_smc_args(smc: &mut OpteeSmcArgs) -> Result<OpteeSmcResult, Errno> {
+/// This function handles `OpteeSmcArgs` passed from the normal world (VTL0) via an OP-TEE SMC call.
+pub fn handle_optee_smc_args(smc: &mut OpteeSmcArgs) -> Result<OpteeSmcResult<'static>, Errno> {
     let func_id = smc.func_id()?;
 
     match func_id {
@@ -69,43 +70,47 @@ pub fn handle_optee_smc_args(smc: &mut OpteeSmcArgs) -> Result<OpteeSmcResult, Errno> {
             let msg_arg_addr = smc.optee_msg_arg_phys_addr()?;
             let msg_arg = copy_from_remote_memory::<OpteeMsgArg>(msg_arg_addr)?;
-            handle_optee_msg_arg(&msg_arg).map(|()| OpteeSmcResult::new(OpteeSmcReturn::Ok))
+            handle_optee_msg_arg(&msg_arg).map(|()| OpteeSmcResult::Generic {
+                status: OpteeSmcReturn::Ok,
+            })
         }
         OpteeSmcFunction::ExchangeCapabilities => {
             // TODO: update the below when we support more features
             let default_cap = OpteeSecureWorldCapabilities::DYNAMIC_SHM
                 | OpteeSecureWorldCapabilities::MEMREF_NULL
                 | OpteeSecureWorldCapabilities::RPC_ARG;
-            Ok(OpteeSmcResult::new_exchange_capabilities(
-                OpteeSmcReturn::Ok,
-                default_cap,
-                MAX_NOTIF_VALUE,
-                NUM_RPC_PARMS,
-            ))
+            Ok(OpteeSmcResult::ExchangeCapabilities {
+                status: OpteeSmcReturn::Ok,
+                capabilities: default_cap,
+                max_notif_value: MAX_NOTIF_VALUE,
+                data: NUM_RPC_PARMS,
+            })
         }
         OpteeSmcFunction::DisableShmCache => {
             // We do not support this feature
-            Ok(OpteeSmcResult::new_disable_shm_cache(
-                OpteeSmcReturn::ENotAvail,
-                0,
-                0,
-            ))
+            Ok(OpteeSmcResult::DisableShmCache {
+                status: OpteeSmcReturn::ENotAvail,
+                shm_upper32: 0,
+                shm_lower32: 0,
+            })
         }
-        OpteeSmcFunction::CallsUid => Ok(OpteeSmcResult::new_uuid(&[
-            OPTEE_MSG_UID_0,
-            OPTEE_MSG_UID_1,
-            OPTEE_MSG_UID_2,
-            OPTEE_MSG_UID_3,
-        ])),
-        OpteeSmcFunction::GetOsRevision => Ok(OpteeSmcResult::new_os_revision(
-            OPTEE_MSG_REVISION_MAJOR,
-            OPTEE_MSG_REVISION_MINOR,
-            OPTEE_MSG_BUILD_ID,
-        )),
-        OpteeSmcFunction::CallsRevision => Ok(OpteeSmcResult::new_revision(
-            OPTEE_MSG_REVISION_MAJOR,
-            OPTEE_MSG_REVISION_MINOR,
-        )),
+        OpteeSmcFunction::CallsUid => Ok(OpteeSmcResult::Uuid {
+            data: &[
+                OPTEE_MSG_UID_0,
+                OPTEE_MSG_UID_1,
+                OPTEE_MSG_UID_2,
+                OPTEE_MSG_UID_3,
+            ],
+        }),
+        OpteeSmcFunction::GetOsRevision => Ok(OpteeSmcResult::OsRevision {
+            major: OPTEE_MSG_REVISION_MAJOR,
+            minor: OPTEE_MSG_REVISION_MINOR,
+            build_id: OPTEE_MSG_BUILD_ID,
+        }),
+        OpteeSmcFunction::CallsRevision => Ok(OpteeSmcResult::Revision {
+            major: OPTEE_MSG_REVISION_MAJOR,
+            minor: OPTEE_MSG_REVISION_MINOR,
+        }),
         _ => Err(Errno::EINVAL),
     }
 }

From 1941a9fb69aad5915f05e7fdf728527c83291b2c Mon Sep 17 00:00:00 2001
From: Sangho Lee
Date: Thu, 11 Dec 2025 20:01:50 +0000
Subject: [PATCH 04/45] revision and add RemotePtr placeholders

---
 litebox_shim_optee/src/lib.rs             |   1 +
 litebox_shim_optee/src/msg_handler.rs     |  53 ++++----
 litebox_shim_optee/src/remote_pointers.rs | 148 ++++++++++++++++++++++
 3 files changed, 173 insertions(+), 29 deletions(-)
 create mode 100644 litebox_shim_optee/src/remote_pointers.rs

diff --git a/litebox_shim_optee/src/lib.rs b/litebox_shim_optee/src/lib.rs
index 0b290ddfc..924a49c42 100644
--- a/litebox_shim_optee/src/lib.rs
+++ b/litebox_shim_optee/src/lib.rs
@@ -34,6 +34,7 @@ pub mod loader;
 pub(crate) mod syscalls;
 
 pub mod msg_handler;
+pub mod remote_pointers;
 
 const MAX_KERNEL_BUF_SIZE: usize = 0x80_000;
 
diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs
index 24b6a8dcf..82a13caac 100644
--- a/litebox_shim_optee/src/msg_handler.rs
+++ b/litebox_shim_optee/src/msg_handler.rs
@@ -1,6 +1,8 @@
+use crate::remote_pointers::{RemoteConstPtr, RemotePtrKind, ValidateAccess};
 use alloc::{boxed::Box,
vec::Vec}; use hashbrown::HashMap; use litebox::mm::linux::PAGE_SIZE; +use litebox::platform::RawConstPointer; use litebox_common_linux::errno::Errno; use litebox_common_optee::{ OpteeMessageCommand, OpteeMsgArg, OpteeSecureWorldCapabilities, OpteeSmcArgs, OpteeSmcFunction, @@ -35,32 +37,17 @@ fn page_align_up(len: u64) -> u64 { len.next_multiple_of(PAGE_SIZE as u64) } -// Placeholder for copying data from remote memory (e.g., VTL0 physical memory) -// TODO: Specify it in the litebox crate? -// TODO: Define a type for remote address -#[allow(clippy::unnecessary_wraps)] -fn copy_from_remote_memory(_remote_addr: u64) -> Result -where - T: Copy, -{ - // TODO: implement the actual remote copy - Ok(unsafe { core::mem::zeroed() }) -} +// TODO: implement a validation mechanism for VTL0 physical addresses (e.g., ensure this physical +// address does not belong to VTL1) +pub struct Novalidation; +impl ValidateAccess for Novalidation {} -// Placeholder for copying data to remote memory (e.g., VTL0 physical memory) -// TODO: Specify it in the litebox crate? -// TODO: Define a type for remote address -#[expect(unused)] -#[allow(clippy::unnecessary_wraps)] -fn copy_to_remote_memory(_remote_addr: u64, _data: &T) -> Result<(), Errno> -where - T: Copy, -{ - // TODO: implement the actual remote copy - Ok(()) -} +pub struct Vtl0PhysAddr; +impl RemotePtrKind for Vtl0PhysAddr {} /// This function handles `OpteeSmcArgs` passed from the normal world (VTL0) via an OP-TEE SMC call. +/// # Panics +/// Panics if the physical address in `smc` cannot be converted to `usize`. pub fn handle_optee_smc_args(smc: &mut OpteeSmcArgs) -> Result, Errno> { let func_id = smc.func_id()?; @@ -69,7 +56,13 @@ pub fn handle_optee_smc_args(smc: &mut OpteeSmcArgs) -> Result { let msg_arg_addr = smc.optee_msg_arg_phys_addr()?; - let msg_arg = copy_from_remote_memory::(msg_arg_addr)?; + let msg_arg_addr = usize::try_from(msg_arg_addr).unwrap(); + let remote_ptr = + RemoteConstPtr::::from_usize(msg_arg_addr); + let msg_arg = unsafe { remote_ptr.read_at_offset(0) } + .ok_or(Errno::EFAULT)? + .into_owned(); + // let msg_arg = copy_from_remote_memory::(msg_arg_addr)?; handle_optee_msg_arg(&msg_arg).map(|()| OpteeSmcResult::Generic { status: OpteeSmcReturn::Ok, }) @@ -195,11 +188,13 @@ impl ShmRefMap { let num_pages = usize::try_from(aligned_size).unwrap() / PAGE_SIZE; let mut pages = Vec::with_capacity(num_pages); - let mut cur_addr = aligned_phys_addr; + let mut cur_addr = usize::try_from(aligned_phys_addr).unwrap(); loop { - let Ok(pages_data) = copy_from_remote_memory::(cur_addr) else { - return Err(Errno::EFAULT); - }; + let cur_ptr = + RemoteConstPtr::::from_usize(cur_addr); + let pages_data = unsafe { cur_ptr.read_at_offset(0) } + .ok_or(Errno::EFAULT)? + .into_owned(); for page in &pages_data.pages_list { if *page == 0 || pages.len() == num_pages { break; @@ -212,7 +207,7 @@ impl ShmRefMap { if pages_data.next_page_data == 0 || pages.len() == num_pages { break; } else { - cur_addr = pages_data.next_page_data; + cur_addr = usize::try_from(pages_data.next_page_data).unwrap(); } } diff --git a/litebox_shim_optee/src/remote_pointers.rs b/litebox_shim_optee/src/remote_pointers.rs new file mode 100644 index 000000000..fed4d7e6b --- /dev/null +++ b/litebox_shim_optee/src/remote_pointers.rs @@ -0,0 +1,148 @@ +//! Placeholders for implementing remote pointer access (e.g., reading from VTL0 physical memory) +//! 
TODO: Improve these and move these to the litebox crate later + +use litebox::platform::{RawConstPointer, RawMutPointer}; + +pub trait ValidateAccess {} +pub trait RemotePtrKind {} + +#[repr(C)] +pub struct RemoteConstPtr { + inner: *const T, + _kind: core::marker::PhantomData, + _validator: core::marker::PhantomData, +} + +impl RemoteConstPtr { + pub fn from_ptr(ptr: *const T) -> Self { + Self { + inner: ptr, + _kind: core::marker::PhantomData, + _validator: core::marker::PhantomData, + } + } +} + +impl Clone for RemoteConstPtr { + fn clone(&self) -> Self { + *self + } +} + +impl Copy for RemoteConstPtr {} + +impl core::fmt::Debug for RemoteConstPtr { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_tuple("RemoteConstPtr").field(&self.inner).finish() + } +} + +impl RawConstPointer for RemoteConstPtr { + unsafe fn read_at_offset<'a>(self, _count: isize) -> Option> { + // TODO: read data from the remote side + let val: T = unsafe { core::mem::zeroed() }; + Some(alloc::borrow::Cow::Owned(val)) + } + + unsafe fn to_cow_slice<'a>(self, len: usize) -> Option> { + // TODO: read data from the remote side + if len == 0 { + return Some(alloc::borrow::Cow::Owned(alloc::vec::Vec::new())); + } + let mut data = alloc::vec::Vec::new(); + data.reserve_exact(len); + unsafe { data.set_len(len) }; + Some(alloc::borrow::Cow::Owned(data)) + } + + fn as_usize(&self) -> usize { + self.inner.expose_provenance() + } + + fn from_usize(addr: usize) -> Self { + Self { + inner: core::ptr::with_exposed_provenance(addr), + _kind: core::marker::PhantomData, + _validator: core::marker::PhantomData, + } + } +} + +#[repr(C)] +pub struct RemoteMutPtr { + inner: *mut T, + _kind: core::marker::PhantomData, + _validator: core::marker::PhantomData, +} + +impl RemoteMutPtr { + pub fn from_ptr(ptr: *mut T) -> Self { + Self { + inner: ptr, + _kind: core::marker::PhantomData, + _validator: core::marker::PhantomData, + } + } +} + +impl Clone for RemoteMutPtr { + fn clone(&self) -> Self { + *self + } +} + +impl Copy for RemoteMutPtr {} + +impl core::fmt::Debug for RemoteMutPtr { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_tuple("RemoteMutPtr").field(&self.inner).finish() + } +} + +impl RawConstPointer for RemoteMutPtr { + unsafe fn read_at_offset<'a>(self, _count: isize) -> Option> { + // TODO: read data from the remote side + let val: T = unsafe { core::mem::zeroed() }; + Some(alloc::borrow::Cow::Owned(val)) + } + + unsafe fn to_cow_slice<'a>(self, len: usize) -> Option> { + // TODO: read data from the remote side + if len == 0 { + return Some(alloc::borrow::Cow::Owned(alloc::vec::Vec::new())); + } + let mut data = alloc::vec::Vec::new(); + data.reserve_exact(len); + unsafe { data.set_len(len) }; + Some(alloc::borrow::Cow::Owned(data)) + } + + fn as_usize(&self) -> usize { + self.inner.expose_provenance() + } + + fn from_usize(addr: usize) -> Self { + Self::from_ptr(core::ptr::with_exposed_provenance_mut(addr)) + } +} + +impl RawMutPointer for RemoteMutPtr { + unsafe fn write_at_offset<'a>(self, _count: isize, _value: T) -> Option<()> { + Some(()) + } + + fn mutate_subslice_with( + self, + _range: impl core::ops::RangeBounds, + _f: impl FnOnce(&mut [T]) -> R, + ) -> Option { + unimplemented!("use write_slice_at_offset instead") + } + + fn copy_from_slice(self, _start_offset: usize, _buf: &[T]) -> Option<()> + where + T: Copy, + { + Some(()) + } +} From 6557c402c509067fa54235dcdecbb86eccfd6a20 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Thu, 11 Dec 2025 
23:31:34 +0000
Subject: [PATCH 05/45] rename

---
 litebox_shim_optee/src/lib.rs                         | 2 +-
 litebox_shim_optee/src/msg_handler.rs                 | 2 +-
 litebox_shim_optee/src/{remote_pointers.rs => ptr.rs} | 0
 3 files changed, 2 insertions(+), 2 deletions(-)
 rename litebox_shim_optee/src/{remote_pointers.rs => ptr.rs} (100%)

diff --git a/litebox_shim_optee/src/lib.rs b/litebox_shim_optee/src/lib.rs
index 924a49c42..0591f0f39 100644
--- a/litebox_shim_optee/src/lib.rs
+++ b/litebox_shim_optee/src/lib.rs
@@ -34,7 +34,7 @@ pub mod loader;
 pub(crate) mod syscalls;
 
 pub mod msg_handler;
-pub mod remote_pointers;
+pub mod ptr;
 
 const MAX_KERNEL_BUF_SIZE: usize = 0x80_000;
 
diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs
index 82a13caac..39967eec0 100644
--- a/litebox_shim_optee/src/msg_handler.rs
+++ b/litebox_shim_optee/src/msg_handler.rs
@@ -1,4 +1,4 @@
-use crate::remote_pointers::{RemoteConstPtr, RemotePtrKind, ValidateAccess};
+use crate::ptr::{RemoteConstPtr, RemotePtrKind, ValidateAccess};
 use alloc::{boxed::Box, vec::Vec};
 use hashbrown::HashMap;
 use litebox::mm::linux::PAGE_SIZE;
diff --git a/litebox_shim_optee/src/remote_pointers.rs b/litebox_shim_optee/src/ptr.rs
similarity index 100%
rename from litebox_shim_optee/src/remote_pointers.rs
rename to litebox_shim_optee/src/ptr.rs

From 91032b7959a86b2857eb6d884fd7f7bd22d0fd40 Mon Sep 17 00:00:00 2001
From: Sangho Lee
Date: Fri, 12 Dec 2025 00:57:52 +0000
Subject: [PATCH 06/45] revision

---
 litebox_shim_optee/src/msg_handler.rs | 18 ++++--------------
 litebox_shim_optee/src/ptr.rs         | 19 ++++++++++++++++++-
 2 files changed, 22 insertions(+), 15 deletions(-)

diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs
index 39967eec0..22a9c1b54 100644
--- a/litebox_shim_optee/src/msg_handler.rs
+++ b/litebox_shim_optee/src/msg_handler.rs
@@ -1,4 +1,4 @@
-use crate::ptr::{RemoteConstPtr, RemotePtrKind, ValidateAccess};
+use crate::ptr::NormalWorldConstPtr;
 use alloc::{boxed::Box, vec::Vec};
 use hashbrown::HashMap;
 use litebox::mm::linux::PAGE_SIZE;
@@ -37,14 +37,6 @@ fn page_align_up(len: u64) -> u64 {
     len.next_multiple_of(PAGE_SIZE as u64)
 }
 
-// TODO: implement a validation mechanism for VTL0 physical addresses (e.g., ensure this physical
-// address does not belong to VTL1)
-pub struct Novalidation;
-impl ValidateAccess for Novalidation {}
-
-pub struct Vtl0PhysAddr;
-impl RemotePtrKind for Vtl0PhysAddr {}
-
 /// This function handles `OpteeSmcArgs` passed from the normal world (VTL0) via an OP-TEE SMC call.
 /// # Panics
 /// Panics if the physical address in `smc` cannot be converted to `usize`.
@@ -57,9 +49,8 @@ pub fn handle_optee_smc_args(smc: &mut OpteeSmcArgs) -> Result<OpteeSmcResult<'static>, Errno> {
             let msg_arg_addr = smc.optee_msg_arg_phys_addr()?;
             let msg_arg_addr = usize::try_from(msg_arg_addr).unwrap();
-            let remote_ptr =
-                RemoteConstPtr::<OpteeMsgArg, Vtl0PhysAddr, Novalidation>::from_usize(msg_arg_addr);
-            let msg_arg = unsafe { remote_ptr.read_at_offset(0) }
+            let ptr = NormalWorldConstPtr::<OpteeMsgArg>::from_usize(msg_arg_addr);
+            let msg_arg = unsafe { ptr.read_at_offset(0) }
                 .ok_or(Errno::EFAULT)?
                 .into_owned();
             // let msg_arg = copy_from_remote_memory::<OpteeMsgArg>(msg_arg_addr)?;
@@ -182,9 +181,7 @@ impl ShmRefMap {
         let mut cur_addr = usize::try_from(aligned_phys_addr).unwrap();
         loop {
-            let cur_ptr =
-                RemoteConstPtr::<ShmRefPagesData, Vtl0PhysAddr, Novalidation>::from_usize(cur_addr);
+            let cur_ptr = NormalWorldConstPtr::<ShmRefPagesData>::from_usize(cur_addr);
             let pages_data = unsafe { cur_ptr.read_at_offset(0) }
                 .ok_or(Errno::EFAULT)?
                .into_owned();
diff --git a/litebox_shim_optee/src/ptr.rs b/litebox_shim_optee/src/ptr.rs
index fed4d7e6b..f77e93cc8 100644
--- a/litebox_shim_optee/src/ptr.rs
+++ b/litebox_shim_optee/src/ptr.rs
@@ -1,4 +1,5 @@
-//! Placeholders for implementing remote pointer access (e.g., reading from VTL0 physical memory)
+//! Placeholders for specifying remote pointer access (e.g., reading data from
+//! VTL0 physical memory)
 //! TODO: Improve these and move these to the litebox crate later
 
 use litebox::platform::{RawConstPointer, RawMutPointer};
@@ -146,3 +147,19 @@ impl RawMutPointer for RemoteM
         Some(())
     }
 }
+
+// TODO: implement a validation mechanism for VTL0 physical addresses (e.g., ensure this physical
+// address does not belong to VTL1)
+pub struct Novalidation;
+impl ValidateAccess for Novalidation {}
+
+pub struct Vtl0PhysAddr;
+impl RemotePtrKind for Vtl0PhysAddr {}
+
+/// Normal world const pointer type. For now, normal world implies VTL0 but it can be something else
+/// including TrustZone normal world, other VMPL or TD partition, or other processes.
+pub type NormalWorldConstPtr<T> = RemoteConstPtr<T, Vtl0PhysAddr, Novalidation>;
+
+/// Normal world mutable pointer type. For now, normal world implies VTL0 but it can be something else
+/// including TrustZone normal world, other VMPL or TD partition, or other processes.
+pub type NormalWorldMutPtr<T> = RemoteMutPtr<T, Vtl0PhysAddr, Novalidation>;

From e7605505e5a6f3d99175d642b5644e0a4a2f6355 Mon Sep 17 00:00:00 2001
From: Sangho Lee
Date: Fri, 12 Dec 2025 03:50:42 +0000
Subject: [PATCH 07/45] revise remote pointers

---
 litebox_shim_optee/src/ptr.rs | 121 ++++++++++++++++++++++------------
 1 file changed, 80 insertions(+), 41 deletions(-)

diff --git a/litebox_shim_optee/src/ptr.rs b/litebox_shim_optee/src/ptr.rs
index f77e93cc8..9b53cab21 100644
--- a/litebox_shim_optee/src/ptr.rs
+++ b/litebox_shim_optee/src/ptr.rs
@@ -4,44 +4,58 @@
 
 use litebox::platform::{RawConstPointer, RawMutPointer};
 
+// TODO: use the one from the litebox crate
 pub trait ValidateAccess {}
-pub trait RemotePtrKind {}
+
+/// Trait to access a pointer to remote memory
+/// For now, we only consider copying the entire value before accessing it.
+/// We do not consider byte-level access or unaligned access.
+pub trait RemoteMemoryAccess { + fn read_at_offset(ptr: *mut T, count: isize) -> Option; + + fn write_at_offset(ptr: *mut T, count: isize, value: T) -> Option<()>; + + fn slice_from(ptr: *mut T, len: usize) -> Option>; + + fn copy_from_slice(start_offset: usize, buf: &[T]) -> Option<()>; +} #[repr(C)] -pub struct RemoteConstPtr { +pub struct RemoteConstPtr { inner: *const T, - _kind: core::marker::PhantomData, + _access: core::marker::PhantomData, _validator: core::marker::PhantomData, } -impl RemoteConstPtr { +impl RemoteConstPtr { pub fn from_ptr(ptr: *const T) -> Self { Self { inner: ptr, - _kind: core::marker::PhantomData, + _access: core::marker::PhantomData, _validator: core::marker::PhantomData, } } } -impl Clone for RemoteConstPtr { +impl Clone for RemoteConstPtr { fn clone(&self) -> Self { *self } } -impl Copy for RemoteConstPtr {} +impl Copy for RemoteConstPtr {} -impl core::fmt::Debug for RemoteConstPtr { +impl core::fmt::Debug for RemoteConstPtr { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_tuple("RemoteConstPtr").field(&self.inner).finish() } } -impl RawConstPointer for RemoteConstPtr { - unsafe fn read_at_offset<'a>(self, _count: isize) -> Option> { - // TODO: read data from the remote side - let val: T = unsafe { core::mem::zeroed() }; +impl RawConstPointer + for RemoteConstPtr +{ + unsafe fn read_at_offset<'a>(self, count: isize) -> Option> { + let val = A::read_at_offset(self.inner.cast_mut(), count)?; Some(alloc::borrow::Cow::Owned(val)) } @@ -63,47 +77,48 @@ impl RawConstPointer for Remot fn from_usize(addr: usize) -> Self { Self { inner: core::ptr::with_exposed_provenance(addr), - _kind: core::marker::PhantomData, + _access: core::marker::PhantomData, _validator: core::marker::PhantomData, } } } #[repr(C)] -pub struct RemoteMutPtr { +pub struct RemoteMutPtr { inner: *mut T, - _kind: core::marker::PhantomData, + _access: core::marker::PhantomData, _validator: core::marker::PhantomData, } -impl RemoteMutPtr { +impl RemoteMutPtr { pub fn from_ptr(ptr: *mut T) -> Self { Self { inner: ptr, - _kind: core::marker::PhantomData, + _access: core::marker::PhantomData, _validator: core::marker::PhantomData, } } } -impl Clone for RemoteMutPtr { +impl Clone for RemoteMutPtr { fn clone(&self) -> Self { *self } } -impl Copy for RemoteMutPtr {} +impl Copy for RemoteMutPtr {} -impl core::fmt::Debug for RemoteMutPtr { +impl core::fmt::Debug for RemoteMutPtr { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_tuple("RemoteMutPtr").field(&self.inner).finish() } } -impl RawConstPointer for RemoteMutPtr { - unsafe fn read_at_offset<'a>(self, _count: isize) -> Option> { - // TODO: read data from the remote side - let val: T = unsafe { core::mem::zeroed() }; +impl RawConstPointer + for RemoteMutPtr +{ + unsafe fn read_at_offset<'a>(self, count: isize) -> Option> { + let val = A::read_at_offset(self.inner, count)?; Some(alloc::borrow::Cow::Owned(val)) } @@ -112,10 +127,8 @@ impl RawConstPointer for Remot if len == 0 { return Some(alloc::borrow::Cow::Owned(alloc::vec::Vec::new())); } - let mut data = alloc::vec::Vec::new(); - data.reserve_exact(len); - unsafe { data.set_len(len) }; - Some(alloc::borrow::Cow::Owned(data)) + let data = A::slice_from(self.inner, len)?; + Some(alloc::borrow::Cow::Owned(data.into())) } fn as_usize(&self) -> usize { @@ -127,9 +140,11 @@ impl RawConstPointer for Remot } } -impl RawMutPointer for RemoteMutPtr { - unsafe fn write_at_offset<'a>(self, _count: isize, _value: T) -> Option<()> { - Some(()) +impl 
RawMutPointer + for RemoteMutPtr +{ + unsafe fn write_at_offset<'a>(self, count: isize, value: T) -> Option<()> { + A::write_at_offset(self.inner, count, value) } fn mutate_subslice_with( @@ -140,11 +155,11 @@ impl RawMutPointer for RemoteM unimplemented!("use write_slice_at_offset instead") } - fn copy_from_slice(self, _start_offset: usize, _buf: &[T]) -> Option<()> + fn copy_from_slice(self, start_offset: usize, buf: &[T]) -> Option<()> where T: Copy, { - Some(()) + A::copy_from_slice(start_offset, buf) } } @@ -153,13 +168,37 @@ impl RawMutPointer for RemoteM pub struct Novalidation; impl ValidateAccess for Novalidation {} -pub struct Vtl0PhysAddr; -impl RemotePtrKind for Vtl0PhysAddr {} +pub struct Vtl0PhysMemoryAccess; +impl RemoteMemoryAccess for Vtl0PhysMemoryAccess { + fn read_at_offset(_ptr: *mut T, _count: isize) -> Option { + // TODO: read a value from VTL0 physical memory + let val: T = unsafe { core::mem::zeroed() }; + Some(val) + } + + fn write_at_offset(_ptr: *mut T, _count: isize, _value: T) -> Option<()> { + // TODO: write a value to VTL0 physical memory + Some(()) + } + + fn slice_from(_ptr: *mut T, len: usize) -> Option> { + // TODO: read a slice from VTL0 physical memory + let mut data: alloc::vec::Vec = alloc::vec::Vec::new(); + data.reserve_exact(len); + unsafe { data.set_len(len) }; + Some(data.into_boxed_slice()) + } + + fn copy_from_slice(_start_offset: usize, _buf: &[T]) -> Option<()> { + // TODO: write a slice to VTL0 physical memory + Some(()) + } +} -/// Normal world const pointer type. For now, normal world implies VTL0 but it can be something else -/// including TrustZone normal world, other VMPL or TD partition, or other processes. -pub type NormalWorldConstPtr = RemoteConstPtr; +/// Normal world const pointer type. For now, we only consider VTL0 physical memory but it can be +/// something else like TrustZone normal world, other VMPL or TD partition, or other processes. +pub type NormalWorldConstPtr = RemoteConstPtr; -/// Normal world mutable pointer type. For now, normal world implies VTL0 but it can be something else -/// including TrustZone normal world, other VMPL or TD partition, or other processes. -pub type NormalWorldMutPtr = RemoteMutPtr; +/// Normal world mutable pointer type. For now, we only consider VTL0 physical memory but it can be +/// something else like TrustZone normal world, other VMPL or TD partition, or other processes. +pub type NormalWorldMutPtr = RemoteMutPtr; From 731ca17160ed0118b3f20d544edab6925d28c099 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Fri, 12 Dec 2025 03:57:44 +0000 Subject: [PATCH 08/45] ratchet --- litebox_shim_optee/src/msg_handler.rs | 9 +++------ litebox_shim_optee/src/ptr.rs | 20 ++++++++++---------- 2 files changed, 13 insertions(+), 16 deletions(-) diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index 22a9c1b54..0c297d6bb 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -50,9 +50,7 @@ pub fn handle_optee_smc_args(smc: &mut OpteeSmcArgs) -> Result::from_usize(msg_arg_addr); - let msg_arg = unsafe { ptr.read_at_offset(0) } - .ok_or(Errno::EFAULT)? 
- .into_owned(); + let msg_arg = unsafe { ptr.read_at_offset(0) }.ok_or(Errno::EFAULT)?; // let msg_arg = copy_from_remote_memory::(msg_arg_addr)?; handle_optee_msg_arg(&msg_arg).map(|()| OpteeSmcResult::Generic { status: OpteeSmcReturn::Ok, @@ -121,6 +119,7 @@ pub fn handle_optee_msg_arg(msg_arg: &OpteeMsgArg) -> Result<(), Errno> { Ok(()) } +#[expect(dead_code)] #[derive(Clone)] struct ShmRefInfo { pub pages: Box<[u64]>, @@ -182,9 +181,7 @@ impl ShmRefMap { let mut cur_addr = usize::try_from(aligned_phys_addr).unwrap(); loop { let cur_ptr = NormalWorldConstPtr::::from_usize(cur_addr); - let pages_data = unsafe { cur_ptr.read_at_offset(0) } - .ok_or(Errno::EFAULT)? - .into_owned(); + let pages_data = unsafe { cur_ptr.read_at_offset(0) }.ok_or(Errno::EFAULT)?; for page in &pages_data.pages_list { if *page == 0 || pages.len() == num_pages { break; diff --git a/litebox_shim_optee/src/ptr.rs b/litebox_shim_optee/src/ptr.rs index 9b53cab21..393d01317 100644 --- a/litebox_shim_optee/src/ptr.rs +++ b/litebox_shim_optee/src/ptr.rs @@ -54,20 +54,20 @@ impl core::fmt::Debug for RemoteConstPtr { impl RawConstPointer for RemoteConstPtr { - unsafe fn read_at_offset<'a>(self, count: isize) -> Option> { + unsafe fn read_at_offset(self, count: isize) -> Option { let val = A::read_at_offset(self.inner.cast_mut(), count)?; - Some(alloc::borrow::Cow::Owned(val)) + Some(val) } - unsafe fn to_cow_slice<'a>(self, len: usize) -> Option> { + unsafe fn to_owned_slice(self, len: usize) -> Option> { // TODO: read data from the remote side if len == 0 { - return Some(alloc::borrow::Cow::Owned(alloc::vec::Vec::new())); + return Some(alloc::boxed::Box::new([])); } let mut data = alloc::vec::Vec::new(); data.reserve_exact(len); unsafe { data.set_len(len) }; - Some(alloc::borrow::Cow::Owned(data)) + Some(data.into_boxed_slice()) } fn as_usize(&self) -> usize { @@ -117,18 +117,18 @@ impl core::fmt::Debug for RemoteMutPtr { impl RawConstPointer for RemoteMutPtr { - unsafe fn read_at_offset<'a>(self, count: isize) -> Option> { + unsafe fn read_at_offset(self, count: isize) -> Option { let val = A::read_at_offset(self.inner, count)?; - Some(alloc::borrow::Cow::Owned(val)) + Some(val) } - unsafe fn to_cow_slice<'a>(self, len: usize) -> Option> { + unsafe fn to_owned_slice(self, len: usize) -> Option> { // TODO: read data from the remote side if len == 0 { - return Some(alloc::borrow::Cow::Owned(alloc::vec::Vec::new())); + return Some(alloc::boxed::Box::new([])); } let data = A::slice_from(self.inner, len)?; - Some(alloc::borrow::Cow::Owned(data.into())) + Some(data) } fn as_usize(&self) -> usize { From 0e08c572b96becb19da9cd7f6351ae3c91ced5fd Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Fri, 12 Dec 2025 06:10:59 +0000 Subject: [PATCH 09/45] handle ta request. 
wip --- litebox_common_optee/src/lib.rs | 35 +++++++---- litebox_shim_optee/src/msg_handler.rs | 88 ++++++++++++++++++++++----- 2 files changed, 96 insertions(+), 27 deletions(-) diff --git a/litebox_common_optee/src/lib.rs b/litebox_common_optee/src/lib.rs index cb24df22b..336a1b795 100644 --- a/litebox_common_optee/src/lib.rs +++ b/litebox_common_optee/src/lib.rs @@ -1117,6 +1117,18 @@ pub enum OpteeMessageCommand { Unknown = 0xffff_ffff, } +impl TryFrom for UteeEntryFunc { + type Error = OpteeSmcReturn; + fn try_from(cmd: OpteeMessageCommand) -> Result { + match cmd { + OpteeMessageCommand::OpenSession => Ok(UteeEntryFunc::OpenSession), + OpteeMessageCommand::CloseSession => Ok(UteeEntryFunc::CloseSession), + OpteeMessageCommand::InvokeCommand => Ok(UteeEntryFunc::InvokeCommand), + _ => Err(OpteeSmcReturn::EBadCmd), + } + } +} + /// Temporary reference memory parameter #[derive(Clone, Copy, Debug)] #[repr(C)] @@ -1242,24 +1254,24 @@ pub struct OpteeMsgArg { pub cmd: OpteeMessageCommand, /// TA function ID which is used if `cmd == InvokeCommand`. Note that the meaning of `cmd` and `func` /// is swapped compared to TAs. - func: u32, + pub func: u32, /// Session ID. This is "IN" parameter most of the time except for `cmd == OpenSession` where /// the secure world generates and returns a session ID. - session: u32, + pub session: u32, /// Cancellation ID. This is a unique value to identify this request. - cancel_id: u32, + pub cancel_id: u32, pad: u32, /// Return value from the secure world - ret: u32, + pub ret: u32, /// Origin of the return value - ret_origin: TeeOrigin, + pub ret_origin: TeeOrigin, /// Number of parameters contained in `params` - num_params: u32, + pub num_params: u32, /// Parameters to be passed to the secure world. If `cmd == OpenSession`, the first two params contain /// a TA UUID and they are not delivered to the TA. /// Note that, originally, the length of this array is variable. We fix it to `TEE_NUM_PARAMS + 2` to /// simplify the implementation (our OP-TEE Shim supports up to four parameters as well). - params: [OpteeMsgParam; TEE_NUM_PARAMS + 2], + pub params: [OpteeMsgParam; TEE_NUM_PARAMS + 2], } impl OpteeMsgArg { @@ -1335,21 +1347,22 @@ impl OpteeSmcArgs { const NUM_OPTEE_SMC_ARGS: usize = 9; /// Get the function ID of an OP-TEE SMC call - pub fn func_id(&self) -> Result { - OpteeSmcFunction::try_from(self.args[0] & OpteeSmcFunction::MASK).map_err(|_| Errno::EINVAL) + pub fn func_id(&self) -> Result { + OpteeSmcFunction::try_from(self.args[0] & OpteeSmcFunction::MASK) + .map_err(|_| OpteeSmcReturn::EBadCmd) } /// Get the physical address of `OpteeMsgArg`. The secure world is expected to map and copy /// this structure. #[cfg(target_pointer_width = "64")] - pub fn optee_msg_arg_phys_addr(&self) -> Result { + pub fn optee_msg_arg_phys_addr(&self) -> Result { // To avoid potential sign extension and overflow issues, OP-TEE stores the low and // high 32 bits of a 64-bit address in `args[2]` and `args[1]`, respectively. 
if self.args[1] & 0xffff_ffff_0000_0000 == 0 && self.args[2] & 0xffff_ffff_0000_0000 == 0 { let addr = (self.args[1] << 32) | self.args[2]; Ok(addr as u64) } else { - Err(Errno::EINVAL) + Err(OpteeSmcReturn::EBadAddr) } } } diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index 0c297d6bb..c2da13145 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -5,8 +5,8 @@ use litebox::mm::linux::PAGE_SIZE; use litebox::platform::RawConstPointer; use litebox_common_linux::errno::Errno; use litebox_common_optee::{ - OpteeMessageCommand, OpteeMsgArg, OpteeSecureWorldCapabilities, OpteeSmcArgs, OpteeSmcFunction, - OpteeSmcResult, OpteeSmcReturn, + OpteeMessageCommand, OpteeMsgArg, OpteeMsgAttrType, OpteeSecureWorldCapabilities, OpteeSmcArgs, + OpteeSmcFunction, OpteeSmcResult, OpteeSmcReturn, UteeEntryFunc, UteeParamOwned, }; use once_cell::race::OnceBox; @@ -40,7 +40,7 @@ fn page_align_up(len: u64) -> u64 { /// This function handles `OpteeSmcArgs` passed from the normal world (VTL0) via an OP-TEE SMC call. /// # Panics /// Panics if the physical address in `smc` cannot be converted to `usize`. -pub fn handle_optee_smc_args(smc: &mut OpteeSmcArgs) -> Result, Errno> { +pub fn handle_optee_smc_args(smc: &mut OpteeSmcArgs) -> Result, OpteeSmcReturn> { let func_id = smc.func_id()?; match func_id { @@ -51,8 +51,7 @@ pub fn handle_optee_smc_args(smc: &mut OpteeSmcArgs) -> Result::from_usize(msg_arg_addr); let msg_arg = unsafe { ptr.read_at_offset(0) }.ok_or(Errno::EFAULT)?; - // let msg_arg = copy_from_remote_memory::(msg_arg_addr)?; - handle_optee_msg_arg(&msg_arg).map(|()| OpteeSmcResult::Generic { + handle_optee_msg_arg(&msg_arg).map(|_| OpteeSmcResult::Generic { status: OpteeSmcReturn::Ok, }) } @@ -93,30 +92,82 @@ pub fn handle_optee_smc_args(smc: &mut OpteeSmcArgs) -> Result Err(Errno::EINVAL), + _ => Err(OpteeSmcReturn::UnknownFunction), } } -pub fn handle_optee_msg_arg(msg_arg: &OpteeMsgArg) -> Result<(), Errno> { +pub fn handle_optee_msg_arg(msg_arg: &OpteeMsgArg) -> Result { match msg_arg.cmd { OpteeMessageCommand::RegisterShm => { if let Ok(tmem) = msg_arg.get_param_tmem(0) { shm_ref_map().register_shm(tmem.buf_ptr, tmem.size, tmem.shm_ref)?; } else { - return Err(Errno::EINVAL); + return Err(OpteeSmcReturn::EBadAddr); } } OpteeMessageCommand::UnregisterShm => { if let Ok(tmem) = msg_arg.get_param_tmem(0) { - shm_ref_map().remove(tmem.shm_ref).ok_or(Errno::ENOENT)?; + shm_ref_map() + .remove(tmem.shm_ref) + .ok_or(OpteeSmcReturn::EBadAddr)?; } else { - return Err(Errno::EINVAL); + return Err(OpteeSmcReturn::EBadCmd); } } - _ => {} + OpteeMessageCommand::OpenSession + | OpteeMessageCommand::InvokeCommand + | OpteeMessageCommand::CloseSession => return handle_ta_request(msg_arg), + _ => { + todo!("Unimplemented OpteeMessageCommand: {:?}", msg_arg.cmd); + } + } + + Ok(*msg_arg) +} + +pub fn handle_ta_request(msg_arg: &OpteeMsgArg) -> Result { + let ta_entry_func: UteeEntryFunc = msg_arg.cmd.try_into()?; + + let shift: usize = if ta_entry_func == UteeEntryFunc::OpenSession { + // TODO: load a TA using its UUID (if not yet loaded) + + 2 // first two params are for TA UUID + } else { + 0 + }; + let num_params = usize::try_from(msg_arg.num_params).unwrap(); + + let ta_cmd_id = msg_arg.func; + let mut ta_params = [const { UteeParamOwned::None }; UteeParamOwned::TEE_NUM_PARAMS]; + + for (i, param) in msg_arg.params[shift..shift + num_params].iter().enumerate() { + ta_params[i] = match param.attr_type() { + 
OpteeMsgAttrType::None => UteeParamOwned::None, + OpteeMsgAttrType::ValueInput => { + let value = msg_arg + .get_param_value(shift + i) + .map_err(|_| OpteeSmcReturn::EBadCmd)?; + UteeParamOwned::ValueInput { + value_a: value.a, + value_b: value.b, + } + } + OpteeMsgAttrType::ValueOutput => UteeParamOwned::ValueOutput { out_address: None }, + OpteeMsgAttrType::ValueInout => { + let value = msg_arg + .get_param_value(shift + i) + .map_err(|_| OpteeSmcReturn::EBadCmd)?; + UteeParamOwned::ValueInout { + value_a: value.a, + value_b: value.b, + out_address: None, + } + } + _ => todo!(), + } } - Ok(()) + Ok(*msg_arg) } #[expect(dead_code)] @@ -150,10 +201,10 @@ impl ShmRefMap { } } - pub fn insert(&self, shm_ref: u64, info: ShmRefInfo) -> Result<(), Errno> { + pub fn insert(&self, shm_ref: u64, info: ShmRefInfo) -> Result<(), OpteeSmcReturn> { let mut guard = self.inner.lock(); if guard.contains_key(&shm_ref) { - Err(Errno::EEXIST) + Err(OpteeSmcReturn::ENotAvail) } else { let _ = guard.insert(shm_ref, info); Ok(()) @@ -171,7 +222,12 @@ impl ShmRefMap { guard.get(&shm_ref).cloned() } - pub fn register_shm(&self, phys_addr: u64, size: u64, shm_ref: u64) -> Result<(), Errno> { + pub fn register_shm( + &self, + phys_addr: u64, + size: u64, + shm_ref: u64, + ) -> Result<(), OpteeSmcReturn> { let aligned_phys_addr = page_align_down(phys_addr); let page_offset = phys_addr - aligned_phys_addr; let aligned_size = page_align_up(page_offset + size); @@ -186,7 +242,7 @@ impl ShmRefMap { if *page == 0 || pages.len() == num_pages { break; } else if !page.is_multiple_of(u64::try_from(PAGE_SIZE).unwrap()) { - return Err(Errno::EINVAL); + return Err(OpteeSmcReturn::EBadAddr); } else { pages.push(*page); } From b7fc73683b88040d03b26d856e7c97e79da762e5 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Fri, 12 Dec 2025 16:42:28 +0000 Subject: [PATCH 10/45] support tmem and some revision --- litebox_common_optee/src/lib.rs | 60 +++++++++++++++--- litebox_shim_optee/src/msg_handler.rs | 90 +++++++++++++++++++++++---- 2 files changed, 129 insertions(+), 21 deletions(-) diff --git a/litebox_common_optee/src/lib.rs b/litebox_common_optee/src/lib.rs index 336a1b795..7f5aec46a 100644 --- a/litebox_common_optee/src/lib.rs +++ b/litebox_common_optee/src/lib.rs @@ -1242,6 +1242,54 @@ impl OpteeMsgParam { pub fn attr_type(&self) -> OpteeMsgAttrType { OpteeMsgAttrType::try_from(self.attr.typ()).unwrap_or(OpteeMsgAttrType::None) } + pub fn get_param_tmem(&self) -> Option { + if matches!( + self.attr.typ(), + OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + | OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT + | OPTEE_MSG_ATTR_TYPE_TMEM_INOUT + ) { + Some(unsafe { self.u.tmem }) + } else { + None + } + } + pub fn get_param_rmem(&self) -> Option { + if matches!( + self.attr.typ(), + OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + | OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT + | OPTEE_MSG_ATTR_TYPE_RMEM_INOUT + ) { + Some(unsafe { self.u.rmem }) + } else { + None + } + } + pub fn get_param_fmem(&self) -> Option { + if matches!( + self.attr.typ(), + OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + | OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT + | OPTEE_MSG_ATTR_TYPE_RMEM_INOUT + ) { + Some(unsafe { self.u.fmem }) + } else { + None + } + } + pub fn get_param_value(&self) -> Option { + if matches!( + self.attr.typ(), + OPTEE_MSG_ATTR_TYPE_VALUE_INPUT + | OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT + | OPTEE_MSG_ATTR_TYPE_VALUE_INOUT + ) { + Some(unsafe { self.u.value }) + } else { + None + } + } } /// `optee_msg_arg` from `optee_os/core/include/optee_msg.h` @@ -1275,36 +1323,32 @@ pub struct OpteeMsgArg { } impl OpteeMsgArg { - 
#[cfg(target_pointer_width = "64")] pub fn get_param_tmem(&self, index: usize) -> Result { if index >= self.params.len() || index >= self.num_params as usize { Err(Errno::EINVAL) } else { - Ok(unsafe { self.params[index].u.tmem }) + Ok(self.params[index].get_param_tmem().ok_or(Errno::EINVAL)?) } } - #[cfg(target_pointer_width = "64")] pub fn get_param_rmem(&self, index: usize) -> Result { if index >= self.params.len() || index >= self.num_params as usize { Err(Errno::EINVAL) } else { - Ok(unsafe { self.params[index].u.rmem }) + Ok(self.params[index].get_param_rmem().ok_or(Errno::EINVAL)?) } } - #[cfg(target_pointer_width = "64")] pub fn get_param_fmem(&self, index: usize) -> Result { if index >= self.params.len() || index >= self.num_params as usize { Err(Errno::EINVAL) } else { - Ok(unsafe { self.params[index].u.fmem }) + Ok(self.params[index].get_param_fmem().ok_or(Errno::EINVAL)?) } } - #[cfg(target_pointer_width = "64")] pub fn get_param_value(&self, index: usize) -> Result { if index >= self.params.len() || index >= self.num_params as usize { Err(Errno::EINVAL) } else { - Ok(unsafe { self.params[index].u.value }) + Ok(self.params[index].get_param_value().ok_or(Errno::EINVAL)?) } } } diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index c2da13145..15500d84e 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -5,8 +5,9 @@ use litebox::mm::linux::PAGE_SIZE; use litebox::platform::RawConstPointer; use litebox_common_linux::errno::Errno; use litebox_common_optee::{ - OpteeMessageCommand, OpteeMsgArg, OpteeMsgAttrType, OpteeSecureWorldCapabilities, OpteeSmcArgs, - OpteeSmcFunction, OpteeSmcResult, OpteeSmcReturn, UteeEntryFunc, UteeParamOwned, + OpteeMessageCommand, OpteeMsgArg, OpteeMsgAttrType, OpteeMsgParamRmem, OpteeMsgParamTmem, + OpteeSecureWorldCapabilities, OpteeSmcArgs, OpteeSmcFunction, OpteeSmcResult, OpteeSmcReturn, + UteeEntryFunc, UteeParamOwned, }; use once_cell::race::OnceBox; @@ -39,7 +40,7 @@ fn page_align_up(len: u64) -> u64 { /// This function handles `OpteeSmcArgs` passed from the normal world (VTL0) via an OP-TEE SMC call. /// # Panics -/// Panics if the physical address in `smc` cannot be converted to `usize`. +/// Panics if the normal world physical address in `smc` cannot be converted to `usize`. 
pub fn handle_optee_smc_args(smc: &mut OpteeSmcArgs) -> Result, OpteeSmcReturn> { let func_id = smc.func_id()?; @@ -96,6 +97,7 @@ pub fn handle_optee_smc_args(smc: &mut OpteeSmcArgs) -> Result Result { match msg_arg.cmd { OpteeMessageCommand::RegisterShm => { @@ -125,28 +127,31 @@ pub fn handle_optee_msg_arg(msg_arg: &OpteeMsgArg) -> Result Result { let ta_entry_func: UteeEntryFunc = msg_arg.cmd.try_into()?; - let shift: usize = if ta_entry_func == UteeEntryFunc::OpenSession { + let skip: usize = if ta_entry_func == UteeEntryFunc::OpenSession { // TODO: load a TA using its UUID (if not yet loaded) 2 // first two params are for TA UUID } else { 0 }; - let num_params = usize::try_from(msg_arg.num_params).unwrap(); + let num_params: usize = msg_arg.num_params.try_into().unwrap(); let ta_cmd_id = msg_arg.func; let mut ta_params = [const { UteeParamOwned::None }; UteeParamOwned::TEE_NUM_PARAMS]; - for (i, param) in msg_arg.params[shift..shift + num_params].iter().enumerate() { + // TODO: handle `out_address` + for (i, param) in msg_arg.params[skip..skip + num_params].iter().enumerate() { ta_params[i] = match param.attr_type() { OpteeMsgAttrType::None => UteeParamOwned::None, OpteeMsgAttrType::ValueInput => { - let value = msg_arg - .get_param_value(shift + i) - .map_err(|_| OpteeSmcReturn::EBadCmd)?; + let value = param.get_param_value().ok_or(OpteeSmcReturn::EBadCmd)?; UteeParamOwned::ValueInput { value_a: value.a, value_b: value.b, @@ -154,16 +159,56 @@ pub fn handle_ta_request(msg_arg: &OpteeMsgArg) -> Result UteeParamOwned::ValueOutput { out_address: None }, OpteeMsgAttrType::ValueInout => { - let value = msg_arg - .get_param_value(shift + i) - .map_err(|_| OpteeSmcReturn::EBadCmd)?; + let value = param.get_param_value().ok_or(OpteeSmcReturn::EBadCmd)?; UteeParamOwned::ValueInout { value_a: value.a, value_b: value.b, out_address: None, } } - _ => todo!(), + OpteeMsgAttrType::TmemInput => { + let tmem = param.get_param_tmem().ok_or(OpteeSmcReturn::EBadCmd)?; + if let Some(phys_addr) = get_shm_phys_addr_from_optee_msg_param_tmem(tmem) { + let ptr = NormalWorldConstPtr::::from_usize(phys_addr); + let data_size: usize = tmem.size.try_into().unwrap(); + let slice = unsafe { ptr.to_cow_slice(data_size) } + .ok_or(OpteeSmcReturn::EBadAddr)? + .into_owned(); + UteeParamOwned::MemrefInput { data: slice.into() } + } else { + UteeParamOwned::None + } + } + OpteeMsgAttrType::TmemOutput => { + let tmem = param.get_param_tmem().ok_or(OpteeSmcReturn::EBadCmd)?; + if let Some(phys_addr) = get_shm_phys_addr_from_optee_msg_param_tmem(tmem) { + let buffer_size: usize = tmem.size.try_into().unwrap(); + UteeParamOwned::MemrefOutput { + buffer_size, + out_addresses: Some(Box::new([phys_addr])), + } + } else { + UteeParamOwned::None + } + } + OpteeMsgAttrType::TmemInout => { + let tmem = param.get_param_tmem().ok_or(OpteeSmcReturn::EBadCmd)?; + if let Some(phys_addr) = get_shm_phys_addr_from_optee_msg_param_tmem(tmem) { + let ptr = NormalWorldConstPtr::::from_usize(phys_addr); + let buffer_size: usize = tmem.size.try_into().unwrap(); + let slice = unsafe { ptr.to_cow_slice(buffer_size) } + .ok_or(OpteeSmcReturn::EBadAddr)? 
+ .into_owned(); + UteeParamOwned::MemrefInout { + data: slice.into(), + buffer_size, + out_addresses: Some(Box::new([phys_addr])), + } + } else { + UteeParamOwned::None + } + } + _ => todo!("handle OpteeMsgParamRmem"), } } @@ -270,3 +315,22 @@ fn shm_ref_map() -> &'static ShmRefMap { static SHM_REF_MAP: OnceBox = OnceBox::new(); SHM_REF_MAP.get_or_init(|| Box::new(ShmRefMap::new())) } + +/// Get a normal world physical address of OP-TEE shared memory from `OpteeMsgParamTmem`. +fn get_shm_phys_addr_from_optee_msg_param_tmem(tmem: OpteeMsgParamTmem) -> Option { + if tmem.buf_ptr == 0 || tmem.size == 0 { + None + } else { + // TODO: validate this address + Some(tmem.buf_ptr.try_into().unwrap()) + } +} + +/// Get a list of the normal world physical addresses of OP-TEE shared memory from `OpteeMsgParamRmem`. +/// All addresses must be page-aligned except possibly the first one. +/// These addresses are virtually contiguous within the normal world, but not necessarily +/// physically contiguous. +#[expect(unused)] +fn get_shm_phys_addrs_from_optee_msg_param_rmem(_rmem: OpteeMsgParamTmem) -> Option> { + None +} From 8d87f2e81d4d4464222e64b75e4ee7ea514d37d5 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Fri, 12 Dec 2025 17:59:51 +0000 Subject: [PATCH 11/45] fix tmem and rmem handling --- litebox_shim_optee/src/msg_handler.rs | 121 +++++++++++++++++++------- 1 file changed, 88 insertions(+), 33 deletions(-) diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index 15500d84e..b52990ec7 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -166,43 +166,89 @@ pub fn handle_ta_request(msg_arg: &OpteeMsgArg) -> Result { - let tmem = param.get_param_tmem().ok_or(OpteeSmcReturn::EBadCmd)?; - if let Some(phys_addr) = get_shm_phys_addr_from_optee_msg_param_tmem(tmem) { - let ptr = NormalWorldConstPtr::::from_usize(phys_addr); - let data_size: usize = tmem.size.try_into().unwrap(); - let slice = unsafe { ptr.to_cow_slice(data_size) } - .ok_or(OpteeSmcReturn::EBadAddr)? 
- .into_owned(); + OpteeMsgAttrType::TmemInput | OpteeMsgAttrType::RmemInput => { + if let (Ok(phys_addrs), data_size) = { + match param.attr_type() { + OpteeMsgAttrType::TmemInput => { + let tmem = param.get_param_tmem().ok_or(OpteeSmcReturn::EBadCmd)?; + ( + get_shm_phys_addrs_from_optee_msg_param_tmem(tmem), + usize::try_from(tmem.size).unwrap(), + ) + } + OpteeMsgAttrType::RmemInput => { + let rmem = param.get_param_rmem().ok_or(OpteeSmcReturn::EBadCmd)?; + ( + get_shm_phys_addrs_from_optee_msg_param_rmem(rmem), + usize::try_from(rmem.size).unwrap(), + ) + } + _ => unreachable!(), + } + } { + // TODO: loop to handle scatter-gather list + // let ptr = NormalWorldConstPtr::::from_usize(phys_addr); + let slice = alloc::vec![0u8; data_size]; UteeParamOwned::MemrefInput { data: slice.into() } } else { UteeParamOwned::None } } - OpteeMsgAttrType::TmemOutput => { - let tmem = param.get_param_tmem().ok_or(OpteeSmcReturn::EBadCmd)?; - if let Some(phys_addr) = get_shm_phys_addr_from_optee_msg_param_tmem(tmem) { - let buffer_size: usize = tmem.size.try_into().unwrap(); + OpteeMsgAttrType::TmemOutput | OpteeMsgAttrType::RmemOutput => { + if let (Ok(phys_addrs), buffer_size) = { + match param.attr_type() { + OpteeMsgAttrType::TmemInput => { + let tmem = param.get_param_tmem().ok_or(OpteeSmcReturn::EBadCmd)?; + ( + get_shm_phys_addrs_from_optee_msg_param_tmem(tmem), + usize::try_from(tmem.size).unwrap(), + ) + } + OpteeMsgAttrType::RmemInput => { + let rmem = param.get_param_rmem().ok_or(OpteeSmcReturn::EBadCmd)?; + ( + get_shm_phys_addrs_from_optee_msg_param_rmem(rmem), + usize::try_from(rmem.size).unwrap(), + ) + } + _ => unreachable!(), + } + } { UteeParamOwned::MemrefOutput { buffer_size, - out_addresses: Some(Box::new([phys_addr])), + out_addresses: Some(phys_addrs), } } else { UteeParamOwned::None } } - OpteeMsgAttrType::TmemInout => { - let tmem = param.get_param_tmem().ok_or(OpteeSmcReturn::EBadCmd)?; - if let Some(phys_addr) = get_shm_phys_addr_from_optee_msg_param_tmem(tmem) { - let ptr = NormalWorldConstPtr::::from_usize(phys_addr); - let buffer_size: usize = tmem.size.try_into().unwrap(); - let slice = unsafe { ptr.to_cow_slice(buffer_size) } - .ok_or(OpteeSmcReturn::EBadAddr)? - .into_owned(); + OpteeMsgAttrType::TmemInout | OpteeMsgAttrType::RmemInout => { + if let (Ok(phys_addrs), buffer_size) = { + match param.attr_type() { + OpteeMsgAttrType::TmemInput => { + let tmem = param.get_param_tmem().ok_or(OpteeSmcReturn::EBadCmd)?; + ( + get_shm_phys_addrs_from_optee_msg_param_tmem(tmem), + usize::try_from(tmem.size).unwrap(), + ) + } + OpteeMsgAttrType::RmemInput => { + let rmem = param.get_param_rmem().ok_or(OpteeSmcReturn::EBadCmd)?; + ( + get_shm_phys_addrs_from_optee_msg_param_rmem(rmem), + usize::try_from(rmem.size).unwrap(), + ) + } + _ => unreachable!(), + } + } { + // TODO: loop to handle scatter-gather list + // let ptr = NormalWorldConstPtr::::from_usize(phys_addr); + let slice = alloc::vec![0u8; buffer_size]; UteeParamOwned::MemrefInout { data: slice.into(), buffer_size, - out_addresses: Some(Box::new([phys_addr])), + out_addresses: Some(phys_addrs), } } else { UteeParamOwned::None @@ -222,6 +268,7 @@ struct ShmRefInfo { pub page_offset: u64, } +/// Scatter-gather list of OP-TEE shared physical pages in VTL0. 
#[derive(Clone, Copy)] #[repr(C)] struct ShmRefPagesData { @@ -261,7 +308,6 @@ impl ShmRefMap { guard.remove(&shm_ref) } - #[expect(unused)] pub fn get(&self, shm_ref: u64) -> Option { let guard = self.inner.lock(); guard.get(&shm_ref).cloned() @@ -317,20 +363,29 @@ fn shm_ref_map() -> &'static ShmRefMap { } /// Get a normal world physical address of OP-TEE shared memory from `OpteeMsgParamTmem`. -fn get_shm_phys_addr_from_optee_msg_param_tmem(tmem: OpteeMsgParamTmem) -> Option { - if tmem.buf_ptr == 0 || tmem.size == 0 { - None - } else { - // TODO: validate this address - Some(tmem.buf_ptr.try_into().unwrap()) - } +/// Note that we use this function for handing TA requests and in that context there is no +/// difference between `OpteeMsgParamTmem` and `OpteeMsgParamRmem`. +/// `OpteeMsgParamTmem` is matter for the registration of shared memory regions. +fn get_shm_phys_addrs_from_optee_msg_param_tmem( + tmem: OpteeMsgParamTmem, +) -> Result, OpteeSmcReturn> { + let rmem = OpteeMsgParamRmem { + offs: tmem.buf_ptr, + size: tmem.size, + shm_ref: tmem.shm_ref, + }; + get_shm_phys_addrs_from_optee_msg_param_rmem(rmem) } /// Get a list of the normal world physical addresses of OP-TEE shared memory from `OpteeMsgParamRmem`. /// All addresses must be page-aligned except possibly the first one. /// These addresses are virtually contiguous within the normal world, but not necessarily /// physically contiguous. -#[expect(unused)] -fn get_shm_phys_addrs_from_optee_msg_param_rmem(_rmem: OpteeMsgParamTmem) -> Option> { - None +fn get_shm_phys_addrs_from_optee_msg_param_rmem( + rmem: OpteeMsgParamRmem, +) -> Result, OpteeSmcReturn> { + let Some(shm_ref_info) = shm_ref_map().get(rmem.shm_ref) else { + return Err(OpteeSmcReturn::EBadAddr); + }; + Ok(Box::new([])) } From 052c7ab2b64ce46c54123caff4e7fc9541197b6d Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Fri, 12 Dec 2025 18:06:14 +0000 Subject: [PATCH 12/45] separate out handle_ta_request --- litebox_shim_optee/src/msg_handler.rs | 171 +------------------------- 1 file changed, 6 insertions(+), 165 deletions(-) diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index b52990ec7..d119110da 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -5,9 +5,8 @@ use litebox::mm::linux::PAGE_SIZE; use litebox::platform::RawConstPointer; use litebox_common_linux::errno::Errno; use litebox_common_optee::{ - OpteeMessageCommand, OpteeMsgArg, OpteeMsgAttrType, OpteeMsgParamRmem, OpteeMsgParamTmem, - OpteeSecureWorldCapabilities, OpteeSmcArgs, OpteeSmcFunction, OpteeSmcResult, OpteeSmcReturn, - UteeEntryFunc, UteeParamOwned, + OpteeMessageCommand, OpteeMsgArg, OpteeSecureWorldCapabilities, OpteeSmcArgs, OpteeSmcFunction, + OpteeSmcResult, OpteeSmcReturn, }; use once_cell::race::OnceBox; @@ -27,13 +26,11 @@ const MAX_NOTIF_VALUE: usize = 0; const NUM_RPC_PARMS: usize = 4; #[inline] -#[cfg(target_pointer_width = "64")] fn page_align_down(address: u64) -> u64 { address & !(PAGE_SIZE as u64 - 1) } #[inline] -#[cfg(target_pointer_width = "64")] fn page_align_up(len: u64) -> u64 { len.next_multiple_of(PAGE_SIZE as u64) } @@ -128,140 +125,11 @@ pub fn handle_optee_msg_arg(msg_arg: &OpteeMsgArg) -> Result Result { - let ta_entry_func: UteeEntryFunc = msg_arg.cmd.try_into()?; - - let skip: usize = if ta_entry_func == UteeEntryFunc::OpenSession { - // TODO: load a TA using its UUID (if not yet loaded) - - 2 // first two params are for TA UUID - } else { - 0 - }; - let num_params: usize = 
msg_arg.num_params.try_into().unwrap(); - - let ta_cmd_id = msg_arg.func; - let mut ta_params = [const { UteeParamOwned::None }; UteeParamOwned::TEE_NUM_PARAMS]; - - // TODO: handle `out_address` - for (i, param) in msg_arg.params[skip..skip + num_params].iter().enumerate() { - ta_params[i] = match param.attr_type() { - OpteeMsgAttrType::None => UteeParamOwned::None, - OpteeMsgAttrType::ValueInput => { - let value = param.get_param_value().ok_or(OpteeSmcReturn::EBadCmd)?; - UteeParamOwned::ValueInput { - value_a: value.a, - value_b: value.b, - } - } - OpteeMsgAttrType::ValueOutput => UteeParamOwned::ValueOutput { out_address: None }, - OpteeMsgAttrType::ValueInout => { - let value = param.get_param_value().ok_or(OpteeSmcReturn::EBadCmd)?; - UteeParamOwned::ValueInout { - value_a: value.a, - value_b: value.b, - out_address: None, - } - } - OpteeMsgAttrType::TmemInput | OpteeMsgAttrType::RmemInput => { - if let (Ok(phys_addrs), data_size) = { - match param.attr_type() { - OpteeMsgAttrType::TmemInput => { - let tmem = param.get_param_tmem().ok_or(OpteeSmcReturn::EBadCmd)?; - ( - get_shm_phys_addrs_from_optee_msg_param_tmem(tmem), - usize::try_from(tmem.size).unwrap(), - ) - } - OpteeMsgAttrType::RmemInput => { - let rmem = param.get_param_rmem().ok_or(OpteeSmcReturn::EBadCmd)?; - ( - get_shm_phys_addrs_from_optee_msg_param_rmem(rmem), - usize::try_from(rmem.size).unwrap(), - ) - } - _ => unreachable!(), - } - } { - // TODO: loop to handle scatter-gather list - // let ptr = NormalWorldConstPtr::::from_usize(phys_addr); - let slice = alloc::vec![0u8; data_size]; - UteeParamOwned::MemrefInput { data: slice.into() } - } else { - UteeParamOwned::None - } - } - OpteeMsgAttrType::TmemOutput | OpteeMsgAttrType::RmemOutput => { - if let (Ok(phys_addrs), buffer_size) = { - match param.attr_type() { - OpteeMsgAttrType::TmemInput => { - let tmem = param.get_param_tmem().ok_or(OpteeSmcReturn::EBadCmd)?; - ( - get_shm_phys_addrs_from_optee_msg_param_tmem(tmem), - usize::try_from(tmem.size).unwrap(), - ) - } - OpteeMsgAttrType::RmemInput => { - let rmem = param.get_param_rmem().ok_or(OpteeSmcReturn::EBadCmd)?; - ( - get_shm_phys_addrs_from_optee_msg_param_rmem(rmem), - usize::try_from(rmem.size).unwrap(), - ) - } - _ => unreachable!(), - } - } { - UteeParamOwned::MemrefOutput { - buffer_size, - out_addresses: Some(phys_addrs), - } - } else { - UteeParamOwned::None - } - } - OpteeMsgAttrType::TmemInout | OpteeMsgAttrType::RmemInout => { - if let (Ok(phys_addrs), buffer_size) = { - match param.attr_type() { - OpteeMsgAttrType::TmemInput => { - let tmem = param.get_param_tmem().ok_or(OpteeSmcReturn::EBadCmd)?; - ( - get_shm_phys_addrs_from_optee_msg_param_tmem(tmem), - usize::try_from(tmem.size).unwrap(), - ) - } - OpteeMsgAttrType::RmemInput => { - let rmem = param.get_param_rmem().ok_or(OpteeSmcReturn::EBadCmd)?; - ( - get_shm_phys_addrs_from_optee_msg_param_rmem(rmem), - usize::try_from(rmem.size).unwrap(), - ) - } - _ => unreachable!(), - } - } { - // TODO: loop to handle scatter-gather list - // let ptr = NormalWorldConstPtr::::from_usize(phys_addr); - let slice = alloc::vec![0u8; buffer_size]; - UteeParamOwned::MemrefInout { - data: slice.into(), - buffer_size, - out_addresses: Some(phys_addrs), - } - } else { - UteeParamOwned::None - } - } - _ => todo!("handle OpteeMsgParamRmem"), - } - } - - Ok(*msg_arg) +pub fn handle_ta_request(_msg_arg: &OpteeMsgArg) -> Result { + todo!() } -#[expect(dead_code)] +#[expect(unused)] #[derive(Clone)] struct ShmRefInfo { pub pages: Box<[u64]>, @@ -308,6 +176,7 @@ impl 
ShmRefMap { guard.remove(&shm_ref) } + #[expect(unused)] pub fn get(&self, shm_ref: u64) -> Option { let guard = self.inner.lock(); guard.get(&shm_ref).cloned() @@ -361,31 +230,3 @@ fn shm_ref_map() -> &'static ShmRefMap { static SHM_REF_MAP: OnceBox = OnceBox::new(); SHM_REF_MAP.get_or_init(|| Box::new(ShmRefMap::new())) } - -/// Get a normal world physical address of OP-TEE shared memory from `OpteeMsgParamTmem`. -/// Note that we use this function for handing TA requests and in that context there is no -/// difference between `OpteeMsgParamTmem` and `OpteeMsgParamRmem`. -/// `OpteeMsgParamTmem` is matter for the registration of shared memory regions. -fn get_shm_phys_addrs_from_optee_msg_param_tmem( - tmem: OpteeMsgParamTmem, -) -> Result, OpteeSmcReturn> { - let rmem = OpteeMsgParamRmem { - offs: tmem.buf_ptr, - size: tmem.size, - shm_ref: tmem.shm_ref, - }; - get_shm_phys_addrs_from_optee_msg_param_rmem(rmem) -} - -/// Get a list of the normal world physical addresses of OP-TEE shared memory from `OpteeMsgParamRmem`. -/// All addresses must be page-aligned except possibly the first one. -/// These addresses are virtually contiguous within the normal world, but not necessarily -/// physically contiguous. -fn get_shm_phys_addrs_from_optee_msg_param_rmem( - rmem: OpteeMsgParamRmem, -) -> Result, OpteeSmcReturn> { - let Some(shm_ref_info) = shm_ref_map().get(rmem.shm_ref) else { - return Err(OpteeSmcReturn::EBadAddr); - }; - Ok(Box::new([])) -} From e127b6359b96e28206a2d7e5f2e4ce29acce3546 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Fri, 12 Dec 2025 18:34:34 +0000 Subject: [PATCH 13/45] get os uuid --- litebox_common_optee/src/lib.rs | 2 ++ litebox_shim_optee/src/msg_handler.rs | 22 +++++++++++++++++++--- 2 files changed, 21 insertions(+), 3 deletions(-) diff --git a/litebox_common_optee/src/lib.rs b/litebox_common_optee/src/lib.rs index 7f5aec46a..ea5284334 100644 --- a/litebox_common_optee/src/lib.rs +++ b/litebox_common_optee/src/lib.rs @@ -1413,6 +1413,7 @@ impl OpteeSmcArgs { /// `OPTEE_SMC_FUNCID_*` from `core/arch/arm/include/sm/optee_smc.h` /// TODO: Add stuffs based on the OP-TEE driver that LVBS is using. 
+const OPTEE_SMC_FUNCID_GET_OS_UUID: usize = 0x0; const OPTEE_SMC_FUNCID_GET_OS_REVISION: usize = 0x1; const OPTEE_SMC_FUNCID_CALL_WITH_ARG: usize = 0x4; const OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES: usize = 0x9; @@ -1426,6 +1427,7 @@ const OPTEE_SMC_FUNCID_CALLS_REVISION: usize = 0xff03; #[derive(PartialEq, TryFromPrimitive)] #[repr(usize)] pub enum OpteeSmcFunction { + GetOsUuid = OPTEE_SMC_FUNCID_GET_OS_UUID, GetOsRevision = OPTEE_SMC_FUNCID_GET_OS_REVISION, CallWithArg = OPTEE_SMC_FUNCID_CALL_WITH_ARG, ExchangeCapabilities = OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES, diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index d119110da..6d0663c1d 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -10,17 +10,26 @@ use litebox_common_optee::{ }; use once_cell::race::OnceBox; -// TODO: Replace these with version and build info +// OP-TEE version and build info (2.0) +// TODO: Consider repacing it with our own version info const OPTEE_MSG_REVISION_MAJOR: usize = 2; const OPTEE_MSG_REVISION_MINOR: usize = 0; const OPTEE_MSG_BUILD_ID: usize = 0; -// TODO: Replace this with an actual UID +// This UID is from OP-TEE OS +// TODO: Consider replacing it with our own UID const OPTEE_MSG_UID_0: u32 = 0x384f_b3e0; const OPTEE_MSG_UID_1: u32 = 0xe7f8_11e3; const OPTEE_MSG_UID_2: u32 = 0xaf63_0002; const OPTEE_MSG_UID_3: u32 = 0xa5d5_c51b; +// This is the UUID of OP-TEE Trusted OS +// TODO: Consider replacing it with our own UUID +const OPTEE_MSG_OS_OPTEE_UUID_0: u32 = 0x486178e0; +const OPTEE_MSG_OS_OPTEE_UUID_1: u32 = 0xe7f811e3; +const OPTEE_MSG_OS_OPTEE_UUID_2: u32 = 0xbc5e0002; +const OPTEE_MSG_OS_OPTEE_UUID_3: u32 = 0xa5d5c51b; + // We do not support notification for now const MAX_NOTIF_VALUE: usize = 0; const NUM_RPC_PARMS: usize = 4; @@ -40,7 +49,6 @@ fn page_align_up(len: u64) -> u64 { /// Panics if the normal world physical address in `smc` cannot be converted to `usize`. pub fn handle_optee_smc_args(smc: &mut OpteeSmcArgs) -> Result, OpteeSmcReturn> { let func_id = smc.func_id()?; - match func_id { OpteeSmcFunction::CallWithArg | OpteeSmcFunction::CallWithRpcArg @@ -73,6 +81,14 @@ pub fn handle_optee_smc_args(smc: &mut OpteeSmcArgs) -> Result Ok(OpteeSmcResult::Uuid { + data: &[ + OPTEE_MSG_OS_OPTEE_UUID_0, + OPTEE_MSG_OS_OPTEE_UUID_1, + OPTEE_MSG_OS_OPTEE_UUID_2, + OPTEE_MSG_OS_OPTEE_UUID_3, + ], + }), OpteeSmcFunction::CallsUid => Ok(OpteeSmcResult::Uuid { data: &[ OPTEE_MSG_UID_0, From 06ce3554170c2e9d9222a9bb586bec7e53269ae2 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Fri, 12 Dec 2025 19:09:25 +0000 Subject: [PATCH 14/45] comment --- litebox_shim_optee/src/msg_handler.rs | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index 6d0663c1d..2f4ac376d 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -1,3 +1,16 @@ +//! OP-TEE's message passing is a bit complex because it involves with multiple actors +//! (normal world: client app and driver; secure world: OP-TEE OS and TAs), +//! consists multiple layers, and relies on shared memory references (i.e., no serialization). +//! +//! Since the normal world is out of LiteBox's scope, the OP-TEE shim starts with handling +//! an OP-TEE SMC call from the normal-world OP-TEE driver which consists of +//! up to nine register values. By checking the SMC function ID, the shim determines whether +//! 
it is for passing an OP-TEE message or a pure SMC function call (e.g., get OP-TEE OS +//! version). If it is for passing an OP-TEE message/command, the shim accesses a normal world +//! physical address containing `OpteeMsgArg` structure (the address is contained in +//! the SMC call arguments). This `OpteeMsgArg` structure may contain references to normal +//! world physical addresses to exchange a large amount of data. Also, a certain OP-TEE +//! message/command does not involve with any TA (e.g., register shared memory). use crate::ptr::NormalWorldConstPtr; use alloc::{boxed::Box, vec::Vec}; use hashbrown::HashMap; @@ -11,7 +24,7 @@ use litebox_common_optee::{ use once_cell::race::OnceBox; // OP-TEE version and build info (2.0) -// TODO: Consider repacing it with our own version info +// TODO: Consider replacing it with our own version info const OPTEE_MSG_REVISION_MAJOR: usize = 2; const OPTEE_MSG_REVISION_MINOR: usize = 0; const OPTEE_MSG_BUILD_ID: usize = 0; From 27b736759847cfc5ceb223b6958095d47e4c6e83 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Fri, 12 Dec 2025 19:26:33 +0000 Subject: [PATCH 15/45] replace Errno with OpteeSmcReturn --- litebox_common_optee/src/lib.rs | 32 +++++++++++++++--------- litebox_shim_optee/src/msg_handler.rs | 35 ++++++++++++--------------- 2 files changed, 36 insertions(+), 31 deletions(-) diff --git a/litebox_common_optee/src/lib.rs b/litebox_common_optee/src/lib.rs index ea5284334..5de4d86d6 100644 --- a/litebox_common_optee/src/lib.rs +++ b/litebox_common_optee/src/lib.rs @@ -1323,32 +1323,40 @@ pub struct OpteeMsgArg { } impl OpteeMsgArg { - pub fn get_param_tmem(&self, index: usize) -> Result { + pub fn get_param_tmem(&self, index: usize) -> Result { if index >= self.params.len() || index >= self.num_params as usize { - Err(Errno::EINVAL) + Err(OpteeSmcReturn::ENotAvail) } else { - Ok(self.params[index].get_param_tmem().ok_or(Errno::EINVAL)?) + Ok(self.params[index] + .get_param_tmem() + .ok_or(OpteeSmcReturn::EBadCmd)?) } } - pub fn get_param_rmem(&self, index: usize) -> Result { + pub fn get_param_rmem(&self, index: usize) -> Result { if index >= self.params.len() || index >= self.num_params as usize { - Err(Errno::EINVAL) + Err(OpteeSmcReturn::ENotAvail) } else { - Ok(self.params[index].get_param_rmem().ok_or(Errno::EINVAL)?) + Ok(self.params[index] + .get_param_rmem() + .ok_or(OpteeSmcReturn::EBadCmd)?) } } - pub fn get_param_fmem(&self, index: usize) -> Result { + pub fn get_param_fmem(&self, index: usize) -> Result { if index >= self.params.len() || index >= self.num_params as usize { - Err(Errno::EINVAL) + Err(OpteeSmcReturn::ENotAvail) } else { - Ok(self.params[index].get_param_fmem().ok_or(Errno::EINVAL)?) + Ok(self.params[index] + .get_param_fmem() + .ok_or(OpteeSmcReturn::EBadCmd)?) } } - pub fn get_param_value(&self, index: usize) -> Result { + pub fn get_param_value(&self, index: usize) -> Result { if index >= self.params.len() || index >= self.num_params as usize { - Err(Errno::EINVAL) + Err(OpteeSmcReturn::ENotAvail) } else { - Ok(self.params[index].get_param_value().ok_or(Errno::EINVAL)?) + Ok(self.params[index] + .get_param_value() + .ok_or(OpteeSmcReturn::EBadCmd)?) } } } diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index 2f4ac376d..89573e127 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -9,8 +9,9 @@ //! version). If it is for passing an OP-TEE message/command, the shim accesses a normal world //! 
physical address containing `OpteeMsgArg` structure (the address is contained in //! the SMC call arguments). This `OpteeMsgArg` structure may contain references to normal -//! world physical addresses to exchange a large amount of data. Also, a certain OP-TEE -//! message/command does not involve with any TA (e.g., register shared memory). +//! world physical addresses to exchange a large amount of data. Also, like the OP-TEE +//! SMC call, a certain OP-TEE message/command does not involve with any TA (e.g., register +//! shared memory). use crate::ptr::NormalWorldConstPtr; use alloc::{boxed::Box, vec::Vec}; use hashbrown::HashMap; @@ -87,7 +88,7 @@ pub fn handle_optee_smc_args(smc: &mut OpteeSmcArgs) -> Result { - // We do not support this feature + // Currently, we do not support this feature. Ok(OpteeSmcResult::DisableShmCache { status: OpteeSmcReturn::ENotAvail, shm_upper32: 0, @@ -127,20 +128,14 @@ pub fn handle_optee_smc_args(smc: &mut OpteeSmcArgs) -> Result Result { match msg_arg.cmd { OpteeMessageCommand::RegisterShm => { - if let Ok(tmem) = msg_arg.get_param_tmem(0) { - shm_ref_map().register_shm(tmem.buf_ptr, tmem.size, tmem.shm_ref)?; - } else { - return Err(OpteeSmcReturn::EBadAddr); - } + let tmem = msg_arg.get_param_tmem(0)?; + shm_ref_map().register_shm(tmem.buf_ptr, tmem.size, tmem.shm_ref)?; } OpteeMessageCommand::UnregisterShm => { - if let Ok(tmem) = msg_arg.get_param_tmem(0) { - shm_ref_map() - .remove(tmem.shm_ref) - .ok_or(OpteeSmcReturn::EBadAddr)?; - } else { - return Err(OpteeSmcReturn::EBadCmd); - } + let tmem = msg_arg.get_param_tmem(0)?; + shm_ref_map() + .remove(tmem.shm_ref) + .ok_or(OpteeSmcReturn::EBadAddr)?; } OpteeMessageCommand::OpenSession | OpteeMessageCommand::InvokeCommand @@ -165,15 +160,17 @@ struct ShmRefInfo { pub page_offset: u64, } -/// Scatter-gather list of OP-TEE shared physical pages in VTL0. +/// A scatter-gather list of OP-TEE shared physical pages in VTL0. #[derive(Clone, Copy)] #[repr(C)] struct ShmRefPagesData { - pub pages_list: [u64; PAGELIST_ENTRIES_PER_PAGE], + pub pages_list: [u64; Self::PAGELIST_ENTRIES_PER_PAGE], pub next_page_data: u64, } -const PAGELIST_ENTRIES_PER_PAGE: usize = - PAGE_SIZE / core::mem::size_of::() - core::mem::size_of::(); +impl ShmRefPagesData { + const PAGELIST_ENTRIES_PER_PAGE: usize = + PAGE_SIZE / core::mem::size_of::() - core::mem::size_of::(); +} /// Maintain the information of OP-TEE shared memory in VTL0 referenced by `shm_ref`. /// This data structure is for registering shared memory regions before they are From 077f95149fc775ba6d0606bbc070200265cd386c Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Sun, 14 Dec 2025 05:26:28 +0000 Subject: [PATCH 16/45] add comments --- litebox_common_optee/src/lib.rs | 3 +++ litebox_shim_optee/src/msg_handler.rs | 14 +++++++++++++- 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/litebox_common_optee/src/lib.rs b/litebox_common_optee/src/lib.rs index 5de4d86d6..654b052a1 100644 --- a/litebox_common_optee/src/lib.rs +++ b/litebox_common_optee/src/lib.rs @@ -1324,6 +1324,9 @@ pub struct OpteeMsgArg { impl OpteeMsgArg { pub fn get_param_tmem(&self, index: usize) -> Result { + // `self.params.len()` indicates the maximum number of parameters possible whereas `self.num_params` + // indicates the number of parameters that the message sender specifies (which must be less than or + // equal to the maximum). 
if index >= self.params.len() || index >= self.num_params as usize { Err(OpteeSmcReturn::ENotAvail) } else { diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index 89573e127..62a128b9b 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -160,7 +160,12 @@ struct ShmRefInfo { pub page_offset: u64, } -/// A scatter-gather list of OP-TEE shared physical pages in VTL0. +/// A scatter-gather list of OP-TEE physical page addresses in the normal world (VTL0) to +/// share with the secure world (VTL1). Each [`ShmRefPagesData`] occupies one memory page +/// where `pages_list` contains a list of physical page addresses and `next_page_data` +/// contains the physical address of the next [`ShmRefPagesData`] if any. Entries of `pages_list` +/// and `next_page_data` contain zero if the list ends. These physical page addresses are +/// virtually contiguous in the normal world. All these address values must be page aligned. #[derive(Clone, Copy)] #[repr(C)] struct ShmRefPagesData { @@ -208,6 +213,13 @@ impl ShmRefMap { guard.get(&shm_ref).cloned() } + /// This function registers shared memory information that the normal world (VTL0) provides. + /// Specifically, it walks through [`ShmRefPagesData`] structures referenced by `phys_addr` + /// to create a slice of the shared physical page addresses and registers the slice with + /// `shm_ref` as its identifier. `size` indicates the total size of this registered shared + /// memory region. Note that `phys_addr` may not be page aligned. In that case, its page-aligned + /// address points to the first [`ShmRefPagesData`] structure while its page offset indicates + /// the page offset of the first page (i.e., `pages_list[0]` of the first [`ShmRefPagesData`]). pub fn register_shm( &self, phys_addr: u64, From 900ab8c91a9abbea1c38fb57429d87d0e8249220 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Sun, 14 Dec 2025 14:49:31 +0000 Subject: [PATCH 17/45] validate message --- litebox_common_optee/src/lib.rs | 25 +++++++++++++++++-------- litebox_shim_optee/src/msg_handler.rs | 9 +++++---- 2 files changed, 22 insertions(+), 12 deletions(-) diff --git a/litebox_common_optee/src/lib.rs b/litebox_common_optee/src/lib.rs index 654b052a1..823e1df9d 100644 --- a/litebox_common_optee/src/lib.rs +++ b/litebox_common_optee/src/lib.rs @@ -1114,7 +1114,6 @@ pub enum OpteeMessageCommand { UnregisterShm = OPTEE_MSG_CMD_UNREGISTER_SHM, DoBottomHalf = OPTEE_MSG_CMD_DO_BOTTOM_HALF, StopAsyncNotif = OPTEE_MSG_CMD_STOP_ASYNC_NOTIF, - Unknown = 0xffff_ffff, } impl TryFrom for UteeEntryFunc { @@ -1323,11 +1322,21 @@ pub struct OpteeMsgArg { } impl OpteeMsgArg { + /// Validate the message argument structure. + pub fn validate(&self) -> Result<(), OpteeSmcReturn> { + let _ = + OpteeMessageCommand::try_from(self.cmd as u32).map_err(|_| OpteeSmcReturn::EBadCmd)?; + if self.cmd == OpteeMessageCommand::OpenSession && self.num_params < 2 { + return Err(OpteeSmcReturn::EBadCmd); + } + if self.num_params as usize > self.params.len() { + Err(OpteeSmcReturn::EBadCmd) + } else { + Ok(()) + } + } pub fn get_param_tmem(&self, index: usize) -> Result { - // `self.params.len()` indicates the maximum number of parameters possible whereas `self.num_params` - // indicates the number of parameters that the message sender specifies (which must be less than or - // equal to the maximum). 
- if index >= self.params.len() || index >= self.num_params as usize { + if index >= self.num_params as usize { Err(OpteeSmcReturn::ENotAvail) } else { Ok(self.params[index] @@ -1336,7 +1345,7 @@ impl OpteeMsgArg { } } pub fn get_param_rmem(&self, index: usize) -> Result { - if index >= self.params.len() || index >= self.num_params as usize { + if index >= self.num_params as usize { Err(OpteeSmcReturn::ENotAvail) } else { Ok(self.params[index] @@ -1345,7 +1354,7 @@ impl OpteeMsgArg { } } pub fn get_param_fmem(&self, index: usize) -> Result { - if index >= self.params.len() || index >= self.num_params as usize { + if index >= self.num_params as usize { Err(OpteeSmcReturn::ENotAvail) } else { Ok(self.params[index] @@ -1354,7 +1363,7 @@ impl OpteeMsgArg { } } pub fn get_param_value(&self, index: usize) -> Result { - if index >= self.params.len() || index >= self.num_params as usize { + if index >= self.num_params as usize { Err(OpteeSmcReturn::ENotAvail) } else { Ok(self.params[index] diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index 62a128b9b..1b02aee61 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -39,10 +39,10 @@ const OPTEE_MSG_UID_3: u32 = 0xa5d5_c51b; // This is the UUID of OP-TEE Trusted OS // TODO: Consider replacing it with our own UUID -const OPTEE_MSG_OS_OPTEE_UUID_0: u32 = 0x486178e0; -const OPTEE_MSG_OS_OPTEE_UUID_1: u32 = 0xe7f811e3; -const OPTEE_MSG_OS_OPTEE_UUID_2: u32 = 0xbc5e0002; -const OPTEE_MSG_OS_OPTEE_UUID_3: u32 = 0xa5d5c51b; +const OPTEE_MSG_OS_OPTEE_UUID_0: u32 = 0x4861_78e0; +const OPTEE_MSG_OS_OPTEE_UUID_1: u32 = 0xe7f8_11e3; +const OPTEE_MSG_OS_OPTEE_UUID_2: u32 = 0xbc5e_0002; +const OPTEE_MSG_OS_OPTEE_UUID_3: u32 = 0xa5d5_c51b; // We do not support notification for now const MAX_NOTIF_VALUE: usize = 0; @@ -126,6 +126,7 @@ pub fn handle_optee_smc_args(smc: &mut OpteeSmcArgs) -> Result Result { + msg_arg.validate()?; match msg_arg.cmd { OpteeMessageCommand::RegisterShm => { let tmem = msg_arg.get_param_tmem(0)?; From 79eca708d51db6b569ba6cb498ee51fda1b06402 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Mon, 15 Dec 2025 05:07:17 +0000 Subject: [PATCH 18/45] clarification --- litebox_shim_optee/src/msg_handler.rs | 40 +++++++++++++++++---------- 1 file changed, 26 insertions(+), 14 deletions(-) diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index 1b02aee61..d2eeaf26e 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -130,10 +130,27 @@ pub fn handle_optee_msg_arg(msg_arg: &OpteeMsgArg) -> Result { let tmem = msg_arg.get_param_tmem(0)?; - shm_ref_map().register_shm(tmem.buf_ptr, tmem.size, tmem.shm_ref)?; + if tmem.buf_ptr == 0 || tmem.size == 0 || tmem.shm_ref == 0 { + return Err(OpteeSmcReturn::EBadAddr); + } + // `tmem.buf_ptr` embeds two different information: + // - The physical page address of the first `ShmRefPagesData` + // - The page offset of the first shared memory page (`pages_list[0]`) + let shm_ref_pages_data_phys_addr = page_align_down(tmem.buf_ptr); + let page_offset = tmem.buf_ptr - shm_ref_pages_data_phys_addr; + let aligned_size = page_align_up(page_offset + tmem.size); + shm_ref_map().register_shm( + shm_ref_pages_data_phys_addr, + page_offset, + aligned_size, + tmem.shm_ref, + )?; } OpteeMessageCommand::UnregisterShm => { let tmem = msg_arg.get_param_tmem(0)?; + if tmem.shm_ref == 0 { + return Err(OpteeSmcReturn::EBadAddr); + } shm_ref_map() 
.remove(tmem.shm_ref) .ok_or(OpteeSmcReturn::EBadAddr)?; @@ -215,25 +232,21 @@ impl ShmRefMap { } /// This function registers shared memory information that the normal world (VTL0) provides. - /// Specifically, it walks through [`ShmRefPagesData`] structures referenced by `phys_addr` - /// to create a slice of the shared physical page addresses and registers the slice with - /// `shm_ref` as its identifier. `size` indicates the total size of this registered shared - /// memory region. Note that `phys_addr` may not be page aligned. In that case, its page-aligned - /// address points to the first [`ShmRefPagesData`] structure while its page offset indicates + /// Specifically, it walks through a linked list of [`ShmRefPagesData`] structures referenced by + /// `shm_ref_pages_data_phys_addr` to create a slice of the shared physical page addresses + /// and registers the slice with `shm_ref` as its identifier. `page_offset` indicates /// the page offset of the first page (i.e., `pages_list[0]` of the first [`ShmRefPagesData`]). + /// `aligned_size` indicates the page-aligned size of the shared memory region to register. pub fn register_shm( &self, - phys_addr: u64, - size: u64, + shm_ref_pages_data_phys_addr: u64, + page_offset: u64, + aligned_size: u64, shm_ref: u64, ) -> Result<(), OpteeSmcReturn> { - let aligned_phys_addr = page_align_down(phys_addr); - let page_offset = phys_addr - aligned_phys_addr; - let aligned_size = page_align_up(page_offset + size); let num_pages = usize::try_from(aligned_size).unwrap() / PAGE_SIZE; let mut pages = Vec::with_capacity(num_pages); - - let mut cur_addr = usize::try_from(aligned_phys_addr).unwrap(); + let mut cur_addr = usize::try_from(shm_ref_pages_data_phys_addr).unwrap(); loop { let cur_ptr = NormalWorldConstPtr::::from_usize(cur_addr); let pages_data = unsafe { cur_ptr.read_at_offset(0) }.ok_or(Errno::EFAULT)?; @@ -260,7 +273,6 @@ impl ShmRefMap { page_offset, }, )?; - Ok(()) } } From 57694d0ae7e5f3bfb8fe7f4a763f1e1d337bf35e Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Tue, 16 Dec 2025 18:08:57 +0000 Subject: [PATCH 19/45] get rid of recursive handler invocation --- litebox_shim_optee/src/msg_handler.rs | 121 ++++++++++++++++---------- 1 file changed, 75 insertions(+), 46 deletions(-) diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index d2eeaf26e..f22d22f4d 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -59,9 +59,14 @@ fn page_align_up(len: u64) -> u64 { } /// This function handles `OpteeSmcArgs` passed from the normal world (VTL0) via an OP-TEE SMC call. +/// It returns an `OpteeSmcResult` representing the result of the SMC call and +/// an optional `OpteeMsgArg` if the SMC call involves with an OP-TEE messagewhich should be handled by +/// `handle_optee_msg_arg` or `handle_ta_request`. /// # Panics /// Panics if the normal world physical address in `smc` cannot be converted to `usize`. 
-pub fn handle_optee_smc_args(smc: &mut OpteeSmcArgs) -> Result, OpteeSmcReturn> { +pub fn handle_optee_smc_args( + smc: &mut OpteeSmcArgs, +) -> Result<(OpteeSmcResult<'_>, Option), OpteeSmcReturn> { let func_id = smc.func_id()?; match func_id { OpteeSmcFunction::CallWithArg @@ -70,62 +75,87 @@ pub fn handle_optee_smc_args(smc: &mut OpteeSmcArgs) -> Result::from_usize(msg_arg_addr); - let msg_arg = unsafe { ptr.read_at_offset(0) }.ok_or(Errno::EFAULT)?; - handle_optee_msg_arg(&msg_arg).map(|_| OpteeSmcResult::Generic { - status: OpteeSmcReturn::Ok, - }) + let msg_arg = unsafe { ptr.read_at_offset(0) }.ok_or(OpteeSmcReturn::EBadAddr)?; + Ok(( + OpteeSmcResult::Generic { + status: OpteeSmcReturn::Ok, + }, + Some(msg_arg), + )) } OpteeSmcFunction::ExchangeCapabilities => { // TODO: update the below when we support more features let default_cap = OpteeSecureWorldCapabilities::DYNAMIC_SHM | OpteeSecureWorldCapabilities::MEMREF_NULL | OpteeSecureWorldCapabilities::RPC_ARG; - Ok(OpteeSmcResult::ExchangeCapabilities { - status: OpteeSmcReturn::Ok, - capabilities: default_cap, - max_notif_value: MAX_NOTIF_VALUE, - data: NUM_RPC_PARMS, - }) + Ok(( + OpteeSmcResult::ExchangeCapabilities { + status: OpteeSmcReturn::Ok, + capabilities: default_cap, + max_notif_value: MAX_NOTIF_VALUE, + data: NUM_RPC_PARMS, + }, + None, + )) } OpteeSmcFunction::DisableShmCache => { // Currently, we do not support this feature. - Ok(OpteeSmcResult::DisableShmCache { - status: OpteeSmcReturn::ENotAvail, - shm_upper32: 0, - shm_lower32: 0, - }) + Ok(( + OpteeSmcResult::DisableShmCache { + status: OpteeSmcReturn::ENotAvail, + shm_upper32: 0, + shm_lower32: 0, + }, + None, + )) } - OpteeSmcFunction::GetOsUuid => Ok(OpteeSmcResult::Uuid { - data: &[ - OPTEE_MSG_OS_OPTEE_UUID_0, - OPTEE_MSG_OS_OPTEE_UUID_1, - OPTEE_MSG_OS_OPTEE_UUID_2, - OPTEE_MSG_OS_OPTEE_UUID_3, - ], - }), - OpteeSmcFunction::CallsUid => Ok(OpteeSmcResult::Uuid { - data: &[ - OPTEE_MSG_UID_0, - OPTEE_MSG_UID_1, - OPTEE_MSG_UID_2, - OPTEE_MSG_UID_3, - ], - }), - OpteeSmcFunction::GetOsRevision => Ok(OpteeSmcResult::OsRevision { - major: OPTEE_MSG_REVISION_MAJOR, - minor: OPTEE_MSG_REVISION_MINOR, - build_id: OPTEE_MSG_BUILD_ID, - }), - OpteeSmcFunction::CallsRevision => Ok(OpteeSmcResult::Revision { - major: OPTEE_MSG_REVISION_MAJOR, - minor: OPTEE_MSG_REVISION_MINOR, - }), + OpteeSmcFunction::GetOsUuid => Ok(( + OpteeSmcResult::Uuid { + data: &[ + OPTEE_MSG_OS_OPTEE_UUID_0, + OPTEE_MSG_OS_OPTEE_UUID_1, + OPTEE_MSG_OS_OPTEE_UUID_2, + OPTEE_MSG_OS_OPTEE_UUID_3, + ], + }, + None, + )), + OpteeSmcFunction::CallsUid => Ok(( + OpteeSmcResult::Uuid { + data: &[ + OPTEE_MSG_UID_0, + OPTEE_MSG_UID_1, + OPTEE_MSG_UID_2, + OPTEE_MSG_UID_3, + ], + }, + None, + )), + OpteeSmcFunction::GetOsRevision => Ok(( + OpteeSmcResult::OsRevision { + major: OPTEE_MSG_REVISION_MAJOR, + minor: OPTEE_MSG_REVISION_MINOR, + build_id: OPTEE_MSG_BUILD_ID, + }, + None, + )), + OpteeSmcFunction::CallsRevision => Ok(( + OpteeSmcResult::Revision { + major: OPTEE_MSG_REVISION_MAJOR, + minor: OPTEE_MSG_REVISION_MINOR, + }, + None, + )), _ => Err(OpteeSmcReturn::UnknownFunction), } } -/// This function handles an OP-TEE message contained in `OpteeMsgArg` -pub fn handle_optee_msg_arg(msg_arg: &OpteeMsgArg) -> Result { +/// This function handles an OP-TEE message contained in `OpteeMsgArg`. +/// Currently, it only handles share memory registration and unregistration. 
+/// If an OP-TEE message involves with a TA request, it simply returns +/// `Err(OpteeSmcReturn::Ok)` while expecting that the caller will handle +/// the message with `handle_ta_request`. +pub fn handle_optee_msg_arg(msg_arg: &OpteeMsgArg) -> Result<(), OpteeSmcReturn> { msg_arg.validate()?; match msg_arg.cmd { OpteeMessageCommand::RegisterShm => { @@ -157,13 +187,12 @@ pub fn handle_optee_msg_arg(msg_arg: &OpteeMsgArg) -> Result return handle_ta_request(msg_arg), + | OpteeMessageCommand::CloseSession => return Err(OpteeSmcReturn::Ok), _ => { todo!("Unimplemented OpteeMessageCommand: {:?}", msg_arg.cmd); } } - - Ok(*msg_arg) + Ok(()) } /// This function handles a TA request contained in `OpteeMsgArg` From 6a9cf6e2fad8ca998be85b4324cbe8a8c3401865 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Fri, 19 Dec 2025 00:58:08 +0000 Subject: [PATCH 20/45] some docs for physical pointer (wip) --- litebox_shim_optee/src/ptr.rs | 65 ++++++++++++++++++++++++++++++++--- 1 file changed, 60 insertions(+), 5 deletions(-) diff --git a/litebox_shim_optee/src/ptr.rs b/litebox_shim_optee/src/ptr.rs index 393d01317..54a84a580 100644 --- a/litebox_shim_optee/src/ptr.rs +++ b/litebox_shim_optee/src/ptr.rs @@ -1,14 +1,69 @@ -//! Placeholders for specifying remote pointer access (e.g., reading data from -//! VTL0 physical memory) +//! Physical Pointer Abstraction with On-demand Mapping +//! +//! This module implements types and traits to support accessing physical addresses +//! (e.g., VTL0 or normal-world physical memory) from LiteBox with on-demand mapping. +//! In the context of LVBS and OP-TEE, accessing physical memory is necessary +//! because VTL0 and VTL1 as well as normal world and secure world do not share +//! the same virtual address space, but they still have to share data through memory. +//! VTL1 and secure world receive physical addresses from VTL0 and normal world, +//! respectively, and they need to read from or write to those addresses. +//! +//! To simplify all these, we could persistently map the entire VTL0/normal-world +//! physical memory into VTL1/secure-world address space at once and just access them +//! through corresponding virtual addresses. Also, we could define some APIs to let +//! LiteBox (shim) map/unmap arbitrary physical addresses. However, we do not take +//! these approaches due to security concerns (e.g., data corruption or information +//! leakage due to concurrent and persistent access). +//! +//! Instead, the approach this module takes is to map the required physical memory +//! region on-demand when accessing them while using a buffer to copy data to/from +//! those regions. This way, we can ensure that data must be copied into +//! LiteBox-managed memory before being used while avoiding any unknown side effects +//! due to persistent memory mapping. +//! +//! Considerations: +//! +//! Ideally, we should be able to validate whether a given physical address is okay +//! to access or even exists in the first place. For example, accessing LiteBox's +//! own memory with this physical pointer abstraction should be prohibited. Also, +//! some device memory is mapped to certain physical address ranges and LiteBox +//! should not touch them without in-depth knowledge. However, this is a bit tricky +//! because, in many cases, LiteBox does not directly interact with the underlying +//! hardware or BIOS/UEFI. In the case of LVBS, LiteBox obtains the physical memory +//! information from VTL0 including the total physical memory size and the memory +//! 
range assigned to VTL1/LiteBox. Thus, this module can confirm whether a given +//! physical address belongs to VTL0's physical memory. +//! +//! This module should allow byte-level access while transparently handling page +//! mapping and data access across page boundaries. This could become complicated +//! when we consider multiple page sizes (e.g., 4KiB, 2MiB, 1GiB). Also, unaligned +//! access is matter to be considered. +//! +//! In addition, often times, this physical pointer abstraction is involved with +//! a list of physical page addresses (i.e., scatter-gather list). For example, in +//! the worse case, a two-byte data structure can span across two physical pages. +//! Thus, to enhance the performance, we may need to consider mapping multiple pages +//! at once, copy data from/to them, and unmap them later. Currently, our +//! implementation (in `litebox_platform_lvbs`) does not implement this functionality +//! yet and it just maps/unmaps one page at a time. We could define separate +//! interfaces for this functionality later (e.g., its parameter would be a slice of +//! `usize` instead of single `usize`). +//! +//! When this module needs to access data across physical page boundaries, it assumes +//! that those physical pages are virtually contiguous in VTL0 or normal-world address +//! space. Otherwise, this module could end up with accessing incorrect data. This is +//! best-effort assumption and it is the VTL0 or normal-world side's responsibility +//! (e.g., even if we always require a list of physical addresses, they can provide +//! a wrong list by mistake or intentionally). + //! TODO: Improve these and move these to the litebox crate later use litebox::platform::{RawConstPointer, RawMutPointer}; -// TODO: use the one from the litebox crate pub trait ValidateAccess {} -/// Trait to access a pointer to remote memory -/// For now, we only consider copying the entire value before acccessing it. +/// Trait to access a pointer to physical memory +/// For now, we only consider copying the entire value before accessing it. /// We do not consider byte-level access or unaligned access. pub trait RemoteMemoryAccess { fn read_at_offset(ptr: *mut T, count: isize) -> Option; From e69d204439936d90d8c16ff816e25f129598c7df Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Sat, 20 Dec 2025 00:49:10 +0000 Subject: [PATCH 21/45] improve phys ptr abstraction (wip) --- litebox_shim_optee/src/ptr.rs | 291 ++++++++++++++++++++++++++++++---- 1 file changed, 258 insertions(+), 33 deletions(-) diff --git a/litebox_shim_optee/src/ptr.rs b/litebox_shim_optee/src/ptr.rs index 54a84a580..78bc00d58 100644 --- a/litebox_shim_optee/src/ptr.rs +++ b/litebox_shim_optee/src/ptr.rs @@ -11,56 +11,76 @@ //! To simplify all these, we could persistently map the entire VTL0/normal-world //! physical memory into VTL1/secure-world address space at once and just access them //! through corresponding virtual addresses. Also, we could define some APIs to let -//! LiteBox (shim) map/unmap arbitrary physical addresses. However, we do not take -//! these approaches due to security concerns (e.g., data corruption or information -//! leakage due to concurrent and persistent access). +//! LiteBox (shim) map/unmap arbitrary physical addresses (i.e., implementing and +//! exposing APIs like Linux kernel's `vmap()` and `vunmap()`). However, this module +//! does not take these approaches due to scalability (e.g., how to deal with a system +//! with terabytes of physical memory?) 
and security concerns (e.g., data corruption or +//! information leakage due to concurrent and persistent access). //! //! Instead, the approach this module takes is to map the required physical memory //! region on-demand when accessing them while using a buffer to copy data to/from -//! those regions. This way, we can ensure that data must be copied into +//! those regions. This way, this module can ensure that data must be copied into //! LiteBox-managed memory before being used while avoiding any unknown side effects //! due to persistent memory mapping. //! //! Considerations: //! -//! Ideally, we should be able to validate whether a given physical address is okay -//! to access or even exists in the first place. For example, accessing LiteBox's -//! own memory with this physical pointer abstraction should be prohibited. Also, -//! some device memory is mapped to certain physical address ranges and LiteBox -//! should not touch them without in-depth knowledge. However, this is a bit tricky -//! because, in many cases, LiteBox does not directly interact with the underlying -//! hardware or BIOS/UEFI. In the case of LVBS, LiteBox obtains the physical memory -//! information from VTL0 including the total physical memory size and the memory -//! range assigned to VTL1/LiteBox. Thus, this module can confirm whether a given -//! physical address belongs to VTL0's physical memory. +//! Ideally, this module should be able to validate whether a given physical address +//! is okay to access or even exists in the first place. For example, accessing +//! LiteBox's own memory with this physical pointer abstraction must be prohibited to +//! prevent the Boomerang attack. Also, some device memory is mapped to certain +//! physical address ranges and LiteBox should not touch them without in-depth +//! knowledge. However, this is a bit tricky because, in many cases, LiteBox does +//! not directly interact with the underlying hardware or BIOS/UEFI. In the case of +//! LVBS, LiteBox obtains the physical memory information from VTL0 including the +//! total physical memory size and the memory range assigned to VTL1/LiteBox. +//! Thus, this module can at least confirm a given physical address does not belong +//! to VTL1's physical memory. //! //! This module should allow byte-level access while transparently handling page //! mapping and data access across page boundaries. This could become complicated -//! when we consider multiple page sizes (e.g., 4KiB, 2MiB, 1GiB). Also, unaligned -//! access is matter to be considered. +//! when we consider multiple page sizes (e.g., 4 KiB, 2 MiB, 1 GiB). Also, +//! unaligned access is matter to be considered. //! //! In addition, often times, this physical pointer abstraction is involved with -//! a list of physical page addresses (i.e., scatter-gather list). For example, in -//! the worse case, a two-byte data structure can span across two physical pages. -//! Thus, to enhance the performance, we may need to consider mapping multiple pages -//! at once, copy data from/to them, and unmap them later. Currently, our -//! implementation (in `litebox_platform_lvbs`) does not implement this functionality -//! yet and it just maps/unmaps one page at a time. We could define separate -//! interfaces for this functionality later (e.g., its parameter would be a slice of -//! `usize` instead of single `usize`). +//! a list of physical addresses (i.e., scatter-gather list). For example, in +//! the worse case, a two-byte value can span across two non-contiguous physical +//! 
pages. Thus, to enhance the performance, we may need to consider mapping +//! multiple pages at once, copy data from/to them, and unmap them later. Currently, +//! our implementation (in `litebox_platform_lvbs`) does not implement this +//! functionality yet and it just maps/unmaps one page at a time (this works but is +//! inefficient). //! //! When this module needs to access data across physical page boundaries, it assumes //! that those physical pages are virtually contiguous in VTL0 or normal-world address //! space. Otherwise, this module could end up with accessing incorrect data. This is -//! best-effort assumption and it is the VTL0 or normal-world side's responsibility -//! (e.g., even if we always require a list of physical addresses, they can provide +//! best-effort assumption and ensuring this is the caller's responsibility (e.g., even +//! if this module always requires a list of physical addresses, the caller can provide //! a wrong list by mistake or intentionally). -//! TODO: Improve these and move these to the litebox crate later - use litebox::platform::{RawConstPointer, RawMutPointer}; - -pub trait ValidateAccess {} +use thiserror::Error; + +/// Trait to validate that a physical pointer does not belong to LiteBox-managed memory +/// (including both kernel and userspace memory). +/// +/// This validation is mainly to deal with the Boomerang attack where a normal-world client +/// tricks the secure-world kernel (i.e., LiteBox) to access the secure-world memory. +/// However, even if there is no such threat (e.g., no normal/secure world separation), +/// this validation is still beneficial to ensure the memory safety. +/// +/// Succeeding these operations does not guarantee that the physical pointer is valid to +/// access, just that it is outside of LiteBox-managed memory and won't be used to access +/// it as an unmanaged channel. +pub trait ValidateAccess { + /// Validate that the given physical pointer does not belong to LiteBox-managed memory. + /// + /// Here, we do not use `*const T` or `*mut T` because this is a physical pointer which + /// must not be dereferenced directly. + /// + /// Returns `Some(pa)` if valid. If the pointer is not valid, returns `None`. + fn validate(pa: usize) -> Result; +} /// Trait to access a pointer to physical memory /// For now, we only consider copying the entire value before accessing it. @@ -75,6 +95,189 @@ pub trait RemoteMemoryAccess { fn copy_from_slice(start_offset: usize, buf: &[T]) -> Option<()>; } +/// Data structure for an array of physical pages. These physical pages should be +/// virtually contiguous in the source address space. +#[derive(Clone)] +pub struct PhysPageArray(alloc::boxed::Box<[usize]>); + +impl PhysPageArray<4096> { + /// Create a new `PhysPageArray` from the given slice of physical addresses. + pub fn try_from_slice(addrs: &[usize]) -> Result { + for addr in addrs { + if !addr.is_multiple_of(4096) { + return Err(PhysPointerError::UnalignedPhysicalAddress(*addr, 4096)); + } + } + Ok(Self(addrs.into())) + } +} + +/// Data structure to maintain the mapping information returned by `vmap()`. +/// `base` is the virtual address of the mapped region which is page aligned. +/// `size` is the size of the mapped region in bytes. +#[derive(Clone)] +pub struct PhysPageMapInfo { + pub base: *mut u8, + pub size: usize, +} + +bitflags::bitflags! { + /// Physical page map permissions which is a restricted version of + /// [`litebox::platform::page_mgmt::MemoryRegionPermissions`]. 
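+    ///
+    /// For example (an illustrative sketch, not part of the original docs), a
+    /// read-write mapping request combines the two flags:
+    ///
+    /// ```ignore
+    /// let perms = PhysPageMapPermissions::READ | PhysPageMapPermissions::WRITE;
+    /// assert!(perms.contains(PhysPageMapPermissions::WRITE));
+    /// ```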
+ /// + /// This module only supports READ and WRITE permissions. Both EXECUTE and SHARED + /// permissions are explicitly prohibited. + #[derive(Clone, Copy, Debug, PartialEq, Eq)] + pub struct PhysPageMapPermissions: u8 { + /// Readable + const READ = 1 << 0; + /// Writable + const WRITE = 1 << 1; + } +} + +/// Trait to map and unmap physical pages into virtually contiguous address space. +/// +/// The implementation of this trait is platform-specific because it depends on how +/// the underlying platform manages page tables and memory regions. +pub trait PhysPageMapper { + /// Map the given [`PhysPageArray`] into virtually contiguous address space with the given + /// [`PhysPageMapPermissions`] while returning [`PhysPageMapInfo`]. + /// This function is analogous to Linux kernel's `vmap()`. + /// + /// # Safety + /// + /// The caller must ensure that `pages` are not in active use. LiteBox itself cannot fully guarantee this + /// and it needs some helps from the caller, hypervisor, or hardware. + unsafe fn vmap( + pages: PhysPageArray, + perms: PhysPageMapPermissions, + ) -> Result, PhysPointerError>; + /// Unmap the previously mapped virtually contiguous address space ([`PhysPageMapInfo`]). + /// This function is analogous to Linux kernel's `vunmap()`. + /// + /// # Safety + /// + /// The caller must ensure that the virtual addresses belonging to `vmap_info` are not in active use. + /// Like `vmap()`, LiteBox itself cannot fully guarantee this and it needs some helps from other parties. + unsafe fn vunmap( + vmap_info: PhysPageMapInfo, + ) -> Result<(), PhysPointerError>; +} + +/// Represent a physical pointer to a read-only object. +/// - `pages`: An array of page-aligned physical addresses ([`PhysPageArray`]). Physical addresses in +/// this array should be virtually contiguous. +/// - `offset`: The offset within `pages[0]` where the object starts. It should be smaller than `ALIGN`. +/// - `T`: The type of the object being pointed to. `pages` with respect to `offset` should cover enough +/// memory for an object of type `T`. +/// - `V`: The validator type implementing [`ValidateAccess`] trait to validate the physical addresses +#[derive(Clone)] +#[repr(C)] +pub struct PhysConstPtr { + pages: PhysPageArray, + offset: usize, + map_info: Option>, + _type: core::marker::PhantomData, + _mapper: core::marker::PhantomData, + _validator: core::marker::PhantomData, +} + +impl + PhysConstPtr +{ + /// Create a new `PhysConstPtr` from the given physical page array and offset. + pub fn try_from_page_array( + pages: PhysPageArray, + offset: usize, + ) -> Result { + if offset >= ALIGN { + return Err(PhysPointerError::InvalidBaseOffset(offset, ALIGN)); + } + let size = if pages.0.is_empty() { + 0 + } else { + ALIGN - offset + (pages.0.len() - 1) * ALIGN + }; + if size < core::mem::size_of::() { + return Err(PhysPointerError::InsufficientPhysicalPages( + size, + core::mem::size_of::(), + )); + } + for pa in &pages.0 { + V::validate::(*pa)?; + } + Ok(Self { + pages, + offset, + map_info: None, + _type: core::marker::PhantomData, + _mapper: core::marker::PhantomData, + _validator: core::marker::PhantomData, + }) + } + /// Create a new `PhysConstPtr` from the given contiguous physical address and length. + /// The caller must ensure that `pa`, ..., `pa+len` are both physically and virtually contiguous. 
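+    ///
+    /// For example (illustrative values): with 4 KiB pages, `pa = 0x8000_1F00` and
+    /// `len = 512` expand to the page list `[0x8000_1000, 0x8000_2000]` with a
+    /// starting offset of `0xF00` into the first page, since the object crosses a
+    /// page boundary.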
+ pub fn try_from_contiguous_pages(pa: usize, len: usize) -> Result { + if len < core::mem::size_of::() { + return Err(PhysPointerError::InsufficientPhysicalPages( + len, + core::mem::size_of::(), + )); + } + let start_page = pa - (pa % ALIGN); + let end_page = pa + len; + let end_page_aligned = if end_page.is_multiple_of(ALIGN) { + end_page + } else { + end_page + (ALIGN - (end_page % ALIGN)) + }; + let mut pages = alloc::vec::Vec::new(); + let mut current_page = start_page; + while current_page < end_page_aligned { + V::validate::(current_page)?; + pages.push(current_page); + current_page += ALIGN; + } + Self::try_from_page_array(PhysPageArray(pages.into()), pa - start_page) + } + /// Map the physical pages if not already mapped. + fn map(&mut self) -> Result<(), PhysPointerError> { + if self.map_info.is_none() { + unsafe { + self.map_info = Some(M::vmap(self.pages.clone(), PhysPageMapPermissions::READ)?); + } + } + Ok(()) + } + /// Unmap the physical pages if mapped. + fn unmap(&mut self) -> Result<(), PhysPointerError> { + if let Some(map_info) = self.map_info.take() { + unsafe { + M::vunmap(map_info)?; + } + self.map_info = None; + } + Ok(()) + } + pub fn as_usize(&mut self) -> Result { + todo!() + } + pub fn from_usize(&mut self, addr: usize) -> Result<(), PhysPointerError> { + todo!() + } +} + +impl core::fmt::Debug for PhysConstPtr { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("PhysConstPtr") + .field("pages", &self.pages.0) + .field("offset", &self.offset) + .finish_non_exhaustive() + } +} + #[repr(C)] pub struct RemoteConstPtr { inner: *const T, @@ -218,10 +421,14 @@ impl RawMutPointer } } -// TODO: implement a validation mechanism for VTL0 physical addresses (e.g., ensure this physical -// address does not belong to VTL1) +// TODO: Sample no-op implementations to be removed. Implement a validation mechanism for +// VTL0 physical addresses (e.g., ensure this physical address does not belong to VTL1) pub struct Novalidation; -impl ValidateAccess for Novalidation {} +impl ValidateAccess for Novalidation { + fn validate(pa: usize) -> Result { + Ok(pa) + } +} pub struct Vtl0PhysMemoryAccess; impl RemoteMemoryAccess for Vtl0PhysMemoryAccess { @@ -250,6 +457,24 @@ impl RemoteMemoryAccess for Vtl0PhysMemoryAccess { } } +/// Possible errors for physical page access +#[non_exhaustive] +#[derive(Error, Debug)] +pub enum PhysPointerError { + #[error("Physical address {0:#x} is invalid to access")] + InvalidPhysicalAddress(usize), + #[error("Physical address {0:#x} is not aligned to {1} bytes")] + UnalignedPhysicalAddress(usize, usize), + #[error("Offset {0:#x} is not aligned to {1} bytes")] + UnalignedOffset(usize, usize), + #[error("Base offset {0:#x} is greater than or equal to alignment ({1} bytes)")] + InvalidBaseOffset(usize, usize), + #[error( + "The total size of the given pages ({0} bytes) is insufficient for the requested type ({1} bytes)" + )] + InsufficientPhysicalPages(usize, usize), +} + /// Normal world const pointer type. For now, we only consider VTL0 physical memory but it can be /// something else like TrustZone normal world, other VMPL or TD partition, or other processes. 
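 ///
 /// A minimal read sketch (`msg_arg_addr` is a placeholder for a guest-provided
 /// physical address), mirroring how the shim reads an `OpteeMsgArg` out of
 /// normal-world memory:
 ///
 /// ```ignore
 /// let ptr = NormalWorldConstPtr::<OpteeMsgArg>::from_usize(msg_arg_addr);
 /// let msg_arg = unsafe { ptr.read_at_offset(0) }; // Option<OpteeMsgArg>
 /// ```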
pub type NormalWorldConstPtr = RemoteConstPtr; From ff9fc3609c1c29dd07bf77f0c73f3943ec49ce9a Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Mon, 22 Dec 2025 18:38:53 +0000 Subject: [PATCH 22/45] checkpoint --- litebox_shim_optee/src/msg_handler.rs | 18 +- litebox_shim_optee/src/ptr.rs | 352 ++++++++++---------------- 2 files changed, 150 insertions(+), 220 deletions(-) diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index f22d22f4d..b5af78f3b 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -12,12 +12,10 @@ //! world physical addresses to exchange a large amount of data. Also, like the OP-TEE //! SMC call, a certain OP-TEE message/command does not involve with any TA (e.g., register //! shared memory). -use crate::ptr::NormalWorldConstPtr; +use crate::ptr::NormalWorldPtr; use alloc::{boxed::Box, vec::Vec}; use hashbrown::HashMap; use litebox::mm::linux::PAGE_SIZE; -use litebox::platform::RawConstPointer; -use litebox_common_linux::errno::Errno; use litebox_common_optee::{ OpteeMessageCommand, OpteeMsgArg, OpteeSecureWorldCapabilities, OpteeSmcArgs, OpteeSmcFunction, OpteeSmcResult, OpteeSmcReturn, @@ -74,13 +72,14 @@ pub fn handle_optee_smc_args( | OpteeSmcFunction::CallWithRegdArg => { let msg_arg_addr = smc.optee_msg_arg_phys_addr()?; let msg_arg_addr = usize::try_from(msg_arg_addr).unwrap(); - let ptr = NormalWorldConstPtr::::from_usize(msg_arg_addr); - let msg_arg = unsafe { ptr.read_at_offset(0) }.ok_or(OpteeSmcReturn::EBadAddr)?; + let mut ptr = NormalWorldPtr::::try_from_usize(msg_arg_addr) + .map_err(|_| OpteeSmcReturn::EBadAddr)?; + let msg_arg = unsafe { ptr.read_at_offset(0) }.map_err(|_| OpteeSmcReturn::EBadAddr)?; Ok(( OpteeSmcResult::Generic { status: OpteeSmcReturn::Ok, }, - Some(msg_arg), + Some(*msg_arg), )) } OpteeSmcFunction::ExchangeCapabilities => { @@ -277,8 +276,11 @@ impl ShmRefMap { let mut pages = Vec::with_capacity(num_pages); let mut cur_addr = usize::try_from(shm_ref_pages_data_phys_addr).unwrap(); loop { - let cur_ptr = NormalWorldConstPtr::::from_usize(cur_addr); - let pages_data = unsafe { cur_ptr.read_at_offset(0) }.ok_or(Errno::EFAULT)?; + let mut cur_ptr = + NormalWorldPtr::::try_from_usize(cur_addr) + .map_err(|_| OpteeSmcReturn::EBadAddr)?; + let pages_data = + unsafe { cur_ptr.read_at_offset(0) }.map_err(|_| OpteeSmcReturn::EBadAddr)?; for page in &pages_data.pages_list { if *page == 0 || pages.len() == num_pages { break; diff --git a/litebox_shim_optee/src/ptr.rs b/litebox_shim_optee/src/ptr.rs index 78bc00d58..6c7a91c5b 100644 --- a/litebox_shim_optee/src/ptr.rs +++ b/litebox_shim_optee/src/ptr.rs @@ -58,7 +58,7 @@ //! if this module always requires a list of physical addresses, the caller can provide //! a wrong list by mistake or intentionally). -use litebox::platform::{RawConstPointer, RawMutPointer}; +use litebox::platform::page_mgmt::MemoryRegionPermissions; use thiserror::Error; /// Trait to validate that a physical pointer does not belong to LiteBox-managed memory @@ -82,19 +82,6 @@ pub trait ValidateAccess { fn validate(pa: usize) -> Result; } -/// Trait to access a pointer to physical memory -/// For now, we only consider copying the entire value before accessing it. -/// We do not consider byte-level access or unaligned access. 
-pub trait RemoteMemoryAccess { - fn read_at_offset(ptr: *mut T, count: isize) -> Option; - - fn write_at_offset(ptr: *mut T, count: isize, value: T) -> Option<()>; - - fn slice_from(ptr: *mut T, len: usize) -> Option>; - - fn copy_from_slice(start_offset: usize, buf: &[T]) -> Option<()>; -} - /// Data structure for an array of physical pages. These physical pages should be /// virtually contiguous in the source address space. #[derive(Clone)] @@ -135,6 +122,18 @@ bitflags::bitflags! { const WRITE = 1 << 1; } } +impl From for PhysPageMapPermissions { + fn from(perms: MemoryRegionPermissions) -> Self { + let mut phys_perms = PhysPageMapPermissions::empty(); + if perms.contains(MemoryRegionPermissions::READ) { + phys_perms |= PhysPageMapPermissions::READ; + } + if perms.contains(MemoryRegionPermissions::WRITE) { + phys_perms |= PhysPageMapPermissions::WRITE; + } + phys_perms + } +} /// Trait to map and unmap physical pages into virtually contiguous address space. /// @@ -165,18 +164,20 @@ pub trait PhysPageMapper { ) -> Result<(), PhysPointerError>; } -/// Represent a physical pointer to a read-only object. +/// Represent a physical pointer to an object with on-demand mapping. /// - `pages`: An array of page-aligned physical addresses ([`PhysPageArray`]). Physical addresses in /// this array should be virtually contiguous. /// - `offset`: The offset within `pages[0]` where the object starts. It should be smaller than `ALIGN`. +/// - `count`: The number of objects of type `T` that can be accessed from this pointer. /// - `T`: The type of the object being pointed to. `pages` with respect to `offset` should cover enough /// memory for an object of type `T`. /// - `V`: The validator type implementing [`ValidateAccess`] trait to validate the physical addresses #[derive(Clone)] #[repr(C)] -pub struct PhysConstPtr { +pub struct PhysMappedPtr { pages: PhysPageArray, offset: usize, + count: usize, map_info: Option>, _type: core::marker::PhantomData, _mapper: core::marker::PhantomData, @@ -184,9 +185,9 @@ pub struct PhysConstPtr { } impl - PhysConstPtr + PhysMappedPtr { - /// Create a new `PhysConstPtr` from the given physical page array and offset. + /// Create a new `PhysMappedPtr` from the given physical page array and offset. pub fn try_from_page_array( pages: PhysPageArray, offset: usize, @@ -211,23 +212,24 @@ impl Ok(Self { pages, offset, + count: size / core::mem::size_of::(), map_info: None, _type: core::marker::PhantomData, _mapper: core::marker::PhantomData, _validator: core::marker::PhantomData, }) } - /// Create a new `PhysConstPtr` from the given contiguous physical address and length. - /// The caller must ensure that `pa`, ..., `pa+len` are both physically and virtually contiguous. - pub fn try_from_contiguous_pages(pa: usize, len: usize) -> Result { - if len < core::mem::size_of::() { + /// Create a new `PhysMappedPtr` from the given contiguous physical address and length. + /// The caller must ensure that `pa`, ..., `pa+bytes` are both physically and virtually contiguous. 
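+    ///
+    /// Sketch (hypothetical values): for a 16-byte `T` at `pa = 0x9000_0FF8`, this
+    /// validates and collects the pages `[0x9000_0000, 0x9000_1000]` and records
+    /// `offset = 0xFF8`, since the object straddles the page boundary.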
+ pub fn try_from_contiguous_pages(pa: usize, bytes: usize) -> Result { + if bytes < core::mem::size_of::() { return Err(PhysPointerError::InsufficientPhysicalPages( - len, + bytes, core::mem::size_of::(), )); } let start_page = pa - (pa % ALIGN); - let end_page = pa + len; + let end_page = pa + bytes; let end_page_aligned = if end_page.is_multiple_of(ALIGN) { end_page } else { @@ -242,218 +244,140 @@ impl } Self::try_from_page_array(PhysPageArray(pages.into()), pa - start_page) } + /// Create a new `PhysMappedPtr` from the given physical address for a single object. + /// This is a shortcut for `try_from_contiguous_pages(pa, size_of::())`. + pub fn try_from_usize(pa: usize) -> Result { + Self::try_from_contiguous_pages(pa, core::mem::size_of::()) + } + /// Read the value at the given type-aware offset from the physical pointer. + /// + /// # Safety + /// + /// The caller should be aware that the given physical address might be concurrently accessed by + /// other entities (e.g., the normal world kernel) if there is no extra security mechanism + /// in place (e.g., by the hypervisor or hardware). + pub unsafe fn read_at_offset( + &mut self, + count: usize, + ) -> Result, PhysPointerError> { + if count >= self.count { + return Err(PhysPointerError::IndexOutOfBounds(count, self.count)); + } + self.map_all(PhysPageMapPermissions::READ)?; + let Some(map_info) = &self.map_info else { + return Err(PhysPointerError::NoMappingInfo); + }; + let addr = unsafe { map_info.base.add(self.offset) } + .cast::() + .wrapping_add(count); + let val = { + let mut buffer = core::mem::MaybeUninit::::uninit(); + if (addr as usize).is_multiple_of(core::mem::align_of::()) { + unsafe { + core::ptr::copy_nonoverlapping(addr, buffer.as_mut_ptr(), 1); + } + } else { + unsafe { + core::ptr::copy_nonoverlapping( + addr.cast::(), + buffer.as_mut_ptr().cast::(), + core::mem::size_of::(), + ); + } + } + unsafe { buffer.assume_init() } + }; + self.unmap_all()?; + Ok(alloc::boxed::Box::new(val)) + } + /// Write the value at the given type-aware offset to the physical pointer. + /// + /// # Safety + /// + /// The caller should be aware that the given physical address might be concurrently accessed by + /// other entities (e.g., the normal world kernel) if there is no extra security mechanism + /// in place (e.g., by the hypervisor or hardware). + pub unsafe fn write_at_offset( + &mut self, + count: usize, + value: T, + ) -> Result<(), PhysPointerError> { + if count >= self.count { + return Err(PhysPointerError::IndexOutOfBounds(count, self.count)); + } + self.map_all(PhysPageMapPermissions::READ | PhysPageMapPermissions::WRITE)?; + let Some(map_info) = &self.map_info else { + return Err(PhysPointerError::NoMappingInfo); + }; + let addr = unsafe { map_info.base.add(self.offset) } + .cast::() + .wrapping_add(count); + if (addr as usize).is_multiple_of(core::mem::align_of::()) { + unsafe { core::ptr::write(addr, value) }; + } else { + unsafe { core::ptr::write_unaligned(addr, value) }; + } + self.unmap_all()?; + Ok(()) + } /// Map the physical pages if not already mapped. - fn map(&mut self) -> Result<(), PhysPointerError> { + fn map_all(&mut self, perms: PhysPageMapPermissions) -> Result<(), PhysPointerError> { if self.map_info.is_none() { unsafe { - self.map_info = Some(M::vmap(self.pages.clone(), PhysPageMapPermissions::READ)?); + self.map_info = Some(M::vmap(self.pages.clone(), perms)?); } + Ok(()) + } else { + Err(PhysPointerError::AlreadyMapped(self.pages.0[0])) } - Ok(()) } /// Unmap the physical pages if mapped. 
- fn unmap(&mut self) -> Result<(), PhysPointerError> { + fn unmap_all(&mut self) -> Result<(), PhysPointerError> { if let Some(map_info) = self.map_info.take() { unsafe { M::vunmap(map_info)?; } self.map_info = None; + Ok(()) + } else { + Err(PhysPointerError::Unmapped(self.pages.0[0])) } - Ok(()) - } - pub fn as_usize(&mut self) -> Result { - todo!() - } - pub fn from_usize(&mut self, addr: usize) -> Result<(), PhysPointerError> { - todo!() } } -impl core::fmt::Debug for PhysConstPtr { +impl core::fmt::Debug for PhysMappedPtr { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - f.debug_struct("PhysConstPtr") + f.debug_struct("PhysMappedPtr") .field("pages", &self.pages.0) .field("offset", &self.offset) .finish_non_exhaustive() } } -#[repr(C)] -pub struct RemoteConstPtr { - inner: *const T, - _access: core::marker::PhantomData, - _validator: core::marker::PhantomData, -} - -impl RemoteConstPtr { - pub fn from_ptr(ptr: *const T) -> Self { - Self { - inner: ptr, - _access: core::marker::PhantomData, - _validator: core::marker::PhantomData, - } - } -} - -impl Clone for RemoteConstPtr { - fn clone(&self) -> Self { - *self - } -} - -impl Copy for RemoteConstPtr {} - -impl core::fmt::Debug for RemoteConstPtr { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - f.debug_tuple("RemoteConstPtr").field(&self.inner).finish() - } -} - -impl RawConstPointer - for RemoteConstPtr -{ - unsafe fn read_at_offset(self, count: isize) -> Option { - let val = A::read_at_offset(self.inner.cast_mut(), count)?; - Some(val) - } - - unsafe fn to_owned_slice(self, len: usize) -> Option> { - // TODO: read data from the remote side - if len == 0 { - return Some(alloc::boxed::Box::new([])); - } - let mut data = alloc::vec::Vec::new(); - data.reserve_exact(len); - unsafe { data.set_len(len) }; - Some(data.into_boxed_slice()) - } - - fn as_usize(&self) -> usize { - self.inner.expose_provenance() - } - - fn from_usize(addr: usize) -> Self { - Self { - inner: core::ptr::with_exposed_provenance(addr), - _access: core::marker::PhantomData, - _validator: core::marker::PhantomData, - } - } -} - -#[repr(C)] -pub struct RemoteMutPtr { - inner: *mut T, - _access: core::marker::PhantomData, - _validator: core::marker::PhantomData, -} - -impl RemoteMutPtr { - pub fn from_ptr(ptr: *mut T) -> Self { - Self { - inner: ptr, - _access: core::marker::PhantomData, - _validator: core::marker::PhantomData, - } - } -} - -impl Clone for RemoteMutPtr { - fn clone(&self) -> Self { - *self - } -} - -impl Copy for RemoteMutPtr {} - -impl core::fmt::Debug for RemoteMutPtr { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - f.debug_tuple("RemoteMutPtr").field(&self.inner).finish() - } -} - -impl RawConstPointer - for RemoteMutPtr -{ - unsafe fn read_at_offset(self, count: isize) -> Option { - let val = A::read_at_offset(self.inner, count)?; - Some(val) - } - - unsafe fn to_owned_slice(self, len: usize) -> Option> { - // TODO: read data from the remote side - if len == 0 { - return Some(alloc::boxed::Box::new([])); - } - let data = A::slice_from(self.inner, len)?; - Some(data) - } - - fn as_usize(&self) -> usize { - self.inner.expose_provenance() - } - - fn from_usize(addr: usize) -> Self { - Self::from_ptr(core::ptr::with_exposed_provenance_mut(addr)) - } -} - -impl RawMutPointer - for RemoteMutPtr -{ - unsafe fn write_at_offset<'a>(self, count: isize, value: T) -> Option<()> { - A::write_at_offset(self.inner, count, value) - } - - fn mutate_subslice_with( - self, - _range: impl 
core::ops::RangeBounds, - _f: impl FnOnce(&mut [T]) -> R, - ) -> Option { - unimplemented!("use write_slice_at_offset instead") - } - - fn copy_from_slice(self, start_offset: usize, buf: &[T]) -> Option<()> - where - T: Copy, - { - A::copy_from_slice(start_offset, buf) - } -} - // TODO: Sample no-op implementations to be removed. Implement a validation mechanism for // VTL0 physical addresses (e.g., ensure this physical address does not belong to VTL1) -pub struct Novalidation; -impl ValidateAccess for Novalidation { +pub struct NoValidation; +impl ValidateAccess for NoValidation { fn validate(pa: usize) -> Result { Ok(pa) } } -pub struct Vtl0PhysMemoryAccess; -impl RemoteMemoryAccess for Vtl0PhysMemoryAccess { - fn read_at_offset(_ptr: *mut T, _count: isize) -> Option { - // TODO: read a value from VTL0 physical memory - let val: T = unsafe { core::mem::zeroed() }; - Some(val) - } - - fn write_at_offset(_ptr: *mut T, _count: isize, _value: T) -> Option<()> { - // TODO: write a value to VTL0 physical memory - Some(()) - } - - fn slice_from(_ptr: *mut T, len: usize) -> Option> { - // TODO: read a slice from VTL0 physical memory - let mut data: alloc::vec::Vec = alloc::vec::Vec::new(); - data.reserve_exact(len); - unsafe { data.set_len(len) }; - Some(data.into_boxed_slice()) +pub struct MockPhysMemoryMapper; +impl PhysPageMapper for MockPhysMemoryMapper { + unsafe fn vmap( + pages: PhysPageArray, + _perms: PhysPageMapPermissions, + ) -> Result, PhysPointerError> { + Ok(PhysPageMapInfo { + base: core::ptr::null_mut(), + size: pages.0.len() * ALIGN, + }) } - - fn copy_from_slice(_start_offset: usize, _buf: &[T]) -> Option<()> { - // TODO: write a slice to VTL0 physical memory - Some(()) + unsafe fn vunmap( + _vmap_info: PhysPageMapInfo, + ) -> Result<(), PhysPointerError> { + Ok(()) } } @@ -473,12 +397,16 @@ pub enum PhysPointerError { "The total size of the given pages ({0} bytes) is insufficient for the requested type ({1} bytes)" )] InsufficientPhysicalPages(usize, usize), + #[error("Index {0} is out of bounds (count: {1})")] + IndexOutOfBounds(usize, usize), + #[error("Physical address {0:#x} is already mapped")] + AlreadyMapped(usize), + #[error("Physical address {0:#x} is unmapped")] + Unmapped(usize), + #[error("No mapping information available")] + NoMappingInfo, } -/// Normal world const pointer type. For now, we only consider VTL0 physical memory but it can be -/// something else like TrustZone normal world, other VMPL or TD partition, or other processes. -pub type NormalWorldConstPtr = RemoteConstPtr; - -/// Normal world mutable pointer type. For now, we only consider VTL0 physical memory but it can be -/// something else like TrustZone normal world, other VMPL or TD partition, or other processes. -pub type NormalWorldMutPtr = RemoteMutPtr; +/// Normal world pointer type using MockPhysMemoryMapper for testing purposes. +pub type NormalWorldPtr = + PhysMappedPtr; From a3c9a4a40504f2ed1c4de977784eb49cfebddab0 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Mon, 22 Dec 2025 19:00:30 +0000 Subject: [PATCH 23/45] separate const and mut ptrs --- litebox_shim_optee/src/msg_handler.rs | 9 +-- litebox_shim_optee/src/ptr.rs | 83 +++++++++++++++++++++++---- 2 files changed, 77 insertions(+), 15 deletions(-) diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index b5af78f3b..89533a1ac 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -12,7 +12,7 @@ //! 
world physical addresses to exchange a large amount of data. Also, like the OP-TEE //! SMC call, a certain OP-TEE message/command does not involve with any TA (e.g., register //! shared memory). -use crate::ptr::NormalWorldPtr; +use crate::ptr::NormalWorldConstPtr; use alloc::{boxed::Box, vec::Vec}; use hashbrown::HashMap; use litebox::mm::linux::PAGE_SIZE; @@ -72,8 +72,9 @@ pub fn handle_optee_smc_args( | OpteeSmcFunction::CallWithRegdArg => { let msg_arg_addr = smc.optee_msg_arg_phys_addr()?; let msg_arg_addr = usize::try_from(msg_arg_addr).unwrap(); - let mut ptr = NormalWorldPtr::::try_from_usize(msg_arg_addr) - .map_err(|_| OpteeSmcReturn::EBadAddr)?; + let mut ptr = + NormalWorldConstPtr::::try_from_usize(msg_arg_addr) + .map_err(|_| OpteeSmcReturn::EBadAddr)?; let msg_arg = unsafe { ptr.read_at_offset(0) }.map_err(|_| OpteeSmcReturn::EBadAddr)?; Ok(( OpteeSmcResult::Generic { @@ -277,7 +278,7 @@ impl ShmRefMap { let mut cur_addr = usize::try_from(shm_ref_pages_data_phys_addr).unwrap(); loop { let mut cur_ptr = - NormalWorldPtr::::try_from_usize(cur_addr) + NormalWorldConstPtr::::try_from_usize(cur_addr) .map_err(|_| OpteeSmcReturn::EBadAddr)?; let pages_data = unsafe { cur_ptr.read_at_offset(0) }.map_err(|_| OpteeSmcReturn::EBadAddr)?; diff --git a/litebox_shim_optee/src/ptr.rs b/litebox_shim_optee/src/ptr.rs index 6c7a91c5b..3e9eb91a0 100644 --- a/litebox_shim_optee/src/ptr.rs +++ b/litebox_shim_optee/src/ptr.rs @@ -174,7 +174,7 @@ pub trait PhysPageMapper { /// - `V`: The validator type implementing [`ValidateAccess`] trait to validate the physical addresses #[derive(Clone)] #[repr(C)] -pub struct PhysMappedPtr { +pub struct PhysMutPtr { pages: PhysPageArray, offset: usize, count: usize, @@ -185,9 +185,9 @@ pub struct PhysMappedPtr { } impl - PhysMappedPtr + PhysMutPtr { - /// Create a new `PhysMappedPtr` from the given physical page array and offset. + /// Create a new `PhysMutPtr` from the given physical page array and offset. pub fn try_from_page_array( pages: PhysPageArray, offset: usize, @@ -219,7 +219,7 @@ impl _validator: core::marker::PhantomData, }) } - /// Create a new `PhysMappedPtr` from the given contiguous physical address and length. + /// Create a new `PhysMutPtr` from the given contiguous physical address and length. /// The caller must ensure that `pa`, ..., `pa+bytes` are both physically and virtually contiguous. pub fn try_from_contiguous_pages(pa: usize, bytes: usize) -> Result { if bytes < core::mem::size_of::() { @@ -244,7 +244,7 @@ impl } Self::try_from_page_array(PhysPageArray(pages.into()), pa - start_page) } - /// Create a new `PhysMappedPtr` from the given physical address for a single object. + /// Create a new `PhysMutPtr` from the given physical address for a single object. /// This is a shortcut for `try_from_contiguous_pages(pa, size_of::())`. pub fn try_from_usize(pa: usize) -> Result { Self::try_from_contiguous_pages(pa, core::mem::size_of::()) @@ -345,15 +345,72 @@ impl } } -impl core::fmt::Debug for PhysMappedPtr { +impl core::fmt::Debug for PhysMutPtr { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - f.debug_struct("PhysMappedPtr") - .field("pages", &self.pages.0) + f.debug_struct("PhysMutPtr") + .field("pages[0]", &self.pages.0[0]) .field("offset", &self.offset) .finish_non_exhaustive() } } +/// Represent a physical pointer to a read-only object. This wraps around [`PhysMutPtr`] and +/// exposes only read access. 
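+///
+/// A usage sketch mirroring `msg_handler` (`msg_arg_addr` is a placeholder value):
+///
+/// ```ignore
+/// let mut ptr = NormalWorldConstPtr::<OpteeMsgArg>::try_from_usize(msg_arg_addr)
+///     .map_err(|_| OpteeSmcReturn::EBadAddr)?;
+/// let msg_arg = unsafe { ptr.read_at_offset(0) }.map_err(|_| OpteeSmcReturn::EBadAddr)?;
+/// ```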
+#[derive(Clone)] +#[repr(C)] +pub struct PhysConstPtr { + inner: PhysMutPtr, +} +impl + PhysConstPtr +{ + /// Create a new `PhysConstPtr` from the given physical page array and offset. + pub fn try_from_page_array( + pages: PhysPageArray, + offset: usize, + ) -> Result { + Ok(Self { + inner: PhysMutPtr::try_from_page_array(pages, offset)?, + }) + } + /// Create a new `PhysConstPtr` from the given contiguous physical address and length. + /// The caller must ensure that `pa`, ..., `pa+bytes` are both physically and virtually contiguous. + pub fn try_from_contiguous_pages(pa: usize, bytes: usize) -> Result { + Ok(Self { + inner: PhysMutPtr::try_from_contiguous_pages(pa, bytes)?, + }) + } + /// Create a new `PhysConstPtr` from the given physical address for a single object. + /// This is a shortcut for `try_from_contiguous_pages(pa, size_of::())`. + pub fn try_from_usize(pa: usize) -> Result { + Ok(Self { + inner: PhysMutPtr::try_from_usize(pa)?, + }) + } + /// Read the value at the given type-aware offset from the physical pointer. + /// + /// # Safety + /// + /// The caller should be aware that the given physical address might be concurrently accessed by + /// other entities (e.g., the normal world kernel) if there is no extra security mechanism + /// in place (e.g., by the hypervisor or hardware). + pub unsafe fn read_at_offset( + &mut self, + count: usize, + ) -> Result, PhysPointerError> { + unsafe { self.inner.read_at_offset(count) } + } +} + +impl core::fmt::Debug for PhysConstPtr { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("PhysConstPtr") + .field("pages[0]", &self.inner.pages.0[0]) + .field("offset", &self.inner.offset) + .finish_non_exhaustive() + } +} + // TODO: Sample no-op implementations to be removed. Implement a validation mechanism for // VTL0 physical addresses (e.g., ensure this physical address does not belong to VTL1) pub struct NoValidation; @@ -407,6 +464,10 @@ pub enum PhysPointerError { NoMappingInfo, } -/// Normal world pointer type using MockPhysMemoryMapper for testing purposes. -pub type NormalWorldPtr = - PhysMappedPtr; +/// Normal world constant pointer type using MockPhysMemoryMapper for testing purposes. +pub type NormalWorldConstPtr = + PhysConstPtr; + +/// Normal world mutable pointer type using MockPhysMemoryMapper for testing purposes. +pub type NormalWorldMutPtr = + PhysMutPtr; From 5fd251d70c2c96a1b7de266f30572247223b0d4c Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Mon, 22 Dec 2025 21:31:09 +0000 Subject: [PATCH 24/45] read/write slice --- litebox_shim_optee/src/ptr.rs | 201 +++++++++++++++++++++++++++++----- 1 file changed, 171 insertions(+), 30 deletions(-) diff --git a/litebox_shim_optee/src/ptr.rs b/litebox_shim_optee/src/ptr.rs index 3e9eb91a0..4d01e3efe 100644 --- a/litebox_shim_optee/src/ptr.rs +++ b/litebox_shim_optee/src/ptr.rs @@ -61,6 +61,16 @@ use litebox::platform::page_mgmt::MemoryRegionPermissions; use thiserror::Error; +#[inline] +fn align_down(address: usize, align: usize) -> usize { + address & !(align - 1) +} + +#[inline] +fn align_up(len: usize, align: usize) -> usize { + len.next_multiple_of(align) +} + /// Trait to validate that a physical pointer does not belong to LiteBox-managed memory /// (including both kernel and userspace memory). /// @@ -85,17 +95,29 @@ pub trait ValidateAccess { /// Data structure for an array of physical pages. These physical pages should be /// virtually contiguous in the source address space. 
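 ///
 /// Construction sketch (addresses are illustrative): the pages need not be
 /// physically contiguous, only virtually contiguous on the source side, e.g.:
 ///
 /// ```ignore
 /// let pages = PhysPageArray::<4096>::try_from_slice(&[0x8000_1000, 0x9000_2000])?;
 /// ```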
#[derive(Clone)] -pub struct PhysPageArray(alloc::boxed::Box<[usize]>); - -impl PhysPageArray<4096> { +pub struct PhysPageArray { + inner: alloc::boxed::Box<[usize]>, +} +impl PhysPageArray { /// Create a new `PhysPageArray` from the given slice of physical addresses. pub fn try_from_slice(addrs: &[usize]) -> Result { for addr in addrs { - if !addr.is_multiple_of(4096) { - return Err(PhysPointerError::UnalignedPhysicalAddress(*addr, 4096)); + if !addr.is_multiple_of(ALIGN) { + return Err(PhysPointerError::UnalignedPhysicalAddress(*addr, ALIGN)); } } - Ok(Self(addrs.into())) + Ok(Self { + inner: alloc::boxed::Box::from(addrs), + }) + } + pub fn is_empty(&self) -> bool { + self.inner.is_empty() + } + pub fn len(&self) -> usize { + self.inner.len() + } + pub fn iter(&self) -> impl Iterator { + self.inner.iter() } } @@ -134,6 +156,18 @@ impl From for PhysPageMapPermissions { phys_perms } } +impl From for MemoryRegionPermissions { + fn from(perms: PhysPageMapPermissions) -> Self { + let mut mem_perms = MemoryRegionPermissions::empty(); + if perms.contains(PhysPageMapPermissions::READ) { + mem_perms |= MemoryRegionPermissions::READ; + } + if perms.contains(PhysPageMapPermissions::WRITE) { + mem_perms |= MemoryRegionPermissions::WRITE; + } + mem_perms + } +} /// Trait to map and unmap physical pages into virtually contiguous address space. /// @@ -195,10 +229,14 @@ impl if offset >= ALIGN { return Err(PhysPointerError::InvalidBaseOffset(offset, ALIGN)); } - let size = if pages.0.is_empty() { + let size = if pages.is_empty() { 0 } else { - ALIGN - offset + (pages.0.len() - 1) * ALIGN + pages + .len() + .checked_mul(ALIGN) + .ok_or(PhysPointerError::Overflow)? + - offset }; if size < core::mem::size_of::() { return Err(PhysPointerError::InsufficientPhysicalPages( @@ -206,7 +244,7 @@ impl core::mem::size_of::(), )); } - for pa in &pages.0 { + for pa in pages.iter() { V::validate::(*pa)?; } Ok(Self { @@ -228,28 +266,26 @@ impl core::mem::size_of::(), )); } - let start_page = pa - (pa % ALIGN); - let end_page = pa + bytes; - let end_page_aligned = if end_page.is_multiple_of(ALIGN) { - end_page - } else { - end_page + (ALIGN - (end_page % ALIGN)) - }; - let mut pages = alloc::vec::Vec::new(); + let start_page = align_down(pa, ALIGN); + let end_page = align_up( + pa.checked_add(bytes).ok_or(PhysPointerError::Overflow)?, + ALIGN, + ); + let mut pages = alloc::vec::Vec::with_capacity((end_page - start_page) / ALIGN); let mut current_page = start_page; - while current_page < end_page_aligned { + while current_page < end_page { V::validate::(current_page)?; pages.push(current_page); current_page += ALIGN; } - Self::try_from_page_array(PhysPageArray(pages.into()), pa - start_page) + Self::try_from_page_array(PhysPageArray::try_from_slice(&pages)?, pa - start_page) } /// Create a new `PhysMutPtr` from the given physical address for a single object. /// This is a shortcut for `try_from_contiguous_pages(pa, size_of::())`. pub fn try_from_usize(pa: usize) -> Result { Self::try_from_contiguous_pages(pa, core::mem::size_of::()) } - /// Read the value at the given type-aware offset from the physical pointer. + /// Read the value at the given offset from the physical pointer. /// /// # Safety /// @@ -287,10 +323,51 @@ impl } unsafe { buffer.assume_init() } }; - self.unmap_all()?; + self.unmap()?; Ok(alloc::boxed::Box::new(val)) } - /// Write the value at the given type-aware offset to the physical pointer. + /// Read a slice of values at the given offset from the physical pointer. 
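    ///
    /// A usage sketch (names are placeholders; `MockPhysMemoryMapper` at the bottom
    /// of this file is a no-op implementation of this trait):
    ///
    /// ```ignore
    /// let info = unsafe { M::vmap(pages, PhysPageMapPermissions::READ)? };
    /// // ... copy data through info.base ...
    /// unsafe { M::vunmap(info)? };
    /// ```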
+ /// + /// # Safety + /// + /// The caller should be aware that the given physical address might be concurrently accessed by + /// other entities (e.g., the normal world kernel) if there is no extra security mechanism + /// in place (e.g., by the hypervisor or hardware). + pub unsafe fn read_slice_at_offset( + &mut self, + count: usize, + values: &mut [T], + ) -> Result<(), PhysPointerError> { + if count + .checked_add(values.len()) + .is_none_or(|end| end > self.count) + { + return Err(PhysPointerError::IndexOutOfBounds(count, self.count)); + } + self.map_all(PhysPageMapPermissions::READ)?; + let Some(map_info) = &self.map_info else { + return Err(PhysPointerError::NoMappingInfo); + }; + let addr = unsafe { map_info.base.add(self.offset) } + .cast::() + .wrapping_add(count); + if (addr as usize).is_multiple_of(core::mem::align_of::()) { + unsafe { + core::ptr::copy_nonoverlapping(addr, values.as_mut_ptr(), values.len()); + } + } else { + unsafe { + core::ptr::copy_nonoverlapping( + addr.cast::(), + values.as_mut_ptr().cast::(), + core::mem::size_of_val(values), + ); + } + } + self.unmap()?; + Ok(()) + } + /// Write the value at the given offset to the physical pointer. /// /// # Safety /// @@ -317,7 +394,48 @@ impl } else { unsafe { core::ptr::write_unaligned(addr, value) }; } - self.unmap_all()?; + self.unmap()?; + Ok(()) + } + /// Write a slice of values at the given offset to the physical pointer. + /// + /// # Safety + /// + /// The caller should be aware that the given physical address might be concurrently accessed by + /// other entities (e.g., the normal world kernel) if there is no extra security mechanism + /// in place (e.g., by the hypervisor or hardware). + pub unsafe fn write_slice_at_offset( + &mut self, + count: usize, + values: &[T], + ) -> Result<(), PhysPointerError> { + if count + .checked_add(values.len()) + .is_none_or(|end| end > self.count) + { + return Err(PhysPointerError::IndexOutOfBounds(count, self.count)); + } + self.map_all(PhysPageMapPermissions::READ | PhysPageMapPermissions::WRITE)?; + let Some(map_info) = &self.map_info else { + return Err(PhysPointerError::NoMappingInfo); + }; + let addr = unsafe { map_info.base.add(self.offset) } + .cast::() + .wrapping_add(count); + if (addr as usize).is_multiple_of(core::mem::align_of::()) { + unsafe { + core::ptr::copy_nonoverlapping(values.as_ptr(), addr, values.len()); + } + } else { + unsafe { + core::ptr::copy_nonoverlapping( + values.as_ptr().cast::(), + addr.cast::(), + core::mem::size_of_val(values), + ); + } + } + self.unmap()?; Ok(()) } /// Map the physical pages if not already mapped. @@ -328,11 +446,13 @@ impl } Ok(()) } else { - Err(PhysPointerError::AlreadyMapped(self.pages.0[0])) + Err(PhysPointerError::AlreadyMapped( + self.pages.iter().next().copied().unwrap_or(0), + )) } } /// Unmap the physical pages if mapped. 
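+    /// For example (sketch): with `T = u64`, `read_slice_at_offset(2, &mut buf)`
+    /// copies `buf.len()` elements starting 16 bytes past the pointer's base offset,
+    /// since `count` is in units of `T`.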
- fn unmap_all(&mut self) -> Result<(), PhysPointerError> { + fn unmap(&mut self) -> Result<(), PhysPointerError> { if let Some(map_info) = self.map_info.take() { unsafe { M::vunmap(map_info)?; @@ -340,7 +460,9 @@ impl self.map_info = None; Ok(()) } else { - Err(PhysPointerError::Unmapped(self.pages.0[0])) + Err(PhysPointerError::Unmapped( + self.pages.iter().next().copied().unwrap_or(0), + )) } } } @@ -348,7 +470,7 @@ impl impl core::fmt::Debug for PhysMutPtr { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_struct("PhysMutPtr") - .field("pages[0]", &self.pages.0[0]) + .field("pages[0]", &self.pages.iter().next().copied().unwrap_or(0)) .field("offset", &self.offset) .finish_non_exhaustive() } @@ -387,7 +509,7 @@ impl inner: PhysMutPtr::try_from_usize(pa)?, }) } - /// Read the value at the given type-aware offset from the physical pointer. + /// Read the value at the given offset from the physical pointer. /// /// # Safety /// @@ -400,12 +522,29 @@ impl ) -> Result, PhysPointerError> { unsafe { self.inner.read_at_offset(count) } } + /// Read a slice of values at the given offset from the physical pointer. + /// + /// # Safety + /// + /// The caller should be aware that the given physical address might be concurrently accessed by + /// other entities (e.g., the normal world kernel) if there is no extra security mechanism + /// in place (e.g., by the hypervisor or hardware). + pub unsafe fn read_slice_at_offset( + &mut self, + count: usize, + values: &mut [T], + ) -> Result<(), PhysPointerError> { + unsafe { self.inner.read_slice_at_offset(count, values) } + } } impl core::fmt::Debug for PhysConstPtr { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_struct("PhysConstPtr") - .field("pages[0]", &self.inner.pages.0[0]) + .field( + "pages[0]", + &self.inner.pages.iter().next().copied().unwrap_or(0), + ) .field("offset", &self.inner.offset) .finish_non_exhaustive() } @@ -428,7 +567,7 @@ impl PhysPageMapper for MockPhysMemoryMapper { ) -> Result, PhysPointerError> { Ok(PhysPageMapInfo { base: core::ptr::null_mut(), - size: pages.0.len() * ALIGN, + size: pages.iter().count() * ALIGN, }) } unsafe fn vunmap( @@ -462,6 +601,8 @@ pub enum PhysPointerError { Unmapped(usize), #[error("No mapping information available")] NoMappingInfo, + #[error("Overflow occurred during calculation")] + Overflow, } /// Normal world constant pointer type using MockPhysMemoryMapper for testing purposes. From b764f0a3c792a9903c2aa438c7619e7647d0ad34 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Mon, 22 Dec 2025 23:31:46 +0000 Subject: [PATCH 25/45] revised --- dev_tests/src/ratchet.rs | 1 + litebox_shim_optee/src/ptr.rs | 266 ++++++++++++++++++++++++++-------- 2 files changed, 205 insertions(+), 62 deletions(-) diff --git a/dev_tests/src/ratchet.rs b/dev_tests/src/ratchet.rs index 894dcc311..c6414893e 100644 --- a/dev_tests/src/ratchet.rs +++ b/dev_tests/src/ratchet.rs @@ -69,6 +69,7 @@ fn ratchet_maybe_uninit() -> Result<()> { ("litebox_platform_linux_userland/", 3), ("litebox_platform_lvbs/", 5), ("litebox_shim_linux/", 5), + ("litebox_shim_optee/", 1), ], |file| { Ok(file diff --git a/litebox_shim_optee/src/ptr.rs b/litebox_shim_optee/src/ptr.rs index 4d01e3efe..4988a656c 100644 --- a/litebox_shim_optee/src/ptr.rs +++ b/litebox_shim_optee/src/ptr.rs @@ -15,49 +15,49 @@ //! exposing APIs like Linux kernel's `vmap()` and `vunmap()`). However, this module //! does not take these approaches due to scalability (e.g., how to deal with a system //! 
with terabytes of physical memory?) and security concerns (e.g., data corruption or
-//! information leakage due to concurrent and persistent access).
+//! information leakage due to concurrent or persistent access).
 //!
 //! Instead, the approach this module takes is to map the required physical memory
-//! region on-demand when accessing them while using a buffer to copy data to/from
-//! those regions. This way, this module can ensure that data must be copied into
-//! LiteBox-managed memory before being used while avoiding any unknown side effects
-//! due to persistent memory mapping.
+//! region on-demand when accessing them while using a LiteBox-managed buffer to copy
+//! data to/from those regions. This way, this module can ensure that data must be
+//! copied into LiteBox-managed memory before being used while avoiding any unknown
+//! side effects due to persistent memory mapping.
 //!
 //! Considerations:
 //!
 //! Ideally, this module should be able to validate whether a given physical address
 //! is okay to access or even exists in the first place. For example, accessing
 //! LiteBox's own memory with this physical pointer abstraction must be prohibited to
-//! prevent the Boomerang attack. Also, some device memory is mapped to certain
-//! physical address ranges and LiteBox should not touch them without in-depth
-//! knowledge. However, this is a bit tricky because, in many cases, LiteBox does
-//! not directly interact with the underlying hardware or BIOS/UEFI. In the case of
-//! LVBS, LiteBox obtains the physical memory information from VTL0 including the
-//! total physical memory size and the memory range assigned to VTL1/LiteBox.
-//! Thus, this module can at least confirm a given physical address does not belong
-//! to VTL1's physical memory.
+//! prevent the Boomerang attack and any other undefined memory access. Also, some
+//! device memory is mapped to certain physical address ranges and LiteBox should not
+//! touch them without in-depth knowledge. However, this is a bit tricky because, in
+//! many cases, LiteBox does not directly interact with the underlying hardware or
+//! BIOS/UEFI such that it does not have complete knowledge of the physical memory
+//! layout. In the case of LVBS, LiteBox obtains the physical memory information
+//! from VTL0 including the total physical memory size and the memory range assigned
+//! to VTL1/LiteBox. Thus, this module can at least confirm a given physical address
+//! does not belong to VTL1's physical memory.
 //!
 //! This module should allow byte-level access while transparently handling page
 //! mapping and data access across page boundaries. This could become complicated
 //! when we consider multiple page sizes (e.g., 4 KiB, 2 MiB, 1 GiB). Also,
 //! unaligned access is a matter to be considered.
 //!
 //! In addition, oftentimes, this physical pointer abstraction involves
 //! a list of physical addresses (i.e., scatter-gather list). For example, in
 //! the worst case, a two-byte value can span across two non-contiguous physical
-//! pages. Thus, to enhance the performance, we may need to consider mapping
-//! multiple pages at once, copy data from/to them, and unmap them later. Currently,
-//! our implementation (in `litebox_platform_lvbs`) does not implement this
-//! functionality yet and it just maps/unmaps one page at a time (this works but is
-//! inefficient).
+//! pages (the last byte of the first page and the first byte of the second page).
+//! Thus, to enhance performance, we may need to consider mapping multiple pages
+//! at once, copying data from/to them, and unmapping them later.
 //!
 //! When this module needs to access data across physical page boundaries, it assumes
 //! that those physical pages are virtually contiguous in VTL0 or normal-world address
-//! space. Otherwise, this module could end up with accessing incorrect data. This is
+//! space. Otherwise, this module could end up accessing unrelated data. This is a
 //! best-effort assumption, and ensuring this is the caller's responsibility (e.g., even
-//! if this module always requires a list of physical addresses, the caller can provide
-//! a wrong list by mistake or intentionally).
+//! if this module always requires a list of physical addresses, the caller might
+//! provide a wrong list by mistake or intentionally).
 
+use core::ops::Deref;
 use litebox::platform::page_mgmt::MemoryRegionPermissions;
 use thiserror::Error;
 
@@ -77,7 +77,8 @@ fn align_up(len: usize, align: usize) -> usize {
 /// This validation is mainly to deal with the Boomerang attack where a normal-world client
 /// tricks the secure-world kernel (i.e., LiteBox) to access the secure-world memory.
 /// However, even if there is no such threat (e.g., no normal/secure world separation),
-/// this validation is still beneficial to ensure the memory safety.
+/// this validation is still beneficial to ensure memory safety by not accessing
+/// LiteBox-managed memory without going through its memory allocator.
 ///
 /// Succeeding these operations does not guarantee that the physical pointer is valid to
 /// access, just that it is outside of LiteBox-managed memory and won't be used to access
@@ -88,7 +89,7 @@ pub trait ValidateAccess {
     /// Here, we do not use `*const T` or `*mut T` because this is a physical pointer which
     /// must not be dereferenced directly.
     ///
-    /// Returns `Some(pa)` if valid. If the pointer is not valid, returns `None`.
+    /// Returns `Ok(pa)` if valid. If the pointer is not valid, returns `Err(PhysPointerError)`.
     fn validate(pa: usize) -> Result<usize, PhysPointerError>;
 }
 
@@ -100,6 +101,8 @@ pub struct PhysPageArray<const ALIGN: usize> {
 }
 impl<const ALIGN: usize> PhysPageArray<ALIGN> {
     /// Create a new `PhysPageArray` from the given slice of physical addresses.
+    ///
+    /// All page addresses must be aligned to `ALIGN`.
     pub fn try_from_slice(addrs: &[usize]) -> Result<Self, PhysPointerError> {
         for addr in addrs {
             if !addr.is_multiple_of(ALIGN) {
@@ -110,14 +113,33 @@ impl<const ALIGN: usize> PhysPageArray<ALIGN> {
             inner: alloc::boxed::Box::from(addrs),
         })
     }
+    /// Check if the array is empty.
     pub fn is_empty(&self) -> bool {
         self.inner.is_empty()
    }
+    /// Return the number of physical pages in the array.
     pub fn len(&self) -> usize {
         self.inner.len()
     }
-    pub fn iter(&self) -> impl Iterator<Item = &usize> {
-        self.inner.iter()
+    /// Return the first physical address in the array if it exists.
+    pub fn first(&self) -> Option<usize> {
+        self.inner.first().copied()
+    }
+}
+// Iterate page addresses by value by delegating to the underlying slice iterator;
+// delegating (rather than hand-rolling `next()` on the array itself) guarantees that
+// the iteration actually advances past the first element.
+impl<'a, const ALIGN: usize> IntoIterator for &'a PhysPageArray<ALIGN> {
+    type Item = usize;
+    type IntoIter = core::iter::Copied<core::slice::Iter<'a, usize>>;
+    fn into_iter(self) -> Self::IntoIter {
+        self.inner.iter().copied()
+    }
+}
+impl<const ALIGN: usize> core::ops::Deref for PhysPageArray<ALIGN> {
+    type Target = [usize];
+    fn deref(&self) -> &Self::Target {
+        &self.inner
     }
 }
 
@@ -176,23 +198,31 @@ impl From<PhysPageMapPermissions> for MemoryRegionPermissions {
 pub trait PhysPageMapper {
     /// Map the given [`PhysPageArray`] into virtually contiguous address space with the given
     /// [`PhysPageMapPermissions`] while returning [`PhysPageMapInfo`].
+    ///
+    /// This function is analogous to Linux kernel's `vmap()`.
     ///
     /// # Safety
     ///
-    /// The caller must ensure that `pages` are not in active use. LiteBox itself cannot fully guarantee this
-    /// and it needs some helps from the caller, hypervisor, or hardware.
+    /// The caller should ensure that `pages` are not in active use by other entities. LiteBox
+    /// itself cannot fully guarantee this and it needs some help from the caller, hypervisor,
+    /// or hardware.
+    /// Multiple LiteBox threads might concurrently call this function with overlapping physical
+    /// pages, so the implementation should safely handle such cases.
     unsafe fn vmap(
         pages: PhysPageArray<ALIGN>,
         perms: PhysPageMapPermissions,
     ) -> Result<PhysPageMapInfo<ALIGN>, PhysPointerError>;
     /// Unmap the previously mapped virtually contiguous address space ([`PhysPageMapInfo`]).
+    ///
     /// This function is analogous to Linux kernel's `vunmap()`.
     ///
     /// # Safety
     ///
-    /// The caller must ensure that the virtual addresses belonging to `vmap_info` are not in active use.
-    /// Like `vmap()`, LiteBox itself cannot fully guarantee this and it needs some helps from other parties.
+    /// The caller should ensure that the virtual addresses belonging to `vmap_info` are not in
+    /// active use by other entities. Like `vmap()`, LiteBox itself cannot fully guarantee this
+    /// and it needs some help from other parties.
+    /// Multiple LiteBox threads might concurrently call this function with overlapping physical
+    /// pages, so the implementation should safely handle such cases.
     unsafe fn vunmap(
         vmap_info: PhysPageMapInfo<ALIGN>,
     ) -> Result<(), PhysPointerError>;
@@ -222,6 +252,9 @@ impl
     PhysMutPtr<T, ALIGN, M, V>
 {
     /// Create a new `PhysMutPtr` from the given physical page array and offset.
+    ///
+    /// All addresses in `pages` must be valid and aligned to `ALIGN`, and `offset` must be smaller than `ALIGN`.
+    /// Also, `pages` must contain enough pages to cover at least one object of type `T` starting from `offset`.
     pub fn try_from_page_array(
         pages: PhysPageArray<ALIGN>,
         offset: usize,
     ) -> Result<Self, PhysPointerError> {
@@ -258,6 +291,8 @@ impl
         })
     }
     /// Create a new `PhysMutPtr` from the given contiguous physical address and length.
+    ///
+    /// This is a shortcut for `try_from_page_array([align_down(pa), ..., align_up(pa + bytes)], pa % ALIGN)`.
     /// The caller must ensure that `pa`, ..., `pa+bytes` are both physically and virtually contiguous.
     pub fn try_from_contiguous_pages(pa: usize, bytes: usize) -> Result<Self, PhysPointerError> {
@@ -281,7 +316,10 @@ impl
         Self::try_from_page_array(PhysPageArray::try_from_slice(&pages)?, pa - start_page)
     }
     /// Create a new `PhysMutPtr` from the given physical address for a single object.
+    ///
     /// This is a shortcut for `try_from_contiguous_pages(pa, size_of::<T>())`.
+    ///
+    /// Note: This module doesn't provide `as_usize` because LiteBox should not dereference physical addresses directly.
     pub fn try_from_usize(pa: usize) -> Result<Self, PhysPointerError> {
         Self::try_from_contiguous_pages(pa, core::mem::size_of::<T>())
     }
     /// Read the value at the given offset from the physical pointer.
     ///
     /// # Safety
     ///
     /// The caller should be aware that the given physical address might be concurrently accessed by
     /// other entities (e.g., the normal world kernel) if there is no extra security mechanism
-    /// in place (e.g., by the hypervisor or hardware).
+    /// in place (e.g., by the hypervisor or hardware). That is, it might read corrupt data.
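+    ///
+    /// For example (sketch): with `T = u32`, `read_at_offset(3)` maps the page(s)
+    /// covering bytes 12..16 past the base offset, copies that `u32` into a
+    /// LiteBox-owned `Box`, and unmaps again before returning.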
pub unsafe fn read_at_offset( &mut self, count: usize, @@ -299,8 +337,24 @@ impl if count >= self.count { return Err(PhysPointerError::IndexOutOfBounds(count, self.count)); } - self.map_all(PhysPageMapPermissions::READ)?; + let skip = self + .offset + .checked_add( + count + .checked_mul(core::mem::size_of::()) + .ok_or(PhysPointerError::Overflow)?, + ) + .ok_or(PhysPointerError::Overflow)?; + let start = skip / ALIGN; + let end = (skip + core::mem::size_of::()).div_ceil(ALIGN); + unsafe { + self.map_range(start, end, PhysPageMapPermissions::READ)?; + } + // Don't forget to call unmap() before returning to the caller let Some(map_info) = &self.map_info else { + unsafe { + self.unmap()?; + } return Err(PhysPointerError::NoMappingInfo); }; let addr = unsafe { map_info.base.add(self.offset) } @@ -323,7 +377,9 @@ impl } unsafe { buffer.assume_init() } }; - self.unmap()?; + unsafe { + self.unmap()?; + } Ok(alloc::boxed::Box::new(val)) } /// Read a slice of values at the given offset from the physical pointer. @@ -332,7 +388,7 @@ impl /// /// The caller should be aware that the given physical address might be concurrently accessed by /// other entities (e.g., the normal world kernel) if there is no extra security mechanism - /// in place (e.g., by the hypervisor or hardware). + /// in place (e.g., by the hypervisor or hardware). That is, it might read corrupt data. pub unsafe fn read_slice_at_offset( &mut self, count: usize, @@ -344,8 +400,24 @@ impl { return Err(PhysPointerError::IndexOutOfBounds(count, self.count)); } - self.map_all(PhysPageMapPermissions::READ)?; + let skip = self + .offset + .checked_add( + count + .checked_mul(core::mem::size_of::()) + .ok_or(PhysPointerError::Overflow)?, + ) + .ok_or(PhysPointerError::Overflow)?; + let start = skip / ALIGN; + let end = (skip + core::mem::size_of_val(values)).div_ceil(ALIGN); + unsafe { + self.map_range(start, end, PhysPageMapPermissions::READ)?; + } + // Don't forget to call unmap() before returning to the caller let Some(map_info) = &self.map_info else { + unsafe { + self.unmap()?; + } return Err(PhysPointerError::NoMappingInfo); }; let addr = unsafe { map_info.base.add(self.offset) } @@ -364,7 +436,9 @@ impl ); } } - self.unmap()?; + unsafe { + self.unmap()?; + } Ok(()) } /// Write the value at the given offset to the physical pointer. @@ -373,7 +447,7 @@ impl /// /// The caller should be aware that the given physical address might be concurrently accessed by /// other entities (e.g., the normal world kernel) if there is no extra security mechanism - /// in place (e.g., by the hypervisor or hardware). + /// in place (e.g., by the hypervisor or hardware). That is, data it writes might be overwritten. 
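+    ///
+    /// For example (sketch): with `T = u64`, `write_at_offset(1, value)` maps the
+    /// page(s) covering bytes 8..16 past the base offset, stores `value` (using an
+    /// unaligned store when necessary), and unmaps again before returning.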
pub unsafe fn write_at_offset( &mut self, count: usize, @@ -382,8 +456,28 @@ impl if count >= self.count { return Err(PhysPointerError::IndexOutOfBounds(count, self.count)); } - self.map_all(PhysPageMapPermissions::READ | PhysPageMapPermissions::WRITE)?; + let skip = self + .offset + .checked_add( + count + .checked_mul(core::mem::size_of::()) + .ok_or(PhysPointerError::Overflow)?, + ) + .ok_or(PhysPointerError::Overflow)?; + let start = skip / ALIGN; + let end = (skip + core::mem::size_of::()).div_ceil(ALIGN); + unsafe { + self.map_range( + start, + end, + PhysPageMapPermissions::READ | PhysPageMapPermissions::WRITE, + )?; + } + // Don't forget to call unmap() before returning to the caller let Some(map_info) = &self.map_info else { + unsafe { + self.unmap()?; + } return Err(PhysPointerError::NoMappingInfo); }; let addr = unsafe { map_info.base.add(self.offset) } @@ -394,7 +488,9 @@ impl } else { unsafe { core::ptr::write_unaligned(addr, value) }; } - self.unmap()?; + unsafe { + self.unmap()?; + } Ok(()) } /// Write a slice of values at the given offset to the physical pointer. @@ -403,7 +499,7 @@ impl /// /// The caller should be aware that the given physical address might be concurrently accessed by /// other entities (e.g., the normal world kernel) if there is no extra security mechanism - /// in place (e.g., by the hypervisor or hardware). + /// in place (e.g., by the hypervisor or hardware). That is, data it writes might be overwritten. pub unsafe fn write_slice_at_offset( &mut self, count: usize, @@ -415,8 +511,28 @@ impl { return Err(PhysPointerError::IndexOutOfBounds(count, self.count)); } - self.map_all(PhysPageMapPermissions::READ | PhysPageMapPermissions::WRITE)?; + let skip = self + .offset + .checked_add( + count + .checked_mul(core::mem::size_of::()) + .ok_or(PhysPointerError::Overflow)?, + ) + .ok_or(PhysPointerError::Overflow)?; + let start = skip / ALIGN; + let end = (skip + core::mem::size_of_val(values)).div_ceil(ALIGN); + unsafe { + self.map_range( + start, + end, + PhysPageMapPermissions::READ | PhysPageMapPermissions::WRITE, + )?; + } + // Don't forget to call unmap() before returning to the caller let Some(map_info) = &self.map_info else { + unsafe { + self.unmap()?; + } return Err(PhysPointerError::NoMappingInfo); }; let addr = unsafe { map_info.base.add(self.offset) } @@ -435,24 +551,45 @@ impl ); } } - self.unmap()?; + unsafe { + self.unmap()?; + } Ok(()) } - /// Map the physical pages if not already mapped. - fn map_all(&mut self, perms: PhysPageMapPermissions) -> Result<(), PhysPointerError> { + /// Map the physical pages from `start` to `end` indexes. + /// + /// # Safety + /// + /// This function assumes that the underlying platform safely handles concurrent mapping/unmapping + /// requests for the same physical pages. + unsafe fn map_range( + &mut self, + start: usize, + end: usize, + perms: PhysPageMapPermissions, + ) -> Result<(), PhysPointerError> { + if start >= end || end > self.pages.len() { + return Err(PhysPointerError::IndexOutOfBounds(end, self.pages.len())); + } if self.map_info.is_none() { + let sub_pages = PhysPageArray::try_from_slice(&self.pages.deref()[start..end])?; unsafe { - self.map_info = Some(M::vmap(self.pages.clone(), perms)?); + self.map_info = Some(M::vmap(sub_pages, perms)?); } Ok(()) } else { Err(PhysPointerError::AlreadyMapped( - self.pages.iter().next().copied().unwrap_or(0), + self.pages.first().unwrap_or(0), )) } } /// Unmap the physical pages if mapped. 
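// Sketch of the map-once invariant enforced above (not part of the patch; `ptr` is any
// PhysMutPtr covering at least one page): each access pairs exactly one successful
// `map_range` with one `unmap`, and stacking mappings is rejected.
//
// unsafe {
//     ptr.map_range(0, 1, PhysPageMapPermissions::READ)?; // ok: nothing mapped yet
//     // a second map_range here would return PhysPointerError::AlreadyMapped
//     ptr.unmap()?; // required before the pointer can be mapped again
// }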
- fn unmap(&mut self) -> Result<(), PhysPointerError> { + /// + /// # Safety + /// + /// This function assumes that the underlying platform safely handles concurrent mapping/unmapping + /// requests for the same physical pages. + unsafe fn unmap(&mut self) -> Result<(), PhysPointerError> { if let Some(map_info) = self.map_info.take() { unsafe { M::vunmap(map_info)?; @@ -460,9 +597,7 @@ impl self.map_info = None; Ok(()) } else { - Err(PhysPointerError::Unmapped( - self.pages.iter().next().copied().unwrap_or(0), - )) + Err(PhysPointerError::Unmapped(self.pages.first().unwrap_or(0))) } } } @@ -470,7 +605,7 @@ impl impl core::fmt::Debug for PhysMutPtr { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_struct("PhysMutPtr") - .field("pages[0]", &self.pages.iter().next().copied().unwrap_or(0)) + .field("pages[0]", &self.pages.first().unwrap_or(0)) .field("offset", &self.offset) .finish_non_exhaustive() } @@ -486,7 +621,10 @@ pub struct PhysConstPtr { impl PhysConstPtr { - /// Create a new `PhysConstPtr` from the given physical page array and offset. + /// Create a new `PhysMutPtr` from the given physical page array and offset. + /// + /// All addresses in `pages` must be valid and aligned to `ALIGN`, and `offset` must be smaller than `ALIGN`. + /// Also, `pages` must contain enough pages to cover at least one object of type `T` starting from `offset`. pub fn try_from_page_array( pages: PhysPageArray, offset: usize, @@ -495,15 +633,20 @@ impl inner: PhysMutPtr::try_from_page_array(pages, offset)?, }) } - /// Create a new `PhysConstPtr` from the given contiguous physical address and length. + /// Create a new `PhysMutPtr` from the given contiguous physical address and length. + /// + /// This is a shortcut for `try_from_page_array([align_down(pa), ..., align_up(align_down(pa) + bytes)], pa % ALIGN)`. /// The caller must ensure that `pa`, ..., `pa+bytes` are both physically and virtually contiguous. pub fn try_from_contiguous_pages(pa: usize, bytes: usize) -> Result { Ok(Self { inner: PhysMutPtr::try_from_contiguous_pages(pa, bytes)?, }) } - /// Create a new `PhysConstPtr` from the given physical address for a single object. + /// Create a new `PhysMutPtr` from the given physical address for a single object. + /// /// This is a shortcut for `try_from_contiguous_pages(pa, size_of::())`. + /// + /// Note: This module doesn't provide `as_usize` because LiteBox should not dereference physical addresses directly. pub fn try_from_usize(pa: usize) -> Result { Ok(Self { inner: PhysMutPtr::try_from_usize(pa)?, @@ -515,7 +658,7 @@ impl /// /// The caller should be aware that the given physical address might be concurrently accessed by /// other entities (e.g., the normal world kernel) if there is no extra security mechanism - /// in place (e.g., by the hypervisor or hardware). + /// in place (e.g., by the hypervisor or hardware). That is, it might read corrupt data. pub unsafe fn read_at_offset( &mut self, count: usize, @@ -528,7 +671,7 @@ impl /// /// The caller should be aware that the given physical address might be concurrently accessed by /// other entities (e.g., the normal world kernel) if there is no extra security mechanism - /// in place (e.g., by the hypervisor or hardware). + /// in place (e.g., by the hypervisor or hardware). That is, it might read corrupt data. 
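// Illustration (hypothetical address and element type; not part of the patch): the
// read-only wrapper is used like `PhysMutPtr`, but only the read accessors are exposed.
//
// let mut args = NormalWorldConstPtr::<u32>::try_from_usize(0x4000_0000)?;
// let first = unsafe { args.read_at_offset(0)? }; // boxed copy; no write_* methods exist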
pub unsafe fn read_slice_at_offset( &mut self, count: usize, @@ -541,17 +684,14 @@ impl impl core::fmt::Debug for PhysConstPtr { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_struct("PhysConstPtr") - .field( - "pages[0]", - &self.inner.pages.iter().next().copied().unwrap_or(0), - ) + .field("pages[0]", &self.inner.pages.first().unwrap_or(0)) .field("offset", &self.inner.offset) .finish_non_exhaustive() } } -// TODO: Sample no-op implementations to be removed. Implement a validation mechanism for -// VTL0 physical addresses (e.g., ensure this physical address does not belong to VTL1) +/// This is a mock implementation that does no validation. Each platform which supports +/// `PhysMutPtr` and `PhysConstPtr` should provide its `ValidateAccess` implementation. pub struct NoValidation; impl ValidateAccess for NoValidation { fn validate(pa: usize) -> Result { @@ -559,6 +699,8 @@ impl ValidateAccess for NoValidation { } } +/// This is a mock implementation that does no actual mapping. Each platform which supports +/// `PhysMutPtr` and `PhysConstPtr` should provide its `PhysPageMapper` implementation. pub struct MockPhysMemoryMapper; impl PhysPageMapper for MockPhysMemoryMapper { unsafe fn vmap( From e2a25b5e96a48de4d06d313e2524cc49aa451169 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Tue, 23 Dec 2025 15:50:29 +0000 Subject: [PATCH 26/45] check page contiguity --- litebox_shim_optee/src/ptr.rs | 78 ++++++++++++++++++++++------------- 1 file changed, 49 insertions(+), 29 deletions(-) diff --git a/litebox_shim_optee/src/ptr.rs b/litebox_shim_optee/src/ptr.rs index 4988a656c..701c1f3bb 100644 --- a/litebox_shim_optee/src/ptr.rs +++ b/litebox_shim_optee/src/ptr.rs @@ -109,6 +109,9 @@ impl PhysPageArray { return Err(PhysPointerError::UnalignedPhysicalAddress(*addr, ALIGN)); } } + // TODO: Remove this check once our platform implementations support virtually + // contiguous non-contiguous physical page mapping. + Self::check_contiguity(addrs)?; Ok(Self { inner: alloc::boxed::Box::from(addrs), }) @@ -125,6 +128,21 @@ impl PhysPageArray { pub fn first(&self) -> Option { self.inner.first().copied() } + /// Checks whether the given physical addresses are contiguous with respect to ALIGN. + /// + /// Note: This is a temporary check to let this module work with our platform implementations + /// which map physical pages with a fixed offset (`MemoryProvider::GVA_OFFSET`) such that + /// do not support non-contiguous physical page mapping with contiguous virtual addresses. + fn check_contiguity(addrs: &[usize]) -> Result<(), PhysPointerError> { + for window in addrs.windows(2) { + let first = window[0]; + let second = window[1]; + if second != first.checked_add(ALIGN).ok_or(PhysPointerError::Overflow)? { + return Err(PhysPointerError::NonContiguousPages); + } + } + Ok(()) + } } impl core::iter::Iterator for PhysPageArray { type Item = usize; @@ -690,35 +708,6 @@ impl core::fmt::Debug for PhysConstPtr(pa: usize) -> Result { - Ok(pa) - } -} - -/// This is a mock implementation that does no actual mapping. Each platform which supports -/// `PhysMutPtr` and `PhysConstPtr` should provide its `PhysPageMapper` implementation. 
-pub struct MockPhysMemoryMapper; -impl PhysPageMapper for MockPhysMemoryMapper { - unsafe fn vmap( - pages: PhysPageArray, - _perms: PhysPageMapPermissions, - ) -> Result, PhysPointerError> { - Ok(PhysPageMapInfo { - base: core::ptr::null_mut(), - size: pages.iter().count() * ALIGN, - }) - } - unsafe fn vunmap( - _vmap_info: PhysPageMapInfo, - ) -> Result<(), PhysPointerError> { - Ok(()) - } -} - /// Possible errors for physical page access #[non_exhaustive] #[derive(Error, Debug)] @@ -745,6 +734,37 @@ pub enum PhysPointerError { NoMappingInfo, #[error("Overflow occurred during calculation")] Overflow, + #[error("Non-contiguous physical pages in the array")] + NonContiguousPages, +} + +/// This is a mock implementation that does no validation. Each platform which supports +/// `PhysMutPtr` and `PhysConstPtr` should provide its `ValidateAccess` implementation. +pub struct NoValidation; +impl ValidateAccess for NoValidation { + fn validate(pa: usize) -> Result { + Ok(pa) + } +} + +/// This is a mock implementation that does no actual mapping. Each platform which supports +/// `PhysMutPtr` and `PhysConstPtr` should provide its `PhysPageMapper` implementation. +pub struct MockPhysMemoryMapper; +impl PhysPageMapper for MockPhysMemoryMapper { + unsafe fn vmap( + _pages: PhysPageArray, + _perms: PhysPageMapPermissions, + ) -> Result, PhysPointerError> { + Ok(PhysPageMapInfo { + base: core::ptr::null_mut(), + size: 0, + }) + } + unsafe fn vunmap( + _vmap_info: PhysPageMapInfo, + ) -> Result<(), PhysPointerError> { + Ok(()) + } } /// Normal world constant pointer type using MockPhysMemoryMapper for testing purposes. From 75ee395ab7fbbe044991f4ec2975a8ec7be1f744 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Tue, 23 Dec 2025 23:23:02 +0000 Subject: [PATCH 27/45] VmapProvider --- Cargo.lock | 1 + litebox/src/platform/mod.rs | 1 + litebox/src/platform/vmap.rs | 207 ++++++++++++++ litebox_common_optee/Cargo.toml | 1 + litebox_platform_linux_userland/src/lib.rs | 26 ++ litebox_platform_lvbs/src/lib.rs | 21 ++ litebox_shim_optee/src/lib.rs | 3 + litebox_shim_optee/src/msg_handler.rs | 5 +- litebox_shim_optee/src/ptr.rs | 300 ++------------------- 9 files changed, 292 insertions(+), 273 deletions(-) create mode 100644 litebox/src/platform/vmap.rs diff --git a/Cargo.lock b/Cargo.lock index 93b174c06..d17118788 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -881,6 +881,7 @@ dependencies = [ "litebox_common_linux", "modular-bitfield", "num_enum", + "thiserror", ] [[package]] diff --git a/litebox/src/platform/mod.rs b/litebox/src/platform/mod.rs index 3fad67900..b25dec4e7 100644 --- a/litebox/src/platform/mod.rs +++ b/litebox/src/platform/mod.rs @@ -10,6 +10,7 @@ pub mod common_providers; pub mod page_mgmt; pub mod trivial_providers; +pub mod vmap; #[cfg(test)] pub(crate) mod mock; diff --git a/litebox/src/platform/vmap.rs b/litebox/src/platform/vmap.rs new file mode 100644 index 000000000..029a4292f --- /dev/null +++ b/litebox/src/platform/vmap.rs @@ -0,0 +1,207 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +use crate::platform::page_mgmt::MemoryRegionPermissions; +use thiserror::Error; + +/// A provider to map and unmap physical pages with virtually contiguous addresses. +/// +/// `ALIGN`: The page frame size. +/// +/// This provider is written to implement `litebox_shim_optee::ptr::PhysMutPtr` and +/// `litebox_shim_optee::ptr::PhysConstPtr`. It can benefit other modules which need +/// Linux kernel's `vmap()` and `vunmap()` functionalities (e.g., HVCI/HEKI, drivers). 
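// For orientation, a hedged sketch of the intended call sequence (`provider` is any
// implementor of the trait defined below; names are illustrative):
//
// let info = unsafe { provider.vmap(pages, PhysPageMapPermissions::READ)? };
// // ... read from info.base .. info.base.add(info.size) ...
// unsafe { provider.vunmap(info)? };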
+pub trait VmapProvider { + /// Data structure for an array of physical pages which are virtually contiguous. + type PhysPageArray; + /// Data structure to maintain the mapping information returned by `vmap()`. + type PhysPageMapInfo; + /// Map the given [`PhysPageArray`] into virtually contiguous addresses with the given + /// [`PhysPageMapPermissions`] while returning [`PhysPageMapInfo`]. This function + /// expects that it can access and update the page table using `&self`. + /// + /// This function is analogous to Linux kernel's `vmap()`. + /// + /// # Safety + /// + /// The caller should ensure that `pages` are not in active use by other entities. + /// Unfortunately, LiteBox itself cannot fully guarantee this and it needs some help + /// from the caller, hypervisor, or hardware. + /// Multiple LiteBox threads might concurrently call this function (and `vunmap()`) with + /// overlapping physical pages, so the implementation should safely handle such cases. + unsafe fn vmap( + &self, + pages: Self::PhysPageArray, + perms: PhysPageMapPermissions, + ) -> Result; + /// Unmap the previously mapped virtually contiguous addresses ([`PhysPageMapInfo`]). + /// Use `&self` to access and update the page table. + /// + /// This function is analogous to Linux kernel's `vunmap()`. + /// + /// # Safety + /// + /// The caller should ensure that the virtual addresses in `vmap_info` are not in active + /// use by other entities. Like `vmap()`, LiteBox itself cannot fully guarantee this and + /// it needs some help from other parties. + /// Multiple LiteBox threads might concurrently call this function (and `vmap()`) with + /// overlapping physical pages, so the implementation should safely handle such cases. + unsafe fn vunmap(&self, vmap_info: Self::PhysPageMapInfo) -> Result<(), PhysPointerError>; + /// Validate that the given physical address (with type) does not belong to LiteBox-managed + /// memory. Use `&self` to get the memory layout of the platform (i.e., the physical memory + /// range assigned to LiteBox). + /// + /// This function does not use `*const T` or `*mut T` because it deals with a physical address + /// which must not be dereferenced directly. + /// + /// Returns `Ok(pa)` if valid. If the address is not valid, returns `Err(PhysPointerError)`. + fn validate(&self, pa: usize) -> Result; +} + +/// Data structure for an array of physical pages. These physical pages should be virtually contiguous. +#[derive(Clone)] +pub struct PhysPageArray { + inner: alloc::boxed::Box<[usize]>, +} +impl PhysPageArray { + /// Create a new `PhysPageArray` from the given slice of physical addresses. + /// + /// All page addresses must be aligned to `ALIGN`. + pub fn try_from_slice(addrs: &[usize]) -> Result { + for addr in addrs { + if !addr.is_multiple_of(ALIGN) { + return Err(PhysPointerError::UnalignedPhysicalAddress(*addr, ALIGN)); + } + } + // TODO: Remove this check once our platform implementations support virtually + // contiguous non-contiguous physical page mapping. + Self::check_contiguity(addrs)?; + Ok(Self { + inner: alloc::boxed::Box::from(addrs), + }) + } + /// Check if the array is empty. + pub fn is_empty(&self) -> bool { + self.inner.is_empty() + } + /// Return the number of physical pages in the array. + pub fn len(&self) -> usize { + self.inner.len() + } + /// Return the first physical address in the array if exists. + pub fn first(&self) -> Option { + self.inner.first().copied() + } + /// Checks whether the given physical addresses are contiguous with respect to ALIGN.
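+ /// For example, with `ALIGN = 4096`, `[0x1000, 0x2000]` passes while `[0x1000, 0x3000]`
+ /// fails with `PhysPointerError::NonContiguousPages`.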
+ /// + /// Note: This is a temporary check to let this module work with our platform implementations + /// which map physical pages with a fixed offset (`MemoryProvider::GVA_OFFSET`) such that + /// do not support non-contiguous physical page mapping with contiguous virtual addresses. + fn check_contiguity(addrs: &[usize]) -> Result<(), PhysPointerError> { + for window in addrs.windows(2) { + let first = window[0]; + let second = window[1]; + if second != first.checked_add(ALIGN).ok_or(PhysPointerError::Overflow)? { + return Err(PhysPointerError::NonContiguousPages); + } + } + Ok(()) + } +} +impl core::iter::Iterator for PhysPageArray { + type Item = usize; + fn next(&mut self) -> Option { + if self.inner.is_empty() { + None + } else { + Some(self.inner[0]) + } + } +} +impl core::ops::Deref for PhysPageArray { + type Target = [usize]; + fn deref(&self) -> &Self::Target { + &self.inner + } +} + +/// Data structure to maintain the mapping information returned by `vmap()`. +/// +/// `base` is the virtual address of the mapped region which is page aligned. +/// `size` is the size of the mapped region in bytes. +#[derive(Clone)] +pub struct PhysPageMapInfo { + pub base: *mut u8, + pub size: usize, +} + +bitflags::bitflags! { + /// Physical page map permissions which is a restricted version of + /// [`litebox::platform::page_mgmt::MemoryRegionPermissions`]. + /// + /// This module only supports READ and WRITE permissions. Both EXECUTE and SHARED + /// permissions are explicitly prohibited. + #[derive(Clone, Copy, Debug, PartialEq, Eq)] + pub struct PhysPageMapPermissions: u8 { + /// Readable + const READ = 1 << 0; + /// Writable + const WRITE = 1 << 1; + } +} +impl From for PhysPageMapPermissions { + fn from(perms: MemoryRegionPermissions) -> Self { + let mut phys_perms = PhysPageMapPermissions::empty(); + if perms.contains(MemoryRegionPermissions::READ) { + phys_perms |= PhysPageMapPermissions::READ; + } + if perms.contains(MemoryRegionPermissions::WRITE) { + phys_perms |= PhysPageMapPermissions::WRITE; + } + phys_perms + } +} +impl From for MemoryRegionPermissions { + fn from(perms: PhysPageMapPermissions) -> Self { + let mut mem_perms = MemoryRegionPermissions::empty(); + if perms.contains(PhysPageMapPermissions::READ) { + mem_perms |= MemoryRegionPermissions::READ; + } + if perms.contains(PhysPageMapPermissions::WRITE) { + mem_perms |= MemoryRegionPermissions::WRITE; + } + mem_perms + } +} + +/// Possible errors for physical pointer access with `VmapProvider` +#[non_exhaustive] +#[derive(Error, Debug)] +pub enum PhysPointerError { + #[error("Physical address {0:#x} is invalid to access")] + InvalidPhysicalAddress(usize), + #[error("Physical address {0:#x} is not aligned to {1} bytes")] + UnalignedPhysicalAddress(usize, usize), + #[error("Offset {0:#x} is not aligned to {1} bytes")] + UnalignedOffset(usize, usize), + #[error("Base offset {0:#x} is greater than or equal to alignment ({1} bytes)")] + InvalidBaseOffset(usize, usize), + #[error( + "The total size of the given pages ({0} bytes) is insufficient for the requested type ({1} bytes)" + )] + InsufficientPhysicalPages(usize, usize), + #[error("Index {0} is out of bounds (count: {1})")] + IndexOutOfBounds(usize, usize), + #[error("Physical address {0:#x} is already mapped")] + AlreadyMapped(usize), + #[error("Physical address {0:#x} is unmapped")] + Unmapped(usize), + #[error("No mapping information available")] + NoMappingInfo, + #[error("Overflow occurred during calculation")] + Overflow, + #[error("Non-contiguous physical pages in the 
array")] + NonContiguousPages, + #[error("The operation is unsupported on this platform")] + UnsupportedOperation, +} diff --git a/litebox_common_optee/Cargo.toml b/litebox_common_optee/Cargo.toml index 5b88e7c9f..901997b43 100644 --- a/litebox_common_optee/Cargo.toml +++ b/litebox_common_optee/Cargo.toml @@ -9,6 +9,7 @@ litebox = { path = "../litebox/", version = "0.1.0" } litebox_common_linux = { path = "../litebox_common_linux/", version = "0.1.0" } modular-bitfield = { version = "0.12.0", default-features = false } num_enum = { version = "0.7.3", default-features = false } +thiserror = { version = "2.0.6", default-features = false } [lints] workspace = true diff --git a/litebox_platform_linux_userland/src/lib.rs b/litebox_platform_linux_userland/src/lib.rs index bc7657fd8..b95bd4ead 100644 --- a/litebox_platform_linux_userland/src/lib.rs +++ b/litebox_platform_linux_userland/src/lib.rs @@ -15,6 +15,9 @@ use std::time::Duration; use litebox::fs::OFlags; use litebox::platform::UnblockedOrTimedOut; use litebox::platform::page_mgmt::{FixedAddressBehavior, MemoryRegionPermissions}; +use litebox::platform::vmap::{ + PhysPageArray, PhysPageMapInfo, PhysPageMapPermissions, PhysPointerError, VmapProvider, +}; use litebox::platform::{ImmediatelyWokenUp, RawConstPointer as _}; use litebox::shim::ContinueOperation; use litebox::utils::{ReinterpretSignedExt, ReinterpretUnsignedExt as _, TruncateExt}; @@ -2187,6 +2190,29 @@ impl litebox::platform::CrngProvider for LinuxUserland { } } +/// Dummy `VmapProvider`. +/// +/// In general, userland platforms do not support `vmap` and `vunmap` (which are kernel functions). +/// We might need to emulate these functions' behaviors using virtual addresses for development or +/// testing, or use a kernel module to provide this functionality (if needed). 
+impl VmapProvider for LinuxUserland { + type PhysPageArray = PhysPageArray; + type PhysPageMapInfo = PhysPageMapInfo; + unsafe fn vmap( + &self, + _pages: Self::PhysPageArray, + _perms: PhysPageMapPermissions, + ) -> Result { + Err(PhysPointerError::UnsupportedOperation) + } + unsafe fn vunmap(&self, _vmap_info: Self::PhysPageMapInfo) -> Result<(), PhysPointerError> { + Err(PhysPointerError::UnsupportedOperation) + } + fn validate(&self, _pa: usize) -> Result { + Err(PhysPointerError::UnsupportedOperation) + } +} + #[cfg(test)] mod tests { use core::sync::atomic::AtomicU32; diff --git a/litebox_platform_lvbs/src/lib.rs b/litebox_platform_lvbs/src/lib.rs index e824d92a1..06b43b287 100644 --- a/litebox_platform_lvbs/src/lib.rs +++ b/litebox_platform_lvbs/src/lib.rs @@ -16,6 +16,9 @@ use core::{ sync::atomic::{AtomicU32, AtomicU64}, }; use litebox::platform::page_mgmt::DeallocationError; +use litebox::platform::vmap::{ + PhysPageArray, PhysPageMapInfo, PhysPageMapPermissions, PhysPointerError, VmapProvider, +}; use litebox::platform::{ DebugLogProvider, IPInterfaceProvider, ImmediatelyWokenUp, PageManagementProvider, Punchthrough, RawMutexProvider, StdioProvider, TimeProvider, UnblockedOrTimedOut, @@ -760,6 +763,24 @@ impl litebox::platform::SystemInfoProvider for LinuxKernel< } } +impl VmapProvider for LinuxKernel { + type PhysPageArray = PhysPageArray; + type PhysPageMapInfo = PhysPageMapInfo; + unsafe fn vmap( + &self, + _pages: Self::PhysPageArray, + _perms: PhysPageMapPermissions, + ) -> Result { + todo!("use map_vtl0_phys_range()") + } + unsafe fn vunmap(&self, _vmap_info: Self::PhysPageMapInfo) -> Result<(), PhysPointerError> { + todo!("use unmap_vtl0_pages()") + } + fn validate(&self, _pa: usize) -> Result { + todo!("use vtl1_phys_frame_range to validate") + } +} + // NOTE: The below code is a naive workaround to let LVBS code access the platform. // Rather than doing this, we should implement LVBS interface/provider for the platform. diff --git a/litebox_shim_optee/src/lib.rs b/litebox_shim_optee/src/lib.rs index 0591f0f39..e0e4decba 100644 --- a/litebox_shim_optee/src/lib.rs +++ b/litebox_shim_optee/src/lib.rs @@ -1240,3 +1240,6 @@ mod test_utils { } } } + +pub type NormalWorldConstPtr = crate::ptr::PhysConstPtr; +pub type NormalWorldMutPtr = crate::ptr::PhysMutPtr; diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index 89533a1ac..5147c6286 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + //! OP-TEE's message passing is a bit complex because it involves multiple actors //! (normal world: client app and driver; secure world: OP-TEE OS and TAs), //! consists of multiple layers, and relies on shared memory references (i.e., no serialization). @@ -12,7 +15,7 @@ //! world physical addresses to exchange a large amount of data. Also, like the OP-TEE //! SMC call, a certain OP-TEE message/command does not involve any TA (e.g., register //! shared memory). -use crate::ptr::NormalWorldConstPtr; +use crate::NormalWorldConstPtr; use alloc::{boxed::Box, vec::Vec}; use hashbrown::HashMap; use litebox::mm::linux::PAGE_SIZE; diff --git a/litebox_shim_optee/src/ptr.rs b/litebox_shim_optee/src/ptr.rs index 701c1f3bb..4a00ee9fd 100644 --- a/litebox_shim_optee/src/ptr.rs +++ b/litebox_shim_optee/src/ptr.rs @@ -1,7 +1,10 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + //!
Physical Pointer Abstraction with On-demand Mapping //! -//! This module implements types and traits to support accessing physical addresses -//! (e.g., VTL0 or normal-world physical memory) from LiteBox with on-demand mapping. +//! This module adds support for accessing physical addresses (e.g., VTL0 or +//! normal-world physical memory) from LiteBox with on-demand mapping. //! In the context of LVBS and OP-TEE, accessing physical memory is necessary //! because VTL0 and VTL1 as well as normal world and secure world do not share //! the same virtual address space, but they still have to share data through memory. @@ -10,12 +13,10 @@ //! //! To simplify all these, we could persistently map the entire VTL0/normal-world //! physical memory into VTL1/secure-world address space at once and just access them -//! through corresponding virtual addresses. Also, we could define some APIs to let -//! LiteBox (shim) map/unmap arbitrary physical addresses (i.e., implementing and -//! exposing APIs like Linux kernel's `vmap()` and `vunmap()`). However, this module -//! does not take these approaches due to scalability (e.g., how to deal with a system -//! with terabytes of physical memory?) and security concerns (e.g., data corruption or -//! information leakage due to concurrent or persistent access). +//! through corresponding virtual addresses. However, this module does not take these +//! approaches due to scalability (e.g., how to deal with a system with terabytes of +//! physical memory?) and security concerns (e.g., data corruption or information +//! leakage due to concurrent or persistent access). //! //! Instead, the approach this module takes is to map the required physical memory //! region on-demand when accessing them while using a LiteBox-managed buffer to copy @@ -57,9 +58,14 @@ //! if this module always requires a list of physical addresses, the caller might //! provide a wrong list by mistake or intentionally). +// TODO: Since the below `PhysMutPtr` and `PhysConstPtr` are not OP-TEE specific, +// we can move them to a different crate (e.g., `litebox`) if needed. use core::ops::Deref; -use litebox::platform::page_mgmt::MemoryRegionPermissions; -use thiserror::Error; +use litebox::platform::vmap::{ + PhysPageArray, PhysPageMapInfo, PhysPageMapPermissions, PhysPointerError, VmapProvider, +}; +use litebox_platform_multiplex::{Platform, platform}; #[inline] fn align_down(address: usize, align: usize) -> usize { @@ -71,204 +77,25 @@ fn align_up(len: usize, align: usize) -> usize { len.next_multiple_of(align) } -/// Trait to validate that a physical pointer does not belong to LiteBox-managed memory -/// (including both kernel and userspace memory). -/// -/// This validation is mainly to deal with the Boomerang attack where a normal-world client -/// tricks the secure-world kernel (i.e., LiteBox) to access the secure-world memory. -/// However, even if there is no such threat (e.g., no normal/secure world separation), -/// this validation is still beneficial to ensure the memory safety by doing not access -/// LiteBox-managed memory without going through its memory allocator. -/// -/// Succeeding these operations does not guarantee that the physical pointer is valid to -/// access, just that it is outside of LiteBox-managed memory and won't be used to access -/// it as an unmanaged channel. -pub trait ValidateAccess { - /// Validate that the given physical pointer does not belong to LiteBox-managed memory.
- /// - /// Here, we do not use `*const T` or `*mut T` because this is a physical pointer which - /// must not be dereferenced directly. - /// - /// Returns `Ok(pa)` if valid. If the pointer is not valid, returns `Err(PhysPointerError)`. - fn validate(pa: usize) -> Result; -} - -/// Data structure for an array of physical pages. These physical pages should be -/// virtually contiguous in the source address space. -#[derive(Clone)] -pub struct PhysPageArray { - inner: alloc::boxed::Box<[usize]>, -} -impl PhysPageArray { - /// Create a new `PhysPageArray` from the given slice of physical addresses. - /// - /// All page addresses must be aligned to `ALIGN`. - pub fn try_from_slice(addrs: &[usize]) -> Result { - for addr in addrs { - if !addr.is_multiple_of(ALIGN) { - return Err(PhysPointerError::UnalignedPhysicalAddress(*addr, ALIGN)); - } - } - // TODO: Remove this check once our platform implementations support virtually - // contiguous non-contiguous physical page mapping. - Self::check_contiguity(addrs)?; - Ok(Self { - inner: alloc::boxed::Box::from(addrs), - }) - } - /// Check if the array is empty. - pub fn is_empty(&self) -> bool { - self.inner.is_empty() - } - /// Return the number of physical pages in the array. - pub fn len(&self) -> usize { - self.inner.len() - } - /// Return the first physical address in the array if exists. - pub fn first(&self) -> Option { - self.inner.first().copied() - } - /// Checks whether the given physical addresses are contiguous with respect to ALIGN. - /// - /// Note: This is a temporary check to let this module work with our platform implementations - /// which map physical pages with a fixed offset (`MemoryProvider::GVA_OFFSET`) such that - /// do not support non-contiguous physical page mapping with contiguous virtual addresses. - fn check_contiguity(addrs: &[usize]) -> Result<(), PhysPointerError> { - for window in addrs.windows(2) { - let first = window[0]; - let second = window[1]; - if second != first.checked_add(ALIGN).ok_or(PhysPointerError::Overflow)? { - return Err(PhysPointerError::NonContiguousPages); - } - } - Ok(()) - } -} -impl core::iter::Iterator for PhysPageArray { - type Item = usize; - fn next(&mut self) -> Option { - if self.inner.is_empty() { - None - } else { - Some(self.inner[0]) - } - } -} -impl core::ops::Deref for PhysPageArray { - type Target = [usize]; - fn deref(&self) -> &Self::Target { - &self.inner - } -} - -/// Data structure to maintain the mapping information returned by `vmap()`. -/// `base` is the virtual address of the mapped region which is page aligned. -/// `size` is the size of the mapped region in bytes. -#[derive(Clone)] -pub struct PhysPageMapInfo { - pub base: *mut u8, - pub size: usize, -} - -bitflags::bitflags! { - /// Physical page map permissions which is a restricted version of - /// [`litebox::platform::page_mgmt::MemoryRegionPermissions`]. - /// - /// This module only supports READ and WRITE permissions. Both EXECUTE and SHARED - /// permissions are explicitly prohibited. 
- #[derive(Clone, Copy, Debug, PartialEq, Eq)] - pub struct PhysPageMapPermissions: u8 { - /// Readable - const READ = 1 << 0; - /// Writable - const WRITE = 1 << 1; - } -} -impl From for PhysPageMapPermissions { - fn from(perms: MemoryRegionPermissions) -> Self { - let mut phys_perms = PhysPageMapPermissions::empty(); - if perms.contains(MemoryRegionPermissions::READ) { - phys_perms |= PhysPageMapPermissions::READ; - } - if perms.contains(MemoryRegionPermissions::WRITE) { - phys_perms |= PhysPageMapPermissions::WRITE; - } - phys_perms - } -} -impl From for MemoryRegionPermissions { - fn from(perms: PhysPageMapPermissions) -> Self { - let mut mem_perms = MemoryRegionPermissions::empty(); - if perms.contains(PhysPageMapPermissions::READ) { - mem_perms |= MemoryRegionPermissions::READ; - } - if perms.contains(PhysPageMapPermissions::WRITE) { - mem_perms |= MemoryRegionPermissions::WRITE; - } - mem_perms - } -} - -/// Trait to map and unmap physical pages into virtually contiguous address space. -/// -/// The implementation of this trait is platform-specific because it depends on how -/// the underlying platform manages page tables and memory regions. -pub trait PhysPageMapper { - /// Map the given [`PhysPageArray`] into virtually contiguous address space with the given - /// [`PhysPageMapPermissions`] while returning [`PhysPageMapInfo`]. - /// - /// This function is analogous to Linux kernel's `vmap()`. - /// - /// # Safety - /// - /// The caller should ensure that `pages` are not in active use by other entities. LiteBox - /// itself cannot fully guarantee this and it needs some helps from the caller, hypervisor, - /// or hardware. - /// Multiple LiteBox threads might concurrently call this function with overlapping physical - /// pages, so the implementation should safely handle such cases. - unsafe fn vmap( - pages: PhysPageArray, - perms: PhysPageMapPermissions, - ) -> Result, PhysPointerError>; - /// Unmap the previously mapped virtually contiguous address space ([`PhysPageMapInfo`]). - /// - /// This function is analogous to Linux kernel's `vunmap()`. - /// - /// # Safety - /// - /// The caller should ensure that the virtual addresses belonging to `vmap_info` are not in - /// active use by other entities. Like `vmap()`, LiteBox itself cannot fully guarantee this - /// and it needs some helps from other parties. - /// Multiple LiteBox threads might concurrently call this function with overlapping physical - /// pages, so the implementation should safely handle such cases. - unsafe fn vunmap( - vmap_info: PhysPageMapInfo, - ) -> Result<(), PhysPointerError>; -} - /// Represent a physical pointer to an object with on-demand mapping. /// - `pages`: An array of page-aligned physical addresses ([`PhysPageArray`]). Physical addresses in /// this array should be virtually contiguous. /// - `offset`: The offset within `pages[0]` where the object starts. It should be smaller than `ALIGN`. /// - `count`: The number of objects of type `T` that can be accessed from this pointer. +/// - `map_info`: The mapping information of the currently mapped physical pages, if any. /// - `T`: The type of the object being pointed to. `pages` with respect to `offset` should cover enough /// memory for an object of type `T`. 
-/// - `V`: The validator type implementing [`ValidateAccess`] trait to validate the physical addresses #[derive(Clone)] #[repr(C)] -pub struct PhysMutPtr { +pub struct PhysMutPtr { pages: PhysPageArray, offset: usize, count: usize, map_info: Option>, _type: core::marker::PhantomData, - _mapper: core::marker::PhantomData, - _validator: core::marker::PhantomData, } -impl - PhysMutPtr -{ +impl PhysMutPtr { /// Create a new `PhysMutPtr` from the given physical page array and offset. /// /// All addresses in `pages` must be valid and aligned to `ALIGN`, and `offset` must be smaller than `ALIGN`. @@ -296,7 +123,7 @@ impl )); } for pa in pages.iter() { - V::validate::(*pa)?; + >::validate::(platform(), *pa)?; } Ok(Self { pages, @@ -304,8 +131,6 @@ impl count: size / core::mem::size_of::(), map_info: None, _type: core::marker::PhantomData, - _mapper: core::marker::PhantomData, - _validator: core::marker::PhantomData, }) } /// Create a new `PhysMutPtr` from the given contiguous physical address and length. @@ -327,7 +152,7 @@ impl let mut pages = alloc::vec::Vec::with_capacity((end_page - start_page) / ALIGN); let mut current_page = start_page; while current_page < end_page { - V::validate::(current_page)?; + >::validate::(platform(), current_page)?; pages.push(current_page); current_page += ALIGN; } @@ -592,7 +417,7 @@ impl if self.map_info.is_none() { let sub_pages = PhysPageArray::try_from_slice(&self.pages.deref()[start..end])?; unsafe { - self.map_info = Some(M::vmap(sub_pages, perms)?); + self.map_info = Some(platform().vmap(sub_pages, perms)?); } Ok(()) } else { @@ -610,7 +435,7 @@ impl unsafe fn unmap(&mut self) -> Result<(), PhysPointerError> { if let Some(map_info) = self.map_info.take() { unsafe { - M::vunmap(map_info)?; + platform().vunmap(map_info)?; } self.map_info = None; Ok(()) @@ -620,7 +445,7 @@ impl } } -impl core::fmt::Debug for PhysMutPtr { +impl core::fmt::Debug for PhysMutPtr { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_struct("PhysMutPtr") .field("pages[0]", &self.pages.first().unwrap_or(0)) @@ -633,12 +458,10 @@ impl core::fmt::Debug for PhysMutPtr { - inner: PhysMutPtr, +pub struct PhysConstPtr { + inner: PhysMutPtr, } -impl - PhysConstPtr -{ +impl PhysConstPtr { /// Create a new `PhysMutPtr` from the given physical page array and offset. /// /// All addresses in `pages` must be valid and aligned to `ALIGN`, and `offset` must be smaller than `ALIGN`. @@ -699,7 +522,7 @@ impl } } -impl core::fmt::Debug for PhysConstPtr { +impl core::fmt::Debug for PhysConstPtr { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_struct("PhysConstPtr") .field("pages[0]", &self.inner.pages.first().unwrap_or(0)) @@ -707,70 +530,3 @@ impl core::fmt::Debug for PhysConstPtr(pa: usize) -> Result { - Ok(pa) - } -} - -/// This is a mock implementation that does no actual mapping. Each platform which supports -/// `PhysMutPtr` and `PhysConstPtr` should provide its `PhysPageMapper` implementation. -pub struct MockPhysMemoryMapper; -impl PhysPageMapper for MockPhysMemoryMapper { - unsafe fn vmap( - _pages: PhysPageArray, - _perms: PhysPageMapPermissions, - ) -> Result, PhysPointerError> { - Ok(PhysPageMapInfo { - base: core::ptr::null_mut(), - size: 0, - }) - } - unsafe fn vunmap( - _vmap_info: PhysPageMapInfo, - ) -> Result<(), PhysPointerError> { - Ok(()) - } -} - -/// Normal world constant pointer type using MockPhysMemoryMapper for testing purposes. 
-pub type NormalWorldConstPtr = - PhysConstPtr; - -/// Normal world mutable pointer type using MockPhysMemoryMapper for testing purposes. -pub type NormalWorldMutPtr = - PhysMutPtr; From 52326a14cf4d7abd5df5913ba9ddbcf7fadf55d7 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Wed, 24 Dec 2025 03:59:35 +0000 Subject: [PATCH 28/45] addressed comments --- litebox/src/platform/vmap.rs | 20 +++++++-- litebox_platform_linux_userland/src/lib.rs | 4 ++ litebox_platform_lvbs/src/lib.rs | 4 ++ litebox_shim_optee/src/ptr.rs | 50 +++++++++++++++------- 4 files changed, 59 insertions(+), 19 deletions(-) diff --git a/litebox/src/platform/vmap.rs b/litebox/src/platform/vmap.rs index 029a4292f..adc55ba23 100644 --- a/litebox/src/platform/vmap.rs +++ b/litebox/src/platform/vmap.rs @@ -14,8 +14,10 @@ pub trait VmapProvider { /// Data structure for an array of physical pages which are virtually contiguous. type PhysPageArray; + /// Data structure to maintain the mapping information returned by `vmap()`. type PhysPageMapInfo; + /// Map the given [`PhysPageArray`] into virtually contiguous addresses with the given /// [`PhysPageMapPermissions`] while returning [`PhysPageMapInfo`]. This function /// expects that it can access and update the page table using `&self`. @@ -24,7 +26,8 @@ /// /// # Safety /// - /// The caller should ensure that `pages` are not in active use by other entities. + /// The caller should ensure that `pages` are not in active use by other entities + /// (especially, there should be no read/write or write/write conflicts). /// Unfortunately, LiteBox itself cannot fully guarantee this and it needs some help /// from the caller, hypervisor, or hardware. /// Multiple LiteBox threads might concurrently call this function (and `vunmap()`) with @@ -34,6 +37,7 @@ pub trait VmapProvider { pages: Self::PhysPageArray, perms: PhysPageMapPermissions, ) -> Result; + /// Unmap the previously mapped virtually contiguous addresses ([`PhysPageMapInfo`]). /// Use `&self` to access and update the page table. /// @@ -47,12 +51,13 @@ pub trait VmapProvider { /// Multiple LiteBox threads might concurrently call this function (and `vmap()`) with /// overlapping physical pages, so the implementation should safely handle such cases. unsafe fn vunmap(&self, vmap_info: Self::PhysPageMapInfo) -> Result<(), PhysPointerError>; + /// Validate that the given physical address (with type) does not belong to LiteBox-managed /// memory. Use `&self` to get the memory layout of the platform (i.e., the physical memory /// range assigned to LiteBox). /// /// This function does not use `*const T` or `*mut T` because it deals with a physical address - /// which must not be dereferenced directly. + /// which should not be dereferenced directly. /// /// Returns `Ok(pa)` if valid. If the address is not valid, returns `Err(PhysPointerError)`. fn validate(&self, pa: usize) -> Result; @@ -63,10 +68,11 @@ pub trait VmapProvider { pub struct PhysPageArray { inner: alloc::boxed::Box<[usize]>, } + impl PhysPageArray { /// Create a new `PhysPageArray` from the given slice of physical addresses. /// - /// All page addresses must be aligned to `ALIGN`. + /// All page addresses should be aligned to `ALIGN`. pub fn try_from_slice(addrs: &[usize]) -> Result { for addr in addrs { if !addr.is_multiple_of(ALIGN) { @@ -80,18 +86,22 @@ impl PhysPageArray { inner: alloc::boxed::Box::from(addrs), }) } + /// Check if the array is empty.
pub fn is_empty(&self) -> bool { self.inner.is_empty() } + /// Return the number of physical pages in the array. pub fn len(&self) -> usize { self.inner.len() } + /// Return the first physical address in the array if exists. pub fn first(&self) -> Option { self.inner.first().copied() } + /// Checks whether the given physical addresses are contiguous with respect to ALIGN. /// /// Note: This is a temporary check to let this module work with our platform implementations @@ -108,6 +118,7 @@ impl PhysPageArray { Ok(()) } } + impl core::iter::Iterator for PhysPageArray { type Item = usize; fn next(&mut self) -> Option { @@ -118,6 +129,7 @@ impl core::iter::Iterator for PhysPageArray { } } } + impl core::ops::Deref for PhysPageArray { type Target = [usize]; fn deref(&self) -> &Self::Target { @@ -149,6 +161,7 @@ bitflags::bitflags! { const WRITE = 1 << 1; } } + impl From for PhysPageMapPermissions { fn from(perms: MemoryRegionPermissions) -> Self { let mut phys_perms = PhysPageMapPermissions::empty(); @@ -161,6 +174,7 @@ impl From for PhysPageMapPermissions { phys_perms } } + impl From for MemoryRegionPermissions { fn from(perms: PhysPageMapPermissions) -> Self { let mut mem_perms = MemoryRegionPermissions::empty(); diff --git a/litebox_platform_linux_userland/src/lib.rs b/litebox_platform_linux_userland/src/lib.rs index b95bd4ead..7308bace2 100644 --- a/litebox_platform_linux_userland/src/lib.rs +++ b/litebox_platform_linux_userland/src/lib.rs @@ -2197,7 +2197,9 @@ impl litebox::platform::CrngProvider for LinuxUserland { /// testing, or use a kernel module to provide this functionality (if needed). impl VmapProvider for LinuxUserland { type PhysPageArray = PhysPageArray; + type PhysPageMapInfo = PhysPageMapInfo; + unsafe fn vmap( &self, _pages: Self::PhysPageArray, @@ -2205,9 +2207,11 @@ impl VmapProvider for LinuxUserland { ) -> Result { Err(PhysPointerError::UnsupportedOperation) } + unsafe fn vunmap(&self, _vmap_info: Self::PhysPageMapInfo) -> Result<(), PhysPointerError> { Err(PhysPointerError::UnsupportedOperation) } + fn validate(&self, _pa: usize) -> Result { Err(PhysPointerError::UnsupportedOperation) } diff --git a/litebox_platform_lvbs/src/lib.rs b/litebox_platform_lvbs/src/lib.rs index 06b43b287..f8d7a8627 100644 --- a/litebox_platform_lvbs/src/lib.rs +++ b/litebox_platform_lvbs/src/lib.rs @@ -765,7 +765,9 @@ impl litebox::platform::SystemInfoProvider for LinuxKernel< impl VmapProvider for LinuxKernel { type PhysPageArray = PhysPageArray; + type PhysPageMapInfo = PhysPageMapInfo; + unsafe fn vmap( &self, _pages: Self::PhysPageArray, @@ -773,9 +775,11 @@ impl VmapProvider for LinuxKerne ) -> Result { todo!("use map_vtl0_phys_range()") } + unsafe fn vunmap(&self, _vmap_info: Self::PhysPageMapInfo) -> Result<(), PhysPointerError> { todo!("use unmap_vtl0_pages()") } + fn validate(&self, _pa: usize) -> Result { todo!("use vtl1_phys_frame_range to validate") } diff --git a/litebox_shim_optee/src/ptr.rs b/litebox_shim_optee/src/ptr.rs index 4a00ee9fd..0f97e1c15 100644 --- a/litebox_shim_optee/src/ptr.rs +++ b/litebox_shim_optee/src/ptr.rs @@ -98,8 +98,10 @@ pub struct PhysMutPtr { impl PhysMutPtr { /// Create a new `PhysMutPtr` from the given physical page array and offset. /// - /// All addresses in `pages` must be valid and aligned to `ALIGN`, and `offset` must be smaller than `ALIGN`. - /// Also, `pages` must contain enough pages to cover at least one object of type `T` starting from `offset`. 
+ /// All addresses in `pages` should be valid and aligned to `ALIGN`, and `offset` should be smaller + /// than `ALIGN`. Also, `pages` should contain enough pages to cover at least one object of + /// type `T` starting from `offset`. If these conditions are not met, this function returns + /// `Err(PhysPointerError)`. pub fn try_from_page_array( pages: PhysPageArray, offset: usize, @@ -133,10 +135,12 @@ impl PhysMutPtr { _type: core::marker::PhantomData, }) } + /// Create a new `PhysMutPtr` from the given contiguous physical address and length. /// /// This is a shortcut for `try_from_page_array([align_down(pa), ..., align_up(align_down(pa) + bytes)], pa % ALIGN)`. - /// The caller must ensure that `pa`, ..., `pa+bytes` are both physically and virtually contiguous. + /// This function assumes that `pa`, ..., `pa+bytes` are both physically and virtually contiguous. If not, + /// later accesses through `PhysMutPtr` may read/write incorrect data. pub fn try_from_contiguous_pages(pa: usize, bytes: usize) -> Result { if bytes < core::mem::size_of::() { return Err(PhysPointerError::InsufficientPhysicalPages( @@ -158,6 +162,7 @@ impl PhysMutPtr { } Self::try_from_page_array(PhysPageArray::try_from_slice(&pages)?, pa - start_page) } + /// Create a new `PhysMutPtr` from the given physical address for a single object. /// /// This is a shortcut for `try_from_contiguous_pages(pa, size_of::())`. @@ -166,13 +171,14 @@ impl PhysMutPtr { pub fn try_from_usize(pa: usize) -> Result { Self::try_from_contiguous_pages(pa, core::mem::size_of::()) } + /// Read the value at the given offset from the physical pointer. /// /// # Safety /// - /// The caller should be aware that the given physical address might be concurrently accessed by + /// The caller should be aware that the given physical address might be concurrently written by /// other entities (e.g., the normal world kernel) if there is no extra security mechanism /// in place (e.g., by the hypervisor or hardware). That is, it might read corrupt data. pub unsafe fn read_at_offset( &mut self, count: usize, @@ -225,11 +231,12 @@ impl PhysMutPtr { } Ok(alloc::boxed::Box::new(val)) } + /// Read a slice of values at the given offset from the physical pointer. /// /// # Safety /// - /// The caller should be aware that the given physical address might be concurrently accessed by + /// The caller should be aware that the given physical address might be concurrently written by /// other entities (e.g., the normal world kernel) if there is no extra security mechanism /// in place (e.g., by the hypervisor or hardware). That is, it might read corrupt data. pub unsafe fn read_slice_at_offset( @@ -284,11 +291,12 @@ impl PhysMutPtr { } Ok(()) } + /// Write the value at the given offset to the physical pointer. /// /// # Safety /// - /// The caller should be aware that the given physical address might be concurrently accessed by + /// The caller should be aware that the given physical address might be concurrently written by /// other entities (e.g., the normal world kernel) if there is no extra security mechanism /// in place (e.g., by the hypervisor or hardware). That is, data it writes might be overwritten. pub unsafe fn write_at_offset( @@ -336,11 +344,12 @@ impl PhysMutPtr { } Ok(()) } + /// Write a slice of values at the given offset to the physical pointer.
/// /// # Safety /// - /// The caller should be aware that the given physical address might be concurrently accessed by + /// The caller should be aware that the given physical address might be concurrently written by /// other entities (e.g., the normal world kernel) if there is no extra security mechanism /// in place (e.g., by the hypervisor or hardware). That is, data it writes might be overwritten. pub unsafe fn write_slice_at_offset( @@ -399,6 +408,7 @@ impl PhysMutPtr { } Ok(()) } + /// Map the physical pages from `start` to `end` indexes. /// /// # Safety @@ -426,6 +436,7 @@ impl PhysMutPtr { )) } } + /// Unmap the physical pages if mapped. /// /// # Safety @@ -462,10 +473,12 @@ pub struct PhysConstPtr { inner: PhysMutPtr, } impl PhysConstPtr { - /// Create a new `PhysMutPtr` from the given physical page array and offset. + /// Create a new `PhysConstPtr` from the given physical page array and offset. /// - /// All addresses in `pages` must be valid and aligned to `ALIGN`, and `offset` must be smaller than `ALIGN`. - /// Also, `pages` must contain enough pages to cover at least one object of type `T` starting from `offset`. + /// All addresses in `pages` should be valid and aligned to `ALIGN`, and `offset` should be smaller + /// than `ALIGN`. Also, `pages` should contain enough pages to cover at least one object of + /// type `T` starting from `offset`. If these conditions are not met, this function returns + /// `Err(PhysPointerError)`. pub fn try_from_page_array( pages: PhysPageArray, offset: usize, @@ -474,16 +487,19 @@ impl PhysConstPtr { inner: PhysMutPtr::try_from_page_array(pages, offset)?, }) } - /// Create a new `PhysMutPtr` from the given contiguous physical address and length. + + /// Create a new `PhysConstPtr` from the given contiguous physical address and length. /// /// This is a shortcut for `try_from_page_array([align_down(pa), ..., align_up(align_down(pa) + bytes)], pa % ALIGN)`. - /// The caller must ensure that `pa`, ..., `pa+bytes` are both physically and virtually contiguous. + /// This function assumes that `pa`, ..., `pa+bytes` are both physically and virtually contiguous. If not, + /// later accesses through `PhysConstPtr` may read incorrect data. pub fn try_from_contiguous_pages(pa: usize, bytes: usize) -> Result { Ok(Self { inner: PhysMutPtr::try_from_contiguous_pages(pa, bytes)?, }) } - /// Create a new `PhysMutPtr` from the given physical address for a single object. + + /// Create a new `PhysConstPtr` from the given physical address for a single object. /// /// This is a shortcut for `try_from_contiguous_pages(pa, size_of::())`. /// @@ -493,11 +509,12 @@ impl PhysConstPtr { inner: PhysMutPtr::try_from_usize(pa)?, }) } + /// Read the value at the given offset from the physical pointer. /// /// # Safety /// - /// The caller should be aware that the given physical address might be concurrently accessed by + /// The caller should be aware that the given physical address might be concurrently written by /// other entities (e.g., the normal world kernel) if there is no extra security mechanism /// in place (e.g., by the hypervisor or hardware). That is, it might read corrupt data. pub unsafe fn read_at_offset( @@ -506,11 +523,12 @@ impl PhysConstPtr { ) -> Result, PhysPointerError> { unsafe { self.inner.read_at_offset(count) } } + /// Read a slice of values at the given offset from the physical pointer. 
/// /// # Safety /// - /// The caller should be aware that the given physical address might be concurrently accessed by + /// The caller should be aware that the given physical address might be concurrently written by /// other entities (e.g., the normal world kernel) if there is no extra security mechanism /// in place (e.g., by the hypervisor or hardware). That is, it might read corrupt data. pub unsafe fn read_slice_at_offset( From d06ca131c3f2f640c9db6e40a58260b6a2be775c Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Wed, 24 Dec 2025 05:29:11 +0000 Subject: [PATCH 29/45] impl Drop for PhysPtrs --- litebox_shim_optee/src/ptr.rs | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/litebox_shim_optee/src/ptr.rs b/litebox_shim_optee/src/ptr.rs index 0f97e1c15..2ad25c694 100644 --- a/litebox_shim_optee/src/ptr.rs +++ b/litebox_shim_optee/src/ptr.rs @@ -87,7 +87,7 @@ fn align_up(len: usize, align: usize) -> usize { /// memory for an object of type `T`. #[derive(Clone)] #[repr(C)] -pub struct PhysMutPtr { +pub struct PhysMutPtr { pages: PhysPageArray, offset: usize, count: usize, @@ -456,6 +456,12 @@ impl PhysMutPtr { } } +impl Drop for PhysMutPtr { + fn drop(&mut self) { + let _ = unsafe { self.unmap() }; + } +} + impl core::fmt::Debug for PhysMutPtr { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_struct("PhysMutPtr") @@ -469,9 +475,10 @@ impl core::fmt::Debug for PhysMutPtr { /// exposes only read access. #[derive(Clone)] #[repr(C)] -pub struct PhysConstPtr { +pub struct PhysConstPtr { inner: PhysMutPtr, } + impl PhysConstPtr { /// Create a new `PhysConstPtr` from the given physical page array and offset. /// @@ -540,6 +547,12 @@ impl PhysConstPtr { } } +impl Drop for PhysConstPtr { + fn drop(&mut self) { + let _ = unsafe { self.inner.unmap() }; + } +} + impl core::fmt::Debug for PhysConstPtr { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_struct("PhysConstPtr") From b41826834d12f4b3e618c2e26f88d63abe49f6bb Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Wed, 24 Dec 2025 15:44:04 +0000 Subject: [PATCH 30/45] VmapProvider validate and protect --- litebox/src/platform/vmap.rs | 31 +++++++++++++++++----- litebox_platform_linux_userland/src/lib.rs | 10 ++++++- litebox_platform_lvbs/src/lib.rs | 10 ++++++- litebox_shim_optee/src/ptr.rs | 5 +--- 4 files changed, 44 insertions(+), 12 deletions(-) diff --git a/litebox/src/platform/vmap.rs b/litebox/src/platform/vmap.rs index adc55ba23..fe14496cb 100644 --- a/litebox/src/platform/vmap.rs +++ b/litebox/src/platform/vmap.rs @@ -52,15 +52,34 @@ pub trait VmapProvider { /// overlapping physical pages, so the implementation should safely handle such cases. unsafe fn vunmap(&self, vmap_info: Self::PhysPageMapInfo) -> Result<(), PhysPointerError>; - /// Validate that the given physical address (with type) does not belong to LiteBox-managed - /// memory. Use `&self` to get the memory layout of the platform (i.e., the physical memory + /// Validate that the given physical pages do not belong to LiteBox-managed memory. + /// Use `&self` to get the memory layout of the platform (i.e., the physical memory /// range assigned to LiteBox). /// - /// This function does not use `*const T` or `*mut T` because it deals with a physical address - /// which should not be dereferenced directly. + /// This function is a no-op if there is no other world or VM sharing the physical memory. /// - /// Returns `Ok(pa)` if valid. 
If the address is not valid, returns `Err(PhysPointerError)`. - fn validate(&self, pa: usize) -> Result; + /// Returns `Ok(())` if valid. If the pages are not valid, returns `Err(PhysPointerError)`. + fn validate(&self, pages: Self::PhysPageArray) -> Result<(), PhysPointerError>; + + /// Protect the given physical pages to ensure concurrent read or exclusive write access. + /// Read protection prevents others from modifying the pages. Read/write protection prevents + /// others from accessing the pages. + /// This can be implemented using EPT/NPT, TZASC, PMP, or some other hardware mechanisms. + /// + /// This function is a no-op if there is no other world or VM sharing the physical memory. + /// + /// Returns `Ok(())` if it successfully protects the pages. If it fails, returns + /// `Err(PhysPointerError)`. + /// + /// # Safety + /// + /// Since this function is expected to use hypercalls or other privileged hardware features, + /// the caller must ensure that it is safe to perform such operations at the time of the call. unsafe fn protect( + &self, + pages: Self::PhysPageArray, + perms: PhysPageMapPermissions, + ) -> Result<(), PhysPointerError>; } diff --git a/litebox_platform_linux_userland/src/lib.rs b/litebox_platform_linux_userland/src/lib.rs index 7308bace2..28784bb89 100644 --- a/litebox_platform_linux_userland/src/lib.rs +++ b/litebox_platform_linux_userland/src/lib.rs @@ -2212,7 +2212,15 @@ impl VmapProvider for LinuxUserland { Err(PhysPointerError::UnsupportedOperation) } - fn validate(&self, _pa: usize) -> Result { + fn validate(&self, _pages: Self::PhysPageArray) -> Result<(), PhysPointerError> { + Err(PhysPointerError::UnsupportedOperation) + } + + unsafe fn protect( + &self, + _pages: Self::PhysPageArray, + _perms: PhysPageMapPermissions, + ) -> Result<(), PhysPointerError> { Err(PhysPointerError::UnsupportedOperation) } } diff --git a/litebox_platform_lvbs/src/lib.rs b/litebox_platform_lvbs/src/lib.rs index f8d7a8627..7c7e7e2a4 100644 --- a/litebox_platform_lvbs/src/lib.rs +++ b/litebox_platform_lvbs/src/lib.rs @@ -780,9 +780,17 @@ impl VmapProvider for LinuxKerne todo!("use unmap_vtl0_pages()") } - fn validate(&self, _pa: usize) -> Result { + fn validate(&self, _pages: Self::PhysPageArray) -> Result<(), PhysPointerError> { todo!("use vtl1_phys_frame_range to validate") } + + unsafe fn protect( + &self, + _pages: Self::PhysPageArray, + _perms: PhysPageMapPermissions, + ) -> Result<(), PhysPointerError> { + todo!("use hypercall to protect/unprotect physical pages") + } } // NOTE: The below code is a naive workaround to let LVBS code access the platform.
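// Taken together, `validate` and `protect` are meant to pin down normal-world pages before
// LiteBox touches them; a hypothetical sequence (values and the 4096 ALIGN are illustrative):
//
// let pages = PhysPageArray::<4096>::try_from_slice(&[pa])?; // pa: page-aligned VTL0 address
// platform().validate(pages.clone())?;                       // reject LiteBox-owned memory
// unsafe { platform().protect(pages.clone(), PhysPageMapPermissions::READ)? }; // block writers
// // ... vmap, copy out, vunmap ...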
diff --git a/litebox_shim_optee/src/ptr.rs b/litebox_shim_optee/src/ptr.rs index 2ad25c694..32ea9f8de 100644 --- a/litebox_shim_optee/src/ptr.rs +++ b/litebox_shim_optee/src/ptr.rs @@ -124,9 +124,7 @@ impl PhysMutPtr { core::mem::size_of::(), )); } - for pa in pages.iter() { - >::validate::(platform(), *pa)?; - } + >::validate(platform(), pages.clone())?; Ok(Self { pages, offset, @@ -156,7 +154,6 @@ impl PhysMutPtr { let mut pages = alloc::vec::Vec::with_capacity((end_page - start_page) / ALIGN); let mut current_page = start_page; while current_page < end_page { - >::validate::(platform(), current_page)?; pages.push(current_page); current_page += ALIGN; } From 378097a8e4a219b23e49f57ea68ad723719f4ca6 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Fri, 26 Dec 2025 18:39:09 +0000 Subject: [PATCH 31/45] use existing NonZeroAddress --- litebox/src/mm/linux.rs | 1 + litebox/src/platform/vmap.rs | 96 ++++------------------ litebox_platform_linux_userland/src/lib.rs | 10 +-- litebox_platform_lvbs/src/lib.rs | 31 +++++-- litebox_shim_optee/src/ptr.rs | 49 +++++++---- 5 files changed, 81 insertions(+), 106 deletions(-) diff --git a/litebox/src/mm/linux.rs b/litebox/src/mm/linux.rs index 188e9a3b6..aa1348f30 100644 --- a/litebox/src/mm/linux.rs +++ b/litebox/src/mm/linux.rs @@ -216,6 +216,7 @@ impl core::ops::Add for NonZeroPageSize { } /// A non-zero address that is `ALIGN`-aligned. +#[derive(Clone, Copy)] pub struct NonZeroAddress(usize); impl NonZeroAddress { diff --git a/litebox/src/platform/vmap.rs b/litebox/src/platform/vmap.rs index fe14496cb..b152508fb 100644 --- a/litebox/src/platform/vmap.rs +++ b/litebox/src/platform/vmap.rs @@ -12,13 +12,13 @@ use thiserror::Error; /// `litebox_shim_optee::ptr::PhysConstPtr`. It can benefit other modules which need /// Linux kernel's `vmap()` and `vunmap()` functionalities (e.g., HVCI/HEKI, drivers). pub trait VmapProvider { - /// Data structure for an array of physical pages which are virtually contiguous. - type PhysPageArray; + /// Data structure for an array of physical page addresses which are virtually contiguous. + type PhysPageAddrArray; /// Data structure to maintain the mapping information returned by `vmap()`. type PhysPageMapInfo; - /// Map the given [`PhysPageArray`] into virtually contiguous addresses with the given + /// Map the given `PhysPageAddrArray` into virtually contiguous addresses with the given /// [`PhysPageMapPermissions`] while returning [`PhysPageMapInfo`]. This function /// expects that it can access and update the page table using `&self`. /// @@ -34,7 +34,7 @@ pub trait VmapProvider { /// overlapping physical pages, so the implementation should safely handle such cases. unsafe fn vmap( &self, - pages: Self::PhysPageArray, + pages: Self::PhysPageAddrArray, perms: PhysPageMapPermissions, ) -> Result; @@ -59,7 +59,7 @@ pub trait VmapProvider { /// This function is a no-op if there is no other world or VM sharing the physical memory. /// /// Returns `Ok(())` if valid. If the pages are not valid, returns `Err(PhysPointerError)`. - fn validate(&self, pages: Self::PhysPageArray) -> Result<(), PhysPointerError>; + fn validate(&self, pages: Self::PhysPageAddrArray) -> Result<(), PhysPointerError>; /// Protect the given physical pages to ensure concurrent read or exclusive write access. /// Read protection prevents others from modifying the pages. Read/write protection prevents @@ -77,84 +77,18 @@ pub trait VmapProvider { /// the caller must ensure that it is safe to perform such operations at the time of the call. 
unsafe fn protect( &self, - pages: Self::PhysPageArray, + pages: Self::PhysPageAddrArray, perms: PhysPageMapPermissions, ) -> Result<(), PhysPointerError>; } -/// Data structure for an array of physical pages. These physical pages should be virtually contiguous. -#[derive(Clone)] -pub struct PhysPageArray { - inner: alloc::boxed::Box<[usize]>, -} - -impl PhysPageArray { - /// Create a new `PhysPageArray` from the given slice of physical addresses. - /// - /// All page addresses should be aligned to `ALIGN`. - pub fn try_from_slice(addrs: &[usize]) -> Result { - for addr in addrs { - if !addr.is_multiple_of(ALIGN) { - return Err(PhysPointerError::UnalignedPhysicalAddress(*addr, ALIGN)); - } - } - // TODO: Remove this check once our platform implementations support virtually - // contiguous non-contiguous physical page mapping. - Self::check_contiguity(addrs)?; - Ok(Self { - inner: alloc::boxed::Box::from(addrs), - }) - } - - /// Check if the array is empty. - pub fn is_empty(&self) -> bool { - self.inner.is_empty() - } - - /// Return the number of physical pages in the array. - pub fn len(&self) -> usize { - self.inner.len() - } - - /// Return the first physical address in the array if exists. - pub fn first(&self) -> Option { - self.inner.first().copied() - } - - /// Checks whether the given physical addresses are contiguous with respect to ALIGN. - /// - /// Note: This is a temporary check to let this module work with our platform implementations - /// which map physical pages with a fixed offset (`MemoryProvider::GVA_OFFSET`) such that - /// do not support non-contiguous physical page mapping with contiguous virtual addresses. - fn check_contiguity(addrs: &[usize]) -> Result<(), PhysPointerError> { - for window in addrs.windows(2) { - let first = window[0]; - let second = window[1]; - if second != first.checked_add(ALIGN).ok_or(PhysPointerError::Overflow)? { - return Err(PhysPointerError::NonContiguousPages); - } - } - Ok(()) - } -} - -impl core::iter::Iterator for PhysPageArray { - type Item = usize; - fn next(&mut self) -> Option { - if self.inner.is_empty() { - None - } else { - Some(self.inner[0]) - } - } -} - -impl core::ops::Deref for PhysPageArray { - type Target = [usize]; - fn deref(&self) -> &Self::Target { - &self.inner - } -} +/// Data structure representing a physical address with page alignment. +/// +/// Currently, this is an alias to `crate::mm::linux::NonZeroAddress`. This might change if +/// we selectively conduct sanity checks based on whether an address is virtual or physical +/// (e.g., whether a virtual address is canonical, whether a physical address is tagged with +/// a valid key ID, etc.). +pub type PhysPageAddr = crate::mm::linux::NonZeroAddress; /// Data structure to maintain the mapping information returned by `vmap()`. /// @@ -172,12 +106,14 @@ bitflags::bitflags! { /// /// This module only supports READ and WRITE permissions. Both EXECUTE and SHARED /// permissions are explicitly prohibited. 
+ #[non_exhaustive] #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub struct PhysPageMapPermissions: u8 { /// Readable const READ = 1 << 0; /// Writable const WRITE = 1 << 1; + const _ = !0; } } @@ -237,4 +173,6 @@ pub enum PhysPointerError { NonContiguousPages, #[error("The operation is unsupported on this platform")] UnsupportedOperation, + #[error("Unsupported permissions: {0:#x}")] + UnsupportedPermissions(u8), } diff --git a/litebox_platform_linux_userland/src/lib.rs b/litebox_platform_linux_userland/src/lib.rs index 28784bb89..48e930f54 100644 --- a/litebox_platform_linux_userland/src/lib.rs +++ b/litebox_platform_linux_userland/src/lib.rs @@ -16,7 +16,7 @@ use litebox::fs::OFlags; use litebox::platform::UnblockedOrTimedOut; use litebox::platform::page_mgmt::{FixedAddressBehavior, MemoryRegionPermissions}; use litebox::platform::vmap::{ - PhysPageArray, PhysPageMapInfo, PhysPageMapPermissions, PhysPointerError, VmapProvider, + PhysPageAddr, PhysPageMapInfo, PhysPageMapPermissions, PhysPointerError, VmapProvider, }; use litebox::platform::{ImmediatelyWokenUp, RawConstPointer as _}; use litebox::shim::ContinueOperation; @@ -2196,13 +2196,13 @@ impl litebox::platform::CrngProvider for LinuxUserland { /// We might need to emulate these functions' behaviors using virtual addresses for development or /// testing, or use a kernel module to provide this functionality (if needed). impl VmapProvider for LinuxUserland { - type PhysPageArray = PhysPageArray; + type PhysPageAddrArray = alloc::boxed::Box<[PhysPageAddr]>; type PhysPageMapInfo = PhysPageMapInfo; unsafe fn vmap( &self, - _pages: Self::PhysPageArray, + _pages: Self::PhysPageAddrArray, _perms: PhysPageMapPermissions, ) -> Result { Err(PhysPointerError::UnsupportedOperation) @@ -2212,13 +2212,13 @@ impl VmapProvider for LinuxUserland { Err(PhysPointerError::UnsupportedOperation) } - fn validate(&self, _pages: Self::PhysPageArray) -> Result<(), PhysPointerError> { + fn validate(&self, _pages: Self::PhysPageAddrArray) -> Result<(), PhysPointerError> { Err(PhysPointerError::UnsupportedOperation) } unsafe fn protect( &self, - _pages: Self::PhysPageArray, + _pages: Self::PhysPageAddrArray, _perms: PhysPageMapPermissions, ) -> Result<(), PhysPointerError> { Err(PhysPointerError::UnsupportedOperation) diff --git a/litebox_platform_lvbs/src/lib.rs b/litebox_platform_lvbs/src/lib.rs index 7c7e7e2a4..aaa676d8d 100644 --- a/litebox_platform_lvbs/src/lib.rs +++ b/litebox_platform_lvbs/src/lib.rs @@ -17,7 +17,7 @@ use core::{ }; use litebox::platform::page_mgmt::DeallocationError; use litebox::platform::vmap::{ - PhysPageArray, PhysPageMapInfo, PhysPageMapPermissions, PhysPointerError, VmapProvider, + PhysPageAddr, PhysPageMapInfo, PhysPageMapPermissions, PhysPointerError, VmapProvider, }; use litebox::platform::{ DebugLogProvider, IPInterfaceProvider, ImmediatelyWokenUp, PageManagementProvider, @@ -763,16 +763,37 @@ impl litebox::platform::SystemInfoProvider for LinuxKernel< } } +/// Checks whether the given physical addresses are contiguous with respect to ALIGN. +/// +/// Note: This is a temporary check to let `VmapProvider` work with this platform +/// which maps physical pages with a fixed offset (`MemoryProvider::GVA_OFFSET`) such that +/// does not support non-contiguous physical page mapping with contiguous virtual addresses. 
+fn check_contiguity( + addrs: &[PhysPageAddr], +) -> Result<(), PhysPointerError> { + for window in addrs.windows(2) { + let first = window[0].as_usize(); + let second = window[1].as_usize(); + if second != first.checked_add(ALIGN).ok_or(PhysPointerError::Overflow)? { + return Err(PhysPointerError::NonContiguousPages); + } + } + Ok(()) +} + impl VmapProvider for LinuxKernel { - type PhysPageArray = PhysPageArray; + type PhysPageAddrArray = alloc::boxed::Box<[PhysPageAddr]>; type PhysPageMapInfo = PhysPageMapInfo; unsafe fn vmap( &self, - _pages: Self::PhysPageArray, + pages: Self::PhysPageAddrArray, _perms: PhysPageMapPermissions, ) -> Result { + // TODO: Remove this check once this platform supports virtually contiguous + // non-contiguous physical page mapping. + check_contiguity(&pages)?; todo!("use map_vtl0_phys_range()") } @@ -780,13 +801,13 @@ impl VmapProvider for LinuxKerne todo!("use unmap_vtl0_pages()") } - fn validate(&self, _pages: Self::PhysPageArray) -> Result<(), PhysPointerError> { + fn validate(&self, _pages: Self::PhysPageAddrArray) -> Result<(), PhysPointerError> { todo!("use vtl1_phys_frame_range to validate") } unsafe fn protect( &self, - _pages: Self::PhysPageArray, + _pages: Self::PhysPageAddrArray, _perms: PhysPageMapPermissions, ) -> Result<(), PhysPointerError> { todo!("use hypercall to protect/unprotect physical pages") diff --git a/litebox_shim_optee/src/ptr.rs b/litebox_shim_optee/src/ptr.rs index 32ea9f8de..3f3007f69 100644 --- a/litebox_shim_optee/src/ptr.rs +++ b/litebox_shim_optee/src/ptr.rs @@ -61,9 +61,8 @@ // TODO: Since the below `PhysMutPtr` and `PhysConstPtr` are not OP-TEE specific, // we can move them to a different crate (e.g., `litebox`) if needed. -use core::ops::Deref; use litebox::platform::vmap::{ - PhysPageArray, PhysPageMapInfo, PhysPageMapPermissions, PhysPointerError, VmapProvider, + PhysPageAddr, PhysPageMapInfo, PhysPageMapPermissions, PhysPointerError, VmapProvider, }; use litebox_platform_multiplex::{Platform, platform}; @@ -78,8 +77,8 @@ fn align_up(len: usize, align: usize) -> usize { } /// Represent a physical pointer to an object with on-demand mapping. -/// - `pages`: An array of page-aligned physical addresses ([`PhysPageArray`]). Physical addresses in -/// this array should be virtually contiguous. +/// - `pages`: An array of page-aligned physical addresses. Physical addresses in this array should be +/// virtually contiguous. /// - `offset`: The offset within `pages[0]` where the object starts. It should be smaller than `ALIGN`. /// - `count`: The number of objects of type `T` that can be accessed from this pointer. /// - `map_info`: The mapping information of the currently mapped physical pages, if any. @@ -88,7 +87,7 @@ fn align_up(len: usize, align: usize) -> usize { #[derive(Clone)] #[repr(C)] pub struct PhysMutPtr { - pages: PhysPageArray, + pages: alloc::boxed::Box<[PhysPageAddr]>, offset: usize, count: usize, map_info: Option>, @@ -103,7 +102,7 @@ impl PhysMutPtr { /// type `T` starting from `offset`. If these conditions are not met, this function returns /// `Err(PhysPointerError)`. 
pub fn try_from_page_array( - pages: PhysPageArray, + pages: &[PhysPageAddr], offset: usize, ) -> Result { if offset >= ALIGN { @@ -124,9 +123,9 @@ impl PhysMutPtr { core::mem::size_of::(), )); } - >::validate(platform(), pages.clone())?; + >::validate(platform(), pages.into())?; Ok(Self { - pages, + pages: pages.into(), offset, count: size / core::mem::size_of::(), map_info: None, @@ -154,10 +153,13 @@ impl PhysMutPtr { let mut pages = alloc::vec::Vec::with_capacity((end_page - start_page) / ALIGN); let mut current_page = start_page; while current_page < end_page { - pages.push(current_page); + pages.push( + PhysPageAddr::::new(current_page) + .ok_or(PhysPointerError::InvalidPhysicalAddress(current_page))?, + ); current_page += ALIGN; } - Self::try_from_page_array(PhysPageArray::try_from_slice(&pages)?, pa - start_page) + Self::try_from_page_array(&pages, pa - start_page) } /// Create a new `PhysMutPtr` from the given physical address for a single object. @@ -421,15 +423,23 @@ impl PhysMutPtr { if start >= end || end > self.pages.len() { return Err(PhysPointerError::IndexOutOfBounds(end, self.pages.len())); } + let accept_perms = PhysPageMapPermissions::READ | PhysPageMapPermissions::WRITE; + if perms.bits() & !accept_perms.bits() != 0 { + return Err(PhysPointerError::UnsupportedPermissions(perms.bits())); + } if self.map_info.is_none() { - let sub_pages = PhysPageArray::try_from_slice(&self.pages.deref()[start..end])?; + let sub_pages = &self.pages[start..end]; unsafe { - self.map_info = Some(platform().vmap(sub_pages, perms)?); + self.map_info = Some(>::vmap( + platform(), + sub_pages.into(), + perms, + )?); } Ok(()) } else { Err(PhysPointerError::AlreadyMapped( - self.pages.first().unwrap_or(0), + self.pages.first().map_or(0, |p| p.as_usize()), )) } } @@ -448,7 +458,9 @@ impl PhysMutPtr { self.map_info = None; Ok(()) } else { - Err(PhysPointerError::Unmapped(self.pages.first().unwrap_or(0))) + Err(PhysPointerError::Unmapped( + self.pages.first().map_or(0, |p| p.as_usize()), + )) } } } @@ -462,7 +474,7 @@ impl Drop for PhysMutPtr { impl core::fmt::Debug for PhysMutPtr { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_struct("PhysMutPtr") - .field("pages[0]", &self.pages.first().unwrap_or(0)) + .field("pages[0]", &self.pages.first().map_or(0, |p| p.as_usize())) .field("offset", &self.offset) .finish_non_exhaustive() } @@ -484,7 +496,7 @@ impl PhysConstPtr { /// type `T` starting from `offset`. If these conditions are not met, this function returns /// `Err(PhysPointerError)`. 
pub fn try_from_page_array( - pages: PhysPageArray, + pages: &[PhysPageAddr], offset: usize, ) -> Result { Ok(Self { @@ -553,7 +565,10 @@ impl Drop for PhysConstPtr { impl core::fmt::Debug for PhysConstPtr { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_struct("PhysConstPtr") - .field("pages[0]", &self.inner.pages.first().unwrap_or(0)) + .field( + "pages[0]", + &self.inner.pages.first().map_or(0, |p| p.as_usize()), + ) .field("offset", &self.inner.offset) .finish_non_exhaustive() } From 04fa48dc44d37963c695f5d5343f78adfa1fe10b Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Mon, 29 Dec 2025 17:54:12 +0000 Subject: [PATCH 32/45] rename --- litebox_shim_optee/src/msg_handler.rs | 7 +- litebox_shim_optee/src/ptr.rs | 111 +++++++++++++------------- 2 files changed, 58 insertions(+), 60 deletions(-) diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index 5147c6286..27027113f 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -75,9 +75,8 @@ pub fn handle_optee_smc_args( | OpteeSmcFunction::CallWithRegdArg => { let msg_arg_addr = smc.optee_msg_arg_phys_addr()?; let msg_arg_addr = usize::try_from(msg_arg_addr).unwrap(); - let mut ptr = - NormalWorldConstPtr::::try_from_usize(msg_arg_addr) - .map_err(|_| OpteeSmcReturn::EBadAddr)?; + let mut ptr = NormalWorldConstPtr::::with_usize(msg_arg_addr) + .map_err(|_| OpteeSmcReturn::EBadAddr)?; let msg_arg = unsafe { ptr.read_at_offset(0) }.map_err(|_| OpteeSmcReturn::EBadAddr)?; Ok(( OpteeSmcResult::Generic { @@ -281,7 +280,7 @@ impl ShmRefMap { let mut cur_addr = usize::try_from(shm_ref_pages_data_phys_addr).unwrap(); loop { let mut cur_ptr = - NormalWorldConstPtr::::try_from_usize(cur_addr) + NormalWorldConstPtr::::with_usize(cur_addr) .map_err(|_| OpteeSmcReturn::EBadAddr)?; let pages_data = unsafe { cur_ptr.read_at_offset(0) }.map_err(|_| OpteeSmcReturn::EBadAddr)?; diff --git a/litebox_shim_optee/src/ptr.rs b/litebox_shim_optee/src/ptr.rs index 3f3007f69..a2fe3f9cf 100644 --- a/litebox_shim_optee/src/ptr.rs +++ b/litebox_shim_optee/src/ptr.rs @@ -64,7 +64,7 @@ use litebox::platform::vmap::{ PhysPageAddr, PhysPageMapInfo, PhysPageMapPermissions, PhysPointerError, VmapProvider, }; -use litebox_platform_multiplex::{Platform, platform}; +use litebox_platform_multiplex::platform; #[inline] fn align_down(address: usize, align: usize) -> usize { @@ -101,10 +101,7 @@ impl PhysMutPtr { /// than `ALIGN`. Also, `pages` should contain enough pages to cover at least one object of /// type `T` starting from `offset`. If these conditions are not met, this function returns /// `Err(PhysPointerError)`. - pub fn try_from_page_array( - pages: &[PhysPageAddr], - offset: usize, - ) -> Result { + pub fn new(pages: &[PhysPageAddr], offset: usize) -> Result { if offset >= ALIGN { return Err(PhysPointerError::InvalidBaseOffset(offset, ALIGN)); } @@ -123,7 +120,7 @@ impl PhysMutPtr { core::mem::size_of::(), )); } - >::validate(platform(), pages.into())?; + platform().validate(pages.into())?; Ok(Self { pages: pages.into(), offset, @@ -135,10 +132,11 @@ impl PhysMutPtr { /// Create a new `PhysMutPtr` from the given contiguous physical address and length. /// - /// This is a shortcut for `try_from_page_array([align_down(pa), ..., align_up(align_down(pa) + bytes)], pa % ALIGN)`. + /// This is a shortcut for + /// `PhysMutPtr::new([align_down(pa), align_down(pa) + ALIGN, ..., align_up(align_down(pa) + bytes)], pa % ALIGN)`. 
/// This function assumes that `pa`, ..., `pa+bytes` are both physically and virtually contiguous. If not, /// later accesses through `PhysMutPtr` may read/write incorrect data. - pub fn try_from_contiguous_pages(pa: usize, bytes: usize) -> Result { + pub fn with_contiguous_pages(pa: usize, bytes: usize) -> Result { if bytes < core::mem::size_of::() { return Err(PhysPointerError::InsufficientPhysicalPages( bytes, @@ -159,16 +157,16 @@ impl PhysMutPtr { ); current_page += ALIGN; } - Self::try_from_page_array(&pages, pa - start_page) + Self::new(&pages, pa - start_page) } /// Create a new `PhysMutPtr` from the given physical address for a single object. /// - /// This is a shortcut for `try_from_contiguous_pages(pa, size_of::())`. + /// This is a shortcut for `PhysMutPtr::with_contiguous_pages(pa, size_of::())`. /// /// Note: This module doesn't provide `as_usize` because LiteBox should not dereference physical addresses directly. - pub fn try_from_usize(pa: usize) -> Result { - Self::try_from_contiguous_pages(pa, core::mem::size_of::()) + pub fn with_usize(pa: usize) -> Result { + Self::with_contiguous_pages(pa, core::mem::size_of::()) } /// Read the value at the given offset from the physical pointer. @@ -199,25 +197,23 @@ impl PhysMutPtr { self.map_range(start, end, PhysPageMapPermissions::READ)?; } // Don't forget to call unmap() before returning to the caller - let Some(map_info) = &self.map_info else { + let Some(src) = (unsafe { self.base_ptr() }) else { unsafe { self.unmap()?; } return Err(PhysPointerError::NoMappingInfo); }; - let addr = unsafe { map_info.base.add(self.offset) } - .cast::() - .wrapping_add(count); + let src = src.wrapping_add(count); let val = { let mut buffer = core::mem::MaybeUninit::::uninit(); - if (addr as usize).is_multiple_of(core::mem::align_of::()) { + if (src as usize).is_multiple_of(core::mem::align_of::()) { unsafe { - core::ptr::copy_nonoverlapping(addr, buffer.as_mut_ptr(), 1); + core::ptr::copy_nonoverlapping(src, buffer.as_mut_ptr(), 1); } } else { unsafe { core::ptr::copy_nonoverlapping( - addr.cast::(), + src.cast::(), buffer.as_mut_ptr().cast::(), core::mem::size_of::(), ); @@ -263,23 +259,21 @@ impl PhysMutPtr { self.map_range(start, end, PhysPageMapPermissions::READ)?; } // Don't forget to call unmap() before returning to the caller - let Some(map_info) = &self.map_info else { + let Some(src) = (unsafe { self.base_ptr() }) else { unsafe { self.unmap()?; } return Err(PhysPointerError::NoMappingInfo); }; - let addr = unsafe { map_info.base.add(self.offset) } - .cast::() - .wrapping_add(count); - if (addr as usize).is_multiple_of(core::mem::align_of::()) { + let src = src.wrapping_add(count); + if (src as usize).is_multiple_of(core::mem::align_of::()) { unsafe { - core::ptr::copy_nonoverlapping(addr, values.as_mut_ptr(), values.len()); + core::ptr::copy_nonoverlapping(src, values.as_mut_ptr(), values.len()); } } else { unsafe { core::ptr::copy_nonoverlapping( - addr.cast::(), + src.cast::(), values.as_mut_ptr().cast::(), core::mem::size_of_val(values), ); @@ -324,19 +318,17 @@ impl PhysMutPtr { )?; } // Don't forget to call unmap() before returning to the caller - let Some(map_info) = &self.map_info else { + let Some(dst) = (unsafe { self.base_ptr() }) else { unsafe { self.unmap()?; } return Err(PhysPointerError::NoMappingInfo); }; - let addr = unsafe { map_info.base.add(self.offset) } - .cast::() - .wrapping_add(count); - if (addr as usize).is_multiple_of(core::mem::align_of::()) { - unsafe { core::ptr::write(addr, value) }; + let dst = 
dst.wrapping_add(count); + if (dst as usize).is_multiple_of(core::mem::align_of::()) { + unsafe { core::ptr::write(dst, value) }; } else { - unsafe { core::ptr::write_unaligned(addr, value) }; + unsafe { core::ptr::write_unaligned(dst, value) }; } unsafe { self.unmap()?; @@ -380,24 +372,22 @@ impl PhysMutPtr { )?; } // Don't forget to call unmap() before returning to the caller - let Some(map_info) = &self.map_info else { + let Some(dst) = (unsafe { self.base_ptr() }) else { unsafe { self.unmap()?; } return Err(PhysPointerError::NoMappingInfo); }; - let addr = unsafe { map_info.base.add(self.offset) } - .cast::() - .wrapping_add(count); - if (addr as usize).is_multiple_of(core::mem::align_of::()) { + let dst = dst.wrapping_add(count); + if (dst as usize).is_multiple_of(core::mem::align_of::()) { unsafe { - core::ptr::copy_nonoverlapping(values.as_ptr(), addr, values.len()); + core::ptr::copy_nonoverlapping(values.as_ptr(), dst, values.len()); } } else { unsafe { core::ptr::copy_nonoverlapping( values.as_ptr().cast::(), - addr.cast::(), + dst.cast::(), core::mem::size_of_val(values), ); } @@ -430,11 +420,9 @@ impl PhysMutPtr { if self.map_info.is_none() { let sub_pages = &self.pages[start..end]; unsafe { - self.map_info = Some(>::vmap( - platform(), - sub_pages.into(), - perms, - )?); + platform().vmap(sub_pages.into(), perms).map(|info| { + self.map_info = Some(info); + })?; } Ok(()) } else { @@ -463,6 +451,19 @@ impl PhysMutPtr { )) } } + + /// Get the base virtual pointer if mapped. + /// + /// # Safety + /// + /// This function performs pointer arithmetic on the mapped base pointer. + #[inline] + unsafe fn base_ptr(&self) -> Option<*mut T> { + let Some(map_info) = &self.map_info else { + return None; + }; + Some(unsafe { map_info.base.add(self.offset) }.cast::()) + } } impl Drop for PhysMutPtr { @@ -495,34 +496,32 @@ impl PhysConstPtr { /// than `ALIGN`. Also, `pages` should contain enough pages to cover at least one object of /// type `T` starting from `offset`. If these conditions are not met, this function returns /// `Err(PhysPointerError)`. - pub fn try_from_page_array( - pages: &[PhysPageAddr], - offset: usize, - ) -> Result { + pub fn new(pages: &[PhysPageAddr], offset: usize) -> Result { Ok(Self { - inner: PhysMutPtr::try_from_page_array(pages, offset)?, + inner: PhysMutPtr::new(pages, offset)?, }) } /// Create a new `PhysConstPtr` from the given contiguous physical address and length. /// - /// This is a shortcut for `try_from_page_array([align_down(pa), ..., align_up(align_down(pa) + bytes)], pa % ALIGN)`. + /// This is a shortcut for + /// `PhysConstPtr::new([align_down(pa), align_down(pa) + ALIGN, ..., align_up(align_down(pa) + bytes)], pa % ALIGN)`. /// This function assumes that `pa`, ..., `pa+bytes` are both physically and virtually contiguous. If not, /// later accesses through `PhysConstPtr` may read incorrect data. - pub fn try_from_contiguous_pages(pa: usize, bytes: usize) -> Result { + pub fn with_contiguous_pages(pa: usize, bytes: usize) -> Result { Ok(Self { - inner: PhysMutPtr::try_from_contiguous_pages(pa, bytes)?, + inner: PhysMutPtr::with_contiguous_pages(pa, bytes)?, }) } /// Create a new `PhysConstPtr` from the given physical address for a single object. /// - /// This is a shortcut for `try_from_contiguous_pages(pa, size_of::())`. + /// This is a shortcut for `PhysConstPtr::with_contiguous_pages(pa, size_of::())`. /// /// Note: This module doesn't provide `as_usize` because LiteBox should not dereference physical addresses directly. 
- pub fn try_from_usize(pa: usize) -> Result { + pub fn with_usize(pa: usize) -> Result { Ok(Self { - inner: PhysMutPtr::try_from_usize(pa)?, + inner: PhysMutPtr::with_usize(pa)?, }) } From c0a103056c86de03b1394d5730aac4736b2c36ce Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Mon, 29 Dec 2025 22:38:56 +0000 Subject: [PATCH 33/45] use PhysPageAddr for ShmRefMap --- litebox_shim_optee/src/msg_handler.rs | 56 ++++++++++++++++----------- 1 file changed, 33 insertions(+), 23 deletions(-) diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index 27027113f..8d3a34529 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -19,6 +19,7 @@ use crate::NormalWorldConstPtr; use alloc::{boxed::Box, vec::Vec}; use hashbrown::HashMap; use litebox::mm::linux::PAGE_SIZE; +use litebox::platform::vmap::PhysPageAddr; use litebox_common_optee::{ OpteeMessageCommand, OpteeMsgArg, OpteeSecureWorldCapabilities, OpteeSmcArgs, OpteeSmcFunction, OpteeSmcResult, OpteeSmcReturn, @@ -202,19 +203,14 @@ pub fn handle_ta_request(_msg_arg: &OpteeMsgArg) -> Result, - pub page_offset: u64, -} - /// A scatter-gather list of OP-TEE physical page addresses in the normal world (VTL0) to /// share with the secure world (VTL1). Each [`ShmRefPagesData`] occupies one memory page /// where `pages_list` contains a list of physical page addresses and `next_page_data` /// contains the physical address of the next [`ShmRefPagesData`] if any. Entries of `pages_list` /// and `next_page_data` contain zero if the list ends. These physical page addresses are /// virtually contiguous in the normal world. All these address values must be page aligned. +/// +/// `pages_data` from [Linux](https://elixir.bootlin.com/linux/v6.18.2/source/drivers/tee/optee/smc_abi.c#L409) #[derive(Clone, Copy)] #[repr(C)] struct ShmRefPagesData { @@ -226,22 +222,33 @@ impl ShmRefPagesData { PAGE_SIZE / core::mem::size_of::() - core::mem::size_of::(); } +/// Data structure to maintain the information of OP-TEE shared memory in VTL0 referenced by `shm_ref`. +/// `pages` contains an array of physical page addresses. +/// `page_offset` indicates the page offset of the first page (i.e., `pages[0]`) which should be +/// smaller than `ALIGN`. +#[expect(unused)] +#[derive(Clone)] +struct ShmRefInfo { + pub pages: Box<[PhysPageAddr]>, + pub page_offset: usize, +} + /// Maintain the information of OP-TEE shared memory in VTL0 referenced by `shm_ref`. /// This data structure is for registering shared memory regions before they are /// used during OP-TEE calls with parameters referencing shared memory. /// Any normal memory references without this registration will be rejected. 
-struct ShmRefMap { - inner: spin::mutex::SpinMutex>, +struct ShmRefMap { + inner: spin::mutex::SpinMutex>>, } -impl ShmRefMap { +impl ShmRefMap { pub fn new() -> Self { Self { inner: spin::mutex::SpinMutex::new(HashMap::new()), } } - pub fn insert(&self, shm_ref: u64, info: ShmRefInfo) -> Result<(), OpteeSmcReturn> { + pub fn insert(&self, shm_ref: u64, info: ShmRefInfo) -> Result<(), OpteeSmcReturn> { let mut guard = self.inner.lock(); if guard.contains_key(&shm_ref) { Err(OpteeSmcReturn::ENotAvail) @@ -251,13 +258,13 @@ impl ShmRefMap { } } - pub fn remove(&self, shm_ref: u64) -> Option { + pub fn remove(&self, shm_ref: u64) -> Option> { let mut guard = self.inner.lock(); guard.remove(&shm_ref) } #[expect(unused)] - pub fn get(&self, shm_ref: u64) -> Option { + pub fn get(&self, shm_ref: u64) -> Option> { let guard = self.inner.lock(); guard.get(&shm_ref).cloned() } @@ -275,22 +282,25 @@ impl ShmRefMap { aligned_size: u64, shm_ref: u64, ) -> Result<(), OpteeSmcReturn> { - let num_pages = usize::try_from(aligned_size).unwrap() / PAGE_SIZE; + if page_offset >= ALIGN as u64 || aligned_size == 0 { + return Err(OpteeSmcReturn::EBadAddr); + } + let num_pages = usize::try_from(aligned_size).unwrap() / ALIGN; let mut pages = Vec::with_capacity(num_pages); let mut cur_addr = usize::try_from(shm_ref_pages_data_phys_addr).unwrap(); loop { - let mut cur_ptr = - NormalWorldConstPtr::::with_usize(cur_addr) - .map_err(|_| OpteeSmcReturn::EBadAddr)?; + let mut cur_ptr = NormalWorldConstPtr::::with_usize(cur_addr) + .map_err(|_| OpteeSmcReturn::EBadAddr)?; let pages_data = unsafe { cur_ptr.read_at_offset(0) }.map_err(|_| OpteeSmcReturn::EBadAddr)?; for page in &pages_data.pages_list { if *page == 0 || pages.len() == num_pages { break; - } else if !page.is_multiple_of(u64::try_from(PAGE_SIZE).unwrap()) { - return Err(OpteeSmcReturn::EBadAddr); } else { - pages.push(*page); + pages.push( + PhysPageAddr::new(usize::try_from(*page).unwrap()) + .ok_or(OpteeSmcReturn::EBadAddr)?, + ); } } if pages_data.next_page_data == 0 || pages.len() == num_pages { @@ -304,14 +314,14 @@ impl ShmRefMap { shm_ref, ShmRefInfo { pages: pages.into_boxed_slice(), - page_offset, + page_offset: usize::try_from(page_offset).unwrap(), }, )?; Ok(()) } } -fn shm_ref_map() -> &'static ShmRefMap { - static SHM_REF_MAP: OnceBox = OnceBox::new(); +fn shm_ref_map() -> &'static ShmRefMap { + static SHM_REF_MAP: OnceBox> = OnceBox::new(); SHM_REF_MAP.get_or_init(|| Box::new(ShmRefMap::new())) } From 2fbb19d44ddadb466e78174ab5ad8e4a237c4de5 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Thu, 8 Jan 2026 05:53:33 +0000 Subject: [PATCH 34/45] clarification --- litebox/src/platform/vmap.rs | 23 +++++---- litebox_common_optee/src/lib.rs | 1 - litebox_platform_lvbs/src/lib.rs | 4 +- litebox_shim_optee/src/msg_handler.rs | 70 +++++++++++++++------------ litebox_shim_optee/src/ptr.rs | 38 +++++++-------- 5 files changed, 69 insertions(+), 67 deletions(-) diff --git a/litebox/src/platform/vmap.rs b/litebox/src/platform/vmap.rs index b152508fb..a5e676846 100644 --- a/litebox/src/platform/vmap.rs +++ b/litebox/src/platform/vmap.rs @@ -30,8 +30,8 @@ pub trait VmapProvider { /// (especially, there should be no read/write or write/write conflicts). /// Unfortunately, LiteBox itself cannot fully guarantee this and it needs some helps /// from the caller, hypervisor, or hardware. - /// Multiple LiteBox threads might concurrently call this function (and `vunmap()`) with - /// overlapping physical pages, so the implementation should safely handle such cases. 
+    /// Multiple LiteBox threads might concurrently call this function with overlapping
+    /// physical pages, so the implementation should safely handle such cases.
     unsafe fn vmap(
         &self,
         pages: Self::PhysPageAddrArray,
         perms: PhysPageMapPermissions,
     ) -> Result<Self::PhysPageMapInfo, PhysPointerError>;
 
@@ -46,13 +46,10 @@ pub trait VmapProvider<const ALIGN: usize> {
     /// # Safety
     ///
     /// The caller should ensure that the virtual addresses in `vmap_info` are not in active
-    /// use by other entities. Like `vmap()`, LiteBox itself cannot fully guarantee this and
-    /// it needs some helps from other parties.
-    /// Multiple LiteBox threads might concurrently call this function (and `vmap()`) with
-    /// overlapping physical pages, so the implementation should safely handle such cases.
+    /// use by other entities.
     unsafe fn vunmap(&self, vmap_info: Self::PhysPageMapInfo) -> Result<(), PhysPointerError>;
 
-    /// Validate that the given physical pages do not belong to LiteBox-managed memory.
+    /// Validate that the given physical pages do not belong to LiteBox-owned memory.
     /// Use `&self` to get the memory layout of the platform (i.e., the physical memory
     /// range assigned to LiteBox).
     ///
@@ -61,12 +58,13 @@ pub trait VmapProvider<const ALIGN: usize> {
     /// Returns `Ok(())` if valid. If the pages are not valid, returns `Err(PhysPointerError)`.
     fn validate(&self, pages: Self::PhysPageAddrArray) -> Result<(), PhysPointerError>;
 
-    /// Protect the given physical pages to ensure concurrent read or exclusive write access.
-    /// Read protection prevents others from modifying the pages. Read/write protection prevents
-    /// others from accessing the pages.
-    /// This can be implemented using EPT/NPT, TZASC, PMP, or some other hardware mechanisms.
+    /// Protect the given physical pages to ensure concurrent read or exclusive write access:
+    /// - Read protection: prevent others from writing to the pages.
+    /// - Read/write protection: prevent others from reading or writing to the pages.
+    /// - No protection: allow others to read and write the pages.
     ///
-    /// This function is a no-op if there is no other world or VM sharing the physical memory.
+    /// This function can be implemented using EPT/NPT, TZASC, PMP, or some other hardware mechanisms.
+    /// It is a no-op if there is no other world or VM sharing the physical memory.
     ///
     /// Returns `Ok(())` if it successfully protects the pages. If it fails, returns
     /// `Err(PhysPointerError)`.
@@ -75,6 +73,7 @@ pub trait VmapProvider<const ALIGN: usize> {
     ///
     /// Since this function is expected to use hypercalls or other privileged hardware features,
     /// the caller must ensure that it is safe to perform such operations at the time of the call.
+    /// Also, the caller should unprotect the pages once the protection is no longer needed.
     unsafe fn protect(
         &self,
         pages: Self::PhysPageAddrArray,
         perms: PhysPageMapPermissions,
     ) -> Result<(), PhysPointerError>;
 }
diff --git a/litebox_common_optee/src/lib.rs b/litebox_common_optee/src/lib.rs
index 823e1df9d..6feda4ce0 100644
--- a/litebox_common_optee/src/lib.rs
+++ b/litebox_common_optee/src/lib.rs
@@ -1418,7 +1418,6 @@ impl OpteeSmcArgs {
 
     /// Get the physical address of `OpteeMsgArg`. The secure world is expected to map and copy
     /// this structure.
-    #[cfg(target_pointer_width = "64")]
     pub fn optee_msg_arg_phys_addr(&self) -> Result<u64, OpteeSmcReturn> {
         // To avoid potential sign extension and overflow issues, OP-TEE stores the low and
         // high 32 bits of a 64-bit address in `args[2]` and `args[1]`, respectively.
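Since the split encoding described above is easy to get wrong, here is a small worked sketch. The register layout is taken from the comment; the free-standing helper is hypothetical and shown only to illustrate the arithmetic:

    /// Reassemble the 64-bit physical address of `OpteeMsgArg` from the SMC
    /// argument registers: `args[1]` holds the high 32 bits, `args[2]` the low.
    fn msg_arg_phys_addr(args: &[usize]) -> u64 {
        let high = args[1] as u64;
        let low = args[2] as u64;
        (high << 32) | (low & 0xffff_ffff)
    }

For example, `args[1] = 0x1` and `args[2] = 0x2000` yield the physical address `0x1_0000_2000`.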
diff --git a/litebox_platform_lvbs/src/lib.rs b/litebox_platform_lvbs/src/lib.rs
index aaa676d8d..238c7a9bd 100644
--- a/litebox_platform_lvbs/src/lib.rs
+++ b/litebox_platform_lvbs/src/lib.rs
@@ -766,8 +766,8 @@ impl litebox::platform::SystemInfoProvider for LinuxKernel<
 /// Checks whether the given physical addresses are contiguous with respect to ALIGN.
 ///
 /// Note: This is a temporary check to let `VmapProvider` work with this platform
-/// which maps physical pages with a fixed offset (`MemoryProvider::GVA_OFFSET`) such that
-/// does not support non-contiguous physical page mapping with contiguous virtual addresses.
+/// which does not yet support virtually contiguous mapping of non-contiguous physical pages
+/// (for now, it maps physical pages with a fixed offset).
 fn check_contiguity<const ALIGN: usize>(
     addrs: &[PhysPageAddr<ALIGN>],
 ) -> Result<(), PhysPointerError> {
diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs
index 8d3a34529..c3bbcfdc4 100644
--- a/litebox_shim_optee/src/msg_handler.rs
+++ b/litebox_shim_optee/src/msg_handler.rs
@@ -60,15 +60,23 @@ fn page_align_up(len: u64) -> u64 {
     len.next_multiple_of(PAGE_SIZE as u64)
 }
 
+/// The result of handling an OP-TEE SMC call, along with an extracted OP-TEE message argument to handle.
+pub struct OpteeSmcHandled<'a> {
+    pub result: OpteeSmcResult<'a>,
+    pub msg_to_handle: Option<OpteeMsgArg>,
+}
+
 /// This function handles `OpteeSmcArgs` passed from the normal world (VTL0) via an OP-TEE SMC call.
 /// It returns an `OpteeSmcResult` representing the result of the SMC call and
 /// an optional `OpteeMsgArg` if the SMC call involves an OP-TEE message, which should be handled by
 /// `handle_optee_msg_arg` or `handle_ta_request`.
+///
 /// # Panics
+///
 /// Panics if the normal world physical address in `smc` cannot be converted to `usize`.
 pub fn handle_optee_smc_args(
     smc: &mut OpteeSmcArgs,
-) -> Result<(OpteeSmcResult<'_>, Option<OpteeMsgArg>), OpteeSmcReturn> {
+) -> Result<OpteeSmcHandled<'_>, OpteeSmcReturn> {
     let func_id = smc.func_id()?;
     match func_id {
         OpteeSmcFunction::CallWithArg
@@ -79,41 +87,41 @@ pub fn handle_optee_smc_args(
         | OpteeSmcFunction::CallWithRegdArg => {
             let msg_arg_addr = smc.optee_msg_arg_phys_addr()?;
             let msg_arg_addr = usize::try_from(msg_arg_addr).unwrap();
             let mut ptr = NormalWorldConstPtr::<OpteeMsgArg>::with_usize(msg_arg_addr)
                 .map_err(|_| OpteeSmcReturn::EBadAddr)?;
             let msg_arg = unsafe { ptr.read_at_offset(0) }.map_err(|_| OpteeSmcReturn::EBadAddr)?;
-            Ok((
-                OpteeSmcResult::Generic {
+            Ok(OpteeSmcHandled {
+                result: OpteeSmcResult::Generic {
                     status: OpteeSmcReturn::Ok,
                 },
-                Some(*msg_arg),
-            ))
+                msg_to_handle: Some(*msg_arg),
+            })
         }
         OpteeSmcFunction::ExchangeCapabilities => {
             // TODO: update the below when we support more features
             let default_cap = OpteeSecureWorldCapabilities::DYNAMIC_SHM
                 | OpteeSecureWorldCapabilities::MEMREF_NULL
                 | OpteeSecureWorldCapabilities::RPC_ARG;
-            Ok((
-                OpteeSmcResult::ExchangeCapabilities {
+            Ok(OpteeSmcHandled {
+                result: OpteeSmcResult::ExchangeCapabilities {
                     status: OpteeSmcReturn::Ok,
                     capabilities: default_cap,
                     max_notif_value: MAX_NOTIF_VALUE,
                     data: NUM_RPC_PARMS,
                 },
-                None,
-            ))
+                msg_to_handle: None,
+            })
         }
         OpteeSmcFunction::DisableShmCache => {
             // Currently, we do not support this feature.
- Ok(( - OpteeSmcResult::DisableShmCache { + Ok(OpteeSmcHandled { + result: OpteeSmcResult::DisableShmCache { status: OpteeSmcReturn::ENotAvail, shm_upper32: 0, shm_lower32: 0, }, - None, - )) + msg_to_handle: None, + }) } - OpteeSmcFunction::GetOsUuid => Ok(( - OpteeSmcResult::Uuid { + OpteeSmcFunction::GetOsUuid => Ok(OpteeSmcHandled { + result: OpteeSmcResult::Uuid { data: &[ OPTEE_MSG_OS_OPTEE_UUID_0, OPTEE_MSG_OS_OPTEE_UUID_1, @@ -121,10 +129,10 @@ pub fn handle_optee_smc_args( OPTEE_MSG_OS_OPTEE_UUID_3, ], }, - None, - )), - OpteeSmcFunction::CallsUid => Ok(( - OpteeSmcResult::Uuid { + msg_to_handle: None, + }), + OpteeSmcFunction::CallsUid => Ok(OpteeSmcHandled { + result: OpteeSmcResult::Uuid { data: &[ OPTEE_MSG_UID_0, OPTEE_MSG_UID_1, @@ -132,29 +140,29 @@ pub fn handle_optee_smc_args( OPTEE_MSG_UID_3, ], }, - None, - )), - OpteeSmcFunction::GetOsRevision => Ok(( - OpteeSmcResult::OsRevision { + msg_to_handle: None, + }), + OpteeSmcFunction::GetOsRevision => Ok(OpteeSmcHandled { + result: OpteeSmcResult::OsRevision { major: OPTEE_MSG_REVISION_MAJOR, minor: OPTEE_MSG_REVISION_MINOR, build_id: OPTEE_MSG_BUILD_ID, }, - None, - )), - OpteeSmcFunction::CallsRevision => Ok(( - OpteeSmcResult::Revision { + msg_to_handle: None, + }), + OpteeSmcFunction::CallsRevision => Ok(OpteeSmcHandled { + result: OpteeSmcResult::Revision { major: OPTEE_MSG_REVISION_MAJOR, minor: OPTEE_MSG_REVISION_MINOR, }, - None, - )), + msg_to_handle: None, + }), _ => Err(OpteeSmcReturn::UnknownFunction), } } /// This function handles an OP-TEE message contained in `OpteeMsgArg`. -/// Currently, it only handles share memory registration and unregistration. +/// Currently, it only handles shared memory registration and unregistration. /// If an OP-TEE message involves with a TA request, it simply returns /// `Err(OpteeSmcReturn::Ok)` while expecting that the caller will handle /// the message with `handle_ta_request`. @@ -166,7 +174,7 @@ pub fn handle_optee_msg_arg(msg_arg: &OpteeMsgArg) -> Result<(), OpteeSmcReturn> if tmem.buf_ptr == 0 || tmem.size == 0 || tmem.shm_ref == 0 { return Err(OpteeSmcReturn::EBadAddr); } - // `tmem.buf_ptr` embeds two different information: + // `tmem.buf_ptr` encodes two different information: // - The physical page address of the first `ShmRefPagesData` // - The page offset of the first shared memory page (`pages_list[0]`) let shm_ref_pages_data_phys_addr = page_align_down(tmem.buf_ptr); diff --git a/litebox_shim_optee/src/ptr.rs b/litebox_shim_optee/src/ptr.rs index a2fe3f9cf..0625804bb 100644 --- a/litebox_shim_optee/src/ptr.rs +++ b/litebox_shim_optee/src/ptr.rs @@ -19,9 +19,9 @@ //! leakage due to concurrent or persistent access). //! //! Instead, the approach this module takes is to map the required physical memory -//! region on-demand when accessing them while using a LiteBox-managed buffer to copy +//! region on-demand when accessing them while using a LiteBox-owned buffer to copy //! data to/from those regions. This way, this module can ensure that data must be -//! copied into LiteBox-managed memory before being used while avoiding any unknown +//! copied into LiteBox-owned memory before being used while avoiding any unknown //! side effects due to persistent memory mapping. //! //! Considerations: @@ -97,10 +97,10 @@ pub struct PhysMutPtr { impl PhysMutPtr { /// Create a new `PhysMutPtr` from the given physical page array and offset. /// - /// All addresses in `pages` should be valid and aligned to `ALIGN`, and `offset` should be smaller - /// than `ALIGN`. 
Also, `pages` should contain enough pages to cover at least one object of - /// type `T` starting from `offset`. If these conditions are not met, this function returns - /// `Err(PhysPointerError)`. + /// All addresses in `pages` should be valid and aligned to `ALIGN`, and `offset` should be + /// smaller than `ALIGN`. Also, `pages` should contain enough pages to cover at least one + /// object of type `T` starting from `offset`. If these conditions are not met, this function + /// returns `Err(PhysPointerError)`. pub fn new(pages: &[PhysPageAddr], offset: usize) -> Result { if offset >= ALIGN { return Err(PhysPointerError::InvalidBaseOffset(offset, ALIGN)); @@ -133,9 +133,9 @@ impl PhysMutPtr { /// Create a new `PhysMutPtr` from the given contiguous physical address and length. /// /// This is a shortcut for - /// `PhysMutPtr::new([align_down(pa), align_down(pa) + ALIGN, ..., align_up(align_down(pa) + bytes)], pa % ALIGN)`. + /// `PhysMutPtr::new([align_down(pa), align_down(pa) + ALIGN, ..., align_up(pa + bytes) - ALIGN], pa % ALIGN)`. /// This function assumes that `pa`, ..., `pa+bytes` are both physically and virtually contiguous. If not, - /// later accesses through `PhysMutPtr` may read/write incorrect data. + /// later accesses through `PhysMutPtr` may read/write data in a wrong order. pub fn with_contiguous_pages(pa: usize, bytes: usize) -> Result { if bytes < core::mem::size_of::() { return Err(PhysPointerError::InsufficientPhysicalPages( @@ -197,7 +197,7 @@ impl PhysMutPtr { self.map_range(start, end, PhysPageMapPermissions::READ)?; } // Don't forget to call unmap() before returning to the caller - let Some(src) = (unsafe { self.base_ptr() }) else { + let Some(src) = self.base_ptr() else { unsafe { self.unmap()?; } @@ -259,7 +259,7 @@ impl PhysMutPtr { self.map_range(start, end, PhysPageMapPermissions::READ)?; } // Don't forget to call unmap() before returning to the caller - let Some(src) = (unsafe { self.base_ptr() }) else { + let Some(src) = self.base_ptr() else { unsafe { self.unmap()?; } @@ -289,7 +289,7 @@ impl PhysMutPtr { /// /// # Safety /// - /// The caller should be aware that the given physical address might be concurrently writtenby + /// The caller should be aware that the given physical address might be concurrently written by /// other entities (e.g., the normal world kernel) if there is no extra security mechanism /// in place (e.g., by the hypervisor or hardware). That is, data it writes might be overwritten. pub unsafe fn write_at_offset( @@ -318,7 +318,7 @@ impl PhysMutPtr { )?; } // Don't forget to call unmap() before returning to the caller - let Some(dst) = (unsafe { self.base_ptr() }) else { + let Some(dst) = self.base_ptr() else { unsafe { self.unmap()?; } @@ -372,7 +372,7 @@ impl PhysMutPtr { )?; } // Don't forget to call unmap() before returning to the caller - let Some(dst) = (unsafe { self.base_ptr() }) else { + let Some(dst) = self.base_ptr() else { unsafe { self.unmap()?; } @@ -453,16 +453,12 @@ impl PhysMutPtr { } /// Get the base virtual pointer if mapped. - /// - /// # Safety - /// - /// This function performs pointer arithmetic on the mapped base pointer. 
#[inline] - unsafe fn base_ptr(&self) -> Option<*mut T> { + fn base_ptr(&self) -> Option<*mut T> { let Some(map_info) = &self.map_info else { return None; }; - Some(unsafe { map_info.base.add(self.offset) }.cast::()) + Some(map_info.base.wrapping_add(self.offset).cast::()) } } @@ -505,9 +501,9 @@ impl PhysConstPtr { /// Create a new `PhysConstPtr` from the given contiguous physical address and length. /// /// This is a shortcut for - /// `PhysConstPtr::new([align_down(pa), align_down(pa) + ALIGN, ..., align_up(align_down(pa) + bytes)], pa % ALIGN)`. + /// `PhysConstPtr::new([align_down(pa), align_down(pa) + ALIGN, ..., align_up(pa + bytes) - ALIGN], pa % ALIGN)`. /// This function assumes that `pa`, ..., `pa+bytes` are both physically and virtually contiguous. If not, - /// later accesses through `PhysConstPtr` may read incorrect data. + /// later accesses through `PhysConstPtr` may read data in a wrong order. pub fn with_contiguous_pages(pa: usize, bytes: usize) -> Result { Ok(Self { inner: PhysMutPtr::with_contiguous_pages(pa, bytes)?, From 532ebe920e687145c37a8f756d7ea3da344737e3 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Wed, 21 Jan 2026 22:53:28 +0000 Subject: [PATCH 35/45] move VmapProvider to litebox_common_optee --- litebox/src/platform/mod.rs | 1 - litebox_common_optee/src/lib.rs | 1 + .../src}/vmap.rs | 28 +++++++---- litebox_platform_linux_userland/src/lib.rs | 50 +++++++++---------- litebox_platform_lvbs/src/lib.rs | 6 +-- litebox_shim_optee/src/msg_handler.rs | 3 +- litebox_shim_optee/src/ptr.rs | 2 +- 7 files changed, 48 insertions(+), 43 deletions(-) rename {litebox/src/platform => litebox_common_optee/src}/vmap.rs (90%) diff --git a/litebox/src/platform/mod.rs b/litebox/src/platform/mod.rs index b25dec4e7..3fad67900 100644 --- a/litebox/src/platform/mod.rs +++ b/litebox/src/platform/mod.rs @@ -10,7 +10,6 @@ pub mod common_providers; pub mod page_mgmt; pub mod trivial_providers; -pub mod vmap; #[cfg(test)] pub(crate) mod mock; diff --git a/litebox_common_optee/src/lib.rs b/litebox_common_optee/src/lib.rs index 6feda4ce0..a519be8a2 100644 --- a/litebox_common_optee/src/lib.rs +++ b/litebox_common_optee/src/lib.rs @@ -17,6 +17,7 @@ use num_enum::TryFromPrimitive; use syscall_nr::{LdelfSyscallNr, TeeSyscallNr}; pub mod syscall_nr; +pub mod vmap; // Based on `optee_os/lib/libutee/include/utee_syscalls.h` #[non_exhaustive] diff --git a/litebox/src/platform/vmap.rs b/litebox_common_optee/src/vmap.rs similarity index 90% rename from litebox/src/platform/vmap.rs rename to litebox_common_optee/src/vmap.rs index a5e676846..7133b6e99 100644 --- a/litebox/src/platform/vmap.rs +++ b/litebox_common_optee/src/vmap.rs @@ -1,7 +1,7 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT license. -use crate::platform::page_mgmt::MemoryRegionPermissions; +use litebox::platform::page_mgmt::MemoryRegionPermissions; use thiserror::Error; /// A provider to map and unmap physical pages with virtually contiguous addresses. @@ -34,9 +34,11 @@ pub trait VmapProvider { /// physical pages, so the implementation should safely handle such cases. unsafe fn vmap( &self, - pages: Self::PhysPageAddrArray, - perms: PhysPageMapPermissions, - ) -> Result; + _pages: Self::PhysPageAddrArray, + _perms: PhysPageMapPermissions, + ) -> Result { + Err(PhysPointerError::UnsupportedOperation) + } /// Unmap the previously mapped virtually contiguous addresses ([`PhysPageMapInfo`]). /// Use `&self` to access and update the page table. 
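One payoff of moving the trait and giving every method a default body (see the remaining hunks below) is that a platform with nothing to share no longer needs hand-written stubs. A minimal sketch, assuming the `VmapProvider<4096>` generic spelling and the page-size parameters on `PhysPageAddr`/`PhysPageMapInfo`; `NoVmapPlatform` is hypothetical:

    extern crate alloc;
    use litebox_common_optee::vmap::{PhysPageAddr, PhysPageMapInfo, VmapProvider};

    /// A platform that cannot map foreign physical pages only names its
    /// associated types: `vmap`/`vunmap` fall back to the default
    /// `Err(PhysPointerError::UnsupportedOperation)`, while `validate` and
    /// `protect` fall back to the no-op `Ok(())` defaults.
    struct NoVmapPlatform;

    impl VmapProvider<4096> for NoVmapPlatform {
        type PhysPageAddrArray = alloc::boxed::Box<[PhysPageAddr<4096>]>;
        type PhysPageMapInfo = PhysPageMapInfo<4096>;
    }

This is essentially the shape the `LinuxUserland` implementation collapses to later in this patch.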
@@ -47,7 +49,9 @@ pub trait VmapProvider<const ALIGN: usize> {
     ///
     /// The caller should ensure that the virtual addresses in `vmap_info` are not in active
     /// use by other entities.
-    unsafe fn vunmap(&self, vmap_info: Self::PhysPageMapInfo) -> Result<(), PhysPointerError>;
+    unsafe fn vunmap(&self, _vmap_info: Self::PhysPageMapInfo) -> Result<(), PhysPointerError> {
+        Err(PhysPointerError::UnsupportedOperation)
+    }
 
     /// Validate that the given physical pages do not belong to LiteBox-owned memory.
     /// Use `&self` to get the memory layout of the platform (i.e., the physical memory
@@ -56,7 +60,9 @@ pub trait VmapProvider<const ALIGN: usize> {
     /// This function is a no-op if there is no other world or VM sharing the physical memory.
     ///
     /// Returns `Ok(())` if valid. If the pages are not valid, returns `Err(PhysPointerError)`.
-    fn validate(&self, pages: Self::PhysPageAddrArray) -> Result<(), PhysPointerError>;
+    fn validate(&self, _pages: Self::PhysPageAddrArray) -> Result<(), PhysPointerError> {
+        Ok(())
+    }
 
     /// Protect the given physical pages to ensure concurrent read or exclusive write access:
     /// - Read protection: prevent others from writing to the pages.
     /// - Read/write protection: prevent others from reading or writing to the pages.
     /// - No protection: allow others to read and write the pages.
     ///
@@ -76,9 +82,11 @@ pub trait VmapProvider<const ALIGN: usize> {
     /// Also, the caller should unprotect the pages once the protection is no longer needed.
     unsafe fn protect(
         &self,
-        pages: Self::PhysPageAddrArray,
-        perms: PhysPageMapPermissions,
-    ) -> Result<(), PhysPointerError>;
+        _pages: Self::PhysPageAddrArray,
+        _perms: PhysPageMapPermissions,
+    ) -> Result<(), PhysPointerError> {
+        Ok(())
+    }
 }
 
 /// Data structure representing a physical address with page alignment.
 ///
 /// Currently, this is an alias to `crate::mm::linux::NonZeroAddress`. This might change if
 /// we selectively conduct sanity checks based on whether an address is virtual or physical
 /// (e.g., whether a virtual address is canonical, whether a physical address is tagged with
 /// a valid key ID, etc.).
-pub type PhysPageAddr<const ALIGN: usize> = crate::mm::linux::NonZeroAddress<ALIGN>;
+pub type PhysPageAddr<const ALIGN: usize> = litebox::mm::linux::NonZeroAddress<ALIGN>;
 
 /// Data structure to maintain the mapping information returned by `vmap()`.
/// diff --git a/litebox_platform_linux_userland/src/lib.rs b/litebox_platform_linux_userland/src/lib.rs index 48e930f54..15fa2fc80 100644 --- a/litebox_platform_linux_userland/src/lib.rs +++ b/litebox_platform_linux_userland/src/lib.rs @@ -15,13 +15,11 @@ use std::time::Duration; use litebox::fs::OFlags; use litebox::platform::UnblockedOrTimedOut; use litebox::platform::page_mgmt::{FixedAddressBehavior, MemoryRegionPermissions}; -use litebox::platform::vmap::{ - PhysPageAddr, PhysPageMapInfo, PhysPageMapPermissions, PhysPointerError, VmapProvider, -}; use litebox::platform::{ImmediatelyWokenUp, RawConstPointer as _}; use litebox::shim::ContinueOperation; use litebox::utils::{ReinterpretSignedExt, ReinterpretUnsignedExt as _, TruncateExt}; use litebox_common_linux::{MRemapFlags, MapFlags, ProtFlags, PunchthroughSyscall}; +use litebox_common_optee::vmap::{PhysPageAddr, PhysPageMapInfo, VmapProvider}; mod syscall_intercept; @@ -2200,29 +2198,29 @@ impl VmapProvider for LinuxUserland { type PhysPageMapInfo = PhysPageMapInfo; - unsafe fn vmap( - &self, - _pages: Self::PhysPageAddrArray, - _perms: PhysPageMapPermissions, - ) -> Result { - Err(PhysPointerError::UnsupportedOperation) - } - - unsafe fn vunmap(&self, _vmap_info: Self::PhysPageMapInfo) -> Result<(), PhysPointerError> { - Err(PhysPointerError::UnsupportedOperation) - } - - fn validate(&self, _pages: Self::PhysPageAddrArray) -> Result<(), PhysPointerError> { - Err(PhysPointerError::UnsupportedOperation) - } - - unsafe fn protect( - &self, - _pages: Self::PhysPageAddrArray, - _perms: PhysPageMapPermissions, - ) -> Result<(), PhysPointerError> { - Err(PhysPointerError::UnsupportedOperation) - } + // unsafe fn vmap( + // &self, + // _pages: Self::PhysPageAddrArray, + // _perms: PhysPageMapPermissions, + // ) -> Result { + // Err(PhysPointerError::UnsupportedOperation) + // } + + // unsafe fn vunmap(&self, _vmap_info: Self::PhysPageMapInfo) -> Result<(), PhysPointerError> { + // Err(PhysPointerError::UnsupportedOperation) + // } + + // fn validate(&self, _pages: Self::PhysPageAddrArray) -> Result<(), PhysPointerError> { + // Err(PhysPointerError::UnsupportedOperation) + // } + + // unsafe fn protect( + // &self, + // _pages: Self::PhysPageAddrArray, + // _perms: PhysPageMapPermissions, + // ) -> Result<(), PhysPointerError> { + // Err(PhysPointerError::UnsupportedOperation) + // } } #[cfg(test)] diff --git a/litebox_platform_lvbs/src/lib.rs b/litebox_platform_lvbs/src/lib.rs index 238c7a9bd..966c3928a 100644 --- a/litebox_platform_lvbs/src/lib.rs +++ b/litebox_platform_lvbs/src/lib.rs @@ -16,9 +16,6 @@ use core::{ sync::atomic::{AtomicU32, AtomicU64}, }; use litebox::platform::page_mgmt::DeallocationError; -use litebox::platform::vmap::{ - PhysPageAddr, PhysPageMapInfo, PhysPageMapPermissions, PhysPointerError, VmapProvider, -}; use litebox::platform::{ DebugLogProvider, IPInterfaceProvider, ImmediatelyWokenUp, PageManagementProvider, Punchthrough, RawMutexProvider, StdioProvider, TimeProvider, UnblockedOrTimedOut, @@ -28,6 +25,9 @@ use litebox::platform::{ }; use litebox::{mm::linux::PageRange, platform::page_mgmt::FixedAddressBehavior}; use litebox_common_linux::{PunchthroughSyscall, errno::Errno}; +use litebox_common_optee::vmap::{ + PhysPageAddr, PhysPageMapInfo, PhysPageMapPermissions, PhysPointerError, VmapProvider, +}; use x86_64::structures::paging::{ PageOffset, PageSize, PageTableFlags, PhysFrame, Size4KiB, frame::PhysFrameRange, mapper::MapToError, diff --git a/litebox_shim_optee/src/msg_handler.rs 
b/litebox_shim_optee/src/msg_handler.rs index c3bbcfdc4..009f1c9d7 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -19,10 +19,9 @@ use crate::NormalWorldConstPtr; use alloc::{boxed::Box, vec::Vec}; use hashbrown::HashMap; use litebox::mm::linux::PAGE_SIZE; -use litebox::platform::vmap::PhysPageAddr; use litebox_common_optee::{ OpteeMessageCommand, OpteeMsgArg, OpteeSecureWorldCapabilities, OpteeSmcArgs, OpteeSmcFunction, - OpteeSmcResult, OpteeSmcReturn, + OpteeSmcResult, OpteeSmcReturn, vmap::PhysPageAddr, }; use once_cell::race::OnceBox; diff --git a/litebox_shim_optee/src/ptr.rs b/litebox_shim_optee/src/ptr.rs index 0625804bb..70f3aee77 100644 --- a/litebox_shim_optee/src/ptr.rs +++ b/litebox_shim_optee/src/ptr.rs @@ -61,7 +61,7 @@ // TODO: Since the below `PhysMutPtr` and `PhysConstPtr` are not OP-TEE specific, // we can move them to a different crate (e.g., `litebox`) if needed. -use litebox::platform::vmap::{ +use litebox_common_optee::vmap::{ PhysPageAddr, PhysPageMapInfo, PhysPageMapPermissions, PhysPointerError, VmapProvider, }; use litebox_platform_multiplex::platform; From 45ba6db650a5780d486769912c91d16448c49d41 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Thu, 22 Jan 2026 16:39:08 +0000 Subject: [PATCH 36/45] vmap: replace todos with actual impl --- litebox_platform_lvbs/src/lib.rs | 112 +++++++++++++++++++++++--- litebox_platform_lvbs/src/mshv/mod.rs | 2 +- litebox_platform_lvbs/src/mshv/vsm.rs | 2 +- 3 files changed, 105 insertions(+), 11 deletions(-) diff --git a/litebox_platform_lvbs/src/lib.rs b/litebox_platform_lvbs/src/lib.rs index 966c3928a..01f4c5a76 100644 --- a/litebox_platform_lvbs/src/lib.rs +++ b/litebox_platform_lvbs/src/lib.rs @@ -789,28 +789,122 @@ impl VmapProvider for LinuxKerne unsafe fn vmap( &self, pages: Self::PhysPageAddrArray, - _perms: PhysPageMapPermissions, + perms: PhysPageMapPermissions, ) -> Result { // TODO: Remove this check once this platform supports virtually contiguous // non-contiguous physical page mapping. check_contiguity(&pages)?; - todo!("use map_vtl0_phys_range()") + + if pages.is_empty() { + return Err(PhysPointerError::InvalidPhysicalAddress(0)); + } + let phys_start = x86_64::PhysAddr::new(pages[0].as_usize() as u64); + let phys_end = x86_64::PhysAddr::new( + pages + .last() + .unwrap() + .as_usize() + .checked_add(ALIGN) + .ok_or(PhysPointerError::Overflow)? 
as u64, + ); + let frame_range = if ALIGN == PAGE_SIZE { + PhysFrame::range( + PhysFrame::::containing_address(phys_start), + PhysFrame::::containing_address(phys_end), + ) + } else { + unimplemented!("ALIGN other than 4KiB is not supported yet") + }; + + let mut flags = PageTableFlags::PRESENT; + if perms.contains(PhysPageMapPermissions::WRITE) { + flags |= PageTableFlags::WRITABLE; + } + + if let Ok(page_addr) = self.page_table.map_phys_frame_range(frame_range, flags) { + Ok(Self::PhysPageMapInfo { + base: page_addr, + size: pages.len() * ALIGN, + }) + } else { + Err(PhysPointerError::InvalidPhysicalAddress( + pages[0].as_usize(), + )) + } } - unsafe fn vunmap(&self, _vmap_info: Self::PhysPageMapInfo) -> Result<(), PhysPointerError> { - todo!("use unmap_vtl0_pages()") + unsafe fn vunmap(&self, vmap_info: Self::PhysPageMapInfo) -> Result<(), PhysPointerError> { + if ALIGN == PAGE_SIZE { + let Some(page_range) = PageRange::::new( + vmap_info.base as usize, + vmap_info.base.wrapping_add(vmap_info.size) as usize, + ) else { + return Err(PhysPointerError::UnalignedPhysicalAddress( + vmap_info.base as usize, + ALIGN, + )); + }; + unsafe { + self.page_table + .unmap_pages(page_range, false) + .map_err(|_| PhysPointerError::Unmapped(vmap_info.base as usize)) + } + } else { + unimplemented!("ALIGN other than 4KiB is not supported yet") + } } - fn validate(&self, _pages: Self::PhysPageAddrArray) -> Result<(), PhysPointerError> { - todo!("use vtl1_phys_frame_range to validate") + fn validate(&self, pages: Self::PhysPageAddrArray) -> Result<(), PhysPointerError> { + if pages.is_empty() { + return Ok(()); + } + let start_address = self.vtl1_phys_frame_range.start.start_address().as_u64(); + let end_address = self.vtl1_phys_frame_range.end.start_address().as_u64(); + for page in &pages { + let addr = page.as_usize() as u64; + // a physical page belonging to LiteBox (VTL1) should not be used for `vmap` + if addr >= start_address && addr < end_address { + return Err(PhysPointerError::InvalidPhysicalAddress(page.as_usize())); + } + } + Ok(()) } unsafe fn protect( &self, - _pages: Self::PhysPageAddrArray, - _perms: PhysPageMapPermissions, + pages: Self::PhysPageAddrArray, + perms: PhysPageMapPermissions, ) -> Result<(), PhysPointerError> { - todo!("use hypercall to protect/unprotect physical pages") + let phys_start = x86_64::PhysAddr::new(pages[0].as_usize() as u64); + let phys_end = x86_64::PhysAddr::new( + pages + .last() + .unwrap() + .as_usize() + .checked_add(ALIGN) + .ok_or(PhysPointerError::Overflow)? as u64, + ); + let frame_range = if ALIGN == PAGE_SIZE { + PhysFrame::range( + PhysFrame::::containing_address(phys_start), + PhysFrame::::containing_address(phys_end), + ) + } else { + unimplemented!("ALIGN other than 4KiB is not supported yet") + }; + + let mem_attr = if perms.contains(PhysPageMapPermissions::WRITE) { + // VTL1 wants to write data to the pages, preventing VTL0 from reading/executing the pages. + crate::mshv::heki::MemAttr::empty() + } else if perms.contains(PhysPageMapPermissions::READ) { + // VTL1 wants to read data from the pages, preventing VTL0 from writing to the pages. + crate::mshv::heki::MemAttr::MEM_ATTR_READ | crate::mshv::heki::MemAttr::MEM_ATTR_EXEC + } else { + // VTL1 no longer protects the pages. 
+ crate::mshv::heki::MemAttr::all() + }; + crate::mshv::vsm::protect_physical_memory_range(frame_range, mem_attr) + .map_err(|_| PhysPointerError::UnsupportedPermissions(perms.bits())) } } diff --git a/litebox_platform_lvbs/src/mshv/mod.rs b/litebox_platform_lvbs/src/mshv/mod.rs index 71f1a8835..e5ba8f013 100644 --- a/litebox_platform_lvbs/src/mshv/mod.rs +++ b/litebox_platform_lvbs/src/mshv/mod.rs @@ -3,7 +3,7 @@ //! Hyper-V-specific code -mod heki; +pub(crate) mod heki; pub mod hvcall; mod hvcall_mm; mod hvcall_vp; diff --git a/litebox_platform_lvbs/src/mshv/vsm.rs b/litebox_platform_lvbs/src/mshv/vsm.rs index 308ff3f01..07eed67c0 100644 --- a/litebox_platform_lvbs/src/mshv/vsm.rs +++ b/litebox_platform_lvbs/src/mshv/vsm.rs @@ -1320,7 +1320,7 @@ fn copy_heki_pages_from_vtl0(pa: u64, nranges: u64) -> Option> { /// `phys_frame_range` specifies the physical frame range to protect /// `mem_attr` specifies the memory attributes to be applied to the range #[inline] -fn protect_physical_memory_range( +pub(crate) fn protect_physical_memory_range( phys_frame_range: PhysFrameRange, mem_attr: MemAttr, ) -> Result<(), Errno> { From f35be190acb88996bc335d9ea95fa1e2884b8aa9 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Thu, 22 Jan 2026 17:19:26 +0000 Subject: [PATCH 37/45] revise VmapProvider --- litebox_common_optee/src/vmap.rs | 48 +++++++++------------- litebox_platform_linux_userland/src/lib.rs | 32 +-------------- litebox_platform_lvbs/src/lib.rs | 23 +++++------ litebox_shim_optee/src/ptr.rs | 4 +- 4 files changed, 33 insertions(+), 74 deletions(-) diff --git a/litebox_common_optee/src/vmap.rs b/litebox_common_optee/src/vmap.rs index 7133b6e99..1aa61d812 100644 --- a/litebox_common_optee/src/vmap.rs +++ b/litebox_common_optee/src/vmap.rs @@ -8,19 +8,12 @@ use thiserror::Error; /// /// `ALIGN`: The page frame size. /// -/// This provider is written to implement `litebox_shim_optee::ptr::PhysMutPtr` and +/// This provider exists to service `litebox_shim_optee::ptr::PhysMutPtr` and /// `litebox_shim_optee::ptr::PhysConstPtr`. It can benefit other modules which need /// Linux kernel's `vmap()` and `vunmap()` functionalities (e.g., HVCI/HEKI, drivers). pub trait VmapProvider { - /// Data structure for an array of physical page addresses which are virtually contiguous. - type PhysPageAddrArray; - - /// Data structure to maintain the mapping information returned by `vmap()`. - type PhysPageMapInfo; - /// Map the given `PhysPageAddrArray` into virtually contiguous addresses with the given - /// [`PhysPageMapPermissions`] while returning [`PhysPageMapInfo`]. This function - /// expects that it can access and update the page table using `&self`. + /// [`PhysPageMapPermissions`] while returning [`PhysPageMapInfo`]. /// /// This function is analogous to Linux kernel's `vmap()`. /// @@ -34,14 +27,13 @@ pub trait VmapProvider { /// physical pages, so the implementation should safely handle such cases. unsafe fn vmap( &self, - _pages: Self::PhysPageAddrArray, + _pages: &PhysPageAddrArray, _perms: PhysPageMapPermissions, - ) -> Result { + ) -> Result, PhysPointerError> { Err(PhysPointerError::UnsupportedOperation) } /// Unmap the previously mapped virtually contiguous addresses ([`PhysPageMapInfo`]). - /// Use `&self` to access and update the page table. /// /// This function is analogous to Linux kernel's `vunmap()`. /// @@ -49,18 +41,16 @@ pub trait VmapProvider { /// /// The caller should ensure that the virtual addresses in `vmap_info` are not in active /// use by other entities. 
- unsafe fn vunmap(&self, _vmap_info: Self::PhysPageMapInfo) -> Result<(), PhysPointerError> { + unsafe fn vunmap(&self, _vmap_info: PhysPageMapInfo) -> Result<(), PhysPointerError> { Err(PhysPointerError::UnsupportedOperation) } - /// Validate that the given physical pages do not belong to LiteBox-owned memory. - /// Use `&self` to get the memory layout of the platform (i.e., the physical memory - /// range assigned to LiteBox). + /// Validate that the given physical pages are not owned by LiteBox. /// - /// This function is a no-op if there is no other world or VM sharing the physical memory. + /// The platform is expected to track which physical memory addresses are owned by LiteBox (e.g., VTL1 memory addresses). /// - /// Returns `Ok(())` if valid. If the pages are not valid, returns `Err(PhysPointerError)`. - fn validate(&self, _pages: Self::PhysPageAddrArray) -> Result<(), PhysPointerError> { + /// Returns `Ok(())` if the physical pages are not owned by LiteBox. Otherwise, returns `Err(PhysPointerError)`. + fn validate_unowned(&self, _pages: &PhysPageAddrArray) -> Result<(), PhysPointerError> { Ok(()) } @@ -70,19 +60,19 @@ pub trait VmapProvider { /// - No protection: allow others to read and write the pages. /// /// This function can be implemented using EPT/NPT, TZASC, PMP, or some other hardware mechanisms. - /// It is a no-op if there is no other world or VM sharing the physical memory. + /// If the platform does not support such protection, this function returns `Ok(())` without any action. /// /// Returns `Ok(())` if it successfully protects the pages. If it fails, returns /// `Err(PhysPointerError)`. /// /// # Safety /// - /// Since this function is expected to use hypercalls or other privileged hardware features, - /// the caller must ensure that it is safe to perform such operations at the time of the call. - /// Also, the caller should unprotect the pages when they are no longer needed to be protected. + /// This function relies on hypercalls or other privileged hardware features and assumes those features + /// are safe to use. + /// The caller should unprotect the pages when it no longer needs to access them. unsafe fn protect( &self, - _pages: Self::PhysPageAddrArray, + _pages: &PhysPageAddrArray, _perms: PhysPageMapPermissions, ) -> Result<(), PhysPointerError> { Ok(()) @@ -97,13 +87,15 @@ pub trait VmapProvider { /// a valid key ID, etc.). pub type PhysPageAddr = litebox::mm::linux::NonZeroAddress; +/// Data structure for an array of physical page addresses which are virtually contiguous. +pub type PhysPageAddrArray = [PhysPageAddr]; + /// Data structure to maintain the mapping information returned by `vmap()`. -/// -/// `base` is the virtual address of the mapped region which is page aligned. -/// `size` is the size of the mapped region in bytes. #[derive(Clone)] pub struct PhysPageMapInfo { + /// Virtual address of the mapped region which is page aligned. pub base: *mut u8, + /// The size of the mapped region in bytes. pub size: usize, } @@ -113,14 +105,12 @@ bitflags::bitflags! { /// /// This module only supports READ and WRITE permissions. Both EXECUTE and SHARED /// permissions are explicitly prohibited.
- #[non_exhaustive] #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub struct PhysPageMapPermissions: u8 { /// Readable const READ = 1 << 0; /// Writable const WRITE = 1 << 1; - const _ = !0; } } diff --git a/litebox_platform_linux_userland/src/lib.rs b/litebox_platform_linux_userland/src/lib.rs index 15fa2fc80..9171016a8 100644 --- a/litebox_platform_linux_userland/src/lib.rs +++ b/litebox_platform_linux_userland/src/lib.rs @@ -19,7 +19,7 @@ use litebox::platform::{ImmediatelyWokenUp, RawConstPointer as _}; use litebox::shim::ContinueOperation; use litebox::utils::{ReinterpretSignedExt, ReinterpretUnsignedExt as _, TruncateExt}; use litebox_common_linux::{MRemapFlags, MapFlags, ProtFlags, PunchthroughSyscall}; -use litebox_common_optee::vmap::{PhysPageAddr, PhysPageMapInfo, VmapProvider}; +use litebox_common_optee::vmap::VmapProvider; mod syscall_intercept; @@ -2193,35 +2193,7 @@ impl litebox::platform::CrngProvider for LinuxUserland { /// In general, userland platforms do not support `vmap` and `vunmap` (which are kernel functions). /// We might need to emulate these functions' behaviors using virtual addresses for development or /// testing, or use a kernel module to provide this functionality (if needed). -impl VmapProvider for LinuxUserland { - type PhysPageAddrArray = alloc::boxed::Box<[PhysPageAddr]>; - - type PhysPageMapInfo = PhysPageMapInfo; - - // unsafe fn vmap( - // &self, - // _pages: Self::PhysPageAddrArray, - // _perms: PhysPageMapPermissions, - // ) -> Result { - // Err(PhysPointerError::UnsupportedOperation) - // } - - // unsafe fn vunmap(&self, _vmap_info: Self::PhysPageMapInfo) -> Result<(), PhysPointerError> { - // Err(PhysPointerError::UnsupportedOperation) - // } - - // fn validate(&self, _pages: Self::PhysPageAddrArray) -> Result<(), PhysPointerError> { - // Err(PhysPointerError::UnsupportedOperation) - // } - - // unsafe fn protect( - // &self, - // _pages: Self::PhysPageAddrArray, - // _perms: PhysPageMapPermissions, - // ) -> Result<(), PhysPointerError> { - // Err(PhysPointerError::UnsupportedOperation) - // } -} +impl VmapProvider for LinuxUserland {} #[cfg(test)] mod tests { diff --git a/litebox_platform_lvbs/src/lib.rs b/litebox_platform_lvbs/src/lib.rs index 01f4c5a76..bc4f6409e 100644 --- a/litebox_platform_lvbs/src/lib.rs +++ b/litebox_platform_lvbs/src/lib.rs @@ -26,7 +26,8 @@ use litebox::platform::{ use litebox::{mm::linux::PageRange, platform::page_mgmt::FixedAddressBehavior}; use litebox_common_linux::{PunchthroughSyscall, errno::Errno}; use litebox_common_optee::vmap::{ - PhysPageAddr, PhysPageMapInfo, PhysPageMapPermissions, PhysPointerError, VmapProvider, + PhysPageAddr, PhysPageAddrArray, PhysPageMapInfo, PhysPageMapPermissions, PhysPointerError, + VmapProvider, }; use x86_64::structures::paging::{ PageOffset, PageSize, PageTableFlags, PhysFrame, Size4KiB, frame::PhysFrameRange, @@ -782,18 +783,14 @@ fn check_contiguity( } impl VmapProvider for LinuxKernel { - type PhysPageAddrArray = alloc::boxed::Box<[PhysPageAddr]>; - - type PhysPageMapInfo = PhysPageMapInfo; - unsafe fn vmap( &self, - pages: Self::PhysPageAddrArray, + pages: &PhysPageAddrArray, perms: PhysPageMapPermissions, - ) -> Result { + ) -> Result, PhysPointerError> { // TODO: Remove this check once this platform supports virtually contiguous // non-contiguous physical page mapping. 
- check_contiguity(&pages)?; + check_contiguity(pages)?; if pages.is_empty() { return Err(PhysPointerError::InvalidPhysicalAddress(0)); @@ -822,7 +819,7 @@ impl VmapProvider for LinuxKerne } if let Ok(page_addr) = self.page_table.map_phys_frame_range(frame_range, flags) { - Ok(Self::PhysPageMapInfo { + Ok(PhysPageMapInfo { base: page_addr, size: pages.len() * ALIGN, }) @@ -833,7 +830,7 @@ impl VmapProvider for LinuxKerne } } - unsafe fn vunmap(&self, vmap_info: Self::PhysPageMapInfo) -> Result<(), PhysPointerError> { + unsafe fn vunmap(&self, vmap_info: PhysPageMapInfo) -> Result<(), PhysPointerError> { if ALIGN == PAGE_SIZE { let Some(page_range) = PageRange::::new( vmap_info.base as usize, @@ -854,13 +851,13 @@ impl VmapProvider for LinuxKerne } } - fn validate(&self, pages: Self::PhysPageAddrArray) -> Result<(), PhysPointerError> { + fn validate_unowned(&self, pages: &PhysPageAddrArray) -> Result<(), PhysPointerError> { if pages.is_empty() { return Ok(()); } let start_address = self.vtl1_phys_frame_range.start.start_address().as_u64(); let end_address = self.vtl1_phys_frame_range.end.start_address().as_u64(); - for page in &pages { + for page in pages { let addr = page.as_usize() as u64; // a physical page belonging to LiteBox (VTL1) should not be used for `vmap` if addr >= start_address && addr < end_address { @@ -872,7 +869,7 @@ impl VmapProvider for LinuxKerne unsafe fn protect( &self, - pages: Self::PhysPageAddrArray, + pages: &PhysPageAddrArray, perms: PhysPageMapPermissions, ) -> Result<(), PhysPointerError> { let phys_start = x86_64::PhysAddr::new(pages[0].as_usize() as u64); diff --git a/litebox_shim_optee/src/ptr.rs b/litebox_shim_optee/src/ptr.rs index 70f3aee77..8eca53c64 100644 --- a/litebox_shim_optee/src/ptr.rs +++ b/litebox_shim_optee/src/ptr.rs @@ -120,7 +120,7 @@ impl PhysMutPtr { core::mem::size_of::(), )); } - platform().validate(pages.into())?; + platform().validate_unowned(pages)?; Ok(Self { pages: pages.into(), offset, @@ -420,7 +420,7 @@ impl PhysMutPtr { if self.map_info.is_none() { let sub_pages = &self.pages[start..end]; unsafe { - platform().vmap(sub_pages.into(), perms).map(|info| { + platform().vmap(sub_pages, perms).map(|info| { self.map_info = Some(info); })?; } From 00fa88cde1c4bee922577449fefd263368cc5dc2 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Thu, 22 Jan 2026 17:36:23 +0000 Subject: [PATCH 38/45] revise comments --- litebox_common_optee/src/lib.rs | 13 ++++++++++++- litebox_shim_optee/src/ptr.rs | 6 +++--- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/litebox_common_optee/src/lib.rs b/litebox_common_optee/src/lib.rs index a519be8a2..eaff7a013 100644 --- a/litebox_common_optee/src/lib.rs +++ b/litebox_common_optee/src/lib.rs @@ -1129,7 +1129,9 @@ impl TryFrom for UteeEntryFunc { } } -/// Temporary reference memory parameter +/// Temporary memory reference parameter +/// +/// `optee_msg_param_tmem` from `optee_os/core/include/optee_msg.h` #[derive(Clone, Copy, Debug)] #[repr(C)] pub struct OpteeMsgParamTmem { @@ -1142,6 +1144,8 @@ pub struct OpteeMsgParamTmem { } /// Registered memory reference parameter +/// +/// `optee_msg_param_rmem` from `optee_os/core/include/optee_msg.h` #[derive(Clone, Copy)] #[repr(C)] pub struct OpteeMsgParamRmem { @@ -1154,6 +1158,11 @@ pub struct OpteeMsgParamRmem { } /// FF-A memory reference parameter +/// +/// `optee_msg_param_fmem` from `optee_os/core/include/optee_msg.h` +/// +/// Note: LiteBox doesn't currently support FF-A shared memory, so this struct is +/// provided for completeness but is 
not used. #[derive(Clone, Copy)] #[repr(C)] pub struct OpteeMsgParamFmem { @@ -1384,6 +1393,8 @@ impl OpteeMsgArg { /// exchange all arguments through that memory page. /// TODO: Since this is LVBS-specific structure to facilitate the translation between VTL call convention, /// we might want to move it to the `litebox_platform_lvbs` crate later. +/// Also, we might need to document how to interpret this structure by referencing `optee_smc.h` and +/// Arm's SMCCC. #[repr(align(4096))] #[derive(Clone, Copy)] #[repr(C)] diff --git a/litebox_shim_optee/src/ptr.rs b/litebox_shim_optee/src/ptr.rs index 8eca53c64..ad71231a9 100644 --- a/litebox_shim_optee/src/ptr.rs +++ b/litebox_shim_optee/src/ptr.rs @@ -42,7 +42,7 @@ //! This module should allow byte-level access while transparently handling page //! mapping and data access across page boundaries. This could become complicated //! when we consider multiple page sizes (e.g., 4 KiB, 2 MiB, 1 GiB). Also, -//! unaligned access is matter to be considered. +//! unaligned access is a matter to be considered. //! //! In addition, often times, this physical pointer abstraction is involved with //! a list of physical addresses (i.e., scatter-gather list). For example, in @@ -53,7 +53,7 @@ //! //! When this module needs to access data across physical page boundaries, it assumes //! that those physical pages are virtually contiguous in VTL0 or normal-world address -//! space. Otherwise, this module could end up with accessing unrelated data. This is +//! space. Otherwise, this module could end up accessing misordered data. This is //! best-effort assumption and ensuring this is the caller's responsibility (e.g., even //! if this module always requires a list of physical addresses, the caller might //! provide a wrong list by mistake or intentionally). @@ -77,7 +77,7 @@ fn align_up(len: usize, align: usize) -> usize { } /// Represent a physical pointer to an object with on-demand mapping. /// - `pages`: An array of page-aligned physical addresses. Physical addresses in this array should be +/// - `pages`: An array of page-aligned physical addresses. We expect physical addresses in this array to be /// virtually contiguous. /// - `offset`: The offset within `pages[0]` where the object starts. It should be smaller than `ALIGN`. /// - `count`: The number of objects of type `T` that can be accessed from this pointer. From 8bfbd9b5de403b31b2085981241160161fcdec45 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Fri, 23 Jan 2026 00:04:57 +0000 Subject: [PATCH 39/45] feedbacks --- litebox_common_optee/src/lib.rs | 62 ++++++----- litebox_shim_optee/src/msg_handler.rs | 144 ++++++++++---------------- 2 files changed, 92 insertions(+), 114 deletions(-) diff --git a/litebox_common_optee/src/lib.rs b/litebox_common_optee/src/lib.rs index eaff7a013..6672443cd 100644 --- a/litebox_common_optee/src/lib.rs +++ b/litebox_common_optee/src/lib.rs @@ -1118,13 +1118,13 @@ pub enum OpteeMessageCommand { } impl TryFrom for UteeEntryFunc { - type Error = OpteeSmcReturn; + type Error = OpteeSmcReturnCode; fn try_from(cmd: OpteeMessageCommand) -> Result { match cmd { OpteeMessageCommand::OpenSession => Ok(UteeEntryFunc::OpenSession), OpteeMessageCommand::CloseSession => Ok(UteeEntryFunc::CloseSession), OpteeMessageCommand::InvokeCommand => Ok(UteeEntryFunc::InvokeCommand), - _ => Err(OpteeSmcReturn::EBadCmd), + _ => Err(OpteeSmcReturnCode::EBadCmd), } } } @@ -1333,52 +1333,52 @@ pub struct OpteeMsgArg { impl OpteeMsgArg { /// Validate the message argument structure.
- pub fn validate(&self) -> Result<(), OpteeSmcReturn> { - let _ = - OpteeMessageCommand::try_from(self.cmd as u32).map_err(|_| OpteeSmcReturn::EBadCmd)?; + pub fn validate(&self) -> Result<(), OpteeSmcReturnCode> { + let _ = OpteeMessageCommand::try_from(self.cmd as u32) + .map_err(|_| OpteeSmcReturnCode::EBadCmd)?; if self.cmd == OpteeMessageCommand::OpenSession && self.num_params < 2 { - return Err(OpteeSmcReturn::EBadCmd); + return Err(OpteeSmcReturnCode::EBadCmd); } if self.num_params as usize > self.params.len() { - Err(OpteeSmcReturn::EBadCmd) + Err(OpteeSmcReturnCode::EBadCmd) } else { Ok(()) } } - pub fn get_param_tmem(&self, index: usize) -> Result { + pub fn get_param_tmem(&self, index: usize) -> Result { if index >= self.num_params as usize { - Err(OpteeSmcReturn::ENotAvail) + Err(OpteeSmcReturnCode::ENotAvail) } else { Ok(self.params[index] .get_param_tmem() - .ok_or(OpteeSmcReturn::EBadCmd)?) + .ok_or(OpteeSmcReturnCode::EBadCmd)?) } } - pub fn get_param_rmem(&self, index: usize) -> Result { + pub fn get_param_rmem(&self, index: usize) -> Result { if index >= self.num_params as usize { - Err(OpteeSmcReturn::ENotAvail) + Err(OpteeSmcReturnCode::ENotAvail) } else { Ok(self.params[index] .get_param_rmem() - .ok_or(OpteeSmcReturn::EBadCmd)?) + .ok_or(OpteeSmcReturnCode::EBadCmd)?) } } - pub fn get_param_fmem(&self, index: usize) -> Result { + pub fn get_param_fmem(&self, index: usize) -> Result { if index >= self.num_params as usize { - Err(OpteeSmcReturn::ENotAvail) + Err(OpteeSmcReturnCode::ENotAvail) } else { Ok(self.params[index] .get_param_fmem() - .ok_or(OpteeSmcReturn::EBadCmd)?) + .ok_or(OpteeSmcReturnCode::EBadCmd)?) } } - pub fn get_param_value(&self, index: usize) -> Result { + pub fn get_param_value(&self, index: usize) -> Result { if index >= self.num_params as usize { - Err(OpteeSmcReturn::ENotAvail) + Err(OpteeSmcReturnCode::ENotAvail) } else { Ok(self.params[index] .get_param_value() - .ok_or(OpteeSmcReturn::EBadCmd)?) + .ok_or(OpteeSmcReturnCode::EBadCmd)?) } } } @@ -1423,21 +1423,21 @@ impl OpteeSmcArgs { const NUM_OPTEE_SMC_ARGS: usize = 9; /// Get the function ID of an OP-TEE SMC call - pub fn func_id(&self) -> Result { + pub fn func_id(&self) -> Result { OpteeSmcFunction::try_from(self.args[0] & OpteeSmcFunction::MASK) - .map_err(|_| OpteeSmcReturn::EBadCmd) + .map_err(|_| OpteeSmcReturnCode::EBadCmd) } /// Get the physical address of `OpteeMsgArg`. The secure world is expected to map and copy /// this structure. - pub fn optee_msg_arg_phys_addr(&self) -> Result { + pub fn optee_msg_arg_phys_addr(&self) -> Result { // To avoid potential sign extension and overflow issues, OP-TEE stores the low and // high 32 bits of a 64-bit address in `args[2]` and `args[1]`, respectively. 
if self.args[1] & 0xffff_ffff_0000_0000 == 0 && self.args[2] & 0xffff_ffff_0000_0000 == 0 { let addr = (self.args[1] << 32) | self.args[2]; Ok(addr as u64) } else { - Err(OpteeSmcReturn::EBadAddr) + Err(OpteeSmcReturnCode::EBadAddr) } } } @@ -1479,10 +1479,10 @@ impl OpteeSmcFunction { #[non_exhaustive] pub enum OpteeSmcResult<'a> { Generic { - status: OpteeSmcReturn, + status: OpteeSmcReturnCode, }, ExchangeCapabilities { - status: OpteeSmcReturn, + status: OpteeSmcReturnCode, capabilities: OpteeSecureWorldCapabilities, max_notif_value: usize, data: usize, @@ -1500,10 +1500,13 @@ pub enum OpteeSmcResult<'a> { build_id: usize, }, DisableShmCache { - status: OpteeSmcReturn, + status: OpteeSmcReturnCode, shm_upper32: usize, shm_lower32: usize, }, + CallWithArg { + msg_arg: Box, + }, } impl From> for OpteeSmcArgs { @@ -1562,6 +1565,11 @@ impl From> for OpteeSmcArgs { smc.args[2] = shm_lower32; smc } + OpteeSmcResult::CallWithArg { .. } => { + panic!( + "OpteeSmcResult::CallWithArg cannot be converted to OpteeSmcArgs directly. Handle the incorporate OpteeMsgArg." + ); + } } } } @@ -1592,7 +1600,7 @@ const OPTEE_SMC_RETURN_UNKNOWN_FUNCTION: usize = 0xffff_ffff; #[non_exhaustive] #[derive(Copy, Clone, PartialEq, TryFromPrimitive)] #[repr(usize)] -pub enum OpteeSmcReturn { +pub enum OpteeSmcReturnCode { Ok = OPTEE_SMC_RETURN_OK, EThreadLimit = OPTEE_SMC_RETURN_ETHREAD_LIMIT, EBusy = OPTEE_SMC_RETURN_EBUSY, diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index 009f1c9d7..3557af7ac 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -13,15 +13,15 @@ //! physical address containing `OpteeMsgArg` structure (the address is contained in //! the SMC call arguments). This `OpteeMsgArg` structure may contain references to normal //! world physical addresses to exchange a large amount of data. Also, like the OP-TEE -//! SMC call, a certain OP-TEE message/command does not involve with any TA (e.g., register +//! SMC call, some OP-TEE messages/commands target the OP-TEE shim, not TAs (e.g., register //! shared memory). use crate::NormalWorldConstPtr; use alloc::{boxed::Box, vec::Vec}; use hashbrown::HashMap; -use litebox::mm::linux::PAGE_SIZE; +use litebox::{mm::linux::PAGE_SIZE, utils::TruncateExt}; use litebox_common_optee::{ OpteeMessageCommand, OpteeMsgArg, OpteeSecureWorldCapabilities, OpteeSmcArgs, OpteeSmcFunction, - OpteeSmcResult, OpteeSmcReturn, vmap::PhysPageAddr, + OpteeSmcResult, OpteeSmcReturnCode, vmap::PhysPageAddr, }; use once_cell::race::OnceBox; @@ -59,38 +59,26 @@ fn page_align_up(len: u64) -> u64 { len.next_multiple_of(PAGE_SIZE as u64) } -/// The result of handling an OP-TEE SMC call along with an extracted OP-TEE message argument to handle. -pub struct OpteeSmcHandled<'a> { - pub result: OpteeSmcResult<'a>, - pub msg_to_handle: Option, -} /// This function handles `OpteeSmcArgs` passed from the normal world (VTL0) via an OP-TEE SMC call. -/// It returns an `OpteeSmcResult` representing the result of the SMC call and -/// an optional `OpteeMsgArg` if the SMC call involves with an OP-TEE messagewhich should be handled by +/// It returns an `OpteeSmcResult` representing the result of the SMC call or `OpteeMsgArg` it contains +/// if the SMC call involves an OP-TEE message which should be handled by /// `handle_optee_msg_arg` or `handle_ta_request`. -/// -/// # Panics -/// -/// Panics if the normal world physical address in `smc` cannot be converted to `usize`.
pub fn handle_optee_smc_args( smc: &mut OpteeSmcArgs, -) -> Result, OpteeSmcReturn> { +) -> Result, OpteeSmcReturnCode> { let func_id = smc.func_id()?; match func_id { OpteeSmcFunction::CallWithArg | OpteeSmcFunction::CallWithRpcArg | OpteeSmcFunction::CallWithRegdArg => { let msg_arg_addr = smc.optee_msg_arg_phys_addr()?; - let msg_arg_addr = usize::try_from(msg_arg_addr).unwrap(); + let msg_arg_addr: usize = msg_arg_addr.truncate(); let mut ptr = NormalWorldConstPtr::::with_usize(msg_arg_addr) - .map_err(|_| OpteeSmcReturn::EBadAddr)?; - let msg_arg = unsafe { ptr.read_at_offset(0) }.map_err(|_| OpteeSmcReturn::EBadAddr)?; - Ok(OpteeSmcHandled { - result: OpteeSmcResult::Generic { - status: OpteeSmcReturn::Ok, - }, - msg_to_handle: Some(*msg_arg), + .map_err(|_| OpteeSmcReturnCode::EBadAddr)?; + let msg_arg = + unsafe { ptr.read_at_offset(0) }.map_err(|_| OpteeSmcReturnCode::EBadAddr)?; + Ok(OpteeSmcResult::CallWithArg { + msg_arg: Box::new(*msg_arg), }) } OpteeSmcFunction::ExchangeCapabilities => { @@ -98,80 +86,62 @@ pub fn handle_optee_smc_args( let default_cap = OpteeSecureWorldCapabilities::DYNAMIC_SHM | OpteeSecureWorldCapabilities::MEMREF_NULL | OpteeSecureWorldCapabilities::RPC_ARG; - Ok(OpteeSmcHandled { - result: OpteeSmcResult::ExchangeCapabilities { - status: OpteeSmcReturn::Ok, - capabilities: default_cap, - max_notif_value: MAX_NOTIF_VALUE, - data: NUM_RPC_PARMS, - }, - msg_to_handle: None, + Ok(OpteeSmcResult::ExchangeCapabilities { + status: OpteeSmcReturnCode::Ok, + capabilities: default_cap, + max_notif_value: MAX_NOTIF_VALUE, + data: NUM_RPC_PARMS, }) } OpteeSmcFunction::DisableShmCache => { // Currently, we do not support this feature. - Ok(OpteeSmcHandled { - result: OpteeSmcResult::DisableShmCache { - status: OpteeSmcReturn::ENotAvail, - shm_upper32: 0, - shm_lower32: 0, - }, - msg_to_handle: None, + Ok(OpteeSmcResult::DisableShmCache { + status: OpteeSmcReturnCode::ENotAvail, + shm_upper32: 0, + shm_lower32: 0, }) } - OpteeSmcFunction::GetOsUuid => Ok(OpteeSmcHandled { - result: OpteeSmcResult::Uuid { - data: &[ - OPTEE_MSG_OS_OPTEE_UUID_0, - OPTEE_MSG_OS_OPTEE_UUID_1, - OPTEE_MSG_OS_OPTEE_UUID_2, - OPTEE_MSG_OS_OPTEE_UUID_3, - ], - }, - msg_to_handle: None, + OpteeSmcFunction::GetOsUuid => Ok(OpteeSmcResult::Uuid { + data: &[ + OPTEE_MSG_OS_OPTEE_UUID_0, + OPTEE_MSG_OS_OPTEE_UUID_1, + OPTEE_MSG_OS_OPTEE_UUID_2, + OPTEE_MSG_OS_OPTEE_UUID_3, + ], }), - OpteeSmcFunction::CallsUid => Ok(OpteeSmcHandled { - result: OpteeSmcResult::Uuid { - data: &[ - OPTEE_MSG_UID_0, - OPTEE_MSG_UID_1, - OPTEE_MSG_UID_2, - OPTEE_MSG_UID_3, - ], - }, - msg_to_handle: None, + OpteeSmcFunction::CallsUid => Ok(OpteeSmcResult::Uuid { + data: &[ + OPTEE_MSG_UID_0, + OPTEE_MSG_UID_1, + OPTEE_MSG_UID_2, + OPTEE_MSG_UID_3, + ], }), - OpteeSmcFunction::GetOsRevision => Ok(OpteeSmcHandled { - result: OpteeSmcResult::OsRevision { - major: OPTEE_MSG_REVISION_MAJOR, - minor: OPTEE_MSG_REVISION_MINOR, - build_id: OPTEE_MSG_BUILD_ID, - }, - msg_to_handle: None, + OpteeSmcFunction::GetOsRevision => Ok(OpteeSmcResult::OsRevision { + major: OPTEE_MSG_REVISION_MAJOR, + minor: OPTEE_MSG_REVISION_MINOR, + build_id: OPTEE_MSG_BUILD_ID, }), - OpteeSmcFunction::CallsRevision => Ok(OpteeSmcHandled { - result: OpteeSmcResult::Revision { - major: OPTEE_MSG_REVISION_MAJOR, - minor: OPTEE_MSG_REVISION_MINOR, - }, - msg_to_handle: None, + OpteeSmcFunction::CallsRevision => Ok(OpteeSmcResult::Revision { + major: OPTEE_MSG_REVISION_MAJOR, + minor: OPTEE_MSG_REVISION_MINOR, }), - _ => 
Err(OpteeSmcReturn::UnknownFunction), + _ => Err(OpteeSmcReturnCode::UnknownFunction), } } /// This function handles an OP-TEE message contained in `OpteeMsgArg`. /// Currently, it only handles shared memory registration and unregistration. /// If an OP-TEE message involves a TA request, it simply returns -/// `Err(OpteeSmcReturn::Ok)` while expecting that the caller will handle +/// `Err(OpteeSmcReturnCode::Ok)` while expecting that the caller will handle /// the message with `handle_ta_request`. -pub fn handle_optee_msg_arg(msg_arg: &OpteeMsgArg) -> Result<(), OpteeSmcReturn> { +pub fn handle_optee_msg_arg(msg_arg: &OpteeMsgArg) -> Result<(), OpteeSmcReturnCode> { msg_arg.validate()?; match msg_arg.cmd { OpteeMessageCommand::RegisterShm => { let tmem = msg_arg.get_param_tmem(0)?; if tmem.buf_ptr == 0 || tmem.size == 0 || tmem.shm_ref == 0 { - return Err(OpteeSmcReturn::EBadAddr); + return Err(OpteeSmcReturnCode::EBadAddr); } // `tmem.buf_ptr` encodes two different pieces of information: // - The physical page address of the first `ShmRefPagesData` @@ -189,15 +159,15 @@ pub fn handle_optee_msg_arg(msg_arg: &OpteeMsgArg) -> Result<(), OpteeSmcReturn> OpteeMessageCommand::UnregisterShm => { let tmem = msg_arg.get_param_tmem(0)?; if tmem.shm_ref == 0 { - return Err(OpteeSmcReturn::EBadAddr); + return Err(OpteeSmcReturnCode::EBadAddr); } shm_ref_map() .remove(tmem.shm_ref) - .ok_or(OpteeSmcReturn::EBadAddr)?; + .ok_or(OpteeSmcReturnCode::EBadAddr)?; } OpteeMessageCommand::OpenSession | OpteeMessageCommand::InvokeCommand - | OpteeMessageCommand::CloseSession => return Err(OpteeSmcReturn::Ok), + | OpteeMessageCommand::CloseSession => return Err(OpteeSmcReturnCode::Ok), _ => { todo!("Unimplemented OpteeMessageCommand: {:?}", msg_arg.cmd); } @@ -206,7 +176,7 @@ pub fn handle_optee_msg_arg(msg_arg: &OpteeMsgArg) -> Result<(), OpteeSmcReturn> } /// This function handles a TA request contained in `OpteeMsgArg` -pub fn handle_ta_request(_msg_arg: &OpteeMsgArg) -> Result { +pub fn handle_ta_request(_msg_arg: &OpteeMsgArg) -> Result { todo!() } @@ -255,10 +225,10 @@ impl ShmRefMap { } } - pub fn insert(&self, shm_ref: u64, info: ShmRefInfo) -> Result<(), OpteeSmcReturn> { + pub fn insert(&self, shm_ref: u64, info: ShmRefInfo) -> Result<(), OpteeSmcReturnCode> { let mut guard = self.inner.lock(); if guard.contains_key(&shm_ref) { - Err(OpteeSmcReturn::ENotAvail) + Err(OpteeSmcReturnCode::ENotAvail) } else { let _ = guard.insert(shm_ref, info); Ok(()) } } @@ -288,25 +258,25 @@ impl ShmRefMap { page_offset: u64, aligned_size: u64, shm_ref: u64, - ) -> Result<(), OpteeSmcReturn> { + ) -> Result<(), OpteeSmcReturnCode> { if page_offset >= ALIGN as u64 || aligned_size == 0 { - return Err(OpteeSmcReturn::EBadAddr); + return Err(OpteeSmcReturnCode::EBadAddr); } let num_pages = usize::try_from(aligned_size).unwrap() / ALIGN; let mut pages = Vec::with_capacity(num_pages); let mut cur_addr = usize::try_from(shm_ref_pages_data_phys_addr).unwrap(); loop { let mut cur_ptr = NormalWorldConstPtr::::with_usize(cur_addr) - .map_err(|_| OpteeSmcReturn::EBadAddr)?; + .map_err(|_| OpteeSmcReturnCode::EBadAddr)?; let pages_data = - unsafe { cur_ptr.read_at_offset(0) }.map_err(|_| OpteeSmcReturn::EBadAddr)?; + unsafe { cur_ptr.read_at_offset(0) }.map_err(|_| OpteeSmcReturnCode::EBadAddr)?; for page in &pages_data.pages_list { if *page == 0 || pages.len() == num_pages { break; } else { pages.push( PhysPageAddr::new(usize::try_from(*page).unwrap()) - .ok_or(OpteeSmcReturn::EBadAddr)?, + .ok_or(OpteeSmcReturnCode::EBadAddr)?, ); } }
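Patch 39 replaces the `OpteeSmcHandled` wrapper with the `OpteeSmcResult::CallWithArg` variant, and the `From<OpteeSmcResult>` conversion deliberately panics on that variant. A minimal sketch of a dispatcher that respects this contract is below; it assumes `msg_handler` is a public module, and `dispatch` plus the `ENotAvail` fallback for the still-`todo!()` `handle_ta_request` are illustrative only, not code from this series.

use litebox_common_optee::{OpteeSmcArgs, OpteeSmcResult, OpteeSmcReturnCode};
use litebox_shim_optee::msg_handler::{handle_optee_msg_arg, handle_optee_smc_args};

/// Hypothetical entry point: turn one inbound SMC into a register-level response.
fn dispatch(smc: &mut OpteeSmcArgs) -> OpteeSmcArgs {
    // Encode a bare status code into the nine-register layout.
    let status_only =
        |status: OpteeSmcReturnCode| OpteeSmcArgs::from(OpteeSmcResult::Generic { status });
    match handle_optee_smc_args(smc) {
        // Peel off `CallWithArg` first; the `From` impl panics on it.
        Ok(OpteeSmcResult::CallWithArg { msg_arg }) => match handle_optee_msg_arg(&msg_arg) {
            // Fully handled inside the shim (shared-memory (un)registration).
            Ok(()) => status_only(OpteeSmcReturnCode::Ok),
            // `Err(Ok)` is the sentinel for "forward to a TA"; a real dispatcher
            // would call `handle_ta_request(&msg_arg)` here once it is implemented.
            Err(OpteeSmcReturnCode::Ok) => status_only(OpteeSmcReturnCode::ENotAvail),
            Err(status) => status_only(status),
        },
        // Register-only results (UUID, revision, capabilities, ...) convert directly.
        Ok(result) => OpteeSmcArgs::from(result),
        Err(status) => status_only(status),
    }
}

With this split, the panic in the `From` impl only guards against misuse of the conversion rather than a reachable state in the dispatch path.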
From 1ae12d97b4f5902eac7ba8e9d43a3a18713166e7 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Fri, 23 Jan 2026 00:37:19 +0000 Subject: [PATCH 40/45] ratchet --- dev_tests/src/ratchet.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev_tests/src/ratchet.rs b/dev_tests/src/ratchet.rs index c6414893e..49c76bc60 100644 --- a/dev_tests/src/ratchet.rs +++ b/dev_tests/src/ratchet.rs @@ -39,7 +39,7 @@ fn ratchet_globals() -> Result<()> { ("litebox_runner_lvbs/", 3), ("litebox_runner_snp/", 1), ("litebox_shim_linux/", 1), - ("litebox_shim_optee/", 1), + ("litebox_shim_optee/", 2), ], |file| { Ok(file From e46f5dca344554097e9d6514695100a9f000459e Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Fri, 23 Jan 2026 00:48:51 +0000 Subject: [PATCH 41/45] clippy --- litebox_shim_optee/src/lib.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/litebox_shim_optee/src/lib.rs b/litebox_shim_optee/src/lib.rs index e0e4decba..b846a3a1c 100644 --- a/litebox_shim_optee/src/lib.rs +++ b/litebox_shim_optee/src/lib.rs @@ -1214,6 +1214,9 @@ impl Default for SessionIdPool { } } +pub type NormalWorldConstPtr = crate::ptr::PhysConstPtr; +pub type NormalWorldMutPtr = crate::ptr::PhysMutPtr; + #[cfg(test)] mod test_utils { use super::*; @@ -1240,6 +1243,3 @@ mod test_utils { } } } - -pub type NormalWorldConstPtr = crate::ptr::PhysConstPtr; -pub type NormalWorldMutPtr = crate::ptr::PhysMutPtr; From 25c0151ca2be3cb76be5cbf0b84c3a2a93575bda Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Fri, 23 Jan 2026 01:02:16 +0000 Subject: [PATCH 42/45] feature gate --- litebox_platform_linux_userland/src/lib.rs | 3 +++ litebox_platform_lvbs/src/lib.rs | 12 ++++++++---- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/litebox_platform_linux_userland/src/lib.rs b/litebox_platform_linux_userland/src/lib.rs index 9171016a8..c6bd4f0cb 100644 --- a/litebox_platform_linux_userland/src/lib.rs +++ b/litebox_platform_linux_userland/src/lib.rs @@ -19,6 +19,8 @@ use litebox::platform::{ImmediatelyWokenUp, RawConstPointer as _}; use litebox::shim::ContinueOperation; use litebox::utils::{ReinterpretSignedExt, ReinterpretUnsignedExt as _, TruncateExt}; use litebox_common_linux::{MRemapFlags, MapFlags, ProtFlags, PunchthroughSyscall}; + +#[cfg(feature = "optee_syscall")] use litebox_common_optee::vmap::VmapProvider; mod syscall_intercept; @@ -2193,6 +2195,7 @@ impl litebox::platform::CrngProvider for LinuxUserland { /// In general, userland platforms do not support `vmap` and `vunmap` (which are kernel functions). /// We might need to emulate these functions' behaviors using virtual addresses for development or /// testing, or use a kernel module to provide this functionality (if needed). 
+#[cfg(feature = "optee_syscall")] impl VmapProvider for LinuxUserland {} #[cfg(test)] diff --git a/litebox_platform_lvbs/src/lib.rs b/litebox_platform_lvbs/src/lib.rs index bc4f6409e..bbb91ea92 100644 --- a/litebox_platform_lvbs/src/lib.rs +++ b/litebox_platform_lvbs/src/lib.rs @@ -25,15 +25,17 @@ use litebox::platform::{ }; use litebox::{mm::linux::PageRange, platform::page_mgmt::FixedAddressBehavior}; use litebox_common_linux::{PunchthroughSyscall, errno::Errno}; -use litebox_common_optee::vmap::{ - PhysPageAddr, PhysPageAddrArray, PhysPageMapInfo, PhysPageMapPermissions, PhysPointerError, - VmapProvider, -}; use x86_64::structures::paging::{ PageOffset, PageSize, PageTableFlags, PhysFrame, Size4KiB, frame::PhysFrameRange, mapper::MapToError, }; +#[cfg(feature = "optee_syscall")] +use litebox_common_optee::vmap::{ + PhysPageAddr, PhysPageAddrArray, PhysPageMapInfo, PhysPageMapPermissions, PhysPointerError, + VmapProvider, +}; + extern crate alloc; pub mod arch; @@ -769,6 +771,7 @@ impl litebox::platform::SystemInfoProvider for LinuxKernel< /// Note: This is a temporary check to let `VmapProvider` work with this platform /// which does not yet support virtually contiguous mapping of non-contiguous physical pages /// (for now, it maps physical pages with a fixed offset). +#[cfg(feature = "optee_syscall")] fn check_contiguity( addrs: &[PhysPageAddr], ) -> Result<(), PhysPointerError> { @@ -782,6 +785,7 @@ fn check_contiguity( Ok(()) } +#[cfg(feature = "optee_syscall")] impl VmapProvider for LinuxKernel { unsafe fn vmap( &self, From ae7a2e149a08e4a977d3aa492b934e8e28e5187a Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Fri, 23 Jan 2026 01:20:55 +0000 Subject: [PATCH 43/45] move vmap to litebox_common_linux --- litebox_common_linux/src/lib.rs | 1 + .../src/vmap.rs | 0 litebox_common_optee/src/lib.rs | 1 - litebox_platform_linux_userland/src/lib.rs | 8 +++----- litebox_platform_lvbs/src/lib.rs | 15 ++++++++------- litebox_shim_optee/src/msg_handler.rs | 3 ++- litebox_shim_optee/src/ptr.rs | 2 +- 7 files changed, 15 insertions(+), 15 deletions(-) rename {litebox_common_optee => litebox_common_linux}/src/vmap.rs (100%) diff --git a/litebox_common_linux/src/lib.rs b/litebox_common_linux/src/lib.rs index 42cd6f271..172edacf8 100644 --- a/litebox_common_linux/src/lib.rs +++ b/litebox_common_linux/src/lib.rs @@ -21,6 +21,7 @@ pub mod errno; pub mod loader; pub mod mm; pub mod signal; +pub mod vmap; extern crate alloc; diff --git a/litebox_common_optee/src/vmap.rs b/litebox_common_linux/src/vmap.rs similarity index 100% rename from litebox_common_optee/src/vmap.rs rename to litebox_common_linux/src/vmap.rs diff --git a/litebox_common_optee/src/lib.rs b/litebox_common_optee/src/lib.rs index 6672443cd..21aa88ac8 100644 --- a/litebox_common_optee/src/lib.rs +++ b/litebox_common_optee/src/lib.rs @@ -17,7 +17,6 @@ use num_enum::TryFromPrimitive; use syscall_nr::{LdelfSyscallNr, TeeSyscallNr}; pub mod syscall_nr; -pub mod vmap; // Based on `optee_os/lib/libutee/include/utee_syscalls.h` #[non_exhaustive] diff --git a/litebox_platform_linux_userland/src/lib.rs b/litebox_platform_linux_userland/src/lib.rs index c6bd4f0cb..8af05c43d 100644 --- a/litebox_platform_linux_userland/src/lib.rs +++ b/litebox_platform_linux_userland/src/lib.rs @@ -18,10 +18,9 @@ use litebox::platform::page_mgmt::{FixedAddressBehavior, MemoryRegionPermissions use litebox::platform::{ImmediatelyWokenUp, RawConstPointer as _}; use litebox::shim::ContinueOperation; use litebox::utils::{ReinterpretSignedExt, ReinterpretUnsignedExt as _, 
TruncateExt}; -use litebox_common_linux::{MRemapFlags, MapFlags, ProtFlags, PunchthroughSyscall}; - -#[cfg(feature = "optee_syscall")] -use litebox_common_optee::vmap::VmapProvider; +use litebox_common_linux::{ + MRemapFlags, MapFlags, ProtFlags, PunchthroughSyscall, vmap::VmapProvider, +}; mod syscall_intercept; @@ -2195,7 +2194,6 @@ impl litebox::platform::CrngProvider for LinuxUserland { /// In general, userland platforms do not support `vmap` and `vunmap` (which are kernel functions). /// We might need to emulate these functions' behaviors using virtual addresses for development or /// testing, or use a kernel module to provide this functionality (if needed). -#[cfg(feature = "optee_syscall")] impl VmapProvider for LinuxUserland {} #[cfg(test)] diff --git a/litebox_platform_lvbs/src/lib.rs b/litebox_platform_lvbs/src/lib.rs index bbb91ea92..8fc047af7 100644 --- a/litebox_platform_lvbs/src/lib.rs +++ b/litebox_platform_lvbs/src/lib.rs @@ -24,18 +24,19 @@ use litebox::platform::{ PunchthroughProvider, PunchthroughToken, RawMutex as _, RawPointerProvider, }; use litebox::{mm::linux::PageRange, platform::page_mgmt::FixedAddressBehavior}; -use litebox_common_linux::{PunchthroughSyscall, errno::Errno}; +use litebox_common_linux::{ + PunchthroughSyscall, + errno::Errno, + vmap::{ + PhysPageAddr, PhysPageAddrArray, PhysPageMapInfo, PhysPageMapPermissions, PhysPointerError, + VmapProvider, + }, +}; use x86_64::structures::paging::{ PageOffset, PageSize, PageTableFlags, PhysFrame, Size4KiB, frame::PhysFrameRange, mapper::MapToError, }; -#[cfg(feature = "optee_syscall")] -use litebox_common_optee::vmap::{ - PhysPageAddr, PhysPageAddrArray, PhysPageMapInfo, PhysPageMapPermissions, PhysPointerError, - VmapProvider, -}; - extern crate alloc; pub mod arch; diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index 3557af7ac..5a5f0a7ad 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -19,9 +19,10 @@ use crate::NormalWorldConstPtr; use alloc::{boxed::Box, vec::Vec}; use hashbrown::HashMap; use litebox::{mm::linux::PAGE_SIZE, utils::TruncateExt}; +use litebox_common_linux::vmap::PhysPageAddr; use litebox_common_optee::{ OpteeMessageCommand, OpteeMsgArg, OpteeSecureWorldCapabilities, OpteeSmcArgs, OpteeSmcFunction, - OpteeSmcResult, OpteeSmcReturnCode, vmap::PhysPageAddr, + OpteeSmcResult, OpteeSmcReturnCode, }; use once_cell::race::OnceBox; diff --git a/litebox_shim_optee/src/ptr.rs b/litebox_shim_optee/src/ptr.rs index ad71231a9..5bce8551a 100644 --- a/litebox_shim_optee/src/ptr.rs +++ b/litebox_shim_optee/src/ptr.rs @@ -61,7 +61,7 @@ // TODO: Since the below `PhysMutPtr` and `PhysConstPtr` are not OP-TEE specific, // we can move them to a different crate (e.g., `litebox`) if needed. 
-use litebox_common_optee::vmap::{ +use litebox_common_linux::vmap::{ PhysPageAddr, PhysPageMapInfo, PhysPageMapPermissions, PhysPointerError, VmapProvider, }; use litebox_platform_multiplex::platform; From ce31693ff0c5483b5cc4cf77694b715ca847527f Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Fri, 23 Jan 2026 15:00:35 +0000 Subject: [PATCH 44/45] rename --- litebox_common_linux/src/vmap.rs | 4 ++-- litebox_common_optee/src/lib.rs | 12 ++++++------ litebox_platform_linux_userland/src/lib.rs | 6 +++--- litebox_platform_lvbs/src/lib.rs | 6 +++--- litebox_shim_optee/src/msg_handler.rs | 20 ++++++++++---------- litebox_shim_optee/src/ptr.rs | 2 +- 6 files changed, 25 insertions(+), 25 deletions(-) diff --git a/litebox_common_linux/src/vmap.rs b/litebox_common_linux/src/vmap.rs index 1aa61d812..e2c6e3d65 100644 --- a/litebox_common_linux/src/vmap.rs +++ b/litebox_common_linux/src/vmap.rs @@ -11,7 +11,7 @@ use thiserror::Error; /// This provider exists to service `litebox_shim_optee::ptr::PhysMutPtr` and /// `litebox_shim_optee::ptr::PhysConstPtr`. It can benefit other modules which need /// Linux kernel's `vmap()` and `vunmap()` functionalities (e.g., HVCI/HEKI, drivers). -pub trait VmapProvider { +pub trait VmapManager { /// Map the given `PhysPageAddrArray` into virtually contiguous addresses with the given /// [`PhysPageMapPermissions`] while returning [`PhysPageMapInfo`]. /// @@ -140,7 +140,7 @@ impl From for MemoryRegionPermissions { } } -/// Possible errors for physical pointer access with `VmapProvider` +/// Possible errors for physical pointer access with `VmapManager` #[non_exhaustive] #[derive(Error, Debug)] pub enum PhysPointerError { diff --git a/litebox_common_optee/src/lib.rs b/litebox_common_optee/src/lib.rs index 21aa88ac8..0fadedd0c 100644 --- a/litebox_common_optee/src/lib.rs +++ b/litebox_common_optee/src/lib.rs @@ -1187,7 +1187,7 @@ pub struct OpteeMsgParamValue { pub c: u64, } -/// Parameter used together with `OpteeMsgArg` +/// Parameter used together with `OpteeMsgArgs` #[derive(Clone, Copy)] #[repr(C)] pub union OpteeMsgParamUnion { @@ -1305,7 +1305,7 @@ impl OpteeMsgParam { /// exchange messages. #[derive(Clone, Copy)] #[repr(C)] -pub struct OpteeMsgArg { +pub struct OpteeMsgArgs { /// OP-TEE message command. This is a superset of `UteeEntryFunc`. pub cmd: OpteeMessageCommand, /// TA function ID which is used if `cmd == InvokeCommand`. Note that the meaning of `cmd` and `func` @@ -1330,7 +1330,7 @@ pub struct OpteeMsgArg { pub params: [OpteeMsgParam; TEE_NUM_PARAMS + 2], } -impl OpteeMsgArg { +impl OpteeMsgArgs { /// Validate the message argument structure. pub fn validate(&self) -> Result<(), OpteeSmcReturnCode> { let _ = OpteeMessageCommand::try_from(self.cmd as u32) @@ -1427,7 +1427,7 @@ impl OpteeSmcArgs { .map_err(|_| OpteeSmcReturnCode::EBadCmd) } - /// Get the physical address of `OpteeMsgArg`. The secure world is expected to map and copy + /// Get the physical address of `OpteeMsgArgs`. The secure world is expected to map and copy /// this structure. pub fn optee_msg_arg_phys_addr(&self) -> Result { // To avoid potential sign extension and overflow issues, OP-TEE stores the low and @@ -1504,7 +1504,7 @@ pub enum OpteeSmcResult<'a> { shm_lower32: usize, }, CallWithArg { - msg_arg: Box, + msg_arg: Box, }, } @@ -1566,7 +1566,7 @@ impl From> for OpteeSmcArgs { } OpteeSmcResult::CallWithArg { .. } => { panic!( - "OpteeSmcResult::CallWithArg cannot be converted to OpteeSmcArgs directly. Handle the incorporate OpteeMsgArg." 
+ "OpteeSmcResult::CallWithArg cannot be converted to OpteeSmcArgs directly. Handle the incorporate OpteeMsgArgs." ); } } diff --git a/litebox_platform_linux_userland/src/lib.rs b/litebox_platform_linux_userland/src/lib.rs index 8af05c43d..cd1692b06 100644 --- a/litebox_platform_linux_userland/src/lib.rs +++ b/litebox_platform_linux_userland/src/lib.rs @@ -19,7 +19,7 @@ use litebox::platform::{ImmediatelyWokenUp, RawConstPointer as _}; use litebox::shim::ContinueOperation; use litebox::utils::{ReinterpretSignedExt, ReinterpretUnsignedExt as _, TruncateExt}; use litebox_common_linux::{ - MRemapFlags, MapFlags, ProtFlags, PunchthroughSyscall, vmap::VmapProvider, + MRemapFlags, MapFlags, ProtFlags, PunchthroughSyscall, vmap::VmapManager, }; mod syscall_intercept; @@ -2189,12 +2189,12 @@ impl litebox::platform::CrngProvider for LinuxUserland { } } -/// Dummy `VmapProvider`. +/// Dummy `VmapManager`. /// /// In general, userland platforms do not support `vmap` and `vunmap` (which are kernel functions). /// We might need to emulate these functions' behaviors using virtual addresses for development or /// testing, or use a kernel module to provide this functionality (if needed). -impl VmapProvider for LinuxUserland {} +impl VmapManager for LinuxUserland {} #[cfg(test)] mod tests { diff --git a/litebox_platform_lvbs/src/lib.rs b/litebox_platform_lvbs/src/lib.rs index 8fc047af7..49c69bbc7 100644 --- a/litebox_platform_lvbs/src/lib.rs +++ b/litebox_platform_lvbs/src/lib.rs @@ -29,7 +29,7 @@ use litebox_common_linux::{ errno::Errno, vmap::{ PhysPageAddr, PhysPageAddrArray, PhysPageMapInfo, PhysPageMapPermissions, PhysPointerError, - VmapProvider, + VmapManager, }, }; use x86_64::structures::paging::{ @@ -769,7 +769,7 @@ impl litebox::platform::SystemInfoProvider for LinuxKernel< /// Checks whether the given physical addresses are contiguous with respect to ALIGN. /// -/// Note: This is a temporary check to let `VmapProvider` work with this platform +/// Note: This is a temporary check to let `VmapManager` work with this platform /// which does not yet support virtually contiguous mapping of non-contiguous physical pages /// (for now, it maps physical pages with a fixed offset). #[cfg(feature = "optee_syscall")] @@ -787,7 +787,7 @@ fn check_contiguity( } #[cfg(feature = "optee_syscall")] -impl VmapProvider for LinuxKernel { +impl VmapManager for LinuxKernel { unsafe fn vmap( &self, pages: &PhysPageAddrArray, diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index 5a5f0a7ad..158b16c3c 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -10,8 +10,8 @@ //! up to nine register values. By checking the SMC function ID, the shim determines whether //! it is for passing an OP-TEE message or a pure SMC function call (e.g., get OP-TEE OS //! version). If it is for passing an OP-TEE message/command, the shim accesses a normal world -//! physical address containing `OpteeMsgArg` structure (the address is contained in -//! the SMC call arguments). This `OpteeMsgArg` structure may contain references to normal +//! physical address containing `OpteeMsgArgs` structure (the address is contained in +//! the SMC call arguments). This `OpteeMsgArgs` structure may contain references to normal //! world physical addresses to exchange a large amount of data. Also, like the OP-TEE //! SMC call, some OP-TEE messages/commands target OP-TEE shim not TAs (e.g., register //! shared memory). 
@@ -21,8 +21,8 @@ use hashbrown::HashMap; use litebox::{mm::linux::PAGE_SIZE, utils::TruncateExt}; use litebox_common_linux::vmap::PhysPageAddr; use litebox_common_optee::{ - OpteeMessageCommand, OpteeMsgArg, OpteeSecureWorldCapabilities, OpteeSmcArgs, OpteeSmcFunction, - OpteeSmcResult, OpteeSmcReturnCode, + OpteeMessageCommand, OpteeMsgArgs, OpteeSecureWorldCapabilities, OpteeSmcArgs, + OpteeSmcFunction, OpteeSmcResult, OpteeSmcReturnCode, }; use once_cell::race::OnceBox; @@ -61,7 +61,7 @@ fn page_align_up(len: u64) -> u64 { } /// This function handles `OpteeSmcArgs` passed from the normal world (VTL0) via an OP-TEE SMC call. -/// It returns an `OpteeSmcResult` representing the result of the SMC call or `OpteeMsgArg` it contains +/// It returns an `OpteeSmcResult` representing the result of the SMC call or `OpteeMsgArgs` it contains /// if the SMC call involves an OP-TEE message which should be handled by /// `handle_optee_msg_arg` or `handle_ta_request`. pub fn handle_optee_smc_args( @@ -74,7 +74,7 @@ pub fn handle_optee_smc_args( | OpteeSmcFunction::CallWithRegdArg => { let msg_arg_addr = smc.optee_msg_arg_phys_addr()?; let msg_arg_addr: usize = msg_arg_addr.truncate(); - let mut ptr = NormalWorldConstPtr::::with_usize(msg_arg_addr) + let mut ptr = NormalWorldConstPtr::::with_usize(msg_arg_addr) .map_err(|_| OpteeSmcReturnCode::EBadAddr)?; let msg_arg = unsafe { ptr.read_at_offset(0) }.map_err(|_| OpteeSmcReturnCode::EBadAddr)?; @@ -131,12 +131,12 @@ pub fn handle_optee_smc_args( } } -/// This function handles an OP-TEE message contained in `OpteeMsgArg`. +/// This function handles an OP-TEE message contained in `OpteeMsgArgs`. /// Currently, it only handles shared memory registration and unregistration. /// If an OP-TEE message involves a TA request, it simply returns /// `Err(OpteeSmcReturnCode::Ok)` while expecting that the caller will handle /// the message with `handle_ta_request`. -pub fn handle_optee_msg_arg(msg_arg: &OpteeMsgArg) -> Result<(), OpteeSmcReturnCode> { +pub fn handle_optee_msg_arg(msg_arg: &OpteeMsgArgs) -> Result<(), OpteeSmcReturnCode> { msg_arg.validate()?; match msg_arg.cmd { OpteeMessageCommand::RegisterShm => { @@ -176,8 +176,8 @@ pub fn handle_optee_msg_arg(msg_arg: &OpteeMsgArgs) -> Result<(), OpteeSmcReturnC Ok(()) } -/// This function handles a TA request contained in `OpteeMsgArg` -pub fn handle_ta_request(_msg_arg: &OpteeMsgArg) -> Result { +/// This function handles a TA request contained in `OpteeMsgArgs` +pub fn handle_ta_request(_msg_arg: &OpteeMsgArgs) -> Result { todo!() } diff --git a/litebox_shim_optee/src/ptr.rs b/litebox_shim_optee/src/ptr.rs index 5bce8551a..a19973369 100644 --- a/litebox_shim_optee/src/ptr.rs +++ b/litebox_shim_optee/src/ptr.rs @@ -62,7 +62,7 @@ // we can move them to a different crate (e.g., `litebox`) if needed.
use litebox_common_linux::vmap::{ - PhysPageAddr, PhysPageMapInfo, PhysPageMapPermissions, PhysPointerError, VmapProvider, + PhysPageAddr, PhysPageMapInfo, PhysPageMapPermissions, PhysPointerError, VmapManager, }; use litebox_platform_multiplex::platform; From 4d0df5ee7e6d1d05e019e32e75e83a6406756021 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Sat, 24 Jan 2026 00:28:08 +0000 Subject: [PATCH 45/45] typo --- litebox_common_optee/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/litebox_common_optee/src/lib.rs b/litebox_common_optee/src/lib.rs index 0fadedd0c..363851d4c 100644 --- a/litebox_common_optee/src/lib.rs +++ b/litebox_common_optee/src/lib.rs @@ -1566,7 +1566,7 @@ impl From> for OpteeSmcArgs { } OpteeSmcResult::CallWithArg { .. } => { panic!( - "OpteeSmcResult::CallWithArg cannot be converted to OpteeSmcArgs directly. Handle the incorporate OpteeMsgArgs." + "OpteeSmcResult::CallWithArg cannot be converted to OpteeSmcArgs directly. Handle the incorporated OpteeMsgArgs." ); } }
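With the series applied, the trait that began as `VmapProvider` ends up as `litebox_common_linux::vmap::VmapManager`, taking `&PhysPageAddrArray` slices and plain `PhysPageMapInfo` values. The sketch below shows the validate/protect/map/access/unmap sequence a caller such as `PhysConstPtr` performs on demand; `peek_byte` is hypothetical, and the `const ALIGN` trait parameter is written out as the trait docs describe it (this extract elides generic parameters), so treat the exact bounds as an assumption.

use litebox_common_linux::vmap::{
    PhysPageAddrArray, PhysPageMapPermissions, PhysPointerError, VmapManager,
};

/// Hypothetical helper: read one byte out of normal-world pages.
///
/// # Safety
/// `pages` must be valid normal-world page addresses and `offset` must fall
/// inside the mapped region.
unsafe fn peek_byte<const ALIGN: usize, M: VmapManager<ALIGN>>(
    platform: &M,
    pages: &PhysPageAddrArray,
    offset: usize,
) -> Result<u8, PhysPointerError> {
    // Pages owned by LiteBox itself (VTL1 memory on LVBS) are rejected up front.
    platform.validate_unowned(pages)?;
    unsafe {
        // Ask the hypervisor (if any) to keep VTL0 from writing while we read.
        platform.protect(pages, PhysPageMapPermissions::READ)?;
        let info = platform.vmap(pages, PhysPageMapPermissions::READ)?;
        debug_assert!(offset < info.size);
        let byte = info.base.add(offset).read();
        platform.vunmap(info)?;
        // Empty permissions mean "no protection": hand the pages back to VTL0.
        platform.protect(pages, PhysPageMapPermissions::empty())?;
        Ok(byte)
    }
}

A production caller would also undo the protection on the error paths; `PhysMutPtr` additionally caches the mapping in its `map_info` field instead of unmapping after every access.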