From 5953e7fb78ad36850e6c4a3371b4bf955869e5a9 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Wed, 10 Dec 2025 23:00:53 +0000 Subject: [PATCH 01/52] msg_proto --- litebox_common_optee/src/lib.rs | 66 +++++++++++++++------------ litebox_shim_optee/src/lib.rs | 2 + litebox_shim_optee/src/msg_proto.rs | 71 +++++++++++++++++++++++++++++ 3 files changed, 111 insertions(+), 28 deletions(-) create mode 100644 litebox_shim_optee/src/msg_proto.rs diff --git a/litebox_common_optee/src/lib.rs b/litebox_common_optee/src/lib.rs index b8a4c8f8f..4bea9f397 100644 --- a/litebox_common_optee/src/lib.rs +++ b/litebox_common_optee/src/lib.rs @@ -1334,7 +1334,7 @@ impl OpteeSmcFunction { /// OP-TEE SMC call uses CPU registers to pass input and output values. /// Thus, this structure is technically equivalent to `OpteeSmcArgs`, but we separate them for clarity. #[repr(align(4096))] -#[derive(Clone, Copy)] +#[derive(Clone, Copy, Default)] #[repr(C)] pub struct OpteeSmcResult { args: [usize; Self::NUM_OPTEE_SMC_ARGS], @@ -1343,53 +1343,63 @@ pub struct OpteeSmcResult { impl OpteeSmcResult { const NUM_OPTEE_SMC_ARGS: usize = 9; - pub fn return_status(&mut self, status: OpteeSmcReturn) { - self.args[0] = status as usize; + pub fn new(status: OpteeSmcReturn) -> Self { + let mut res = Self::default(); + res.args[0] = status as usize; + res } - pub fn exchange_capabilities( - &mut self, + pub fn new_exchange_capabilities( status: OpteeSmcReturn, capabilities: OpteeSecureWorldCapabilities, max_notif_value: usize, data: usize, - ) { - self.return_status(status); - self.args[1] = capabilities.bits(); - self.args[2] = max_notif_value; - self.args[3] = data; + ) -> Self { + let mut res = Self::default(); + res.args[0] = status as usize; + res.args[1] = capabilities.bits(); + res.args[2] = max_notif_value; + res.args[3] = data; + res } /// # Panics /// panics if any element of `data` cannot be converted to `usize`. - pub fn uuid(&mut self, data: [u32; 4]) { + pub fn new_uuid(data: &[u32; 4]) -> Self { + let mut res = Self::default(); // OP-TEE doesn't use the high 32 bit of each argument to avoid sign extension and overflow issues. 
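// (Illustration, not part of the patch: `u32` always fits in `usize` on
// 32- and 64-bit targets, so the `try_from` conversions below can only
// panic on targets where `usize` is narrower than 32 bits.)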
- self.args[0] = usize::try_from(data[0]).unwrap(); - self.args[1] = usize::try_from(data[1]).unwrap(); - self.args[2] = usize::try_from(data[2]).unwrap(); - self.args[3] = usize::try_from(data[3]).unwrap(); + res.args[0] = usize::try_from(data[0]).unwrap(); + res.args[1] = usize::try_from(data[1]).unwrap(); + res.args[2] = usize::try_from(data[2]).unwrap(); + res.args[3] = usize::try_from(data[3]).unwrap(); + res } - pub fn revision(&mut self, major: usize, minor: usize) { - self.args[0] = major; - self.args[1] = minor; + pub fn new_revision(major: usize, minor: usize) -> Self { + let mut res = Self::default(); + res.args[0] = major; + res.args[1] = minor; + res } - pub fn os_revision(&mut self, major: usize, minor: usize, build_id: usize) { - self.args[0] = major; - self.args[1] = minor; - self.args[2] = build_id; + pub fn new_os_revision(major: usize, minor: usize, build_id: usize) -> Self { + let mut res = Self::default(); + res.args[0] = major; + res.args[1] = minor; + res.args[2] = build_id; + res } - pub fn disable_shm_cache( - &mut self, + pub fn new_disable_shm_cache( status: OpteeSmcReturn, shm_upper32: usize, shm_lower32: usize, - ) { - self.args[0] = status as usize; - self.args[1] = shm_upper32; - self.args[2] = shm_lower32; + ) -> Self { + let mut res = Self::default(); + res.args[0] = status as usize; + res.args[1] = shm_upper32; + res.args[2] = shm_lower32; + res } } diff --git a/litebox_shim_optee/src/lib.rs b/litebox_shim_optee/src/lib.rs index 760ad5170..e1eda2528 100644 --- a/litebox_shim_optee/src/lib.rs +++ b/litebox_shim_optee/src/lib.rs @@ -31,6 +31,8 @@ use litebox_platform_multiplex::Platform; pub mod loader; pub(crate) mod syscalls; +pub mod msg_proto; + const MAX_KERNEL_BUF_SIZE: usize = 0x80_000; /// Initialize the shim to run a task with the given parameters. 
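For reference, a minimal sketch of how the constructor-style API introduced above can be used (the values are illustrative and the call site is hypothetical):

    // Build reply values with the new constructors instead of mutating in place.
    let rev = OpteeSmcResult::new_revision(2, 0);
    let no_cache = OpteeSmcResult::new_disable_shm_cache(OpteeSmcReturn::ENotAvail, 0, 0);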
diff --git a/litebox_shim_optee/src/msg_proto.rs b/litebox_shim_optee/src/msg_proto.rs new file mode 100644 index 000000000..62b140864 --- /dev/null +++ b/litebox_shim_optee/src/msg_proto.rs @@ -0,0 +1,71 @@ +use litebox_common_linux::errno::Errno; +use litebox_common_optee::{ + OpteeMsgArg, OpteeSecureWorldCapabilities, OpteeSmcArgs, OpteeSmcFunction, OpteeSmcResult, + OpteeSmcReturn, +}; + +// TODO: Replace these with version and build info +const OPTEE_MSG_REVISION_MAJOR: usize = 2; +const OPTEE_MSG_REVISION_MINOR: usize = 0; +const OPTEE_MSG_BUILD_ID: usize = 0; + +// TODO: Replace this with an actual UID +const OPTEE_MSG_UID_0: u32 = 0x384f_b3e0; +const OPTEE_MSG_UID_1: u32 = 0xe7f8_11e3; +const OPTEE_MSG_UID_2: u32 = 0xaf63_0002; +const OPTEE_MSG_UID_3: u32 = 0xa5d5_c51b; + +// We do not support notification for now +const MAX_NOTIF_VALUE: usize = 0; +const NUM_RPC_PARMS: usize = 4; + +pub fn handle_optee_smc_args(smc: &mut OpteeSmcArgs) -> Result { + let func_id = smc.func_id()?; + + match func_id { + OpteeSmcFunction::CallWithArg + | OpteeSmcFunction::CallWithRpcArg + | OpteeSmcFunction::CallWithRegdArg => { + // TODO: handle the contained `OpteeMsgArg` and return appropriate result + Ok(OpteeSmcResult::new(OpteeSmcReturn::Ok)) + } + OpteeSmcFunction::ExchangeCapabilities => { + // TODO: update the below when we support more features + let default_cap = OpteeSecureWorldCapabilities::DYNAMIC_SHM + | OpteeSecureWorldCapabilities::MEMREF_NULL + | OpteeSecureWorldCapabilities::RPC_ARG; + Ok(OpteeSmcResult::new_exchange_capabilities( + OpteeSmcReturn::Ok, + default_cap, + MAX_NOTIF_VALUE, + NUM_RPC_PARMS, + )) + } + OpteeSmcFunction::DisableShmCache => { + // We do not support this feature + Ok(OpteeSmcResult::new_disable_shm_cache( + OpteeSmcReturn::ENotAvail, + 0, + 0, + )) + } + OpteeSmcFunction::CallsUid => Ok(OpteeSmcResult::new_uuid(&[ + OPTEE_MSG_UID_0, + OPTEE_MSG_UID_1, + OPTEE_MSG_UID_2, + OPTEE_MSG_UID_3, + ])), + OpteeSmcFunction::GetOsRevision => Ok(OpteeSmcResult::new_os_revision( + OPTEE_MSG_REVISION_MAJOR, + OPTEE_MSG_REVISION_MINOR, + OPTEE_MSG_BUILD_ID, + )), + OpteeSmcFunction::CallsRevision => Ok(OpteeSmcResult::new_revision( + OPTEE_MSG_REVISION_MAJOR, + OPTEE_MSG_REVISION_MINOR, + )), + _ => Err(Errno::EINVAL), + } +} + +pub fn halde_optee_msg_arg(_msg: &OpteeMsgArg) {} From 6299b38e5b22a5b7f942fa82e256d8fec8995a15 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Thu, 11 Dec 2025 05:49:05 +0000 Subject: [PATCH 02/52] improve optee msg handlers --- litebox_common_optee/src/lib.rs | 79 ++++++--- litebox_shim_optee/src/lib.rs | 2 +- litebox_shim_optee/src/msg_handler.rs | 229 ++++++++++++++++++++++++++ litebox_shim_optee/src/msg_proto.rs | 71 -------- 4 files changed, 288 insertions(+), 93 deletions(-) create mode 100644 litebox_shim_optee/src/msg_handler.rs delete mode 100644 litebox_shim_optee/src/msg_proto.rs diff --git a/litebox_common_optee/src/lib.rs b/litebox_common_optee/src/lib.rs index 4bea9f397..0089cedac 100644 --- a/litebox_common_optee/src/lib.rs +++ b/litebox_common_optee/src/lib.rs @@ -1116,11 +1116,11 @@ pub enum OpteeMessageCommand { #[repr(C)] pub struct OpteeMsgParamTmem { /// Physical address of the buffer - buf_ptr: u64, + pub buf_ptr: u64, /// Size of the buffer - size: u64, + pub size: u64, /// Temporary shared memory reference or identifier - shm_ref: u64, + pub shm_ref: u64, } /// Registered memory reference parameter @@ -1128,11 +1128,11 @@ pub struct OpteeMsgParamTmem { #[repr(C)] pub struct OpteeMsgParamRmem { /// Offset into shared memory 
reference - offs: u64, + pub offs: u64, /// Size of the buffer - size: u64, + pub size: u64, /// Shared memory reference or identifier - shm_ref: u64, + pub shm_ref: u64, } /// FF-A memory reference parameter @@ -1140,15 +1140,15 @@ pub struct OpteeMsgParamRmem { #[repr(C)] pub struct OpteeMsgParamFmem { /// Lower bits of offset into shared memory reference - offs_low: u32, + pub offs_low: u32, /// Higher bits of offset into shared memory reference - offs_high: u32, + pub offs_high: u32, /// Internal offset into the first page of shared memory reference - internal_offs: u16, + pub internal_offs: u16, /// Size of the buffer - size: u64, + pub size: u64, /// Global identifier of the shared memory - global_id: u64, + pub global_id: u64, } /// Opaque value parameter @@ -1156,9 +1156,9 @@ pub struct OpteeMsgParamFmem { #[derive(Debug, Clone, Copy)] #[repr(C)] pub struct OpteeMsgParamValue { - a: u64, - b: u64, - c: u64, + pub a: u64, + pub b: u64, + pub c: u64, } /// Parameter used together with `OpteeMsgArg` @@ -1233,7 +1233,7 @@ impl OpteeMsgParam { #[repr(C)] pub struct OpteeMsgArg { /// OP-TEE message command. This is a superset of `UteeEntryFunc`. - cmd: OpteeMessageCommand, + pub cmd: OpteeMessageCommand, /// TA function ID which is used if `cmd == InvokeCommand`. Note that the meaning of `cmd` and `func` /// is swapped compared to TAs. func: u32, @@ -1256,6 +1256,41 @@ pub struct OpteeMsgArg { params: [OpteeMsgParam; TEE_NUM_PARAMS + 2], } +impl OpteeMsgArg { + #[cfg(target_pointer_width = "64")] + pub fn get_param_tmem(&self, index: usize) -> Result { + if index >= self.params.len() || index >= self.num_params as usize { + Err(Errno::EINVAL) + } else { + Ok(unsafe { self.params[index].u.tmem }) + } + } + #[cfg(target_pointer_width = "64")] + pub fn get_param_rmem(&self, index: usize) -> Result { + if index >= self.params.len() || index >= self.num_params as usize { + Err(Errno::EINVAL) + } else { + Ok(unsafe { self.params[index].u.rmem }) + } + } + #[cfg(target_pointer_width = "64")] + pub fn get_param_fmem(&self, index: usize) -> Result { + if index >= self.params.len() || index >= self.num_params as usize { + Err(Errno::EINVAL) + } else { + Ok(unsafe { self.params[index].u.fmem }) + } + } + #[cfg(target_pointer_width = "64")] + pub fn get_param_value(&self, index: usize) -> Result { + if index >= self.params.len() || index >= self.num_params as usize { + Err(Errno::EINVAL) + } else { + Ok(unsafe { self.params[index].u.value }) + } + } +} + /// OP-TEE SMC call arguments. /// OP-TEE assumes that the underlying architecture is Arm with TrustZone and /// thus it uses Secure Monitor Call (SMC) calling convention (SMCCC). @@ -1289,12 +1324,13 @@ impl OpteeSmcArgs { /// Get the physical address of `OpteeMsgArg`. The secure world is expected to map and copy /// this structure. - pub fn optee_msg_arg_phys_addr(&self) -> Result { + #[cfg(target_pointer_width = "64")] + pub fn optee_msg_arg_phys_addr(&self) -> Result { // To avoid potential sign extension and overflow issues, OP-TEE stores the low and // high 32 bits of a 64-bit address in `args[2]` and `args[1]`, respectively. if self.args[1] & 0xffff_ffff_0000_0000 == 0 && self.args[2] & 0xffff_ffff_0000_0000 == 0 { let addr = (self.args[1] << 32) | self.args[2]; - Ok(addr) + Ok(addr as u64) } else { Err(Errno::EINVAL) } @@ -1365,13 +1401,14 @@ impl OpteeSmcResult { /// # Panics /// panics if any element of `data` cannot be converted to `usize`. 
+ #[cfg(target_pointer_width = "64")] pub fn new_uuid(data: &[u32; 4]) -> Self { let mut res = Self::default(); // OP-TEE doesn't use the high 32 bit of each argument to avoid sign extension and overflow issues. - res.args[0] = usize::try_from(data[0]).unwrap(); - res.args[1] = usize::try_from(data[1]).unwrap(); - res.args[2] = usize::try_from(data[2]).unwrap(); - res.args[3] = usize::try_from(data[3]).unwrap(); + res.args[0] = data[0] as usize; + res.args[1] = data[1] as usize; + res.args[2] = data[2] as usize; + res.args[3] = data[3] as usize; res } diff --git a/litebox_shim_optee/src/lib.rs b/litebox_shim_optee/src/lib.rs index e1eda2528..1f1364845 100644 --- a/litebox_shim_optee/src/lib.rs +++ b/litebox_shim_optee/src/lib.rs @@ -31,7 +31,7 @@ use litebox_platform_multiplex::Platform; pub mod loader; pub(crate) mod syscalls; -pub mod msg_proto; +pub mod msg_handler; const MAX_KERNEL_BUF_SIZE: usize = 0x80_000; diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs new file mode 100644 index 000000000..a690228c4 --- /dev/null +++ b/litebox_shim_optee/src/msg_handler.rs @@ -0,0 +1,229 @@ +use alloc::{boxed::Box, vec::Vec}; +use hashbrown::HashMap; +use litebox::mm::linux::PAGE_SIZE; +use litebox_common_linux::errno::Errno; +use litebox_common_optee::{ + OpteeMessageCommand, OpteeMsgArg, OpteeSecureWorldCapabilities, OpteeSmcArgs, OpteeSmcFunction, + OpteeSmcResult, OpteeSmcReturn, +}; +use once_cell::race::OnceBox; + +// TODO: Replace these with version and build info +const OPTEE_MSG_REVISION_MAJOR: usize = 2; +const OPTEE_MSG_REVISION_MINOR: usize = 0; +const OPTEE_MSG_BUILD_ID: usize = 0; + +// TODO: Replace this with an actual UID +const OPTEE_MSG_UID_0: u32 = 0x384f_b3e0; +const OPTEE_MSG_UID_1: u32 = 0xe7f8_11e3; +const OPTEE_MSG_UID_2: u32 = 0xaf63_0002; +const OPTEE_MSG_UID_3: u32 = 0xa5d5_c51b; + +// We do not support notification for now +const MAX_NOTIF_VALUE: usize = 0; +const NUM_RPC_PARMS: usize = 4; + +#[inline] +#[cfg(target_pointer_width = "64")] +fn page_align_down(address: u64) -> u64 { + address & !(PAGE_SIZE as u64 - 1) +} + +#[inline] +#[cfg(target_pointer_width = "64")] +fn page_align_up(len: u64) -> u64 { + len.next_multiple_of(PAGE_SIZE as u64) +} + +// Placeholder for copying data from remote memory (e.g., VTL0 physical memory) +// TODO: Specify it in the litebox crate? +// TODO: Define a type for remote address +#[allow(clippy::unnecessary_wraps)] +fn copy_from_remote_memory(_remote_addr: u64) -> Result +where + T: Copy, +{ + // TODO: implement the actual remote copy + Ok(unsafe { core::mem::zeroed() }) +} + +// Placeholder for copying data to remote memory (e.g., VTL0 physical memory) +// TODO: Specify it in the litebox crate? 
+// TODO: Define a type for remote address +#[expect(unused)] +#[allow(clippy::unnecessary_wraps)] +fn copy_to_remote_memory(_remote_addr: u64, _data: &T) -> Result<(), Errno> +where + T: Copy, +{ + // TODO: implement the actual remote copy + Ok(()) +} + +pub fn handle_optee_smc_args(smc: &mut OpteeSmcArgs) -> Result { + let func_id = smc.func_id()?; + + match func_id { + OpteeSmcFunction::CallWithArg + | OpteeSmcFunction::CallWithRpcArg + | OpteeSmcFunction::CallWithRegdArg => { + let msg_arg_addr = smc.optee_msg_arg_phys_addr()?; + let msg_arg = copy_from_remote_memory::(msg_arg_addr)?; + handle_optee_msg_arg(&msg_arg).map(|()| OpteeSmcResult::new(OpteeSmcReturn::Ok)) + } + OpteeSmcFunction::ExchangeCapabilities => { + // TODO: update the below when we support more features + let default_cap = OpteeSecureWorldCapabilities::DYNAMIC_SHM + | OpteeSecureWorldCapabilities::MEMREF_NULL + | OpteeSecureWorldCapabilities::RPC_ARG; + Ok(OpteeSmcResult::new_exchange_capabilities( + OpteeSmcReturn::Ok, + default_cap, + MAX_NOTIF_VALUE, + NUM_RPC_PARMS, + )) + } + OpteeSmcFunction::DisableShmCache => { + // We do not support this feature + Ok(OpteeSmcResult::new_disable_shm_cache( + OpteeSmcReturn::ENotAvail, + 0, + 0, + )) + } + OpteeSmcFunction::CallsUid => Ok(OpteeSmcResult::new_uuid(&[ + OPTEE_MSG_UID_0, + OPTEE_MSG_UID_1, + OPTEE_MSG_UID_2, + OPTEE_MSG_UID_3, + ])), + OpteeSmcFunction::GetOsRevision => Ok(OpteeSmcResult::new_os_revision( + OPTEE_MSG_REVISION_MAJOR, + OPTEE_MSG_REVISION_MINOR, + OPTEE_MSG_BUILD_ID, + )), + OpteeSmcFunction::CallsRevision => Ok(OpteeSmcResult::new_revision( + OPTEE_MSG_REVISION_MAJOR, + OPTEE_MSG_REVISION_MINOR, + )), + _ => Err(Errno::EINVAL), + } +} + +pub fn handle_optee_msg_arg(msg_arg: &OpteeMsgArg) -> Result<(), Errno> { + match msg_arg.cmd { + OpteeMessageCommand::RegisterShm => { + if let Ok(tmem) = msg_arg.get_param_tmem(0) { + shm_ref_map().register_shm(tmem.buf_ptr, tmem.size, tmem.shm_ref)?; + } else { + return Err(Errno::EINVAL); + } + } + OpteeMessageCommand::UnregisterShm => { + if let Ok(tmem) = msg_arg.get_param_tmem(0) { + shm_ref_map().remove(tmem.shm_ref).ok_or(Errno::ENOENT)?; + } else { + return Err(Errno::EINVAL); + } + } + _ => {} + } + + Ok(()) +} + +#[derive(Clone)] +struct ShmRefInfo { + pub pages: Box<[u64]>, + pub page_offset: u64, +} + +#[derive(Clone, Copy)] +#[repr(C)] +struct ShmRefPagesData { + pub pages_list: [u64; PAGELIST_ENTRIES_PER_PAGE], + pub next_page_data: u64, +} +const PAGELIST_ENTRIES_PER_PAGE: usize = + PAGE_SIZE / core::mem::size_of::() - core::mem::size_of::(); + +/// Maintain the information of OP-TEE shared memory in VTL0 referenced by `shm_ref`. +/// This data structure is for registering shared memory regions before they are +/// used during OP-TEE calls with parameters referencing shared memory. +/// Any normal memory references without this registration will be rejected. 
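/// Intended flow, as an illustration (these calls are defined below):
///   shm_ref_map().register_shm(tmem.buf_ptr, tmem.size, tmem.shm_ref)?;  // on RegisterShm
///   let info = shm_ref_map().get(tmem.shm_ref);                          // during later calls
///   shm_ref_map().remove(tmem.shm_ref);                                  // on UnregisterShm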
+struct ShmRefMap { + inner: spin::mutex::SpinMutex>, +} + +impl ShmRefMap { + pub fn new() -> Self { + Self { + inner: spin::mutex::SpinMutex::new(HashMap::new()), + } + } + + pub fn insert(&self, shm_ref: u64, info: ShmRefInfo) -> Result<(), Errno> { + let mut guard = self.inner.lock(); + if guard.contains_key(&shm_ref) { + Err(Errno::EEXIST) + } else { + let _ = guard.insert(shm_ref, info); + Ok(()) + } + } + + pub fn remove(&self, shm_ref: u64) -> Option { + let mut guard = self.inner.lock(); + guard.remove(&shm_ref) + } + + #[expect(unused)] + pub fn get(&self, shm_ref: u64) -> Option { + let guard = self.inner.lock(); + guard.get(&shm_ref).cloned() + } + + pub fn register_shm(&self, phys_addr: u64, size: u64, shm_ref: u64) -> Result<(), Errno> { + let aligned_phys_addr = page_align_down(phys_addr); + let page_offset = phys_addr - aligned_phys_addr; + let aligned_size = page_align_up(page_offset + size); + let num_pages = usize::try_from(aligned_size).unwrap() / PAGE_SIZE; + let mut pages = Vec::with_capacity(num_pages); + + let mut cur_addr = aligned_phys_addr; + loop { + let Ok(pages_data) = copy_from_remote_memory::(cur_addr) else { + return Err(Errno::EFAULT); + }; + for page in &pages_data.pages_list { + if *page == 0 || pages.len() == num_pages { + break; + } else if !page.is_multiple_of(u64::try_from(PAGE_SIZE).unwrap()) { + return Err(Errno::EINVAL); + } else { + pages.push(*page); + } + } + if pages_data.next_page_data == 0 || pages.len() == num_pages { + break; + } else { + cur_addr = pages_data.next_page_data; + } + } + + self.insert( + shm_ref, + ShmRefInfo { + pages: pages.into_boxed_slice(), + page_offset, + }, + )?; + + Ok(()) + } +} + +fn shm_ref_map() -> &'static ShmRefMap { + static SHM_REF_MAP: OnceBox = OnceBox::new(); + SHM_REF_MAP.get_or_init(|| Box::new(ShmRefMap::new())) +} diff --git a/litebox_shim_optee/src/msg_proto.rs b/litebox_shim_optee/src/msg_proto.rs deleted file mode 100644 index 62b140864..000000000 --- a/litebox_shim_optee/src/msg_proto.rs +++ /dev/null @@ -1,71 +0,0 @@ -use litebox_common_linux::errno::Errno; -use litebox_common_optee::{ - OpteeMsgArg, OpteeSecureWorldCapabilities, OpteeSmcArgs, OpteeSmcFunction, OpteeSmcResult, - OpteeSmcReturn, -}; - -// TODO: Replace these with version and build info -const OPTEE_MSG_REVISION_MAJOR: usize = 2; -const OPTEE_MSG_REVISION_MINOR: usize = 0; -const OPTEE_MSG_BUILD_ID: usize = 0; - -// TODO: Replace this with an actual UID -const OPTEE_MSG_UID_0: u32 = 0x384f_b3e0; -const OPTEE_MSG_UID_1: u32 = 0xe7f8_11e3; -const OPTEE_MSG_UID_2: u32 = 0xaf63_0002; -const OPTEE_MSG_UID_3: u32 = 0xa5d5_c51b; - -// We do not support notification for now -const MAX_NOTIF_VALUE: usize = 0; -const NUM_RPC_PARMS: usize = 4; - -pub fn handle_optee_smc_args(smc: &mut OpteeSmcArgs) -> Result { - let func_id = smc.func_id()?; - - match func_id { - OpteeSmcFunction::CallWithArg - | OpteeSmcFunction::CallWithRpcArg - | OpteeSmcFunction::CallWithRegdArg => { - // TODO: handle the contained `OpteeMsgArg` and return appropriate result - Ok(OpteeSmcResult::new(OpteeSmcReturn::Ok)) - } - OpteeSmcFunction::ExchangeCapabilities => { - // TODO: update the below when we support more features - let default_cap = OpteeSecureWorldCapabilities::DYNAMIC_SHM - | OpteeSecureWorldCapabilities::MEMREF_NULL - | OpteeSecureWorldCapabilities::RPC_ARG; - Ok(OpteeSmcResult::new_exchange_capabilities( - OpteeSmcReturn::Ok, - default_cap, - MAX_NOTIF_VALUE, - NUM_RPC_PARMS, - )) - } - OpteeSmcFunction::DisableShmCache => { - // We do not support this 
feature - Ok(OpteeSmcResult::new_disable_shm_cache( - OpteeSmcReturn::ENotAvail, - 0, - 0, - )) - } - OpteeSmcFunction::CallsUid => Ok(OpteeSmcResult::new_uuid(&[ - OPTEE_MSG_UID_0, - OPTEE_MSG_UID_1, - OPTEE_MSG_UID_2, - OPTEE_MSG_UID_3, - ])), - OpteeSmcFunction::GetOsRevision => Ok(OpteeSmcResult::new_os_revision( - OPTEE_MSG_REVISION_MAJOR, - OPTEE_MSG_REVISION_MINOR, - OPTEE_MSG_BUILD_ID, - )), - OpteeSmcFunction::CallsRevision => Ok(OpteeSmcResult::new_revision( - OPTEE_MSG_REVISION_MAJOR, - OPTEE_MSG_REVISION_MINOR, - )), - _ => Err(Errno::EINVAL), - } -} - -pub fn halde_optee_msg_arg(_msg: &OpteeMsgArg) {} From 8048e8aa67bca2c1f76541c804c0473e4981220a Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Thu, 11 Dec 2025 17:25:54 +0000 Subject: [PATCH 03/52] improve abstraction --- litebox_common_optee/src/lib.rs | 172 +++++++++++++++----------- litebox_shim_optee/src/msg_handler.rs | 61 ++++----- 2 files changed, 134 insertions(+), 99 deletions(-) diff --git a/litebox_common_optee/src/lib.rs b/litebox_common_optee/src/lib.rs index 0089cedac..5b9c0e43b 100644 --- a/litebox_common_optee/src/lib.rs +++ b/litebox_common_optee/src/lib.rs @@ -1291,7 +1291,7 @@ impl OpteeMsgArg { } } -/// OP-TEE SMC call arguments. +/// A memory page to exchange OP-TEE SMC call arguments. /// OP-TEE assumes that the underlying architecture is Arm with TrustZone and /// thus it uses Secure Monitor Call (SMC) calling convention (SMCCC). /// Since we currently rely on the existing OP-TEE driver which assumes SMCCC, we translate it into @@ -1299,9 +1299,28 @@ impl OpteeMsgArg { /// Specifically, OP-TEE SMC call uses up to nine CPU registers to pass arguments. /// However, since VTL call only supports up to four parameters, we allocate a VTL0 memory page and /// exchange all arguments through that memory page. +/// TODO: Since this is LVBS-specific structure to facilitate the translation between VTL call convention, +/// we might want to move it to the `litebox_platform_lvbs` crate later. #[repr(align(4096))] #[derive(Clone, Copy)] #[repr(C)] +pub struct OpteeSmcArgsPage { + pub args: [usize; Self::NUM_OPTEE_SMC_ARGS], +} +impl OpteeSmcArgsPage { + const NUM_OPTEE_SMC_ARGS: usize = 9; +} + +impl From<&OpteeSmcArgsPage> for OpteeSmcArgs { + fn from(page: &OpteeSmcArgsPage) -> Self { + let mut smc = OpteeSmcArgs::default(); + smc.args.copy_from_slice(&page.args); + smc + } +} + +/// OP-TEE SMC call arguments. +#[derive(Clone, Copy, Default)] pub struct OpteeSmcArgs { args: [usize; Self::NUM_OPTEE_SMC_ARGS], } @@ -1309,14 +1328,6 @@ pub struct OpteeSmcArgs { impl OpteeSmcArgs { const NUM_OPTEE_SMC_ARGS: usize = 9; - pub fn arg_index(&self, index: usize) -> Option { - if index < Self::NUM_OPTEE_SMC_ARGS { - Some(self.args[index]) - } else { - None - } - } - /// Get the function ID of an OP-TEE SMC call pub fn func_id(&self) -> Result { OpteeSmcFunction::try_from(self.args[0] & OpteeSmcFunction::MASK).map_err(|_| Errno::EINVAL) @@ -1368,75 +1379,94 @@ impl OpteeSmcFunction { /// OP-TEE SMC call result. /// OP-TEE SMC call uses CPU registers to pass input and output values. -/// Thus, this structure is technically equivalent to `OpteeSmcArgs`, but we separate them for clarity. 
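// (Illustration, not part of the patch: with `OpteeSmcArgsPage` added above,
// the shim can lift the register-style arguments out of the shared VTL0 page via
//   let smc: OpteeSmcArgs = OpteeSmcArgs::from(&page);
// before dispatching on the function ID.)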
-#[repr(align(4096))] -#[derive(Clone, Copy, Default)] -#[repr(C)] -pub struct OpteeSmcResult { - args: [usize; Self::NUM_OPTEE_SMC_ARGS], -} - -impl OpteeSmcResult { - const NUM_OPTEE_SMC_ARGS: usize = 9; - - pub fn new(status: OpteeSmcReturn) -> Self { - let mut res = Self::default(); - res.args[0] = status as usize; - res - } - - pub fn new_exchange_capabilities( +/// Thus, we convert this into `OpteeSmcArgs` later. +#[non_exhaustive] +pub enum OpteeSmcResult<'a> { + Generic { + status: OpteeSmcReturn, + }, + ExchangeCapabilities { status: OpteeSmcReturn, capabilities: OpteeSecureWorldCapabilities, max_notif_value: usize, data: usize, - ) -> Self { - let mut res = Self::default(); - res.args[0] = status as usize; - res.args[1] = capabilities.bits(); - res.args[2] = max_notif_value; - res.args[3] = data; - res - } - - /// # Panics - /// panics if any element of `data` cannot be converted to `usize`. - #[cfg(target_pointer_width = "64")] - pub fn new_uuid(data: &[u32; 4]) -> Self { - let mut res = Self::default(); - // OP-TEE doesn't use the high 32 bit of each argument to avoid sign extension and overflow issues. - res.args[0] = data[0] as usize; - res.args[1] = data[1] as usize; - res.args[2] = data[2] as usize; - res.args[3] = data[3] as usize; - res - } - - pub fn new_revision(major: usize, minor: usize) -> Self { - let mut res = Self::default(); - res.args[0] = major; - res.args[1] = minor; - res - } - - pub fn new_os_revision(major: usize, minor: usize, build_id: usize) -> Self { - let mut res = Self::default(); - res.args[0] = major; - res.args[1] = minor; - res.args[2] = build_id; - res - } - - pub fn new_disable_shm_cache( + }, + Uuid { + data: &'a [u32; 4], + }, + Revision { + major: usize, + minor: usize, + }, + OsRevision { + major: usize, + minor: usize, + build_id: usize, + }, + DisableShmCache { status: OpteeSmcReturn, shm_upper32: usize, shm_lower32: usize, - ) -> Self { - let mut res = Self::default(); - res.args[0] = status as usize; - res.args[1] = shm_upper32; - res.args[2] = shm_lower32; - res + }, +} + +impl From> for OpteeSmcArgs { + fn from(value: OpteeSmcResult) -> Self { + match value { + OpteeSmcResult::Generic { status } => { + let mut smc = OpteeSmcArgs::default(); + smc.args[0] = status as usize; + smc + } + OpteeSmcResult::ExchangeCapabilities { + status, + capabilities, + max_notif_value, + data, + } => { + let mut smc = OpteeSmcArgs::default(); + smc.args[0] = status as usize; + smc.args[1] = capabilities.bits(); + smc.args[2] = max_notif_value; + smc.args[3] = data; + smc + } + OpteeSmcResult::Uuid { data } => { + let mut smc = OpteeSmcArgs::default(); + for (i, arg) in smc.args.iter_mut().enumerate().take(4) { + *arg = data[i] as usize; + } + smc + } + OpteeSmcResult::Revision { major, minor } => { + let mut smc = OpteeSmcArgs::default(); + smc.args[0] = major; + smc.args[1] = minor; + smc + } + OpteeSmcResult::OsRevision { + major, + minor, + build_id, + } => { + let mut smc = OpteeSmcArgs::default(); + smc.args[0] = major; + smc.args[1] = minor; + smc.args[2] = build_id; + smc + } + OpteeSmcResult::DisableShmCache { + status, + shm_upper32, + shm_lower32, + } => { + let mut smc = OpteeSmcArgs::default(); + smc.args[0] = status as usize; + smc.args[1] = shm_upper32; + smc.args[2] = shm_lower32; + smc + } + } } } diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index a690228c4..24b6a8dcf 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -60,7 +60,8 @@ where Ok(()) } 
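// (Illustration, not part of the patch: the refactored handler below returns the
// `OpteeSmcResult` enum, which the caller flattens back into registers, e.g.
//   let regs: OpteeSmcArgs = OpteeSmcResult::Revision { major: 2, minor: 0 }.into();)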
-pub fn handle_optee_smc_args(smc: &mut OpteeSmcArgs) -> Result { +/// This function handles `OpteeSmcArgs` passed from the normal world (VTL0) via an OP-TEE SMC call. +pub fn handle_optee_smc_args(smc: &mut OpteeSmcArgs) -> Result, Errno> { let func_id = smc.func_id()?; match func_id { @@ -69,43 +70,47 @@ pub fn handle_optee_smc_args(smc: &mut OpteeSmcArgs) -> Result { let msg_arg_addr = smc.optee_msg_arg_phys_addr()?; let msg_arg = copy_from_remote_memory::(msg_arg_addr)?; - handle_optee_msg_arg(&msg_arg).map(|()| OpteeSmcResult::new(OpteeSmcReturn::Ok)) + handle_optee_msg_arg(&msg_arg).map(|()| OpteeSmcResult::Generic { + status: OpteeSmcReturn::Ok, + }) } OpteeSmcFunction::ExchangeCapabilities => { // TODO: update the below when we support more features let default_cap = OpteeSecureWorldCapabilities::DYNAMIC_SHM | OpteeSecureWorldCapabilities::MEMREF_NULL | OpteeSecureWorldCapabilities::RPC_ARG; - Ok(OpteeSmcResult::new_exchange_capabilities( - OpteeSmcReturn::Ok, - default_cap, - MAX_NOTIF_VALUE, - NUM_RPC_PARMS, - )) + Ok(OpteeSmcResult::ExchangeCapabilities { + status: OpteeSmcReturn::Ok, + capabilities: default_cap, + max_notif_value: MAX_NOTIF_VALUE, + data: NUM_RPC_PARMS, + }) } OpteeSmcFunction::DisableShmCache => { // We do not support this feature - Ok(OpteeSmcResult::new_disable_shm_cache( - OpteeSmcReturn::ENotAvail, - 0, - 0, - )) + Ok(OpteeSmcResult::DisableShmCache { + status: OpteeSmcReturn::ENotAvail, + shm_upper32: 0, + shm_lower32: 0, + }) } - OpteeSmcFunction::CallsUid => Ok(OpteeSmcResult::new_uuid(&[ - OPTEE_MSG_UID_0, - OPTEE_MSG_UID_1, - OPTEE_MSG_UID_2, - OPTEE_MSG_UID_3, - ])), - OpteeSmcFunction::GetOsRevision => Ok(OpteeSmcResult::new_os_revision( - OPTEE_MSG_REVISION_MAJOR, - OPTEE_MSG_REVISION_MINOR, - OPTEE_MSG_BUILD_ID, - )), - OpteeSmcFunction::CallsRevision => Ok(OpteeSmcResult::new_revision( - OPTEE_MSG_REVISION_MAJOR, - OPTEE_MSG_REVISION_MINOR, - )), + OpteeSmcFunction::CallsUid => Ok(OpteeSmcResult::Uuid { + data: &[ + OPTEE_MSG_UID_0, + OPTEE_MSG_UID_1, + OPTEE_MSG_UID_2, + OPTEE_MSG_UID_3, + ], + }), + OpteeSmcFunction::GetOsRevision => Ok(OpteeSmcResult::OsRevision { + major: OPTEE_MSG_REVISION_MAJOR, + minor: OPTEE_MSG_REVISION_MINOR, + build_id: OPTEE_MSG_BUILD_ID, + }), + OpteeSmcFunction::CallsRevision => Ok(OpteeSmcResult::Revision { + major: OPTEE_MSG_REVISION_MAJOR, + minor: OPTEE_MSG_REVISION_MINOR, + }), _ => Err(Errno::EINVAL), } } From c7310c3ff09ee1188efae268a7385e7e1f83fb76 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Thu, 11 Dec 2025 20:01:50 +0000 Subject: [PATCH 04/52] revison and add RemotePtr placeholders --- litebox_shim_optee/src/lib.rs | 1 + litebox_shim_optee/src/msg_handler.rs | 53 ++++---- litebox_shim_optee/src/remote_pointers.rs | 148 ++++++++++++++++++++++ 3 files changed, 173 insertions(+), 29 deletions(-) create mode 100644 litebox_shim_optee/src/remote_pointers.rs diff --git a/litebox_shim_optee/src/lib.rs b/litebox_shim_optee/src/lib.rs index 1f1364845..b47e4eb27 100644 --- a/litebox_shim_optee/src/lib.rs +++ b/litebox_shim_optee/src/lib.rs @@ -32,6 +32,7 @@ pub mod loader; pub(crate) mod syscalls; pub mod msg_handler; +pub mod remote_pointers; const MAX_KERNEL_BUF_SIZE: usize = 0x80_000; diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index 24b6a8dcf..82a13caac 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -1,6 +1,8 @@ +use crate::remote_pointers::{RemoteConstPtr, RemotePtrKind, ValidateAccess}; use alloc::{boxed::Box, 
vec::Vec}; use hashbrown::HashMap; use litebox::mm::linux::PAGE_SIZE; +use litebox::platform::RawConstPointer; use litebox_common_linux::errno::Errno; use litebox_common_optee::{ OpteeMessageCommand, OpteeMsgArg, OpteeSecureWorldCapabilities, OpteeSmcArgs, OpteeSmcFunction, @@ -35,32 +37,17 @@ fn page_align_up(len: u64) -> u64 { len.next_multiple_of(PAGE_SIZE as u64) } -// Placeholder for copying data from remote memory (e.g., VTL0 physical memory) -// TODO: Specify it in the litebox crate? -// TODO: Define a type for remote address -#[allow(clippy::unnecessary_wraps)] -fn copy_from_remote_memory(_remote_addr: u64) -> Result -where - T: Copy, -{ - // TODO: implement the actual remote copy - Ok(unsafe { core::mem::zeroed() }) -} +// TODO: implement a validation mechanism for VTL0 physical addresses (e.g., ensure this physical +// address does not belong to VTL1) +pub struct Novalidation; +impl ValidateAccess for Novalidation {} -// Placeholder for copying data to remote memory (e.g., VTL0 physical memory) -// TODO: Specify it in the litebox crate? -// TODO: Define a type for remote address -#[expect(unused)] -#[allow(clippy::unnecessary_wraps)] -fn copy_to_remote_memory(_remote_addr: u64, _data: &T) -> Result<(), Errno> -where - T: Copy, -{ - // TODO: implement the actual remote copy - Ok(()) -} +pub struct Vtl0PhysAddr; +impl RemotePtrKind for Vtl0PhysAddr {} /// This function handles `OpteeSmcArgs` passed from the normal world (VTL0) via an OP-TEE SMC call. +/// # Panics +/// Panics if the physical address in `smc` cannot be converted to `usize`. pub fn handle_optee_smc_args(smc: &mut OpteeSmcArgs) -> Result, Errno> { let func_id = smc.func_id()?; @@ -69,7 +56,13 @@ pub fn handle_optee_smc_args(smc: &mut OpteeSmcArgs) -> Result { let msg_arg_addr = smc.optee_msg_arg_phys_addr()?; - let msg_arg = copy_from_remote_memory::(msg_arg_addr)?; + let msg_arg_addr = usize::try_from(msg_arg_addr).unwrap(); + let remote_ptr = + RemoteConstPtr::::from_usize(msg_arg_addr); + let msg_arg = unsafe { remote_ptr.read_at_offset(0) } + .ok_or(Errno::EFAULT)? + .into_owned(); + // let msg_arg = copy_from_remote_memory::(msg_arg_addr)?; handle_optee_msg_arg(&msg_arg).map(|()| OpteeSmcResult::Generic { status: OpteeSmcReturn::Ok, }) @@ -195,11 +188,13 @@ impl ShmRefMap { let num_pages = usize::try_from(aligned_size).unwrap() / PAGE_SIZE; let mut pages = Vec::with_capacity(num_pages); - let mut cur_addr = aligned_phys_addr; + let mut cur_addr = usize::try_from(aligned_phys_addr).unwrap(); loop { - let Ok(pages_data) = copy_from_remote_memory::(cur_addr) else { - return Err(Errno::EFAULT); - }; + let cur_ptr = + RemoteConstPtr::::from_usize(cur_addr); + let pages_data = unsafe { cur_ptr.read_at_offset(0) } + .ok_or(Errno::EFAULT)? + .into_owned(); for page in &pages_data.pages_list { if *page == 0 || pages.len() == num_pages { break; @@ -212,7 +207,7 @@ impl ShmRefMap { if pages_data.next_page_data == 0 || pages.len() == num_pages { break; } else { - cur_addr = pages_data.next_page_data; + cur_addr = usize::try_from(pages_data.next_page_data).unwrap(); } } diff --git a/litebox_shim_optee/src/remote_pointers.rs b/litebox_shim_optee/src/remote_pointers.rs new file mode 100644 index 000000000..fed4d7e6b --- /dev/null +++ b/litebox_shim_optee/src/remote_pointers.rs @@ -0,0 +1,148 @@ +//! Placeholders for implementing remote pointer access (e.g., reading from VTL0 physical memory) +//! 
TODO: Improve these and move these to the litebox crate later + +use litebox::platform::{RawConstPointer, RawMutPointer}; + +pub trait ValidateAccess {} +pub trait RemotePtrKind {} + +#[repr(C)] +pub struct RemoteConstPtr { + inner: *const T, + _kind: core::marker::PhantomData, + _validator: core::marker::PhantomData, +} + +impl RemoteConstPtr { + pub fn from_ptr(ptr: *const T) -> Self { + Self { + inner: ptr, + _kind: core::marker::PhantomData, + _validator: core::marker::PhantomData, + } + } +} + +impl Clone for RemoteConstPtr { + fn clone(&self) -> Self { + *self + } +} + +impl Copy for RemoteConstPtr {} + +impl core::fmt::Debug for RemoteConstPtr { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_tuple("RemoteConstPtr").field(&self.inner).finish() + } +} + +impl RawConstPointer for RemoteConstPtr { + unsafe fn read_at_offset<'a>(self, _count: isize) -> Option> { + // TODO: read data from the remote side + let val: T = unsafe { core::mem::zeroed() }; + Some(alloc::borrow::Cow::Owned(val)) + } + + unsafe fn to_cow_slice<'a>(self, len: usize) -> Option> { + // TODO: read data from the remote side + if len == 0 { + return Some(alloc::borrow::Cow::Owned(alloc::vec::Vec::new())); + } + let mut data = alloc::vec::Vec::new(); + data.reserve_exact(len); + unsafe { data.set_len(len) }; + Some(alloc::borrow::Cow::Owned(data)) + } + + fn as_usize(&self) -> usize { + self.inner.expose_provenance() + } + + fn from_usize(addr: usize) -> Self { + Self { + inner: core::ptr::with_exposed_provenance(addr), + _kind: core::marker::PhantomData, + _validator: core::marker::PhantomData, + } + } +} + +#[repr(C)] +pub struct RemoteMutPtr { + inner: *mut T, + _kind: core::marker::PhantomData, + _validator: core::marker::PhantomData, +} + +impl RemoteMutPtr { + pub fn from_ptr(ptr: *mut T) -> Self { + Self { + inner: ptr, + _kind: core::marker::PhantomData, + _validator: core::marker::PhantomData, + } + } +} + +impl Clone for RemoteMutPtr { + fn clone(&self) -> Self { + *self + } +} + +impl Copy for RemoteMutPtr {} + +impl core::fmt::Debug for RemoteMutPtr { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_tuple("RemoteMutPtr").field(&self.inner).finish() + } +} + +impl RawConstPointer for RemoteMutPtr { + unsafe fn read_at_offset<'a>(self, _count: isize) -> Option> { + // TODO: read data from the remote side + let val: T = unsafe { core::mem::zeroed() }; + Some(alloc::borrow::Cow::Owned(val)) + } + + unsafe fn to_cow_slice<'a>(self, len: usize) -> Option> { + // TODO: read data from the remote side + if len == 0 { + return Some(alloc::borrow::Cow::Owned(alloc::vec::Vec::new())); + } + let mut data = alloc::vec::Vec::new(); + data.reserve_exact(len); + unsafe { data.set_len(len) }; + Some(alloc::borrow::Cow::Owned(data)) + } + + fn as_usize(&self) -> usize { + self.inner.expose_provenance() + } + + fn from_usize(addr: usize) -> Self { + Self::from_ptr(core::ptr::with_exposed_provenance_mut(addr)) + } +} + +impl RawMutPointer for RemoteMutPtr { + unsafe fn write_at_offset<'a>(self, _count: isize, _value: T) -> Option<()> { + Some(()) + } + + fn mutate_subslice_with( + self, + _range: impl core::ops::RangeBounds, + _f: impl FnOnce(&mut [T]) -> R, + ) -> Option { + unimplemented!("use write_slice_at_offset instead") + } + + fn copy_from_slice(self, _start_offset: usize, _buf: &[T]) -> Option<()> + where + T: Copy, + { + Some(()) + } +} From 09db748bad12fee294f057f50350ed6ed55a8861 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Thu, 11 Dec 2025 
23:31:34 +0000 Subject: [PATCH 05/52] rename --- litebox_shim_optee/src/lib.rs | 2 +- litebox_shim_optee/src/msg_handler.rs | 2 +- litebox_shim_optee/src/{remote_pointers.rs => ptr.rs} | 0 3 files changed, 2 insertions(+), 2 deletions(-) rename litebox_shim_optee/src/{remote_pointers.rs => ptr.rs} (100%) diff --git a/litebox_shim_optee/src/lib.rs b/litebox_shim_optee/src/lib.rs index b47e4eb27..57743a6e0 100644 --- a/litebox_shim_optee/src/lib.rs +++ b/litebox_shim_optee/src/lib.rs @@ -32,7 +32,7 @@ pub mod loader; pub(crate) mod syscalls; pub mod msg_handler; -pub mod remote_pointers; +pub mod ptr; const MAX_KERNEL_BUF_SIZE: usize = 0x80_000; diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index 82a13caac..39967eec0 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -1,4 +1,4 @@ -use crate::remote_pointers::{RemoteConstPtr, RemotePtrKind, ValidateAccess}; +use crate::ptr::{RemoteConstPtr, RemotePtrKind, ValidateAccess}; use alloc::{boxed::Box, vec::Vec}; use hashbrown::HashMap; use litebox::mm::linux::PAGE_SIZE; diff --git a/litebox_shim_optee/src/remote_pointers.rs b/litebox_shim_optee/src/ptr.rs similarity index 100% rename from litebox_shim_optee/src/remote_pointers.rs rename to litebox_shim_optee/src/ptr.rs From e3c6785e5ae75dc9711f50f521664ca2e75a9683 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Fri, 12 Dec 2025 00:57:52 +0000 Subject: [PATCH 06/52] revision --- litebox_shim_optee/src/msg_handler.rs | 18 ++++-------------- litebox_shim_optee/src/ptr.rs | 19 ++++++++++++++++++- 2 files changed, 22 insertions(+), 15 deletions(-) diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index 39967eec0..22a9c1b54 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -1,4 +1,4 @@ -use crate::ptr::{RemoteConstPtr, RemotePtrKind, ValidateAccess}; +use crate::ptr::NormalWorldConstPtr; use alloc::{boxed::Box, vec::Vec}; use hashbrown::HashMap; use litebox::mm::linux::PAGE_SIZE; @@ -37,14 +37,6 @@ fn page_align_up(len: u64) -> u64 { len.next_multiple_of(PAGE_SIZE as u64) } -// TODO: implement a validation mechanism for VTL0 physical addresses (e.g., ensure this physical -// address does not belong to VTL1) -pub struct Novalidation; -impl ValidateAccess for Novalidation {} - -pub struct Vtl0PhysAddr; -impl RemotePtrKind for Vtl0PhysAddr {} - /// This function handles `OpteeSmcArgs` passed from the normal world (VTL0) via an OP-TEE SMC call. /// # Panics /// Panics if the physical address in `smc` cannot be converted to `usize`. @@ -57,9 +49,8 @@ pub fn handle_optee_smc_args(smc: &mut OpteeSmcArgs) -> Result { let msg_arg_addr = smc.optee_msg_arg_phys_addr()?; let msg_arg_addr = usize::try_from(msg_arg_addr).unwrap(); - let remote_ptr = - RemoteConstPtr::::from_usize(msg_arg_addr); - let msg_arg = unsafe { remote_ptr.read_at_offset(0) } + let ptr = NormalWorldConstPtr::::from_usize(msg_arg_addr); + let msg_arg = unsafe { ptr.read_at_offset(0) } .ok_or(Errno::EFAULT)? .into_owned(); // let msg_arg = copy_from_remote_memory::(msg_arg_addr)?; @@ -190,8 +181,7 @@ impl ShmRefMap { let mut cur_addr = usize::try_from(aligned_phys_addr).unwrap(); loop { - let cur_ptr = - RemoteConstPtr::::from_usize(cur_addr); + let cur_ptr = NormalWorldConstPtr::::from_usize(cur_addr); let pages_data = unsafe { cur_ptr.read_at_offset(0) } .ok_or(Errno::EFAULT)? 
.into_owned(); diff --git a/litebox_shim_optee/src/ptr.rs b/litebox_shim_optee/src/ptr.rs index fed4d7e6b..f77e93cc8 100644 --- a/litebox_shim_optee/src/ptr.rs +++ b/litebox_shim_optee/src/ptr.rs @@ -1,4 +1,5 @@ -//! Placeholders for implementing remote pointer access (e.g., reading from VTL0 physical memory) +//! Placeholders for specifying remote pointer access (e.g., reading data from +//! VTL0 physical memory) //! TODO: Improve these and move these to the litebox crate later use litebox::platform::{RawConstPointer, RawMutPointer}; @@ -146,3 +147,19 @@ impl RawMutPointer for RemoteM Some(()) } } + +// TODO: implement a validation mechanism for VTL0 physical addresses (e.g., ensure this physical +// address does not belong to VTL1) +pub struct Novalidation; +impl ValidateAccess for Novalidation {} + +pub struct Vtl0PhysAddr; +impl RemotePtrKind for Vtl0PhysAddr {} + +/// Normal world const pointer type. For now, normal world implies VTL0 but it can be something else +/// including TrustZone normal world, other VMPL or TD partition, or other processes. +pub type NormalWorldConstPtr = RemoteConstPtr; + +/// Normal world mutable pointer type. For now, normal world implies VTL0 but it can be something else +/// including TrustZone normal world, other VMPL or TD partition, or other processes. +pub type NormalWorldMutPtr = RemoteMutPtr; From 03bfe782bbfac969bc11b519b3279c68b1a2e107 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Fri, 12 Dec 2025 03:50:42 +0000 Subject: [PATCH 07/52] revise remote pointers --- litebox_shim_optee/src/ptr.rs | 121 ++++++++++++++++++++++------------ 1 file changed, 80 insertions(+), 41 deletions(-) diff --git a/litebox_shim_optee/src/ptr.rs b/litebox_shim_optee/src/ptr.rs index f77e93cc8..9b53cab21 100644 --- a/litebox_shim_optee/src/ptr.rs +++ b/litebox_shim_optee/src/ptr.rs @@ -4,44 +4,58 @@ use litebox::platform::{RawConstPointer, RawMutPointer}; +// TODO: use the one from the litebox crate pub trait ValidateAccess {} -pub trait RemotePtrKind {} + +/// Trait to access a pointer to remote memory +/// For now, we only consider copying the entire value before acccessing it. +/// We do not consider byte-level access or unaligned access. 
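/// (Illustration: `Vtl0PhysMemoryAccess` below implements this trait with
/// placeholder whole-value copies until the actual VTL0 access is wired up.)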
+pub trait RemoteMemoryAccess { + fn read_at_offset(ptr: *mut T, count: isize) -> Option; + + fn write_at_offset(ptr: *mut T, count: isize, value: T) -> Option<()>; + + fn slice_from(ptr: *mut T, len: usize) -> Option>; + + fn copy_from_slice(start_offset: usize, buf: &[T]) -> Option<()>; +} #[repr(C)] -pub struct RemoteConstPtr { +pub struct RemoteConstPtr { inner: *const T, - _kind: core::marker::PhantomData, + _access: core::marker::PhantomData, _validator: core::marker::PhantomData, } -impl RemoteConstPtr { +impl RemoteConstPtr { pub fn from_ptr(ptr: *const T) -> Self { Self { inner: ptr, - _kind: core::marker::PhantomData, + _access: core::marker::PhantomData, _validator: core::marker::PhantomData, } } } -impl Clone for RemoteConstPtr { +impl Clone for RemoteConstPtr { fn clone(&self) -> Self { *self } } -impl Copy for RemoteConstPtr {} +impl Copy for RemoteConstPtr {} -impl core::fmt::Debug for RemoteConstPtr { +impl core::fmt::Debug for RemoteConstPtr { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_tuple("RemoteConstPtr").field(&self.inner).finish() } } -impl RawConstPointer for RemoteConstPtr { - unsafe fn read_at_offset<'a>(self, _count: isize) -> Option> { - // TODO: read data from the remote side - let val: T = unsafe { core::mem::zeroed() }; +impl RawConstPointer + for RemoteConstPtr +{ + unsafe fn read_at_offset<'a>(self, count: isize) -> Option> { + let val = A::read_at_offset(self.inner.cast_mut(), count)?; Some(alloc::borrow::Cow::Owned(val)) } @@ -63,47 +77,48 @@ impl RawConstPointer for Remot fn from_usize(addr: usize) -> Self { Self { inner: core::ptr::with_exposed_provenance(addr), - _kind: core::marker::PhantomData, + _access: core::marker::PhantomData, _validator: core::marker::PhantomData, } } } #[repr(C)] -pub struct RemoteMutPtr { +pub struct RemoteMutPtr { inner: *mut T, - _kind: core::marker::PhantomData, + _access: core::marker::PhantomData, _validator: core::marker::PhantomData, } -impl RemoteMutPtr { +impl RemoteMutPtr { pub fn from_ptr(ptr: *mut T) -> Self { Self { inner: ptr, - _kind: core::marker::PhantomData, + _access: core::marker::PhantomData, _validator: core::marker::PhantomData, } } } -impl Clone for RemoteMutPtr { +impl Clone for RemoteMutPtr { fn clone(&self) -> Self { *self } } -impl Copy for RemoteMutPtr {} +impl Copy for RemoteMutPtr {} -impl core::fmt::Debug for RemoteMutPtr { +impl core::fmt::Debug for RemoteMutPtr { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_tuple("RemoteMutPtr").field(&self.inner).finish() } } -impl RawConstPointer for RemoteMutPtr { - unsafe fn read_at_offset<'a>(self, _count: isize) -> Option> { - // TODO: read data from the remote side - let val: T = unsafe { core::mem::zeroed() }; +impl RawConstPointer + for RemoteMutPtr +{ + unsafe fn read_at_offset<'a>(self, count: isize) -> Option> { + let val = A::read_at_offset(self.inner, count)?; Some(alloc::borrow::Cow::Owned(val)) } @@ -112,10 +127,8 @@ impl RawConstPointer for Remot if len == 0 { return Some(alloc::borrow::Cow::Owned(alloc::vec::Vec::new())); } - let mut data = alloc::vec::Vec::new(); - data.reserve_exact(len); - unsafe { data.set_len(len) }; - Some(alloc::borrow::Cow::Owned(data)) + let data = A::slice_from(self.inner, len)?; + Some(alloc::borrow::Cow::Owned(data.into())) } fn as_usize(&self) -> usize { @@ -127,9 +140,11 @@ impl RawConstPointer for Remot } } -impl RawMutPointer for RemoteMutPtr { - unsafe fn write_at_offset<'a>(self, _count: isize, _value: T) -> Option<()> { - Some(()) +impl 
RawMutPointer + for RemoteMutPtr +{ + unsafe fn write_at_offset<'a>(self, count: isize, value: T) -> Option<()> { + A::write_at_offset(self.inner, count, value) } fn mutate_subslice_with( @@ -140,11 +155,11 @@ impl RawMutPointer for RemoteM unimplemented!("use write_slice_at_offset instead") } - fn copy_from_slice(self, _start_offset: usize, _buf: &[T]) -> Option<()> + fn copy_from_slice(self, start_offset: usize, buf: &[T]) -> Option<()> where T: Copy, { - Some(()) + A::copy_from_slice(start_offset, buf) } } @@ -153,13 +168,37 @@ impl RawMutPointer for RemoteM pub struct Novalidation; impl ValidateAccess for Novalidation {} -pub struct Vtl0PhysAddr; -impl RemotePtrKind for Vtl0PhysAddr {} +pub struct Vtl0PhysMemoryAccess; +impl RemoteMemoryAccess for Vtl0PhysMemoryAccess { + fn read_at_offset(_ptr: *mut T, _count: isize) -> Option { + // TODO: read a value from VTL0 physical memory + let val: T = unsafe { core::mem::zeroed() }; + Some(val) + } + + fn write_at_offset(_ptr: *mut T, _count: isize, _value: T) -> Option<()> { + // TODO: write a value to VTL0 physical memory + Some(()) + } + + fn slice_from(_ptr: *mut T, len: usize) -> Option> { + // TODO: read a slice from VTL0 physical memory + let mut data: alloc::vec::Vec = alloc::vec::Vec::new(); + data.reserve_exact(len); + unsafe { data.set_len(len) }; + Some(data.into_boxed_slice()) + } + + fn copy_from_slice(_start_offset: usize, _buf: &[T]) -> Option<()> { + // TODO: write a slice to VTL0 physical memory + Some(()) + } +} -/// Normal world const pointer type. For now, normal world implies VTL0 but it can be something else -/// including TrustZone normal world, other VMPL or TD partition, or other processes. -pub type NormalWorldConstPtr = RemoteConstPtr; +/// Normal world const pointer type. For now, we only consider VTL0 physical memory but it can be +/// something else like TrustZone normal world, other VMPL or TD partition, or other processes. +pub type NormalWorldConstPtr = RemoteConstPtr; -/// Normal world mutable pointer type. For now, normal world implies VTL0 but it can be something else -/// including TrustZone normal world, other VMPL or TD partition, or other processes. -pub type NormalWorldMutPtr = RemoteMutPtr; +/// Normal world mutable pointer type. For now, we only consider VTL0 physical memory but it can be +/// something else like TrustZone normal world, other VMPL or TD partition, or other processes. +pub type NormalWorldMutPtr = RemoteMutPtr; From 00bd8bb2b433521562806c7a24486383ea4154aa Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Fri, 12 Dec 2025 03:57:44 +0000 Subject: [PATCH 08/52] ratchet --- dev_tests/src/ratchet.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev_tests/src/ratchet.rs b/dev_tests/src/ratchet.rs index 4841ab019..3fec680f0 100644 --- a/dev_tests/src/ratchet.rs +++ b/dev_tests/src/ratchet.rs @@ -35,7 +35,7 @@ fn ratchet_globals() -> Result<()> { ("litebox_runner_lvbs/", 3), ("litebox_runner_snp/", 1), ("litebox_shim_linux/", 1), - ("litebox_shim_optee/", 6), + ("litebox_shim_optee/", 7), ], |file| { Ok(file From 11bd6b17be163b2834b97d3ef9148711f25379dc Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Fri, 12 Dec 2025 06:10:59 +0000 Subject: [PATCH 09/52] handle ta request. 
wip --- litebox_common_optee/src/lib.rs | 35 ++++++---- litebox_shim_optee/src/msg_handler.rs | 92 +++++++++++++++++++++------ 2 files changed, 98 insertions(+), 29 deletions(-) diff --git a/litebox_common_optee/src/lib.rs b/litebox_common_optee/src/lib.rs index 5b9c0e43b..34804c730 100644 --- a/litebox_common_optee/src/lib.rs +++ b/litebox_common_optee/src/lib.rs @@ -1111,6 +1111,18 @@ pub enum OpteeMessageCommand { Unknown = 0xffff_ffff, } +impl TryFrom for UteeEntryFunc { + type Error = OpteeSmcReturn; + fn try_from(cmd: OpteeMessageCommand) -> Result { + match cmd { + OpteeMessageCommand::OpenSession => Ok(UteeEntryFunc::OpenSession), + OpteeMessageCommand::CloseSession => Ok(UteeEntryFunc::CloseSession), + OpteeMessageCommand::InvokeCommand => Ok(UteeEntryFunc::InvokeCommand), + _ => Err(OpteeSmcReturn::EBadCmd), + } + } +} + /// Temporary reference memory parameter #[derive(Clone, Copy, Debug)] #[repr(C)] @@ -1236,24 +1248,24 @@ pub struct OpteeMsgArg { pub cmd: OpteeMessageCommand, /// TA function ID which is used if `cmd == InvokeCommand`. Note that the meaning of `cmd` and `func` /// is swapped compared to TAs. - func: u32, + pub func: u32, /// Session ID. This is "IN" parameter most of the time except for `cmd == OpenSession` where /// the secure world generates and returns a session ID. - session: u32, + pub session: u32, /// Cancellation ID. This is a unique value to identify this request. - cancel_id: u32, + pub cancel_id: u32, pad: u32, /// Return value from the secure world - ret: u32, + pub ret: u32, /// Origin of the return value - ret_origin: TeeOrigin, + pub ret_origin: TeeOrigin, /// Number of parameters contained in `params` - num_params: u32, + pub num_params: u32, /// Parameters to be passed to the secure world. If `cmd == OpenSession`, the first two params contain /// a TA UUID and they are not delivered to the TA. /// Note that, originally, the length of this array is variable. We fix it to `TEE_NUM_PARAMS + 2` to /// simplify the implementation (our OP-TEE Shim supports up to four parameters as well). - params: [OpteeMsgParam; TEE_NUM_PARAMS + 2], + pub params: [OpteeMsgParam; TEE_NUM_PARAMS + 2], } impl OpteeMsgArg { @@ -1329,21 +1341,22 @@ impl OpteeSmcArgs { const NUM_OPTEE_SMC_ARGS: usize = 9; /// Get the function ID of an OP-TEE SMC call - pub fn func_id(&self) -> Result { - OpteeSmcFunction::try_from(self.args[0] & OpteeSmcFunction::MASK).map_err(|_| Errno::EINVAL) + pub fn func_id(&self) -> Result { + OpteeSmcFunction::try_from(self.args[0] & OpteeSmcFunction::MASK) + .map_err(|_| OpteeSmcReturn::EBadCmd) } /// Get the physical address of `OpteeMsgArg`. The secure world is expected to map and copy /// this structure. #[cfg(target_pointer_width = "64")] - pub fn optee_msg_arg_phys_addr(&self) -> Result { + pub fn optee_msg_arg_phys_addr(&self) -> Result { // To avoid potential sign extension and overflow issues, OP-TEE stores the low and // high 32 bits of a 64-bit address in `args[2]` and `args[1]`, respectively. 
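// e.g. (illustration): addr 0x0000_0001_2345_6000 arrives as
// args[1] = 0x0000_0001 and args[2] = 0x2345_6000, and is rebuilt below.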
if self.args[1] & 0xffff_ffff_0000_0000 == 0 && self.args[2] & 0xffff_ffff_0000_0000 == 0 { let addr = (self.args[1] << 32) | self.args[2]; Ok(addr as u64) } else { - Err(Errno::EINVAL) + Err(OpteeSmcReturn::EBadAddr) } } } diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index 22a9c1b54..8c38ce5e5 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -3,10 +3,9 @@ use alloc::{boxed::Box, vec::Vec}; use hashbrown::HashMap; use litebox::mm::linux::PAGE_SIZE; use litebox::platform::RawConstPointer; -use litebox_common_linux::errno::Errno; use litebox_common_optee::{ - OpteeMessageCommand, OpteeMsgArg, OpteeSecureWorldCapabilities, OpteeSmcArgs, OpteeSmcFunction, - OpteeSmcResult, OpteeSmcReturn, + OpteeMessageCommand, OpteeMsgArg, OpteeMsgAttrType, OpteeSecureWorldCapabilities, OpteeSmcArgs, + OpteeSmcFunction, OpteeSmcResult, OpteeSmcReturn, UteeEntryFunc, UteeParamOwned, }; use once_cell::race::OnceBox; @@ -40,7 +39,7 @@ fn page_align_up(len: u64) -> u64 { /// This function handles `OpteeSmcArgs` passed from the normal world (VTL0) via an OP-TEE SMC call. /// # Panics /// Panics if the physical address in `smc` cannot be converted to `usize`. -pub fn handle_optee_smc_args(smc: &mut OpteeSmcArgs) -> Result, Errno> { +pub fn handle_optee_smc_args(smc: &mut OpteeSmcArgs) -> Result, OpteeSmcReturn> { let func_id = smc.func_id()?; match func_id { @@ -51,10 +50,10 @@ pub fn handle_optee_smc_args(smc: &mut OpteeSmcArgs) -> Result::from_usize(msg_arg_addr); let msg_arg = unsafe { ptr.read_at_offset(0) } - .ok_or(Errno::EFAULT)? + .ok_or(OpteeSmcReturn::EBadAddr)? .into_owned(); // let msg_arg = copy_from_remote_memory::(msg_arg_addr)?; - handle_optee_msg_arg(&msg_arg).map(|()| OpteeSmcResult::Generic { + handle_optee_msg_arg(&msg_arg).map(|_| OpteeSmcResult::Generic { status: OpteeSmcReturn::Ok, }) } @@ -95,30 +94,82 @@ pub fn handle_optee_smc_args(smc: &mut OpteeSmcArgs) -> Result Err(Errno::EINVAL), + _ => Err(OpteeSmcReturn::UnknownFunction), } } -pub fn handle_optee_msg_arg(msg_arg: &OpteeMsgArg) -> Result<(), Errno> { +pub fn handle_optee_msg_arg(msg_arg: &OpteeMsgArg) -> Result { match msg_arg.cmd { OpteeMessageCommand::RegisterShm => { if let Ok(tmem) = msg_arg.get_param_tmem(0) { shm_ref_map().register_shm(tmem.buf_ptr, tmem.size, tmem.shm_ref)?; } else { - return Err(Errno::EINVAL); + return Err(OpteeSmcReturn::EBadAddr); } } OpteeMessageCommand::UnregisterShm => { if let Ok(tmem) = msg_arg.get_param_tmem(0) { - shm_ref_map().remove(tmem.shm_ref).ok_or(Errno::ENOENT)?; + shm_ref_map() + .remove(tmem.shm_ref) + .ok_or(OpteeSmcReturn::EBadAddr)?; } else { - return Err(Errno::EINVAL); + return Err(OpteeSmcReturn::EBadCmd); } } - _ => {} + OpteeMessageCommand::OpenSession + | OpteeMessageCommand::InvokeCommand + | OpteeMessageCommand::CloseSession => return handle_ta_request(msg_arg), + _ => { + todo!("Unimplemented OpteeMessageCommand: {:?}", msg_arg.cmd); + } + } + + Ok(*msg_arg) +} + +pub fn handle_ta_request(msg_arg: &OpteeMsgArg) -> Result { + let ta_entry_func: UteeEntryFunc = msg_arg.cmd.try_into()?; + + let shift: usize = if ta_entry_func == UteeEntryFunc::OpenSession { + // TODO: load a TA using its UUID (if not yet loaded) + + 2 // first two params are for TA UUID + } else { + 0 + }; + let num_params = usize::try_from(msg_arg.num_params).unwrap(); + + let ta_cmd_id = msg_arg.func; + let mut ta_params = [const { UteeParamOwned::None }; UteeParamOwned::TEE_NUM_PARAMS]; + + for (i, param) in 
msg_arg.params[shift..shift + num_params].iter().enumerate() { + ta_params[i] = match param.attr_type() { + OpteeMsgAttrType::None => UteeParamOwned::None, + OpteeMsgAttrType::ValueInput => { + let value = msg_arg + .get_param_value(shift + i) + .map_err(|_| OpteeSmcReturn::EBadCmd)?; + UteeParamOwned::ValueInput { + value_a: value.a, + value_b: value.b, + } + } + OpteeMsgAttrType::ValueOutput => UteeParamOwned::ValueOutput { out_address: None }, + OpteeMsgAttrType::ValueInout => { + let value = msg_arg + .get_param_value(shift + i) + .map_err(|_| OpteeSmcReturn::EBadCmd)?; + UteeParamOwned::ValueInout { + value_a: value.a, + value_b: value.b, + out_address: None, + } + } + _ => todo!(), + } } - Ok(()) + Ok(*msg_arg) } #[derive(Clone)] @@ -151,10 +202,10 @@ impl ShmRefMap { } } - pub fn insert(&self, shm_ref: u64, info: ShmRefInfo) -> Result<(), Errno> { + pub fn insert(&self, shm_ref: u64, info: ShmRefInfo) -> Result<(), OpteeSmcReturn> { let mut guard = self.inner.lock(); if guard.contains_key(&shm_ref) { - Err(Errno::EEXIST) + Err(OpteeSmcReturn::ENotAvail) } else { let _ = guard.insert(shm_ref, info); Ok(()) @@ -172,7 +223,12 @@ impl ShmRefMap { guard.get(&shm_ref).cloned() } - pub fn register_shm(&self, phys_addr: u64, size: u64, shm_ref: u64) -> Result<(), Errno> { + pub fn register_shm( + &self, + phys_addr: u64, + size: u64, + shm_ref: u64, + ) -> Result<(), OpteeSmcReturn> { let aligned_phys_addr = page_align_down(phys_addr); let page_offset = phys_addr - aligned_phys_addr; let aligned_size = page_align_up(page_offset + size); @@ -183,13 +239,13 @@ impl ShmRefMap { loop { let cur_ptr = NormalWorldConstPtr::::from_usize(cur_addr); let pages_data = unsafe { cur_ptr.read_at_offset(0) } - .ok_or(Errno::EFAULT)? + .ok_or(OpteeSmcReturn::EBadAddr)? 
.into_owned(); for page in &pages_data.pages_list { if *page == 0 || pages.len() == num_pages { break; } else if !page.is_multiple_of(u64::try_from(PAGE_SIZE).unwrap()) { - return Err(Errno::EINVAL); + return Err(OpteeSmcReturn::EBadAddr); } else { pages.push(*page); } From f4a7103ebee2e48046f1b59497c90fcbc55a9eb2 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Fri, 12 Dec 2025 16:42:28 +0000 Subject: [PATCH 10/52] support tmem and some revision --- litebox_common_optee/src/lib.rs | 60 +++++++++++++++--- litebox_shim_optee/src/msg_handler.rs | 90 +++++++++++++++++++++++---- 2 files changed, 129 insertions(+), 21 deletions(-) diff --git a/litebox_common_optee/src/lib.rs b/litebox_common_optee/src/lib.rs index 34804c730..b58a58bc0 100644 --- a/litebox_common_optee/src/lib.rs +++ b/litebox_common_optee/src/lib.rs @@ -1236,6 +1236,54 @@ impl OpteeMsgParam { pub fn attr_type(&self) -> OpteeMsgAttrType { OpteeMsgAttrType::try_from(self.attr.typ()).unwrap_or(OpteeMsgAttrType::None) } + pub fn get_param_tmem(&self) -> Option { + if matches!( + self.attr.typ(), + OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + | OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT + | OPTEE_MSG_ATTR_TYPE_TMEM_INOUT + ) { + Some(unsafe { self.u.tmem }) + } else { + None + } + } + pub fn get_param_rmem(&self) -> Option { + if matches!( + self.attr.typ(), + OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + | OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT + | OPTEE_MSG_ATTR_TYPE_RMEM_INOUT + ) { + Some(unsafe { self.u.rmem }) + } else { + None + } + } + pub fn get_param_fmem(&self) -> Option { + if matches!( + self.attr.typ(), + OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + | OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT + | OPTEE_MSG_ATTR_TYPE_RMEM_INOUT + ) { + Some(unsafe { self.u.fmem }) + } else { + None + } + } + pub fn get_param_value(&self) -> Option { + if matches!( + self.attr.typ(), + OPTEE_MSG_ATTR_TYPE_VALUE_INPUT + | OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT + | OPTEE_MSG_ATTR_TYPE_VALUE_INOUT + ) { + Some(unsafe { self.u.value }) + } else { + None + } + } } /// `optee_msg_arg` from `optee_os/core/include/optee_msg.h` @@ -1269,36 +1317,32 @@ pub struct OpteeMsgArg { } impl OpteeMsgArg { - #[cfg(target_pointer_width = "64")] pub fn get_param_tmem(&self, index: usize) -> Result { if index >= self.params.len() || index >= self.num_params as usize { Err(Errno::EINVAL) } else { - Ok(unsafe { self.params[index].u.tmem }) + Ok(self.params[index].get_param_tmem().ok_or(Errno::EINVAL)?) } } - #[cfg(target_pointer_width = "64")] pub fn get_param_rmem(&self, index: usize) -> Result { if index >= self.params.len() || index >= self.num_params as usize { Err(Errno::EINVAL) } else { - Ok(unsafe { self.params[index].u.rmem }) + Ok(self.params[index].get_param_rmem().ok_or(Errno::EINVAL)?) } } - #[cfg(target_pointer_width = "64")] pub fn get_param_fmem(&self, index: usize) -> Result { if index >= self.params.len() || index >= self.num_params as usize { Err(Errno::EINVAL) } else { - Ok(unsafe { self.params[index].u.fmem }) + Ok(self.params[index].get_param_fmem().ok_or(Errno::EINVAL)?) } } - #[cfg(target_pointer_width = "64")] pub fn get_param_value(&self, index: usize) -> Result { if index >= self.params.len() || index >= self.num_params as usize { Err(Errno::EINVAL) } else { - Ok(unsafe { self.params[index].u.value }) + Ok(self.params[index].get_param_value().ok_or(Errno::EINVAL)?) 
} } } diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index 8c38ce5e5..e3d6c32cb 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -4,8 +4,9 @@ use hashbrown::HashMap; use litebox::mm::linux::PAGE_SIZE; use litebox::platform::RawConstPointer; use litebox_common_optee::{ - OpteeMessageCommand, OpteeMsgArg, OpteeMsgAttrType, OpteeSecureWorldCapabilities, OpteeSmcArgs, - OpteeSmcFunction, OpteeSmcResult, OpteeSmcReturn, UteeEntryFunc, UteeParamOwned, + OpteeMessageCommand, OpteeMsgArg, OpteeMsgAttrType, OpteeMsgParamRmem, OpteeMsgParamTmem, + OpteeSecureWorldCapabilities, OpteeSmcArgs, OpteeSmcFunction, OpteeSmcResult, OpteeSmcReturn, + UteeEntryFunc, UteeParamOwned, }; use once_cell::race::OnceBox; @@ -38,7 +39,7 @@ fn page_align_up(len: u64) -> u64 { /// This function handles `OpteeSmcArgs` passed from the normal world (VTL0) via an OP-TEE SMC call. /// # Panics -/// Panics if the physical address in `smc` cannot be converted to `usize`. +/// Panics if the normal world physical address in `smc` cannot be converted to `usize`. pub fn handle_optee_smc_args(smc: &mut OpteeSmcArgs) -> Result, OpteeSmcReturn> { let func_id = smc.func_id()?; @@ -98,6 +99,7 @@ pub fn handle_optee_smc_args(smc: &mut OpteeSmcArgs) -> Result Result { match msg_arg.cmd { OpteeMessageCommand::RegisterShm => { @@ -127,28 +129,31 @@ pub fn handle_optee_msg_arg(msg_arg: &OpteeMsgArg) -> Result Result { let ta_entry_func: UteeEntryFunc = msg_arg.cmd.try_into()?; - let shift: usize = if ta_entry_func == UteeEntryFunc::OpenSession { + let skip: usize = if ta_entry_func == UteeEntryFunc::OpenSession { // TODO: load a TA using its UUID (if not yet loaded) 2 // first two params are for TA UUID } else { 0 }; - let num_params = usize::try_from(msg_arg.num_params).unwrap(); + let num_params: usize = msg_arg.num_params.try_into().unwrap(); let ta_cmd_id = msg_arg.func; let mut ta_params = [const { UteeParamOwned::None }; UteeParamOwned::TEE_NUM_PARAMS]; - for (i, param) in msg_arg.params[shift..shift + num_params].iter().enumerate() { + // TODO: handle `out_address` + for (i, param) in msg_arg.params[skip..skip + num_params].iter().enumerate() { ta_params[i] = match param.attr_type() { OpteeMsgAttrType::None => UteeParamOwned::None, OpteeMsgAttrType::ValueInput => { - let value = msg_arg - .get_param_value(shift + i) - .map_err(|_| OpteeSmcReturn::EBadCmd)?; + let value = param.get_param_value().ok_or(OpteeSmcReturn::EBadCmd)?; UteeParamOwned::ValueInput { value_a: value.a, value_b: value.b, @@ -156,16 +161,56 @@ pub fn handle_ta_request(msg_arg: &OpteeMsgArg) -> Result UteeParamOwned::ValueOutput { out_address: None }, OpteeMsgAttrType::ValueInout => { - let value = msg_arg - .get_param_value(shift + i) - .map_err(|_| OpteeSmcReturn::EBadCmd)?; + let value = param.get_param_value().ok_or(OpteeSmcReturn::EBadCmd)?; UteeParamOwned::ValueInout { value_a: value.a, value_b: value.b, out_address: None, } } - _ => todo!(), + OpteeMsgAttrType::TmemInput => { + let tmem = param.get_param_tmem().ok_or(OpteeSmcReturn::EBadCmd)?; + if let Some(phys_addr) = get_shm_phys_addr_from_optee_msg_param_tmem(tmem) { + let ptr = NormalWorldConstPtr::::from_usize(phys_addr); + let data_size: usize = tmem.size.try_into().unwrap(); + let slice = unsafe { ptr.to_cow_slice(data_size) } + .ok_or(OpteeSmcReturn::EBadAddr)? 
+ .into_owned(); + UteeParamOwned::MemrefInput { data: slice.into() } + } else { + UteeParamOwned::None + } + } + OpteeMsgAttrType::TmemOutput => { + let tmem = param.get_param_tmem().ok_or(OpteeSmcReturn::EBadCmd)?; + if let Some(phys_addr) = get_shm_phys_addr_from_optee_msg_param_tmem(tmem) { + let buffer_size: usize = tmem.size.try_into().unwrap(); + UteeParamOwned::MemrefOutput { + buffer_size, + out_addresses: Some(Box::new([phys_addr])), + } + } else { + UteeParamOwned::None + } + } + OpteeMsgAttrType::TmemInout => { + let tmem = param.get_param_tmem().ok_or(OpteeSmcReturn::EBadCmd)?; + if let Some(phys_addr) = get_shm_phys_addr_from_optee_msg_param_tmem(tmem) { + let ptr = NormalWorldConstPtr::::from_usize(phys_addr); + let buffer_size: usize = tmem.size.try_into().unwrap(); + let slice = unsafe { ptr.to_cow_slice(buffer_size) } + .ok_or(OpteeSmcReturn::EBadAddr)? + .into_owned(); + UteeParamOwned::MemrefInout { + data: slice.into(), + buffer_size, + out_addresses: Some(Box::new([phys_addr])), + } + } else { + UteeParamOwned::None + } + } + _ => todo!("handle OpteeMsgParamRmem"), } } @@ -273,3 +318,22 @@ fn shm_ref_map() -> &'static ShmRefMap { static SHM_REF_MAP: OnceBox = OnceBox::new(); SHM_REF_MAP.get_or_init(|| Box::new(ShmRefMap::new())) } + +/// Get a normal world physical address of OP-TEE shared memory from `OpteeMsgParamTmem`. +fn get_shm_phys_addr_from_optee_msg_param_tmem(tmem: OpteeMsgParamTmem) -> Option { + if tmem.buf_ptr == 0 || tmem.size == 0 { + None + } else { + // TODO: validate this address + Some(tmem.buf_ptr.try_into().unwrap()) + } +} + +/// Get a list of the normal world physical addresses of OP-TEE shared memory from `OpteeMsgParamRmem`. +/// All addresses must be page-aligned except possibly the first one. +/// These addresses are virtually contiguous within the normal world, but not necessarily +/// physically contiguous. +#[expect(unused)] +fn get_shm_phys_addrs_from_optee_msg_param_rmem(_rmem: OpteeMsgParamTmem) -> Option> { + None +} From 4f814dd0b901f8dab0d7eeadadbeac7a3ddb0e5c Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Fri, 12 Dec 2025 17:59:51 +0000 Subject: [PATCH 11/52] fix tmem and rmem handling --- litebox_shim_optee/src/msg_handler.rs | 121 +++++++++++++++++++------- 1 file changed, 88 insertions(+), 33 deletions(-) diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index e3d6c32cb..303da7968 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -168,43 +168,89 @@ pub fn handle_ta_request(msg_arg: &OpteeMsgArg) -> Result { - let tmem = param.get_param_tmem().ok_or(OpteeSmcReturn::EBadCmd)?; - if let Some(phys_addr) = get_shm_phys_addr_from_optee_msg_param_tmem(tmem) { - let ptr = NormalWorldConstPtr::::from_usize(phys_addr); - let data_size: usize = tmem.size.try_into().unwrap(); - let slice = unsafe { ptr.to_cow_slice(data_size) } - .ok_or(OpteeSmcReturn::EBadAddr)? 
- .into_owned(); + OpteeMsgAttrType::TmemInput | OpteeMsgAttrType::RmemInput => { + if let (Ok(phys_addrs), data_size) = { + match param.attr_type() { + OpteeMsgAttrType::TmemInput => { + let tmem = param.get_param_tmem().ok_or(OpteeSmcReturn::EBadCmd)?; + ( + get_shm_phys_addrs_from_optee_msg_param_tmem(tmem), + usize::try_from(tmem.size).unwrap(), + ) + } + OpteeMsgAttrType::RmemInput => { + let rmem = param.get_param_rmem().ok_or(OpteeSmcReturn::EBadCmd)?; + ( + get_shm_phys_addrs_from_optee_msg_param_rmem(rmem), + usize::try_from(rmem.size).unwrap(), + ) + } + _ => unreachable!(), + } + } { + // TODO: loop to handle scatter-gather list + // let ptr = NormalWorldConstPtr::::from_usize(phys_addr); + let slice = alloc::vec![0u8; data_size]; UteeParamOwned::MemrefInput { data: slice.into() } } else { UteeParamOwned::None } } - OpteeMsgAttrType::TmemOutput => { - let tmem = param.get_param_tmem().ok_or(OpteeSmcReturn::EBadCmd)?; - if let Some(phys_addr) = get_shm_phys_addr_from_optee_msg_param_tmem(tmem) { - let buffer_size: usize = tmem.size.try_into().unwrap(); + OpteeMsgAttrType::TmemOutput | OpteeMsgAttrType::RmemOutput => { + if let (Ok(phys_addrs), buffer_size) = { + match param.attr_type() { + OpteeMsgAttrType::TmemInput => { + let tmem = param.get_param_tmem().ok_or(OpteeSmcReturn::EBadCmd)?; + ( + get_shm_phys_addrs_from_optee_msg_param_tmem(tmem), + usize::try_from(tmem.size).unwrap(), + ) + } + OpteeMsgAttrType::RmemInput => { + let rmem = param.get_param_rmem().ok_or(OpteeSmcReturn::EBadCmd)?; + ( + get_shm_phys_addrs_from_optee_msg_param_rmem(rmem), + usize::try_from(rmem.size).unwrap(), + ) + } + _ => unreachable!(), + } + } { UteeParamOwned::MemrefOutput { buffer_size, - out_addresses: Some(Box::new([phys_addr])), + out_addresses: Some(phys_addrs), } } else { UteeParamOwned::None } } - OpteeMsgAttrType::TmemInout => { - let tmem = param.get_param_tmem().ok_or(OpteeSmcReturn::EBadCmd)?; - if let Some(phys_addr) = get_shm_phys_addr_from_optee_msg_param_tmem(tmem) { - let ptr = NormalWorldConstPtr::::from_usize(phys_addr); - let buffer_size: usize = tmem.size.try_into().unwrap(); - let slice = unsafe { ptr.to_cow_slice(buffer_size) } - .ok_or(OpteeSmcReturn::EBadAddr)? - .into_owned(); + OpteeMsgAttrType::TmemInout | OpteeMsgAttrType::RmemInout => { + if let (Ok(phys_addrs), buffer_size) = { + match param.attr_type() { + OpteeMsgAttrType::TmemInput => { + let tmem = param.get_param_tmem().ok_or(OpteeSmcReturn::EBadCmd)?; + ( + get_shm_phys_addrs_from_optee_msg_param_tmem(tmem), + usize::try_from(tmem.size).unwrap(), + ) + } + OpteeMsgAttrType::RmemInput => { + let rmem = param.get_param_rmem().ok_or(OpteeSmcReturn::EBadCmd)?; + ( + get_shm_phys_addrs_from_optee_msg_param_rmem(rmem), + usize::try_from(rmem.size).unwrap(), + ) + } + _ => unreachable!(), + } + } { + // TODO: loop to handle scatter-gather list + // let ptr = NormalWorldConstPtr::::from_usize(phys_addr); + let slice = alloc::vec![0u8; buffer_size]; UteeParamOwned::MemrefInout { data: slice.into(), buffer_size, - out_addresses: Some(Box::new([phys_addr])), + out_addresses: Some(phys_addrs), } } else { UteeParamOwned::None @@ -223,6 +269,7 @@ struct ShmRefInfo { pub page_offset: u64, } +/// Scatter-gather list of OP-TEE shared physical pages in VTL0. 
#[derive(Clone, Copy)] #[repr(C)] struct ShmRefPagesData { @@ -262,7 +309,6 @@ impl ShmRefMap { guard.remove(&shm_ref) } - #[expect(unused)] pub fn get(&self, shm_ref: u64) -> Option { let guard = self.inner.lock(); guard.get(&shm_ref).cloned() @@ -320,20 +366,29 @@ fn shm_ref_map() -> &'static ShmRefMap { } /// Get a normal world physical address of OP-TEE shared memory from `OpteeMsgParamTmem`. -fn get_shm_phys_addr_from_optee_msg_param_tmem(tmem: OpteeMsgParamTmem) -> Option { - if tmem.buf_ptr == 0 || tmem.size == 0 { - None - } else { - // TODO: validate this address - Some(tmem.buf_ptr.try_into().unwrap()) - } +/// Note that we use this function for handing TA requests and in that context there is no +/// difference between `OpteeMsgParamTmem` and `OpteeMsgParamRmem`. +/// `OpteeMsgParamTmem` is matter for the registration of shared memory regions. +fn get_shm_phys_addrs_from_optee_msg_param_tmem( + tmem: OpteeMsgParamTmem, +) -> Result, OpteeSmcReturn> { + let rmem = OpteeMsgParamRmem { + offs: tmem.buf_ptr, + size: tmem.size, + shm_ref: tmem.shm_ref, + }; + get_shm_phys_addrs_from_optee_msg_param_rmem(rmem) } /// Get a list of the normal world physical addresses of OP-TEE shared memory from `OpteeMsgParamRmem`. /// All addresses must be page-aligned except possibly the first one. /// These addresses are virtually contiguous within the normal world, but not necessarily /// physically contiguous. -#[expect(unused)] -fn get_shm_phys_addrs_from_optee_msg_param_rmem(_rmem: OpteeMsgParamTmem) -> Option> { - None +fn get_shm_phys_addrs_from_optee_msg_param_rmem( + rmem: OpteeMsgParamRmem, +) -> Result, OpteeSmcReturn> { + let Some(shm_ref_info) = shm_ref_map().get(rmem.shm_ref) else { + return Err(OpteeSmcReturn::EBadAddr); + }; + Ok(Box::new([])) } From 03439581aea12a9fd8249c0451df7ddab4b8c1e1 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Fri, 12 Dec 2025 18:06:14 +0000 Subject: [PATCH 12/52] separate out handle_ta_request --- litebox_shim_optee/src/msg_handler.rs | 170 +------------------------- 1 file changed, 6 insertions(+), 164 deletions(-) diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index 303da7968..b19f9b55c 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -4,9 +4,8 @@ use hashbrown::HashMap; use litebox::mm::linux::PAGE_SIZE; use litebox::platform::RawConstPointer; use litebox_common_optee::{ - OpteeMessageCommand, OpteeMsgArg, OpteeMsgAttrType, OpteeMsgParamRmem, OpteeMsgParamTmem, - OpteeSecureWorldCapabilities, OpteeSmcArgs, OpteeSmcFunction, OpteeSmcResult, OpteeSmcReturn, - UteeEntryFunc, UteeParamOwned, + OpteeMessageCommand, OpteeMsgArg, OpteeSecureWorldCapabilities, OpteeSmcArgs, OpteeSmcFunction, + OpteeSmcResult, OpteeSmcReturn, }; use once_cell::race::OnceBox; @@ -26,13 +25,11 @@ const MAX_NOTIF_VALUE: usize = 0; const NUM_RPC_PARMS: usize = 4; #[inline] -#[cfg(target_pointer_width = "64")] fn page_align_down(address: u64) -> u64 { address & !(PAGE_SIZE as u64 - 1) } #[inline] -#[cfg(target_pointer_width = "64")] fn page_align_up(len: u64) -> u64 { len.next_multiple_of(PAGE_SIZE as u64) } @@ -130,139 +127,11 @@ pub fn handle_optee_msg_arg(msg_arg: &OpteeMsgArg) -> Result Result { - let ta_entry_func: UteeEntryFunc = msg_arg.cmd.try_into()?; - - let skip: usize = if ta_entry_func == UteeEntryFunc::OpenSession { - // TODO: load a TA using its UUID (if not yet loaded) - - 2 // first two params are for TA UUID - } else { - 0 - }; - let num_params: usize = 
msg_arg.num_params.try_into().unwrap(); - - let ta_cmd_id = msg_arg.func; - let mut ta_params = [const { UteeParamOwned::None }; UteeParamOwned::TEE_NUM_PARAMS]; - - // TODO: handle `out_address` - for (i, param) in msg_arg.params[skip..skip + num_params].iter().enumerate() { - ta_params[i] = match param.attr_type() { - OpteeMsgAttrType::None => UteeParamOwned::None, - OpteeMsgAttrType::ValueInput => { - let value = param.get_param_value().ok_or(OpteeSmcReturn::EBadCmd)?; - UteeParamOwned::ValueInput { - value_a: value.a, - value_b: value.b, - } - } - OpteeMsgAttrType::ValueOutput => UteeParamOwned::ValueOutput { out_address: None }, - OpteeMsgAttrType::ValueInout => { - let value = param.get_param_value().ok_or(OpteeSmcReturn::EBadCmd)?; - UteeParamOwned::ValueInout { - value_a: value.a, - value_b: value.b, - out_address: None, - } - } - OpteeMsgAttrType::TmemInput | OpteeMsgAttrType::RmemInput => { - if let (Ok(phys_addrs), data_size) = { - match param.attr_type() { - OpteeMsgAttrType::TmemInput => { - let tmem = param.get_param_tmem().ok_or(OpteeSmcReturn::EBadCmd)?; - ( - get_shm_phys_addrs_from_optee_msg_param_tmem(tmem), - usize::try_from(tmem.size).unwrap(), - ) - } - OpteeMsgAttrType::RmemInput => { - let rmem = param.get_param_rmem().ok_or(OpteeSmcReturn::EBadCmd)?; - ( - get_shm_phys_addrs_from_optee_msg_param_rmem(rmem), - usize::try_from(rmem.size).unwrap(), - ) - } - _ => unreachable!(), - } - } { - // TODO: loop to handle scatter-gather list - // let ptr = NormalWorldConstPtr::::from_usize(phys_addr); - let slice = alloc::vec![0u8; data_size]; - UteeParamOwned::MemrefInput { data: slice.into() } - } else { - UteeParamOwned::None - } - } - OpteeMsgAttrType::TmemOutput | OpteeMsgAttrType::RmemOutput => { - if let (Ok(phys_addrs), buffer_size) = { - match param.attr_type() { - OpteeMsgAttrType::TmemInput => { - let tmem = param.get_param_tmem().ok_or(OpteeSmcReturn::EBadCmd)?; - ( - get_shm_phys_addrs_from_optee_msg_param_tmem(tmem), - usize::try_from(tmem.size).unwrap(), - ) - } - OpteeMsgAttrType::RmemInput => { - let rmem = param.get_param_rmem().ok_or(OpteeSmcReturn::EBadCmd)?; - ( - get_shm_phys_addrs_from_optee_msg_param_rmem(rmem), - usize::try_from(rmem.size).unwrap(), - ) - } - _ => unreachable!(), - } - } { - UteeParamOwned::MemrefOutput { - buffer_size, - out_addresses: Some(phys_addrs), - } - } else { - UteeParamOwned::None - } - } - OpteeMsgAttrType::TmemInout | OpteeMsgAttrType::RmemInout => { - if let (Ok(phys_addrs), buffer_size) = { - match param.attr_type() { - OpteeMsgAttrType::TmemInput => { - let tmem = param.get_param_tmem().ok_or(OpteeSmcReturn::EBadCmd)?; - ( - get_shm_phys_addrs_from_optee_msg_param_tmem(tmem), - usize::try_from(tmem.size).unwrap(), - ) - } - OpteeMsgAttrType::RmemInput => { - let rmem = param.get_param_rmem().ok_or(OpteeSmcReturn::EBadCmd)?; - ( - get_shm_phys_addrs_from_optee_msg_param_rmem(rmem), - usize::try_from(rmem.size).unwrap(), - ) - } - _ => unreachable!(), - } - } { - // TODO: loop to handle scatter-gather list - // let ptr = NormalWorldConstPtr::::from_usize(phys_addr); - let slice = alloc::vec![0u8; buffer_size]; - UteeParamOwned::MemrefInout { - data: slice.into(), - buffer_size, - out_addresses: Some(phys_addrs), - } - } else { - UteeParamOwned::None - } - } - _ => todo!("handle OpteeMsgParamRmem"), - } - } - - Ok(*msg_arg) +pub fn handle_ta_request(_msg_arg: &OpteeMsgArg) -> Result { + todo!() } +#[expect(unused)] #[derive(Clone)] struct ShmRefInfo { pub pages: Box<[u64]>, @@ -309,6 +178,7 @@ impl ShmRefMap { 
guard.remove(&shm_ref)
     }
 
+    #[expect(unused)]
     pub fn get(&self, shm_ref: u64) -> Option<ShmRefInfo> {
         let guard = self.inner.lock();
         guard.get(&shm_ref).cloned()
@@ -364,31 +234,3 @@ fn shm_ref_map() -> &'static ShmRefMap {
     static SHM_REF_MAP: OnceBox<ShmRefMap> = OnceBox::new();
     SHM_REF_MAP.get_or_init(|| Box::new(ShmRefMap::new()))
 }
-
-/// Get a normal world physical address of OP-TEE shared memory from `OpteeMsgParamTmem`.
-/// Note that we use this function for handing TA requests and in that context there is no
-/// difference between `OpteeMsgParamTmem` and `OpteeMsgParamRmem`.
-/// `OpteeMsgParamTmem` is matter for the registration of shared memory regions.
-fn get_shm_phys_addrs_from_optee_msg_param_tmem(
-    tmem: OpteeMsgParamTmem,
-) -> Result<Box<[u64]>, OpteeSmcReturn> {
-    let rmem = OpteeMsgParamRmem {
-        offs: tmem.buf_ptr,
-        size: tmem.size,
-        shm_ref: tmem.shm_ref,
-    };
-    get_shm_phys_addrs_from_optee_msg_param_rmem(rmem)
-}
-
-/// Get a list of the normal world physical addresses of OP-TEE shared memory from `OpteeMsgParamRmem`.
-/// All addresses must be page-aligned except possibly the first one.
-/// These addresses are virtually contiguous within the normal world, but not necessarily
-/// physically contiguous.
-fn get_shm_phys_addrs_from_optee_msg_param_rmem(
-    rmem: OpteeMsgParamRmem,
-) -> Result<Box<[u64]>, OpteeSmcReturn> {
-    let Some(shm_ref_info) = shm_ref_map().get(rmem.shm_ref) else {
-        return Err(OpteeSmcReturn::EBadAddr);
-    };
-    Ok(Box::new([]))
-}

From 0b2f39be9bf9b8708d91966d8ea1eca48387116a Mon Sep 17 00:00:00 2001
From: Sangho Lee
Date: Fri, 12 Dec 2025 18:34:34 +0000
Subject: [PATCH 13/52] get os uuid

---
 litebox_common_optee/src/lib.rs       |  2 ++
 litebox_shim_optee/src/msg_handler.rs | 23 +++++++++++++++++++----
 2 files changed, 21 insertions(+), 4 deletions(-)

diff --git a/litebox_common_optee/src/lib.rs b/litebox_common_optee/src/lib.rs
index b58a58bc0..3c0e9500a 100644
--- a/litebox_common_optee/src/lib.rs
+++ b/litebox_common_optee/src/lib.rs
@@ -1407,6 +1407,7 @@ impl OpteeSmcArgs {
 /// `OPTEE_SMC_FUNCID_*` from `core/arch/arm/include/sm/optee_smc.h`
 /// TODO: Add stuffs based on the OP-TEE driver that LVBS is using.
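// Illustrative sketch: the raw function ID taken from the SMC registers maps onto
// `OpteeSmcFunction` through the derived `TryFromPrimitive` conversion, so unknown
// IDs can be rejected up front (`parse_func_id` is a hypothetical helper):
//
//     fn parse_func_id(raw: usize) -> Result<OpteeSmcFunction, OpteeSmcReturn> {
//         OpteeSmcFunction::try_from(raw).map_err(|_| OpteeSmcReturn::UnknownFunction)
//     }
//
//     // With the constant added below, raw ID 0x0 resolves to `GetOsUuid`.
//     assert!(matches!(parse_func_id(0x0), Ok(OpteeSmcFunction::GetOsUuid)));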
+const OPTEE_SMC_FUNCID_GET_OS_UUID: usize = 0x0; const OPTEE_SMC_FUNCID_GET_OS_REVISION: usize = 0x1; const OPTEE_SMC_FUNCID_CALL_WITH_ARG: usize = 0x4; const OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES: usize = 0x9; @@ -1420,6 +1421,7 @@ const OPTEE_SMC_FUNCID_CALLS_REVISION: usize = 0xff03; #[derive(PartialEq, TryFromPrimitive)] #[repr(usize)] pub enum OpteeSmcFunction { + GetOsUuid = OPTEE_SMC_FUNCID_GET_OS_UUID, GetOsRevision = OPTEE_SMC_FUNCID_GET_OS_REVISION, CallWithArg = OPTEE_SMC_FUNCID_CALL_WITH_ARG, ExchangeCapabilities = OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES, diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index b19f9b55c..8a79f7e4c 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -9,17 +9,26 @@ use litebox_common_optee::{ }; use once_cell::race::OnceBox; -// TODO: Replace these with version and build info +// OP-TEE version and build info (2.0) +// TODO: Consider repacing it with our own version info const OPTEE_MSG_REVISION_MAJOR: usize = 2; const OPTEE_MSG_REVISION_MINOR: usize = 0; const OPTEE_MSG_BUILD_ID: usize = 0; -// TODO: Replace this with an actual UID +// This UID is from OP-TEE OS +// TODO: Consider replacing it with our own UID const OPTEE_MSG_UID_0: u32 = 0x384f_b3e0; const OPTEE_MSG_UID_1: u32 = 0xe7f8_11e3; const OPTEE_MSG_UID_2: u32 = 0xaf63_0002; const OPTEE_MSG_UID_3: u32 = 0xa5d5_c51b; +// This is the UUID of OP-TEE Trusted OS +// TODO: Consider replacing it with our own UUID +const OPTEE_MSG_OS_OPTEE_UUID_0: u32 = 0x486178e0; +const OPTEE_MSG_OS_OPTEE_UUID_1: u32 = 0xe7f811e3; +const OPTEE_MSG_OS_OPTEE_UUID_2: u32 = 0xbc5e0002; +const OPTEE_MSG_OS_OPTEE_UUID_3: u32 = 0xa5d5c51b; + // We do not support notification for now const MAX_NOTIF_VALUE: usize = 0; const NUM_RPC_PARMS: usize = 4; @@ -39,7 +48,6 @@ fn page_align_up(len: u64) -> u64 { /// Panics if the normal world physical address in `smc` cannot be converted to `usize`. pub fn handle_optee_smc_args(smc: &mut OpteeSmcArgs) -> Result, OpteeSmcReturn> { let func_id = smc.func_id()?; - match func_id { OpteeSmcFunction::CallWithArg | OpteeSmcFunction::CallWithRpcArg @@ -50,7 +58,6 @@ pub fn handle_optee_smc_args(smc: &mut OpteeSmcArgs) -> Result(msg_arg_addr)?; handle_optee_msg_arg(&msg_arg).map(|_| OpteeSmcResult::Generic { status: OpteeSmcReturn::Ok, }) @@ -75,6 +82,14 @@ pub fn handle_optee_smc_args(smc: &mut OpteeSmcArgs) -> Result Ok(OpteeSmcResult::Uuid { + data: &[ + OPTEE_MSG_OS_OPTEE_UUID_0, + OPTEE_MSG_OS_OPTEE_UUID_1, + OPTEE_MSG_OS_OPTEE_UUID_2, + OPTEE_MSG_OS_OPTEE_UUID_3, + ], + }), OpteeSmcFunction::CallsUid => Ok(OpteeSmcResult::Uuid { data: &[ OPTEE_MSG_UID_0, From c79d8f39dab820a98b0477c6576bb50ef06bf007 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Fri, 12 Dec 2025 19:09:25 +0000 Subject: [PATCH 14/52] comment --- litebox_shim_optee/src/msg_handler.rs | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index 8a79f7e4c..9f650984b 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -1,3 +1,16 @@ +//! OP-TEE's message passing is a bit complex because it involves with multiple actors +//! (normal world: client app and driver; secure world: OP-TEE OS and TAs), +//! consists multiple layers, and relies on shared memory references (i.e., no serialization). +//! +//! 
Since the normal world is out of LiteBox's scope, the OP-TEE shim starts with handling +//! an OP-TEE SMC call from the normal-world OP-TEE driver which consists of +//! up to nine register values. By checking the SMC function ID, the shim determines whether +//! it is for passing an OP-TEE message or a pure SMC function call (e.g., get OP-TEE OS +//! version). If it is for passing an OP-TEE message/command, the shim accesses a normal world +//! physical address containing `OpteeMsgArg` structure (the address is contained in +//! the SMC call arguments). This `OpteeMsgArg` structure may contain references to normal +//! world physical addresses to exchange a large amount of data. Also, a certain OP-TEE +//! message/command does not involve with any TA (e.g., register shared memory). use crate::ptr::NormalWorldConstPtr; use alloc::{boxed::Box, vec::Vec}; use hashbrown::HashMap; @@ -10,7 +23,7 @@ use litebox_common_optee::{ use once_cell::race::OnceBox; // OP-TEE version and build info (2.0) -// TODO: Consider repacing it with our own version info +// TODO: Consider replacing it with our own version info const OPTEE_MSG_REVISION_MAJOR: usize = 2; const OPTEE_MSG_REVISION_MINOR: usize = 0; const OPTEE_MSG_BUILD_ID: usize = 0; From 102cfa90b263c572d01db5b94d181dc0823a77fc Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Fri, 12 Dec 2025 19:26:33 +0000 Subject: [PATCH 15/52] replace Errno with OpteeSmcReturn --- litebox_common_optee/src/lib.rs | 32 +++++++++++++++--------- litebox_shim_optee/src/msg_handler.rs | 35 ++++++++++++--------------- 2 files changed, 36 insertions(+), 31 deletions(-) diff --git a/litebox_common_optee/src/lib.rs b/litebox_common_optee/src/lib.rs index 3c0e9500a..29712db2e 100644 --- a/litebox_common_optee/src/lib.rs +++ b/litebox_common_optee/src/lib.rs @@ -1317,32 +1317,40 @@ pub struct OpteeMsgArg { } impl OpteeMsgArg { - pub fn get_param_tmem(&self, index: usize) -> Result { + pub fn get_param_tmem(&self, index: usize) -> Result { if index >= self.params.len() || index >= self.num_params as usize { - Err(Errno::EINVAL) + Err(OpteeSmcReturn::ENotAvail) } else { - Ok(self.params[index].get_param_tmem().ok_or(Errno::EINVAL)?) + Ok(self.params[index] + .get_param_tmem() + .ok_or(OpteeSmcReturn::EBadCmd)?) } } - pub fn get_param_rmem(&self, index: usize) -> Result { + pub fn get_param_rmem(&self, index: usize) -> Result { if index >= self.params.len() || index >= self.num_params as usize { - Err(Errno::EINVAL) + Err(OpteeSmcReturn::ENotAvail) } else { - Ok(self.params[index].get_param_rmem().ok_or(Errno::EINVAL)?) + Ok(self.params[index] + .get_param_rmem() + .ok_or(OpteeSmcReturn::EBadCmd)?) } } - pub fn get_param_fmem(&self, index: usize) -> Result { + pub fn get_param_fmem(&self, index: usize) -> Result { if index >= self.params.len() || index >= self.num_params as usize { - Err(Errno::EINVAL) + Err(OpteeSmcReturn::ENotAvail) } else { - Ok(self.params[index].get_param_fmem().ok_or(Errno::EINVAL)?) + Ok(self.params[index] + .get_param_fmem() + .ok_or(OpteeSmcReturn::EBadCmd)?) } } - pub fn get_param_value(&self, index: usize) -> Result { + pub fn get_param_value(&self, index: usize) -> Result { if index >= self.params.len() || index >= self.num_params as usize { - Err(Errno::EINVAL) + Err(OpteeSmcReturn::ENotAvail) } else { - Ok(self.params[index].get_param_value().ok_or(Errno::EINVAL)?) + Ok(self.params[index] + .get_param_value() + .ok_or(OpteeSmcReturn::EBadCmd)?) 
} } } diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index 9f650984b..e66d36fb3 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -9,8 +9,9 @@ //! version). If it is for passing an OP-TEE message/command, the shim accesses a normal world //! physical address containing `OpteeMsgArg` structure (the address is contained in //! the SMC call arguments). This `OpteeMsgArg` structure may contain references to normal -//! world physical addresses to exchange a large amount of data. Also, a certain OP-TEE -//! message/command does not involve with any TA (e.g., register shared memory). +//! world physical addresses to exchange a large amount of data. Also, like the OP-TEE +//! SMC call, a certain OP-TEE message/command does not involve with any TA (e.g., register +//! shared memory). use crate::ptr::NormalWorldConstPtr; use alloc::{boxed::Box, vec::Vec}; use hashbrown::HashMap; @@ -88,7 +89,7 @@ pub fn handle_optee_smc_args(smc: &mut OpteeSmcArgs) -> Result { - // We do not support this feature + // Currently, we do not support this feature. Ok(OpteeSmcResult::DisableShmCache { status: OpteeSmcReturn::ENotAvail, shm_upper32: 0, @@ -128,20 +129,14 @@ pub fn handle_optee_smc_args(smc: &mut OpteeSmcArgs) -> Result Result { match msg_arg.cmd { OpteeMessageCommand::RegisterShm => { - if let Ok(tmem) = msg_arg.get_param_tmem(0) { - shm_ref_map().register_shm(tmem.buf_ptr, tmem.size, tmem.shm_ref)?; - } else { - return Err(OpteeSmcReturn::EBadAddr); - } + let tmem = msg_arg.get_param_tmem(0)?; + shm_ref_map().register_shm(tmem.buf_ptr, tmem.size, tmem.shm_ref)?; } OpteeMessageCommand::UnregisterShm => { - if let Ok(tmem) = msg_arg.get_param_tmem(0) { - shm_ref_map() - .remove(tmem.shm_ref) - .ok_or(OpteeSmcReturn::EBadAddr)?; - } else { - return Err(OpteeSmcReturn::EBadCmd); - } + let tmem = msg_arg.get_param_tmem(0)?; + shm_ref_map() + .remove(tmem.shm_ref) + .ok_or(OpteeSmcReturn::EBadAddr)?; } OpteeMessageCommand::OpenSession | OpteeMessageCommand::InvokeCommand @@ -166,15 +161,17 @@ struct ShmRefInfo { pub page_offset: u64, } -/// Scatter-gather list of OP-TEE shared physical pages in VTL0. +/// A scatter-gather list of OP-TEE shared physical pages in VTL0. #[derive(Clone, Copy)] #[repr(C)] struct ShmRefPagesData { - pub pages_list: [u64; PAGELIST_ENTRIES_PER_PAGE], + pub pages_list: [u64; Self::PAGELIST_ENTRIES_PER_PAGE], pub next_page_data: u64, } -const PAGELIST_ENTRIES_PER_PAGE: usize = - PAGE_SIZE / core::mem::size_of::() - core::mem::size_of::(); +impl ShmRefPagesData { + const PAGELIST_ENTRIES_PER_PAGE: usize = + PAGE_SIZE / core::mem::size_of::() - core::mem::size_of::(); +} /// Maintain the information of OP-TEE shared memory in VTL0 referenced by `shm_ref`. 
/// This data structure is for registering shared memory regions before they are From ea8335f8bfe8001e167cdaa285d5828901de9677 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Sun, 14 Dec 2025 05:26:28 +0000 Subject: [PATCH 16/52] add comments --- litebox_common_optee/src/lib.rs | 3 +++ litebox_shim_optee/src/msg_handler.rs | 14 +++++++++++++- 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/litebox_common_optee/src/lib.rs b/litebox_common_optee/src/lib.rs index 29712db2e..b035d95b7 100644 --- a/litebox_common_optee/src/lib.rs +++ b/litebox_common_optee/src/lib.rs @@ -1318,6 +1318,9 @@ pub struct OpteeMsgArg { impl OpteeMsgArg { pub fn get_param_tmem(&self, index: usize) -> Result { + // `self.params.len()` indicates the maximum number of parameters possible whereas `self.num_params` + // indicates the number of parameters that the message sender specifies (which must be less than or + // equal to the maximum). if index >= self.params.len() || index >= self.num_params as usize { Err(OpteeSmcReturn::ENotAvail) } else { diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index e66d36fb3..5faa4018f 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -161,7 +161,12 @@ struct ShmRefInfo { pub page_offset: u64, } -/// A scatter-gather list of OP-TEE shared physical pages in VTL0. +/// A scatter-gather list of OP-TEE physical page addresses in the normal world (VTL0) to +/// share with the secure world (VTL1). Each [`ShmRefPagesData`] occupies one memory page +/// where `pages_list` contains a list of physical page addresses and `next_page_data` +/// contains the physical address of the next [`ShmRefPagesData`] if any. Entries of `pages_list` +/// and `next_page_data` contain zero if the list ends. These physical page addresses are +/// virtually contiguous in the normal world. All these address values must be page aligned. #[derive(Clone, Copy)] #[repr(C)] struct ShmRefPagesData { @@ -209,6 +214,13 @@ impl ShmRefMap { guard.get(&shm_ref).cloned() } + /// This function registers shared memory information that the normal world (VTL0) provides. + /// Specifically, it walks through [`ShmRefPagesData`] structures referenced by `phys_addr` + /// to create a slice of the shared physical page addresses and registers the slice with + /// `shm_ref` as its identifier. `size` indicates the total size of this registered shared + /// memory region. Note that `phys_addr` may not be page aligned. In that case, its page-aligned + /// address points to the first [`ShmRefPagesData`] structure while its page offset indicates + /// the page offset of the first page (i.e., `pages_list[0]` of the first [`ShmRefPagesData`]). 
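// A minimal sketch of the walk described above (illustrative only: `read_pages_data`
// is a hypothetical stand-in for the `NormalWorldConstPtr::<ShmRefPagesData>` copy
// performed by the real implementation, and page-alignment checks are elided):
//
//     fn collect_pages(first_pages_data_pa: u64, num_pages: usize) -> Option<Vec<u64>> {
//         let mut pages = Vec::with_capacity(num_pages);
//         let mut cur = first_pages_data_pa;
//         while pages.len() < num_pages && cur != 0 {
//             // Copy one page-sized `ShmRefPagesData` node out of normal-world memory.
//             let pages_data = read_pages_data(cur)?;
//             for pa in pages_data.pages_list {
//                 if pa == 0 || pages.len() == num_pages {
//                     break;
//                 }
//                 pages.push(pa); // each entry is a page-aligned VTL0 physical address
//             }
//             cur = pages_data.next_page_data; // zero terminates the linked list
//         }
//         (pages.len() == num_pages).then_some(pages)
//     }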
pub fn register_shm( &self, phys_addr: u64, From 16850b8f523489c3b3819023f5ff01796475cb22 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Sun, 14 Dec 2025 14:49:31 +0000 Subject: [PATCH 17/52] validate message --- litebox_common_optee/src/lib.rs | 25 +++++++++++++++++-------- litebox_shim_optee/src/msg_handler.rs | 9 +++++---- 2 files changed, 22 insertions(+), 12 deletions(-) diff --git a/litebox_common_optee/src/lib.rs b/litebox_common_optee/src/lib.rs index b035d95b7..0af2e652c 100644 --- a/litebox_common_optee/src/lib.rs +++ b/litebox_common_optee/src/lib.rs @@ -1108,7 +1108,6 @@ pub enum OpteeMessageCommand { UnregisterShm = OPTEE_MSG_CMD_UNREGISTER_SHM, DoBottomHalf = OPTEE_MSG_CMD_DO_BOTTOM_HALF, StopAsyncNotif = OPTEE_MSG_CMD_STOP_ASYNC_NOTIF, - Unknown = 0xffff_ffff, } impl TryFrom for UteeEntryFunc { @@ -1317,11 +1316,21 @@ pub struct OpteeMsgArg { } impl OpteeMsgArg { + /// Validate the message argument structure. + pub fn validate(&self) -> Result<(), OpteeSmcReturn> { + let _ = + OpteeMessageCommand::try_from(self.cmd as u32).map_err(|_| OpteeSmcReturn::EBadCmd)?; + if self.cmd == OpteeMessageCommand::OpenSession && self.num_params < 2 { + return Err(OpteeSmcReturn::EBadCmd); + } + if self.num_params as usize > self.params.len() { + Err(OpteeSmcReturn::EBadCmd) + } else { + Ok(()) + } + } pub fn get_param_tmem(&self, index: usize) -> Result { - // `self.params.len()` indicates the maximum number of parameters possible whereas `self.num_params` - // indicates the number of parameters that the message sender specifies (which must be less than or - // equal to the maximum). - if index >= self.params.len() || index >= self.num_params as usize { + if index >= self.num_params as usize { Err(OpteeSmcReturn::ENotAvail) } else { Ok(self.params[index] @@ -1330,7 +1339,7 @@ impl OpteeMsgArg { } } pub fn get_param_rmem(&self, index: usize) -> Result { - if index >= self.params.len() || index >= self.num_params as usize { + if index >= self.num_params as usize { Err(OpteeSmcReturn::ENotAvail) } else { Ok(self.params[index] @@ -1339,7 +1348,7 @@ impl OpteeMsgArg { } } pub fn get_param_fmem(&self, index: usize) -> Result { - if index >= self.params.len() || index >= self.num_params as usize { + if index >= self.num_params as usize { Err(OpteeSmcReturn::ENotAvail) } else { Ok(self.params[index] @@ -1348,7 +1357,7 @@ impl OpteeMsgArg { } } pub fn get_param_value(&self, index: usize) -> Result { - if index >= self.params.len() || index >= self.num_params as usize { + if index >= self.num_params as usize { Err(OpteeSmcReturn::ENotAvail) } else { Ok(self.params[index] diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index 5faa4018f..ec22e082f 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -38,10 +38,10 @@ const OPTEE_MSG_UID_3: u32 = 0xa5d5_c51b; // This is the UUID of OP-TEE Trusted OS // TODO: Consider replacing it with our own UUID -const OPTEE_MSG_OS_OPTEE_UUID_0: u32 = 0x486178e0; -const OPTEE_MSG_OS_OPTEE_UUID_1: u32 = 0xe7f811e3; -const OPTEE_MSG_OS_OPTEE_UUID_2: u32 = 0xbc5e0002; -const OPTEE_MSG_OS_OPTEE_UUID_3: u32 = 0xa5d5c51b; +const OPTEE_MSG_OS_OPTEE_UUID_0: u32 = 0x4861_78e0; +const OPTEE_MSG_OS_OPTEE_UUID_1: u32 = 0xe7f8_11e3; +const OPTEE_MSG_OS_OPTEE_UUID_2: u32 = 0xbc5e_0002; +const OPTEE_MSG_OS_OPTEE_UUID_3: u32 = 0xa5d5_c51b; // We do not support notification for now const MAX_NOTIF_VALUE: usize = 0; @@ -127,6 +127,7 @@ pub fn handle_optee_smc_args(smc: &mut OpteeSmcArgs) -> Result 
Result { + msg_arg.validate()?; match msg_arg.cmd { OpteeMessageCommand::RegisterShm => { let tmem = msg_arg.get_param_tmem(0)?; From 0ca522a3390acd5876686a996c0e83b8a4696f14 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Mon, 15 Dec 2025 05:07:17 +0000 Subject: [PATCH 18/52] clarification --- litebox_shim_optee/src/msg_handler.rs | 40 +++++++++++++++++---------- 1 file changed, 26 insertions(+), 14 deletions(-) diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index ec22e082f..494346837 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -131,10 +131,27 @@ pub fn handle_optee_msg_arg(msg_arg: &OpteeMsgArg) -> Result { let tmem = msg_arg.get_param_tmem(0)?; - shm_ref_map().register_shm(tmem.buf_ptr, tmem.size, tmem.shm_ref)?; + if tmem.buf_ptr == 0 || tmem.size == 0 || tmem.shm_ref == 0 { + return Err(OpteeSmcReturn::EBadAddr); + } + // `tmem.buf_ptr` embeds two different information: + // - The physical page address of the first `ShmRefPagesData` + // - The page offset of the first shared memory page (`pages_list[0]`) + let shm_ref_pages_data_phys_addr = page_align_down(tmem.buf_ptr); + let page_offset = tmem.buf_ptr - shm_ref_pages_data_phys_addr; + let aligned_size = page_align_up(page_offset + tmem.size); + shm_ref_map().register_shm( + shm_ref_pages_data_phys_addr, + page_offset, + aligned_size, + tmem.shm_ref, + )?; } OpteeMessageCommand::UnregisterShm => { let tmem = msg_arg.get_param_tmem(0)?; + if tmem.shm_ref == 0 { + return Err(OpteeSmcReturn::EBadAddr); + } shm_ref_map() .remove(tmem.shm_ref) .ok_or(OpteeSmcReturn::EBadAddr)?; @@ -216,25 +233,21 @@ impl ShmRefMap { } /// This function registers shared memory information that the normal world (VTL0) provides. - /// Specifically, it walks through [`ShmRefPagesData`] structures referenced by `phys_addr` - /// to create a slice of the shared physical page addresses and registers the slice with - /// `shm_ref` as its identifier. `size` indicates the total size of this registered shared - /// memory region. Note that `phys_addr` may not be page aligned. In that case, its page-aligned - /// address points to the first [`ShmRefPagesData`] structure while its page offset indicates + /// Specifically, it walks through a linked list of [`ShmRefPagesData`] structures referenced by + /// `shm_ref_pages_data_phys_addr` to create a slice of the shared physical page addresses + /// and registers the slice with `shm_ref` as its identifier. `page_offset` indicates /// the page offset of the first page (i.e., `pages_list[0]` of the first [`ShmRefPagesData`]). + /// `aligned_size` indicates the page-aligned size of the shared memory region to register. 
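// Worked example for the decomposition described above (assuming 4 KiB pages):
// with `tmem.buf_ptr = 0x1234_5678` and `tmem.size = 0x3000`,
//
//     shm_ref_pages_data_phys_addr = page_align_down(0x1234_5678)  = 0x1234_5000
//     page_offset                  = 0x1234_5678 - 0x1234_5000     = 0x678
//     aligned_size                 = page_align_up(0x678 + 0x3000) = 0x4000
//
// so the registration covers four pages even though `size` alone fits in three.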
 pub fn register_shm(
         &self,
-        phys_addr: u64,
-        size: u64,
+        shm_ref_pages_data_phys_addr: u64,
+        page_offset: u64,
+        aligned_size: u64,
         shm_ref: u64,
     ) -> Result<(), OpteeSmcReturn> {
-        let aligned_phys_addr = page_align_down(phys_addr);
-        let page_offset = phys_addr - aligned_phys_addr;
-        let aligned_size = page_align_up(page_offset + size);
         let num_pages = usize::try_from(aligned_size).unwrap() / PAGE_SIZE;
         let mut pages = Vec::with_capacity(num_pages);
-
-        let mut cur_addr = usize::try_from(aligned_phys_addr).unwrap();
+        let mut cur_addr = usize::try_from(shm_ref_pages_data_phys_addr).unwrap();
         loop {
             let cur_ptr = NormalWorldConstPtr::<ShmRefPagesData>::from_usize(cur_addr);
             let pages_data = unsafe { cur_ptr.read_at_offset(0) }
@@ -263,7 +276,6 @@ impl ShmRefMap {
                 page_offset,
             },
         )?;
-
         Ok(())
     }
 }

From f2ea3b89d6a185c24b20e8187196d832ffa05038 Mon Sep 17 00:00:00 2001
From: Sangho Lee
Date: Tue, 16 Dec 2025 18:08:57 +0000
Subject: [PATCH 19/52] get rid of recursive handler invocation

---
 litebox_shim_optee/src/msg_handler.rs | 119 ++++++++++++++++----------
 1 file changed, 74 insertions(+), 45 deletions(-)

diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs
index 494346837..9a44a2506 100644
--- a/litebox_shim_optee/src/msg_handler.rs
+++ b/litebox_shim_optee/src/msg_handler.rs
@@ -58,9 +58,14 @@ fn page_align_up(len: u64) -> u64 {
 }
 
 /// This function handles `OpteeSmcArgs` passed from the normal world (VTL0) via an OP-TEE SMC call.
+/// It returns an `OpteeSmcResult` representing the result of the SMC call and
+/// an optional `OpteeMsgArg` if the SMC call involves an OP-TEE message which should be handled by
+/// `handle_optee_msg_arg` or `handle_ta_request`.
 /// # Panics
 /// Panics if the normal world physical address in `smc` cannot be converted to `usize`.
-pub fn handle_optee_smc_args(smc: &mut OpteeSmcArgs) -> Result<OpteeSmcResult<'_>, OpteeSmcReturn> {
+pub fn handle_optee_smc_args(
+    smc: &mut OpteeSmcArgs,
+) -> Result<(OpteeSmcResult<'_>, Option<OpteeMsgArg>), OpteeSmcReturn> {
     let func_id = smc.func_id()?;
     match func_id {
         OpteeSmcFunction::CallWithArg
@@ -72,61 +77,86 @@ pub fn handle_optee_smc_args(smc: &mut OpteeSmcArgs) -> Result<OpteeSmcResult<'
         OpteeSmcFunction::ExchangeCapabilities => {
             // TODO: update the below when we support more features
             let default_cap = OpteeSecureWorldCapabilities::DYNAMIC_SHM
                 | OpteeSecureWorldCapabilities::MEMREF_NULL
                 | OpteeSecureWorldCapabilities::RPC_ARG;
-            Ok(OpteeSmcResult::ExchangeCapabilities {
-                status: OpteeSmcReturn::Ok,
-                capabilities: default_cap,
-                max_notif_value: MAX_NOTIF_VALUE,
-                data: NUM_RPC_PARMS,
-            })
+            Ok((
+                OpteeSmcResult::ExchangeCapabilities {
+                    status: OpteeSmcReturn::Ok,
+                    capabilities: default_cap,
+                    max_notif_value: MAX_NOTIF_VALUE,
+                    data: NUM_RPC_PARMS,
+                },
+                None,
+            ))
         }
         OpteeSmcFunction::DisableShmCache => {
             // Currently, we do not support this feature.
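// Sketch of the intended non-recursive caller flow after this change (the real
// entry point lives outside this file; names below are placeholders):
//
//     let (result, msg_arg) = handle_optee_smc_args(&mut smc)?;
//     if let Some(msg_arg) = msg_arg {
//         match handle_optee_msg_arg(&msg_arg) {
//             // Shared-memory (un)registration is fully handled here.
//             Ok(()) => {}
//             // TA-bound commands are signaled back and routed to the TA path.
//             Err(OpteeSmcReturn::Ok) => {
//                 let _updated_arg = handle_ta_request(&msg_arg)?;
//             }
//             Err(e) => return Err(e),
//         }
//     }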
- Ok(OpteeSmcResult::DisableShmCache { - status: OpteeSmcReturn::ENotAvail, - shm_upper32: 0, - shm_lower32: 0, - }) + Ok(( + OpteeSmcResult::DisableShmCache { + status: OpteeSmcReturn::ENotAvail, + shm_upper32: 0, + shm_lower32: 0, + }, + None, + )) } - OpteeSmcFunction::GetOsUuid => Ok(OpteeSmcResult::Uuid { - data: &[ - OPTEE_MSG_OS_OPTEE_UUID_0, - OPTEE_MSG_OS_OPTEE_UUID_1, - OPTEE_MSG_OS_OPTEE_UUID_2, - OPTEE_MSG_OS_OPTEE_UUID_3, - ], - }), - OpteeSmcFunction::CallsUid => Ok(OpteeSmcResult::Uuid { - data: &[ - OPTEE_MSG_UID_0, - OPTEE_MSG_UID_1, - OPTEE_MSG_UID_2, - OPTEE_MSG_UID_3, - ], - }), - OpteeSmcFunction::GetOsRevision => Ok(OpteeSmcResult::OsRevision { - major: OPTEE_MSG_REVISION_MAJOR, - minor: OPTEE_MSG_REVISION_MINOR, - build_id: OPTEE_MSG_BUILD_ID, - }), - OpteeSmcFunction::CallsRevision => Ok(OpteeSmcResult::Revision { - major: OPTEE_MSG_REVISION_MAJOR, - minor: OPTEE_MSG_REVISION_MINOR, - }), + OpteeSmcFunction::GetOsUuid => Ok(( + OpteeSmcResult::Uuid { + data: &[ + OPTEE_MSG_OS_OPTEE_UUID_0, + OPTEE_MSG_OS_OPTEE_UUID_1, + OPTEE_MSG_OS_OPTEE_UUID_2, + OPTEE_MSG_OS_OPTEE_UUID_3, + ], + }, + None, + )), + OpteeSmcFunction::CallsUid => Ok(( + OpteeSmcResult::Uuid { + data: &[ + OPTEE_MSG_UID_0, + OPTEE_MSG_UID_1, + OPTEE_MSG_UID_2, + OPTEE_MSG_UID_3, + ], + }, + None, + )), + OpteeSmcFunction::GetOsRevision => Ok(( + OpteeSmcResult::OsRevision { + major: OPTEE_MSG_REVISION_MAJOR, + minor: OPTEE_MSG_REVISION_MINOR, + build_id: OPTEE_MSG_BUILD_ID, + }, + None, + )), + OpteeSmcFunction::CallsRevision => Ok(( + OpteeSmcResult::Revision { + major: OPTEE_MSG_REVISION_MAJOR, + minor: OPTEE_MSG_REVISION_MINOR, + }, + None, + )), _ => Err(OpteeSmcReturn::UnknownFunction), } } -/// This function handles an OP-TEE message contained in `OpteeMsgArg` -pub fn handle_optee_msg_arg(msg_arg: &OpteeMsgArg) -> Result { +/// This function handles an OP-TEE message contained in `OpteeMsgArg`. +/// Currently, it only handles share memory registration and unregistration. +/// If an OP-TEE message involves with a TA request, it simply returns +/// `Err(OpteeSmcReturn::Ok)` while expecting that the caller will handle +/// the message with `handle_ta_request`. +pub fn handle_optee_msg_arg(msg_arg: &OpteeMsgArg) -> Result<(), OpteeSmcReturn> { msg_arg.validate()?; match msg_arg.cmd { OpteeMessageCommand::RegisterShm => { @@ -158,13 +188,12 @@ pub fn handle_optee_msg_arg(msg_arg: &OpteeMsgArg) -> Result return handle_ta_request(msg_arg), + | OpteeMessageCommand::CloseSession => return Err(OpteeSmcReturn::Ok), _ => { todo!("Unimplemented OpteeMessageCommand: {:?}", msg_arg.cmd); } } - - Ok(*msg_arg) + Ok(()) } /// This function handles a TA request contained in `OpteeMsgArg` From 68dd2c6dbccd41ad4246596b9ae217c88f894842 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Fri, 19 Dec 2025 00:58:08 +0000 Subject: [PATCH 20/52] some docs for physical pointer (wip) --- litebox_shim_optee/src/ptr.rs | 65 ++++++++++++++++++++++++++++++++--- 1 file changed, 60 insertions(+), 5 deletions(-) diff --git a/litebox_shim_optee/src/ptr.rs b/litebox_shim_optee/src/ptr.rs index 9b53cab21..b91e10584 100644 --- a/litebox_shim_optee/src/ptr.rs +++ b/litebox_shim_optee/src/ptr.rs @@ -1,14 +1,69 @@ -//! Placeholders for specifying remote pointer access (e.g., reading data from -//! VTL0 physical memory) +//! Physical Pointer Abstraction with On-demand Mapping +//! +//! This module implements types and traits to support accessing physical addresses +//! 
(e.g., VTL0 or normal-world physical memory) from LiteBox with on-demand mapping. +//! In the context of LVBS and OP-TEE, accessing physical memory is necessary +//! because VTL0 and VTL1 as well as normal world and secure world do not share +//! the same virtual address space, but they still have to share data through memory. +//! VTL1 and secure world receive physical addresses from VTL0 and normal world, +//! respectively, and they need to read from or write to those addresses. +//! +//! To simplify all these, we could persistently map the entire VTL0/normal-world +//! physical memory into VTL1/secure-world address space at once and just access them +//! through corresponding virtual addresses. Also, we could define some APIs to let +//! LiteBox (shim) map/unmap arbitrary physical addresses. However, we do not take +//! these approaches due to security concerns (e.g., data corruption or information +//! leakage due to concurrent and persistent access). +//! +//! Instead, the approach this module takes is to map the required physical memory +//! region on-demand when accessing them while using a buffer to copy data to/from +//! those regions. This way, we can ensure that data must be copied into +//! LiteBox-managed memory before being used while avoiding any unknown side effects +//! due to persistent memory mapping. +//! +//! Considerations: +//! +//! Ideally, we should be able to validate whether a given physical address is okay +//! to access or even exists in the first place. For example, accessing LiteBox's +//! own memory with this physical pointer abstraction should be prohibited. Also, +//! some device memory is mapped to certain physical address ranges and LiteBox +//! should not touch them without in-depth knowledge. However, this is a bit tricky +//! because, in many cases, LiteBox does not directly interact with the underlying +//! hardware or BIOS/UEFI. In the case of LVBS, LiteBox obtains the physical memory +//! information from VTL0 including the total physical memory size and the memory +//! range assigned to VTL1/LiteBox. Thus, this module can confirm whether a given +//! physical address belongs to VTL0's physical memory. +//! +//! This module should allow byte-level access while transparently handling page +//! mapping and data access across page boundaries. This could become complicated +//! when we consider multiple page sizes (e.g., 4KiB, 2MiB, 1GiB). Also, unaligned +//! access is matter to be considered. +//! +//! In addition, often times, this physical pointer abstraction is involved with +//! a list of physical page addresses (i.e., scatter-gather list). For example, in +//! the worse case, a two-byte data structure can span across two physical pages. +//! Thus, to enhance the performance, we may need to consider mapping multiple pages +//! at once, copy data from/to them, and unmap them later. Currently, our +//! implementation (in `litebox_platform_lvbs`) does not implement this functionality +//! yet and it just maps/unmaps one page at a time. We could define separate +//! interfaces for this functionality later (e.g., its parameter would be a slice of +//! `usize` instead of single `usize`). +//! +//! When this module needs to access data across physical page boundaries, it assumes +//! that those physical pages are virtually contiguous in VTL0 or normal-world address +//! space. Otherwise, this module could end up with accessing incorrect data. This is +//! best-effort assumption and it is the VTL0 or normal-world side's responsibility +//! 
(e.g., even if we always require a list of physical addresses, they can provide +//! a wrong list by mistake or intentionally). + //! TODO: Improve these and move these to the litebox crate later use litebox::platform::{RawConstPointer, RawMutPointer}; -// TODO: use the one from the litebox crate pub trait ValidateAccess {} -/// Trait to access a pointer to remote memory -/// For now, we only consider copying the entire value before acccessing it. +/// Trait to access a pointer to physical memory +/// For now, we only consider copying the entire value before accessing it. /// We do not consider byte-level access or unaligned access. pub trait RemoteMemoryAccess { fn read_at_offset(ptr: *mut T, count: isize) -> Option; From a5b31617805e1717c8ba3909adbfd5d4c8989649 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Sat, 20 Dec 2025 00:49:10 +0000 Subject: [PATCH 21/52] improve phys ptr abstraction (wip) --- litebox_shim_optee/src/ptr.rs | 291 ++++++++++++++++++++++++++++++---- 1 file changed, 258 insertions(+), 33 deletions(-) diff --git a/litebox_shim_optee/src/ptr.rs b/litebox_shim_optee/src/ptr.rs index b91e10584..eb19b1711 100644 --- a/litebox_shim_optee/src/ptr.rs +++ b/litebox_shim_optee/src/ptr.rs @@ -11,56 +11,76 @@ //! To simplify all these, we could persistently map the entire VTL0/normal-world //! physical memory into VTL1/secure-world address space at once and just access them //! through corresponding virtual addresses. Also, we could define some APIs to let -//! LiteBox (shim) map/unmap arbitrary physical addresses. However, we do not take -//! these approaches due to security concerns (e.g., data corruption or information -//! leakage due to concurrent and persistent access). +//! LiteBox (shim) map/unmap arbitrary physical addresses (i.e., implementing and +//! exposing APIs like Linux kernel's `vmap()` and `vunmap()`). However, this module +//! does not take these approaches due to scalability (e.g., how to deal with a system +//! with terabytes of physical memory?) and security concerns (e.g., data corruption or +//! information leakage due to concurrent and persistent access). //! //! Instead, the approach this module takes is to map the required physical memory //! region on-demand when accessing them while using a buffer to copy data to/from -//! those regions. This way, we can ensure that data must be copied into +//! those regions. This way, this module can ensure that data must be copied into //! LiteBox-managed memory before being used while avoiding any unknown side effects //! due to persistent memory mapping. //! //! Considerations: //! -//! Ideally, we should be able to validate whether a given physical address is okay -//! to access or even exists in the first place. For example, accessing LiteBox's -//! own memory with this physical pointer abstraction should be prohibited. Also, -//! some device memory is mapped to certain physical address ranges and LiteBox -//! should not touch them without in-depth knowledge. However, this is a bit tricky -//! because, in many cases, LiteBox does not directly interact with the underlying -//! hardware or BIOS/UEFI. In the case of LVBS, LiteBox obtains the physical memory -//! information from VTL0 including the total physical memory size and the memory -//! range assigned to VTL1/LiteBox. Thus, this module can confirm whether a given -//! physical address belongs to VTL0's physical memory. +//! Ideally, this module should be able to validate whether a given physical address +//! 
is okay to access or even exists in the first place. For example, accessing
+//! LiteBox's own memory with this physical pointer abstraction must be prohibited to
+//! prevent the Boomerang attack. Also, some device memory is mapped to certain
+//! physical address ranges and LiteBox should not touch them without in-depth
+//! knowledge. However, this is a bit tricky because, in many cases, LiteBox does
+//! not directly interact with the underlying hardware or BIOS/UEFI. In the case of
+//! LVBS, LiteBox obtains the physical memory information from VTL0 including the
+//! total physical memory size and the memory range assigned to VTL1/LiteBox.
+//! Thus, this module can at least confirm a given physical address does not belong
+//! to VTL1's physical memory.
 //!
 //! This module should allow byte-level access while transparently handling page
 //! mapping and data access across page boundaries. This could become complicated
-//! when we consider multiple page sizes (e.g., 4KiB, 2MiB, 1GiB). Also, unaligned
-//! access is matter to be considered.
+//! when we consider multiple page sizes (e.g., 4 KiB, 2 MiB, 1 GiB). Also,
+//! unaligned access needs to be considered.
 //!
 //! In addition, often times, this physical pointer abstraction is involved with
-//! a list of physical page addresses (i.e., scatter-gather list). For example, in
-//! the worse case, a two-byte data structure can span across two physical pages.
-//! Thus, to enhance the performance, we may need to consider mapping multiple pages
-//! at once, copy data from/to them, and unmap them later. Currently, our
-//! implementation (in `litebox_platform_lvbs`) does not implement this functionality
-//! yet and it just maps/unmaps one page at a time. We could define separate
-//! interfaces for this functionality later (e.g., its parameter would be a slice of
-//! `usize` instead of single `usize`).
+//! a list of physical addresses (i.e., scatter-gather list). For example, in
+//! the worst case, a two-byte value can span across two non-contiguous physical
+//! pages. Thus, to enhance the performance, we may need to consider mapping
+//! multiple pages at once, copy data from/to them, and unmap them later. Currently,
+//! our implementation (in `litebox_platform_lvbs`) does not implement this
+//! functionality yet and it just maps/unmaps one page at a time (this works but is
+//! inefficient).
 //!
 //! When this module needs to access data across physical page boundaries, it assumes
 //! that those physical pages are virtually contiguous in VTL0 or normal-world address
 //! space. Otherwise, this module could end up with accessing incorrect data. This is
-//! best-effort assumption and it is the VTL0 or normal-world side's responsibility
-//! (e.g., even if we always require a list of physical addresses, they can provide
+//! a best-effort assumption, and ensuring this is the caller's responsibility (e.g., even
+//! if this module always requires a list of physical addresses, the caller can provide
 //! a wrong list by mistake or intentionally).
-//!
-//! TODO: Improve these and move these to the litebox crate later
 
 use litebox::platform::{RawConstPointer, RawMutPointer};
+use thiserror::Error;
 
-pub trait ValidateAccess {}
+/// Trait to validate that a physical pointer does not belong to LiteBox-managed memory
+/// (including both kernel and userspace memory).
+/// +/// This validation is mainly to deal with the Boomerang attack where a normal-world client +/// tricks the secure-world kernel (i.e., LiteBox) to access the secure-world memory. +/// However, even if there is no such threat (e.g., no normal/secure world separation), +/// this validation is still beneficial to ensure the memory safety. +/// +/// Succeeding these operations does not guarantee that the physical pointer is valid to +/// access, just that it is outside of LiteBox-managed memory and won't be used to access +/// it as an unmanaged channel. +pub trait ValidateAccess { + /// Validate that the given physical pointer does not belong to LiteBox-managed memory. + /// + /// Here, we do not use `*const T` or `*mut T` because this is a physical pointer which + /// must not be dereferenced directly. + /// + /// Returns `Some(pa)` if valid. If the pointer is not valid, returns `None`. + fn validate(pa: usize) -> Result; +} /// Trait to access a pointer to physical memory /// For now, we only consider copying the entire value before accessing it. @@ -75,6 +95,189 @@ pub trait RemoteMemoryAccess { fn copy_from_slice(start_offset: usize, buf: &[T]) -> Option<()>; } +/// Data structure for an array of physical pages. These physical pages should be +/// virtually contiguous in the source address space. +#[derive(Clone)] +pub struct PhysPageArray(alloc::boxed::Box<[usize]>); + +impl PhysPageArray<4096> { + /// Create a new `PhysPageArray` from the given slice of physical addresses. + pub fn try_from_slice(addrs: &[usize]) -> Result { + for addr in addrs { + if !addr.is_multiple_of(4096) { + return Err(PhysPointerError::UnalignedPhysicalAddress(*addr, 4096)); + } + } + Ok(Self(addrs.into())) + } +} + +/// Data structure to maintain the mapping information returned by `vmap()`. +/// `base` is the virtual address of the mapped region which is page aligned. +/// `size` is the size of the mapped region in bytes. +#[derive(Clone)] +pub struct PhysPageMapInfo { + pub base: *mut u8, + pub size: usize, +} + +bitflags::bitflags! { + /// Physical page map permissions which is a restricted version of + /// [`litebox::platform::page_mgmt::MemoryRegionPermissions`]. + /// + /// This module only supports READ and WRITE permissions. Both EXECUTE and SHARED + /// permissions are explicitly prohibited. + #[derive(Clone, Copy, Debug, PartialEq, Eq)] + pub struct PhysPageMapPermissions: u8 { + /// Readable + const READ = 1 << 0; + /// Writable + const WRITE = 1 << 1; + } +} + +/// Trait to map and unmap physical pages into virtually contiguous address space. +/// +/// The implementation of this trait is platform-specific because it depends on how +/// the underlying platform manages page tables and memory regions. +pub trait PhysPageMapper { + /// Map the given [`PhysPageArray`] into virtually contiguous address space with the given + /// [`PhysPageMapPermissions`] while returning [`PhysPageMapInfo`]. + /// This function is analogous to Linux kernel's `vmap()`. + /// + /// # Safety + /// + /// The caller must ensure that `pages` are not in active use. LiteBox itself cannot fully guarantee this + /// and it needs some helps from the caller, hypervisor, or hardware. + unsafe fn vmap( + pages: PhysPageArray, + perms: PhysPageMapPermissions, + ) -> Result, PhysPointerError>; + /// Unmap the previously mapped virtually contiguous address space ([`PhysPageMapInfo`]). + /// This function is analogous to Linux kernel's `vunmap()`. 
+ /// + /// # Safety + /// + /// The caller must ensure that the virtual addresses belonging to `vmap_info` are not in active use. + /// Like `vmap()`, LiteBox itself cannot fully guarantee this and it needs some helps from other parties. + unsafe fn vunmap( + vmap_info: PhysPageMapInfo, + ) -> Result<(), PhysPointerError>; +} + +/// Represent a physical pointer to a read-only object. +/// - `pages`: An array of page-aligned physical addresses ([`PhysPageArray`]). Physical addresses in +/// this array should be virtually contiguous. +/// - `offset`: The offset within `pages[0]` where the object starts. It should be smaller than `ALIGN`. +/// - `T`: The type of the object being pointed to. `pages` with respect to `offset` should cover enough +/// memory for an object of type `T`. +/// - `V`: The validator type implementing [`ValidateAccess`] trait to validate the physical addresses +#[derive(Clone)] +#[repr(C)] +pub struct PhysConstPtr { + pages: PhysPageArray, + offset: usize, + map_info: Option>, + _type: core::marker::PhantomData, + _mapper: core::marker::PhantomData, + _validator: core::marker::PhantomData, +} + +impl + PhysConstPtr +{ + /// Create a new `PhysConstPtr` from the given physical page array and offset. + pub fn try_from_page_array( + pages: PhysPageArray, + offset: usize, + ) -> Result { + if offset >= ALIGN { + return Err(PhysPointerError::InvalidBaseOffset(offset, ALIGN)); + } + let size = if pages.0.is_empty() { + 0 + } else { + ALIGN - offset + (pages.0.len() - 1) * ALIGN + }; + if size < core::mem::size_of::() { + return Err(PhysPointerError::InsufficientPhysicalPages( + size, + core::mem::size_of::(), + )); + } + for pa in &pages.0 { + V::validate::(*pa)?; + } + Ok(Self { + pages, + offset, + map_info: None, + _type: core::marker::PhantomData, + _mapper: core::marker::PhantomData, + _validator: core::marker::PhantomData, + }) + } + /// Create a new `PhysConstPtr` from the given contiguous physical address and length. + /// The caller must ensure that `pa`, ..., `pa+len` are both physically and virtually contiguous. + pub fn try_from_contiguous_pages(pa: usize, len: usize) -> Result { + if len < core::mem::size_of::() { + return Err(PhysPointerError::InsufficientPhysicalPages( + len, + core::mem::size_of::(), + )); + } + let start_page = pa - (pa % ALIGN); + let end_page = pa + len; + let end_page_aligned = if end_page.is_multiple_of(ALIGN) { + end_page + } else { + end_page + (ALIGN - (end_page % ALIGN)) + }; + let mut pages = alloc::vec::Vec::new(); + let mut current_page = start_page; + while current_page < end_page_aligned { + V::validate::(current_page)?; + pages.push(current_page); + current_page += ALIGN; + } + Self::try_from_page_array(PhysPageArray(pages.into()), pa - start_page) + } + /// Map the physical pages if not already mapped. + fn map(&mut self) -> Result<(), PhysPointerError> { + if self.map_info.is_none() { + unsafe { + self.map_info = Some(M::vmap(self.pages.clone(), PhysPageMapPermissions::READ)?); + } + } + Ok(()) + } + /// Unmap the physical pages if mapped. 
+ fn unmap(&mut self) -> Result<(), PhysPointerError> { + if let Some(map_info) = self.map_info.take() { + unsafe { + M::vunmap(map_info)?; + } + self.map_info = None; + } + Ok(()) + } + pub fn as_usize(&mut self) -> Result { + todo!() + } + pub fn from_usize(&mut self, addr: usize) -> Result<(), PhysPointerError> { + todo!() + } +} + +impl core::fmt::Debug for PhysConstPtr { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("PhysConstPtr") + .field("pages", &self.pages.0) + .field("offset", &self.offset) + .finish_non_exhaustive() + } +} + #[repr(C)] pub struct RemoteConstPtr { inner: *const T, @@ -218,10 +421,14 @@ impl RawMutPointer } } -// TODO: implement a validation mechanism for VTL0 physical addresses (e.g., ensure this physical -// address does not belong to VTL1) +// TODO: Sample no-op implementations to be removed. Implement a validation mechanism for +// VTL0 physical addresses (e.g., ensure this physical address does not belong to VTL1) pub struct Novalidation; -impl ValidateAccess for Novalidation {} +impl ValidateAccess for Novalidation { + fn validate(pa: usize) -> Result { + Ok(pa) + } +} pub struct Vtl0PhysMemoryAccess; impl RemoteMemoryAccess for Vtl0PhysMemoryAccess { @@ -250,6 +457,24 @@ impl RemoteMemoryAccess for Vtl0PhysMemoryAccess { } } +/// Possible errors for physical page access +#[non_exhaustive] +#[derive(Error, Debug)] +pub enum PhysPointerError { + #[error("Physical address {0:#x} is invalid to access")] + InvalidPhysicalAddress(usize), + #[error("Physical address {0:#x} is not aligned to {1} bytes")] + UnalignedPhysicalAddress(usize, usize), + #[error("Offset {0:#x} is not aligned to {1} bytes")] + UnalignedOffset(usize, usize), + #[error("Base offset {0:#x} is greater than or equal to alignment ({1} bytes)")] + InvalidBaseOffset(usize, usize), + #[error( + "The total size of the given pages ({0} bytes) is insufficient for the requested type ({1} bytes)" + )] + InsufficientPhysicalPages(usize, usize), +} + /// Normal world const pointer type. For now, we only consider VTL0 physical memory but it can be /// something else like TrustZone normal world, other VMPL or TD partition, or other processes. pub type NormalWorldConstPtr = RemoteConstPtr; From 0723e9d204189b0cb7c8277bc09d5fe36e0b9a5c Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Mon, 22 Dec 2025 18:38:53 +0000 Subject: [PATCH 22/52] checkpoint --- litebox_shim_optee/src/msg_handler.rs | 21 +- litebox_shim_optee/src/ptr.rs | 352 ++++++++++---------------- 2 files changed, 150 insertions(+), 223 deletions(-) diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index 9a44a2506..b5af78f3b 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -12,11 +12,10 @@ //! world physical addresses to exchange a large amount of data. Also, like the OP-TEE //! SMC call, a certain OP-TEE message/command does not involve with any TA (e.g., register //! shared memory). 
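+//!
+//! For orientation, the read path for a message argument looks roughly like this
+//! (an illustrative sketch only; `msg_arg_addr` is a normal-world physical address
+//! taken from the SMC registers, as in `handle_optee_smc_args` below):
+//!
+//! ```ignore
+//! let mut ptr = NormalWorldPtr::<OpteeMsgArg>::try_from_usize(msg_arg_addr)
+//!     .map_err(|_| OpteeSmcReturn::EBadAddr)?;
+//! let msg_arg = unsafe { ptr.read_at_offset(0) }.map_err(|_| OpteeSmcReturn::EBadAddr)?;
+//! ```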
-use crate::ptr::NormalWorldConstPtr; +use crate::ptr::NormalWorldPtr; use alloc::{boxed::Box, vec::Vec}; use hashbrown::HashMap; use litebox::mm::linux::PAGE_SIZE; -use litebox::platform::RawConstPointer; use litebox_common_optee::{ OpteeMessageCommand, OpteeMsgArg, OpteeSecureWorldCapabilities, OpteeSmcArgs, OpteeSmcFunction, OpteeSmcResult, OpteeSmcReturn, @@ -73,15 +72,14 @@ pub fn handle_optee_smc_args( | OpteeSmcFunction::CallWithRegdArg => { let msg_arg_addr = smc.optee_msg_arg_phys_addr()?; let msg_arg_addr = usize::try_from(msg_arg_addr).unwrap(); - let ptr = NormalWorldConstPtr::::from_usize(msg_arg_addr); - let msg_arg = unsafe { ptr.read_at_offset(0) } - .ok_or(OpteeSmcReturn::EBadAddr)? - .into_owned(); + let mut ptr = NormalWorldPtr::::try_from_usize(msg_arg_addr) + .map_err(|_| OpteeSmcReturn::EBadAddr)?; + let msg_arg = unsafe { ptr.read_at_offset(0) }.map_err(|_| OpteeSmcReturn::EBadAddr)?; Ok(( OpteeSmcResult::Generic { status: OpteeSmcReturn::Ok, }, - Some(msg_arg), + Some(*msg_arg), )) } OpteeSmcFunction::ExchangeCapabilities => { @@ -278,10 +276,11 @@ impl ShmRefMap { let mut pages = Vec::with_capacity(num_pages); let mut cur_addr = usize::try_from(shm_ref_pages_data_phys_addr).unwrap(); loop { - let cur_ptr = NormalWorldConstPtr::::from_usize(cur_addr); - let pages_data = unsafe { cur_ptr.read_at_offset(0) } - .ok_or(OpteeSmcReturn::EBadAddr)? - .into_owned(); + let mut cur_ptr = + NormalWorldPtr::::try_from_usize(cur_addr) + .map_err(|_| OpteeSmcReturn::EBadAddr)?; + let pages_data = + unsafe { cur_ptr.read_at_offset(0) }.map_err(|_| OpteeSmcReturn::EBadAddr)?; for page in &pages_data.pages_list { if *page == 0 || pages.len() == num_pages { break; diff --git a/litebox_shim_optee/src/ptr.rs b/litebox_shim_optee/src/ptr.rs index eb19b1711..6c7a91c5b 100644 --- a/litebox_shim_optee/src/ptr.rs +++ b/litebox_shim_optee/src/ptr.rs @@ -58,7 +58,7 @@ //! if this module always requires a list of physical addresses, the caller can provide //! a wrong list by mistake or intentionally). -use litebox::platform::{RawConstPointer, RawMutPointer}; +use litebox::platform::page_mgmt::MemoryRegionPermissions; use thiserror::Error; /// Trait to validate that a physical pointer does not belong to LiteBox-managed memory @@ -82,19 +82,6 @@ pub trait ValidateAccess { fn validate(pa: usize) -> Result; } -/// Trait to access a pointer to physical memory -/// For now, we only consider copying the entire value before accessing it. -/// We do not consider byte-level access or unaligned access. -pub trait RemoteMemoryAccess { - fn read_at_offset(ptr: *mut T, count: isize) -> Option; - - fn write_at_offset(ptr: *mut T, count: isize, value: T) -> Option<()>; - - fn slice_from(ptr: *mut T, len: usize) -> Option>; - - fn copy_from_slice(start_offset: usize, buf: &[T]) -> Option<()>; -} - /// Data structure for an array of physical pages. These physical pages should be /// virtually contiguous in the source address space. #[derive(Clone)] @@ -135,6 +122,18 @@ bitflags::bitflags! { const WRITE = 1 << 1; } } +impl From for PhysPageMapPermissions { + fn from(perms: MemoryRegionPermissions) -> Self { + let mut phys_perms = PhysPageMapPermissions::empty(); + if perms.contains(MemoryRegionPermissions::READ) { + phys_perms |= PhysPageMapPermissions::READ; + } + if perms.contains(MemoryRegionPermissions::WRITE) { + phys_perms |= PhysPageMapPermissions::WRITE; + } + phys_perms + } +} /// Trait to map and unmap physical pages into virtually contiguous address space. 
/// @@ -165,18 +164,20 @@ pub trait PhysPageMapper { ) -> Result<(), PhysPointerError>; } -/// Represent a physical pointer to a read-only object. +/// Represent a physical pointer to an object with on-demand mapping. /// - `pages`: An array of page-aligned physical addresses ([`PhysPageArray`]). Physical addresses in /// this array should be virtually contiguous. /// - `offset`: The offset within `pages[0]` where the object starts. It should be smaller than `ALIGN`. +/// - `count`: The number of objects of type `T` that can be accessed from this pointer. /// - `T`: The type of the object being pointed to. `pages` with respect to `offset` should cover enough /// memory for an object of type `T`. /// - `V`: The validator type implementing [`ValidateAccess`] trait to validate the physical addresses #[derive(Clone)] #[repr(C)] -pub struct PhysConstPtr { +pub struct PhysMappedPtr { pages: PhysPageArray, offset: usize, + count: usize, map_info: Option>, _type: core::marker::PhantomData, _mapper: core::marker::PhantomData, @@ -184,9 +185,9 @@ pub struct PhysConstPtr { } impl - PhysConstPtr + PhysMappedPtr { - /// Create a new `PhysConstPtr` from the given physical page array and offset. + /// Create a new `PhysMappedPtr` from the given physical page array and offset. pub fn try_from_page_array( pages: PhysPageArray, offset: usize, @@ -211,23 +212,24 @@ impl Ok(Self { pages, offset, + count: size / core::mem::size_of::(), map_info: None, _type: core::marker::PhantomData, _mapper: core::marker::PhantomData, _validator: core::marker::PhantomData, }) } - /// Create a new `PhysConstPtr` from the given contiguous physical address and length. - /// The caller must ensure that `pa`, ..., `pa+len` are both physically and virtually contiguous. - pub fn try_from_contiguous_pages(pa: usize, len: usize) -> Result { - if len < core::mem::size_of::() { + /// Create a new `PhysMappedPtr` from the given contiguous physical address and length. + /// The caller must ensure that `pa`, ..., `pa+bytes` are both physically and virtually contiguous. + pub fn try_from_contiguous_pages(pa: usize, bytes: usize) -> Result { + if bytes < core::mem::size_of::() { return Err(PhysPointerError::InsufficientPhysicalPages( - len, + bytes, core::mem::size_of::(), )); } let start_page = pa - (pa % ALIGN); - let end_page = pa + len; + let end_page = pa + bytes; let end_page_aligned = if end_page.is_multiple_of(ALIGN) { end_page } else { @@ -242,218 +244,140 @@ impl } Self::try_from_page_array(PhysPageArray(pages.into()), pa - start_page) } + /// Create a new `PhysMappedPtr` from the given physical address for a single object. + /// This is a shortcut for `try_from_contiguous_pages(pa, size_of::())`. + pub fn try_from_usize(pa: usize) -> Result { + Self::try_from_contiguous_pages(pa, core::mem::size_of::()) + } + /// Read the value at the given type-aware offset from the physical pointer. + /// + /// # Safety + /// + /// The caller should be aware that the given physical address might be concurrently accessed by + /// other entities (e.g., the normal world kernel) if there is no extra security mechanism + /// in place (e.g., by the hypervisor or hardware). 
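+ ///
+ /// # Example
+ ///
+ /// An illustrative sketch (not part of this patch) reading the third `u64` of a
+ /// normal-world page through the `NormalWorldPtr` alias defined later in this file;
+ /// `pa` is a hypothetical, already-validated physical address:
+ ///
+ /// ```ignore
+ /// let mut ptr = NormalWorldPtr::<u64>::try_from_contiguous_pages(pa, 4096)?;
+ /// let third = unsafe { ptr.read_at_offset(2) }?; // copied out as a Box<u64>
+ /// ```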
+ pub unsafe fn read_at_offset( + &mut self, + count: usize, + ) -> Result, PhysPointerError> { + if count >= self.count { + return Err(PhysPointerError::IndexOutOfBounds(count, self.count)); + } + self.map_all(PhysPageMapPermissions::READ)?; + let Some(map_info) = &self.map_info else { + return Err(PhysPointerError::NoMappingInfo); + }; + let addr = unsafe { map_info.base.add(self.offset) } + .cast::() + .wrapping_add(count); + let val = { + let mut buffer = core::mem::MaybeUninit::::uninit(); + if (addr as usize).is_multiple_of(core::mem::align_of::()) { + unsafe { + core::ptr::copy_nonoverlapping(addr, buffer.as_mut_ptr(), 1); + } + } else { + unsafe { + core::ptr::copy_nonoverlapping( + addr.cast::(), + buffer.as_mut_ptr().cast::(), + core::mem::size_of::(), + ); + } + } + unsafe { buffer.assume_init() } + }; + self.unmap_all()?; + Ok(alloc::boxed::Box::new(val)) + } + /// Write the value at the given type-aware offset to the physical pointer. + /// + /// # Safety + /// + /// The caller should be aware that the given physical address might be concurrently accessed by + /// other entities (e.g., the normal world kernel) if there is no extra security mechanism + /// in place (e.g., by the hypervisor or hardware). + pub unsafe fn write_at_offset( + &mut self, + count: usize, + value: T, + ) -> Result<(), PhysPointerError> { + if count >= self.count { + return Err(PhysPointerError::IndexOutOfBounds(count, self.count)); + } + self.map_all(PhysPageMapPermissions::READ | PhysPageMapPermissions::WRITE)?; + let Some(map_info) = &self.map_info else { + return Err(PhysPointerError::NoMappingInfo); + }; + let addr = unsafe { map_info.base.add(self.offset) } + .cast::() + .wrapping_add(count); + if (addr as usize).is_multiple_of(core::mem::align_of::()) { + unsafe { core::ptr::write(addr, value) }; + } else { + unsafe { core::ptr::write_unaligned(addr, value) }; + } + self.unmap_all()?; + Ok(()) + } /// Map the physical pages if not already mapped. - fn map(&mut self) -> Result<(), PhysPointerError> { + fn map_all(&mut self, perms: PhysPageMapPermissions) -> Result<(), PhysPointerError> { if self.map_info.is_none() { unsafe { - self.map_info = Some(M::vmap(self.pages.clone(), PhysPageMapPermissions::READ)?); + self.map_info = Some(M::vmap(self.pages.clone(), perms)?); } + Ok(()) + } else { + Err(PhysPointerError::AlreadyMapped(self.pages.0[0])) } - Ok(()) } /// Unmap the physical pages if mapped. 
- fn unmap(&mut self) -> Result<(), PhysPointerError> { + fn unmap_all(&mut self) -> Result<(), PhysPointerError> { if let Some(map_info) = self.map_info.take() { unsafe { M::vunmap(map_info)?; } self.map_info = None; + Ok(()) + } else { + Err(PhysPointerError::Unmapped(self.pages.0[0])) } - Ok(()) - } - pub fn as_usize(&mut self) -> Result { - todo!() - } - pub fn from_usize(&mut self, addr: usize) -> Result<(), PhysPointerError> { - todo!() } } -impl core::fmt::Debug for PhysConstPtr { +impl core::fmt::Debug for PhysMappedPtr { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - f.debug_struct("PhysConstPtr") + f.debug_struct("PhysMappedPtr") .field("pages", &self.pages.0) .field("offset", &self.offset) .finish_non_exhaustive() } } -#[repr(C)] -pub struct RemoteConstPtr { - inner: *const T, - _access: core::marker::PhantomData, - _validator: core::marker::PhantomData, -} - -impl RemoteConstPtr { - pub fn from_ptr(ptr: *const T) -> Self { - Self { - inner: ptr, - _access: core::marker::PhantomData, - _validator: core::marker::PhantomData, - } - } -} - -impl Clone for RemoteConstPtr { - fn clone(&self) -> Self { - *self - } -} - -impl Copy for RemoteConstPtr {} - -impl core::fmt::Debug for RemoteConstPtr { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - f.debug_tuple("RemoteConstPtr").field(&self.inner).finish() - } -} - -impl RawConstPointer - for RemoteConstPtr -{ - unsafe fn read_at_offset<'a>(self, count: isize) -> Option> { - let val = A::read_at_offset(self.inner.cast_mut(), count)?; - Some(alloc::borrow::Cow::Owned(val)) - } - - unsafe fn to_cow_slice<'a>(self, len: usize) -> Option> { - // TODO: read data from the remote side - if len == 0 { - return Some(alloc::borrow::Cow::Owned(alloc::vec::Vec::new())); - } - let mut data = alloc::vec::Vec::new(); - data.reserve_exact(len); - unsafe { data.set_len(len) }; - Some(alloc::borrow::Cow::Owned(data)) - } - - fn as_usize(&self) -> usize { - self.inner.expose_provenance() - } - - fn from_usize(addr: usize) -> Self { - Self { - inner: core::ptr::with_exposed_provenance(addr), - _access: core::marker::PhantomData, - _validator: core::marker::PhantomData, - } - } -} - -#[repr(C)] -pub struct RemoteMutPtr { - inner: *mut T, - _access: core::marker::PhantomData, - _validator: core::marker::PhantomData, -} - -impl RemoteMutPtr { - pub fn from_ptr(ptr: *mut T) -> Self { - Self { - inner: ptr, - _access: core::marker::PhantomData, - _validator: core::marker::PhantomData, - } - } -} - -impl Clone for RemoteMutPtr { - fn clone(&self) -> Self { - *self - } -} - -impl Copy for RemoteMutPtr {} - -impl core::fmt::Debug for RemoteMutPtr { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - f.debug_tuple("RemoteMutPtr").field(&self.inner).finish() - } -} - -impl RawConstPointer - for RemoteMutPtr -{ - unsafe fn read_at_offset<'a>(self, count: isize) -> Option> { - let val = A::read_at_offset(self.inner, count)?; - Some(alloc::borrow::Cow::Owned(val)) - } - - unsafe fn to_cow_slice<'a>(self, len: usize) -> Option> { - // TODO: read data from the remote side - if len == 0 { - return Some(alloc::borrow::Cow::Owned(alloc::vec::Vec::new())); - } - let data = A::slice_from(self.inner, len)?; - Some(alloc::borrow::Cow::Owned(data.into())) - } - - fn as_usize(&self) -> usize { - self.inner.expose_provenance() - } - - fn from_usize(addr: usize) -> Self { - Self::from_ptr(core::ptr::with_exposed_provenance_mut(addr)) - } -} - -impl RawMutPointer - for RemoteMutPtr -{ - unsafe fn 
write_at_offset<'a>(self, count: isize, value: T) -> Option<()> { - A::write_at_offset(self.inner, count, value) - } - - fn mutate_subslice_with( - self, - _range: impl core::ops::RangeBounds, - _f: impl FnOnce(&mut [T]) -> R, - ) -> Option { - unimplemented!("use write_slice_at_offset instead") - } - - fn copy_from_slice(self, start_offset: usize, buf: &[T]) -> Option<()> - where - T: Copy, - { - A::copy_from_slice(start_offset, buf) - } -} - // TODO: Sample no-op implementations to be removed. Implement a validation mechanism for // VTL0 physical addresses (e.g., ensure this physical address does not belong to VTL1) -pub struct Novalidation; -impl ValidateAccess for Novalidation { +pub struct NoValidation; +impl ValidateAccess for NoValidation { fn validate(pa: usize) -> Result { Ok(pa) } } -pub struct Vtl0PhysMemoryAccess; -impl RemoteMemoryAccess for Vtl0PhysMemoryAccess { - fn read_at_offset(_ptr: *mut T, _count: isize) -> Option { - // TODO: read a value from VTL0 physical memory - let val: T = unsafe { core::mem::zeroed() }; - Some(val) - } - - fn write_at_offset(_ptr: *mut T, _count: isize, _value: T) -> Option<()> { - // TODO: write a value to VTL0 physical memory - Some(()) - } - - fn slice_from(_ptr: *mut T, len: usize) -> Option> { - // TODO: read a slice from VTL0 physical memory - let mut data: alloc::vec::Vec = alloc::vec::Vec::new(); - data.reserve_exact(len); - unsafe { data.set_len(len) }; - Some(data.into_boxed_slice()) +pub struct MockPhysMemoryMapper; +impl PhysPageMapper for MockPhysMemoryMapper { + unsafe fn vmap( + pages: PhysPageArray, + _perms: PhysPageMapPermissions, + ) -> Result, PhysPointerError> { + Ok(PhysPageMapInfo { + base: core::ptr::null_mut(), + size: pages.0.len() * ALIGN, + }) } - - fn copy_from_slice(_start_offset: usize, _buf: &[T]) -> Option<()> { - // TODO: write a slice to VTL0 physical memory - Some(()) + unsafe fn vunmap( + _vmap_info: PhysPageMapInfo, + ) -> Result<(), PhysPointerError> { + Ok(()) } } @@ -473,12 +397,16 @@ pub enum PhysPointerError { "The total size of the given pages ({0} bytes) is insufficient for the requested type ({1} bytes)" )] InsufficientPhysicalPages(usize, usize), + #[error("Index {0} is out of bounds (count: {1})")] + IndexOutOfBounds(usize, usize), + #[error("Physical address {0:#x} is already mapped")] + AlreadyMapped(usize), + #[error("Physical address {0:#x} is unmapped")] + Unmapped(usize), + #[error("No mapping information available")] + NoMappingInfo, } -/// Normal world const pointer type. For now, we only consider VTL0 physical memory but it can be -/// something else like TrustZone normal world, other VMPL or TD partition, or other processes. -pub type NormalWorldConstPtr = RemoteConstPtr; - -/// Normal world mutable pointer type. For now, we only consider VTL0 physical memory but it can be -/// something else like TrustZone normal world, other VMPL or TD partition, or other processes. -pub type NormalWorldMutPtr = RemoteMutPtr; +/// Normal world pointer type using MockPhysMemoryMapper for testing purposes. 
+pub type NormalWorldPtr = + PhysMappedPtr; From ebe939366635211e01384825f40b882bc2158339 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Mon, 22 Dec 2025 19:00:30 +0000 Subject: [PATCH 23/52] separate const and mut ptrs --- litebox_shim_optee/src/msg_handler.rs | 9 +-- litebox_shim_optee/src/ptr.rs | 83 +++++++++++++++++++++++---- 2 files changed, 77 insertions(+), 15 deletions(-) diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index b5af78f3b..89533a1ac 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -12,7 +12,7 @@ //! world physical addresses to exchange a large amount of data. Also, like the OP-TEE //! SMC call, a certain OP-TEE message/command does not involve with any TA (e.g., register //! shared memory). -use crate::ptr::NormalWorldPtr; +use crate::ptr::NormalWorldConstPtr; use alloc::{boxed::Box, vec::Vec}; use hashbrown::HashMap; use litebox::mm::linux::PAGE_SIZE; @@ -72,8 +72,9 @@ pub fn handle_optee_smc_args( | OpteeSmcFunction::CallWithRegdArg => { let msg_arg_addr = smc.optee_msg_arg_phys_addr()?; let msg_arg_addr = usize::try_from(msg_arg_addr).unwrap(); - let mut ptr = NormalWorldPtr::::try_from_usize(msg_arg_addr) - .map_err(|_| OpteeSmcReturn::EBadAddr)?; + let mut ptr = + NormalWorldConstPtr::::try_from_usize(msg_arg_addr) + .map_err(|_| OpteeSmcReturn::EBadAddr)?; let msg_arg = unsafe { ptr.read_at_offset(0) }.map_err(|_| OpteeSmcReturn::EBadAddr)?; Ok(( OpteeSmcResult::Generic { @@ -277,7 +278,7 @@ impl ShmRefMap { let mut cur_addr = usize::try_from(shm_ref_pages_data_phys_addr).unwrap(); loop { let mut cur_ptr = - NormalWorldPtr::::try_from_usize(cur_addr) + NormalWorldConstPtr::::try_from_usize(cur_addr) .map_err(|_| OpteeSmcReturn::EBadAddr)?; let pages_data = unsafe { cur_ptr.read_at_offset(0) }.map_err(|_| OpteeSmcReturn::EBadAddr)?; diff --git a/litebox_shim_optee/src/ptr.rs b/litebox_shim_optee/src/ptr.rs index 6c7a91c5b..3e9eb91a0 100644 --- a/litebox_shim_optee/src/ptr.rs +++ b/litebox_shim_optee/src/ptr.rs @@ -174,7 +174,7 @@ pub trait PhysPageMapper { /// - `V`: The validator type implementing [`ValidateAccess`] trait to validate the physical addresses #[derive(Clone)] #[repr(C)] -pub struct PhysMappedPtr { +pub struct PhysMutPtr { pages: PhysPageArray, offset: usize, count: usize, @@ -185,9 +185,9 @@ pub struct PhysMappedPtr { } impl - PhysMappedPtr + PhysMutPtr { - /// Create a new `PhysMappedPtr` from the given physical page array and offset. + /// Create a new `PhysMutPtr` from the given physical page array and offset. pub fn try_from_page_array( pages: PhysPageArray, offset: usize, @@ -219,7 +219,7 @@ impl _validator: core::marker::PhantomData, }) } - /// Create a new `PhysMappedPtr` from the given contiguous physical address and length. + /// Create a new `PhysMutPtr` from the given contiguous physical address and length. /// The caller must ensure that `pa`, ..., `pa+bytes` are both physically and virtually contiguous. pub fn try_from_contiguous_pages(pa: usize, bytes: usize) -> Result { if bytes < core::mem::size_of::() { @@ -244,7 +244,7 @@ impl } Self::try_from_page_array(PhysPageArray(pages.into()), pa - start_page) } - /// Create a new `PhysMappedPtr` from the given physical address for a single object. + /// Create a new `PhysMutPtr` from the given physical address for a single object. /// This is a shortcut for `try_from_contiguous_pages(pa, size_of::())`. 
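+ ///
+ /// For example, a `u16` placed at the last byte of a page is still readable because
+ /// both underlying pages are collected (an illustrative sketch; the address is
+ /// hypothetical and must be virtually contiguous with its successor):
+ ///
+ /// ```ignore
+ /// let mut ptr = NormalWorldMutPtr::<u16>::try_from_usize(0x4000_0fff)?; // spans two pages
+ /// let v = unsafe { ptr.read_at_offset(0) }?;
+ /// ```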
pub fn try_from_usize(pa: usize) -> Result { Self::try_from_contiguous_pages(pa, core::mem::size_of::()) @@ -345,15 +345,72 @@ impl } } -impl core::fmt::Debug for PhysMappedPtr { +impl core::fmt::Debug for PhysMutPtr { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - f.debug_struct("PhysMappedPtr") - .field("pages", &self.pages.0) + f.debug_struct("PhysMutPtr") + .field("pages[0]", &self.pages.0[0]) .field("offset", &self.offset) .finish_non_exhaustive() } } +/// Represent a physical pointer to a read-only object. This wraps around [`PhysMutPtr`] and +/// exposes only read access. +#[derive(Clone)] +#[repr(C)] +pub struct PhysConstPtr { + inner: PhysMutPtr, +} +impl + PhysConstPtr +{ + /// Create a new `PhysConstPtr` from the given physical page array and offset. + pub fn try_from_page_array( + pages: PhysPageArray, + offset: usize, + ) -> Result { + Ok(Self { + inner: PhysMutPtr::try_from_page_array(pages, offset)?, + }) + } + /// Create a new `PhysConstPtr` from the given contiguous physical address and length. + /// The caller must ensure that `pa`, ..., `pa+bytes` are both physically and virtually contiguous. + pub fn try_from_contiguous_pages(pa: usize, bytes: usize) -> Result { + Ok(Self { + inner: PhysMutPtr::try_from_contiguous_pages(pa, bytes)?, + }) + } + /// Create a new `PhysConstPtr` from the given physical address for a single object. + /// This is a shortcut for `try_from_contiguous_pages(pa, size_of::())`. + pub fn try_from_usize(pa: usize) -> Result { + Ok(Self { + inner: PhysMutPtr::try_from_usize(pa)?, + }) + } + /// Read the value at the given type-aware offset from the physical pointer. + /// + /// # Safety + /// + /// The caller should be aware that the given physical address might be concurrently accessed by + /// other entities (e.g., the normal world kernel) if there is no extra security mechanism + /// in place (e.g., by the hypervisor or hardware). + pub unsafe fn read_at_offset( + &mut self, + count: usize, + ) -> Result, PhysPointerError> { + unsafe { self.inner.read_at_offset(count) } + } +} + +impl core::fmt::Debug for PhysConstPtr { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("PhysConstPtr") + .field("pages[0]", &self.inner.pages.0[0]) + .field("offset", &self.inner.offset) + .finish_non_exhaustive() + } +} + // TODO: Sample no-op implementations to be removed. Implement a validation mechanism for // VTL0 physical addresses (e.g., ensure this physical address does not belong to VTL1) pub struct NoValidation; @@ -407,6 +464,10 @@ pub enum PhysPointerError { NoMappingInfo, } -/// Normal world pointer type using MockPhysMemoryMapper for testing purposes. -pub type NormalWorldPtr = - PhysMappedPtr; +/// Normal world constant pointer type using MockPhysMemoryMapper for testing purposes. +pub type NormalWorldConstPtr = + PhysConstPtr; + +/// Normal world mutable pointer type using MockPhysMemoryMapper for testing purposes. 
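+///
+/// For example (an illustrative sketch; `pa` is a hypothetical normal-world physical
+/// address), a write goes through a transient mapping rather than a raw dereference:
+///
+/// ```ignore
+/// let mut ptr = NormalWorldMutPtr::<u32>::try_from_usize(pa)?;
+/// unsafe { ptr.write_at_offset(0, 0xdead_beef)? };
+/// ```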
+pub type NormalWorldMutPtr = + PhysMutPtr; From 88e215434f14b25ab1403d17ac62e88efdb75c90 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Mon, 22 Dec 2025 21:31:09 +0000 Subject: [PATCH 24/52] read/write slice --- litebox_shim_optee/src/ptr.rs | 201 +++++++++++++++++++++++++++++----- 1 file changed, 171 insertions(+), 30 deletions(-) diff --git a/litebox_shim_optee/src/ptr.rs b/litebox_shim_optee/src/ptr.rs index 3e9eb91a0..4d01e3efe 100644 --- a/litebox_shim_optee/src/ptr.rs +++ b/litebox_shim_optee/src/ptr.rs @@ -61,6 +61,16 @@ use litebox::platform::page_mgmt::MemoryRegionPermissions; use thiserror::Error; +#[inline] +fn align_down(address: usize, align: usize) -> usize { + address & !(align - 1) +} + +#[inline] +fn align_up(len: usize, align: usize) -> usize { + len.next_multiple_of(align) +} + /// Trait to validate that a physical pointer does not belong to LiteBox-managed memory /// (including both kernel and userspace memory). /// @@ -85,17 +95,29 @@ pub trait ValidateAccess { /// Data structure for an array of physical pages. These physical pages should be /// virtually contiguous in the source address space. #[derive(Clone)] -pub struct PhysPageArray(alloc::boxed::Box<[usize]>); - -impl PhysPageArray<4096> { +pub struct PhysPageArray { + inner: alloc::boxed::Box<[usize]>, +} +impl PhysPageArray { /// Create a new `PhysPageArray` from the given slice of physical addresses. pub fn try_from_slice(addrs: &[usize]) -> Result { for addr in addrs { - if !addr.is_multiple_of(4096) { - return Err(PhysPointerError::UnalignedPhysicalAddress(*addr, 4096)); + if !addr.is_multiple_of(ALIGN) { + return Err(PhysPointerError::UnalignedPhysicalAddress(*addr, ALIGN)); } } - Ok(Self(addrs.into())) + Ok(Self { + inner: alloc::boxed::Box::from(addrs), + }) + } + pub fn is_empty(&self) -> bool { + self.inner.is_empty() + } + pub fn len(&self) -> usize { + self.inner.len() + } + pub fn iter(&self) -> impl Iterator { + self.inner.iter() } } @@ -134,6 +156,18 @@ impl From for PhysPageMapPermissions { phys_perms } } +impl From for MemoryRegionPermissions { + fn from(perms: PhysPageMapPermissions) -> Self { + let mut mem_perms = MemoryRegionPermissions::empty(); + if perms.contains(PhysPageMapPermissions::READ) { + mem_perms |= MemoryRegionPermissions::READ; + } + if perms.contains(PhysPageMapPermissions::WRITE) { + mem_perms |= MemoryRegionPermissions::WRITE; + } + mem_perms + } +} /// Trait to map and unmap physical pages into virtually contiguous address space. /// @@ -195,10 +229,14 @@ impl if offset >= ALIGN { return Err(PhysPointerError::InvalidBaseOffset(offset, ALIGN)); } - let size = if pages.0.is_empty() { + let size = if pages.is_empty() { 0 } else { - ALIGN - offset + (pages.0.len() - 1) * ALIGN + pages + .len() + .checked_mul(ALIGN) + .ok_or(PhysPointerError::Overflow)? 
+ - offset }; if size < core::mem::size_of::() { return Err(PhysPointerError::InsufficientPhysicalPages( @@ -206,7 +244,7 @@ impl core::mem::size_of::(), )); } - for pa in &pages.0 { + for pa in pages.iter() { V::validate::(*pa)?; } Ok(Self { @@ -228,28 +266,26 @@ impl core::mem::size_of::(), )); } - let start_page = pa - (pa % ALIGN); - let end_page = pa + bytes; - let end_page_aligned = if end_page.is_multiple_of(ALIGN) { - end_page - } else { - end_page + (ALIGN - (end_page % ALIGN)) - }; - let mut pages = alloc::vec::Vec::new(); + let start_page = align_down(pa, ALIGN); + let end_page = align_up( + pa.checked_add(bytes).ok_or(PhysPointerError::Overflow)?, + ALIGN, + ); + let mut pages = alloc::vec::Vec::with_capacity((end_page - start_page) / ALIGN); let mut current_page = start_page; - while current_page < end_page_aligned { + while current_page < end_page { V::validate::(current_page)?; pages.push(current_page); current_page += ALIGN; } - Self::try_from_page_array(PhysPageArray(pages.into()), pa - start_page) + Self::try_from_page_array(PhysPageArray::try_from_slice(&pages)?, pa - start_page) } /// Create a new `PhysMutPtr` from the given physical address for a single object. /// This is a shortcut for `try_from_contiguous_pages(pa, size_of::())`. pub fn try_from_usize(pa: usize) -> Result { Self::try_from_contiguous_pages(pa, core::mem::size_of::()) } - /// Read the value at the given type-aware offset from the physical pointer. + /// Read the value at the given offset from the physical pointer. /// /// # Safety /// @@ -287,10 +323,51 @@ impl } unsafe { buffer.assume_init() } }; - self.unmap_all()?; + self.unmap()?; Ok(alloc::boxed::Box::new(val)) } - /// Write the value at the given type-aware offset to the physical pointer. + /// Read a slice of values at the given offset from the physical pointer. + /// + /// # Safety + /// + /// The caller should be aware that the given physical address might be concurrently accessed by + /// other entities (e.g., the normal world kernel) if there is no extra security mechanism + /// in place (e.g., by the hypervisor or hardware). + pub unsafe fn read_slice_at_offset( + &mut self, + count: usize, + values: &mut [T], + ) -> Result<(), PhysPointerError> { + if count + .checked_add(values.len()) + .is_none_or(|end| end > self.count) + { + return Err(PhysPointerError::IndexOutOfBounds(count, self.count)); + } + self.map_all(PhysPageMapPermissions::READ)?; + let Some(map_info) = &self.map_info else { + return Err(PhysPointerError::NoMappingInfo); + }; + let addr = unsafe { map_info.base.add(self.offset) } + .cast::() + .wrapping_add(count); + if (addr as usize).is_multiple_of(core::mem::align_of::()) { + unsafe { + core::ptr::copy_nonoverlapping(addr, values.as_mut_ptr(), values.len()); + } + } else { + unsafe { + core::ptr::copy_nonoverlapping( + addr.cast::(), + values.as_mut_ptr().cast::(), + core::mem::size_of_val(values), + ); + } + } + self.unmap()?; + Ok(()) + } + /// Write the value at the given offset to the physical pointer. /// /// # Safety /// @@ -317,7 +394,48 @@ impl } else { unsafe { core::ptr::write_unaligned(addr, value) }; } - self.unmap_all()?; + self.unmap()?; + Ok(()) + } + /// Write a slice of values at the given offset to the physical pointer. + /// + /// # Safety + /// + /// The caller should be aware that the given physical address might be concurrently accessed by + /// other entities (e.g., the normal world kernel) if there is no extra security mechanism + /// in place (e.g., by the hypervisor or hardware). 
+ pub unsafe fn write_slice_at_offset( + &mut self, + count: usize, + values: &[T], + ) -> Result<(), PhysPointerError> { + if count + .checked_add(values.len()) + .is_none_or(|end| end > self.count) + { + return Err(PhysPointerError::IndexOutOfBounds(count, self.count)); + } + self.map_all(PhysPageMapPermissions::READ | PhysPageMapPermissions::WRITE)?; + let Some(map_info) = &self.map_info else { + return Err(PhysPointerError::NoMappingInfo); + }; + let addr = unsafe { map_info.base.add(self.offset) } + .cast::() + .wrapping_add(count); + if (addr as usize).is_multiple_of(core::mem::align_of::()) { + unsafe { + core::ptr::copy_nonoverlapping(values.as_ptr(), addr, values.len()); + } + } else { + unsafe { + core::ptr::copy_nonoverlapping( + values.as_ptr().cast::(), + addr.cast::(), + core::mem::size_of_val(values), + ); + } + } + self.unmap()?; Ok(()) } /// Map the physical pages if not already mapped. @@ -328,11 +446,13 @@ impl } Ok(()) } else { - Err(PhysPointerError::AlreadyMapped(self.pages.0[0])) + Err(PhysPointerError::AlreadyMapped( + self.pages.iter().next().copied().unwrap_or(0), + )) } } /// Unmap the physical pages if mapped. - fn unmap_all(&mut self) -> Result<(), PhysPointerError> { + fn unmap(&mut self) -> Result<(), PhysPointerError> { if let Some(map_info) = self.map_info.take() { unsafe { M::vunmap(map_info)?; @@ -340,7 +460,9 @@ impl self.map_info = None; Ok(()) } else { - Err(PhysPointerError::Unmapped(self.pages.0[0])) + Err(PhysPointerError::Unmapped( + self.pages.iter().next().copied().unwrap_or(0), + )) } } } @@ -348,7 +470,7 @@ impl impl core::fmt::Debug for PhysMutPtr { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_struct("PhysMutPtr") - .field("pages[0]", &self.pages.0[0]) + .field("pages[0]", &self.pages.iter().next().copied().unwrap_or(0)) .field("offset", &self.offset) .finish_non_exhaustive() } @@ -387,7 +509,7 @@ impl inner: PhysMutPtr::try_from_usize(pa)?, }) } - /// Read the value at the given type-aware offset from the physical pointer. + /// Read the value at the given offset from the physical pointer. /// /// # Safety /// @@ -400,12 +522,29 @@ impl ) -> Result, PhysPointerError> { unsafe { self.inner.read_at_offset(count) } } + /// Read a slice of values at the given offset from the physical pointer. + /// + /// # Safety + /// + /// The caller should be aware that the given physical address might be concurrently accessed by + /// other entities (e.g., the normal world kernel) if there is no extra security mechanism + /// in place (e.g., by the hypervisor or hardware). 
+ pub unsafe fn read_slice_at_offset( + &mut self, + count: usize, + values: &mut [T], + ) -> Result<(), PhysPointerError> { + unsafe { self.inner.read_slice_at_offset(count, values) } + } } impl core::fmt::Debug for PhysConstPtr { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_struct("PhysConstPtr") - .field("pages[0]", &self.inner.pages.0[0]) + .field( + "pages[0]", + &self.inner.pages.iter().next().copied().unwrap_or(0), + ) .field("offset", &self.inner.offset) .finish_non_exhaustive() } @@ -428,7 +567,7 @@ impl PhysPageMapper for MockPhysMemoryMapper { ) -> Result, PhysPointerError> { Ok(PhysPageMapInfo { base: core::ptr::null_mut(), - size: pages.0.len() * ALIGN, + size: pages.iter().count() * ALIGN, }) } unsafe fn vunmap( @@ -462,6 +601,8 @@ pub enum PhysPointerError { Unmapped(usize), #[error("No mapping information available")] NoMappingInfo, + #[error("Overflow occurred during calculation")] + Overflow, } /// Normal world constant pointer type using MockPhysMemoryMapper for testing purposes. From 4a2a7b328ceff000249f26b9c6201e070ddea715 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Mon, 22 Dec 2025 23:31:46 +0000 Subject: [PATCH 25/52] revised --- dev_tests/src/ratchet.rs | 1 + litebox_shim_optee/src/ptr.rs | 266 ++++++++++++++++++++++++++-------- 2 files changed, 205 insertions(+), 62 deletions(-) diff --git a/dev_tests/src/ratchet.rs b/dev_tests/src/ratchet.rs index 3fec680f0..21b3725fc 100644 --- a/dev_tests/src/ratchet.rs +++ b/dev_tests/src/ratchet.rs @@ -65,6 +65,7 @@ fn ratchet_maybe_uninit() -> Result<()> { ("litebox_platform_linux_userland/", 3), ("litebox_platform_lvbs/", 5), ("litebox_shim_linux/", 5), + ("litebox_shim_optee/", 1), ], |file| { Ok(file diff --git a/litebox_shim_optee/src/ptr.rs b/litebox_shim_optee/src/ptr.rs index 4d01e3efe..4988a656c 100644 --- a/litebox_shim_optee/src/ptr.rs +++ b/litebox_shim_optee/src/ptr.rs @@ -15,49 +15,49 @@ //! exposing APIs like Linux kernel's `vmap()` and `vunmap()`). However, this module //! does not take these approaches due to scalability (e.g., how to deal with a system //! with terabytes of physical memory?) and security concerns (e.g., data corruption or -//! information leakage due to concurrent and persistent access). +//! information leakage due to concurrent or persistent access). //! //! Instead, the approach this module takes is to map the required physical memory -//! region on-demand when accessing them while using a buffer to copy data to/from -//! those regions. This way, this module can ensure that data must be copied into -//! LiteBox-managed memory before being used while avoiding any unknown side effects -//! due to persistent memory mapping. +//! region on-demand when accessing them while using a LiteBox-managed buffer to copy +//! data to/from those regions. This way, this module can ensure that data must be +//! copied into LiteBox-managed memory before being used while avoiding any unknown +//! side effects due to persistent memory mapping. //! //! Considerations: //! //! Ideally, this module should be able to validate whether a given physical address //! is okay to access or even exists in the first place. For example, accessing //! LiteBox's own memory with this physical pointer abstraction must be prohibited to -//! prevent the Boomerang attack. Also, some device memory is mapped to certain -//! physical address ranges and LiteBox should not touch them without in-depth -//! knowledge. However, this is a bit tricky because, in many cases, LiteBox does -//! 
not directly interact with the underlying hardware or BIOS/UEFI. In the case of
-//! LVBS, LiteBox obtains the physical memory information from VTL0 including the
-//! total physical memory size and the memory range assigned to VTL1/LiteBox.
-//! Thus, this module can at least confirm a given physical address does not belong
-//! to VTL1's physical memory.
+//! prevent the Boomerang attack and any other undefined memory access. Also, some
+//! device memory is mapped to certain physical address ranges and LiteBox should not
+//! touch them without in-depth knowledge. However, this is a bit tricky because, in
+//! many cases, LiteBox does not directly interact with the underlying hardware or
+//! BIOS/UEFI such that it does not have complete knowledge of the physical memory
+//! layout. In the case of LVBS, LiteBox obtains the physical memory information
+//! from VTL0 including the total physical memory size and the memory range assigned
+//! to VTL1/LiteBox. Thus, this module can at least confirm a given physical address
+//! does not belong to VTL1's physical memory.
 //!
 //! This module should allow byte-level access while transparently handling page
 //! mapping and data access across page boundaries. This could become complicated
-//! when we consider multiple page sizes (e.g., 4 KiB, 2 MiB, 1 GiB). Also,
+//! when we consider multiple page sizes (e.g., 4 KiB, 2 MiB, 1 GiB). Also,
 //! unaligned access needs to be considered.
 //!
 //! In addition, often times, this physical pointer abstraction is involved with
 //! a list of physical addresses (i.e., scatter-gather list). For example, in
 //! the worst case, a two-byte value can span across two non-contiguous physical
-//! pages. Thus, to enhance the performance, we may need to consider mapping
-//! multiple pages at once, copy data from/to them, and unmap them later. Currently,
-//! our implementation (in `litebox_platform_lvbs`) does not implement this
-//! functionality yet and it just maps/unmaps one page at a time (this works but is
-//! inefficient).
+//! pages (the last byte of the first page and the first byte of the second page).
+//! Thus, to enhance the performance, we may need to consider mapping multiple pages
+//! at once, copy data from/to them, and unmap them later.
 //!
 //! When this module needs to access data across physical page boundaries, it assumes
 //! that those physical pages are virtually contiguous in VTL0 or normal-world address
-//! space. Otherwise, this module could end up with accessing incorrect data. This is
+//! space. Otherwise, this module could end up accessing unrelated data. This is
 //! a best-effort assumption and ensuring this is the caller's responsibility (e.g., even
-//! if this module always requires a list of physical addresses, the caller can provide
-//! a wrong list by mistake or intentionally).
+//! if this module always requires a list of physical addresses, the caller might
+//! provide a wrong list by mistake or intentionally).
+use core::ops::Deref;
 use litebox::platform::page_mgmt::MemoryRegionPermissions;
 use thiserror::Error;
@@ -77,7 +77,8 @@ fn align_up(len: usize, align: usize) -> usize {
 /// This validation is mainly to deal with the Boomerang attack where a normal-world client
 /// tricks the secure-world kernel (i.e., LiteBox) to access the secure-world memory.
 /// However, even if there is no such threat (e.g., no normal/secure world separation),
-/// this validation is still beneficial to ensure the memory safety.
+/// this validation is still beneficial to ensure memory safety by not accessing
+/// LiteBox-managed memory without going through its memory allocator.
 ///
 /// Succeeding these operations does not guarantee that the physical pointer is valid to
 /// access, just that it is outside of LiteBox-managed memory and won't be used to access
 /// it as an unmanaged channel.
 pub trait ValidateAccess {
 /// Validate that the given physical pointer does not belong to LiteBox-managed memory.
 ///
 /// Here, we do not use `*const T` or `*mut T` because this is a physical pointer which
 /// must not be dereferenced directly.
 ///
- /// Returns `Some(pa)` if valid. If the pointer is not valid, returns `None`.
+ /// Returns `Ok(pa)` if valid. If the pointer is not valid, returns `Err(PhysPointerError)`.
 fn validate(pa: usize) -> Result;
 }
@@ -100,6 +101,8 @@ pub struct PhysPageArray {
 }
 impl PhysPageArray {
 /// Create a new `PhysPageArray` from the given slice of physical addresses.
+ ///
+ /// All page addresses must be aligned to `ALIGN`.
 pub fn try_from_slice(addrs: &[usize]) -> Result {
 for addr in addrs {
 if !addr.is_multiple_of(ALIGN) {
 return Err(PhysPointerError::UnalignedPhysicalAddress(*addr, ALIGN));
 }
 }
 Ok(Self {
 inner: alloc::boxed::Box::from(addrs),
 })
 }
+ /// Check if the array is empty.
 pub fn is_empty(&self) -> bool {
 self.inner.is_empty()
 }
+ /// Return the number of physical pages in the array.
 pub fn len(&self) -> usize {
 self.inner.len()
 }
- pub fn iter(&self) -> impl Iterator {
- self.inner.iter()
+ /// Return the first physical address in the array, if any.
+ pub fn first(&self) -> Option {
+ self.inner.first().copied()
 }
 }
+impl core::iter::Iterator for PhysPageArray {
+ type Item = usize;
+ fn next(&mut self) -> Option {
+ // Detach the first address so that iteration actually advances and terminates
+ // (reallocates the backing storage; page lists are expected to be short).
+ let first = self.inner.first().copied()?;
+ self.inner = self.inner[1..].into();
+ Some(first)
+ }
+}
+impl core::ops::Deref for PhysPageArray {
+ type Target = [usize];
+ fn deref(&self) -> &Self::Target {
+ &self.inner
+ }
+}
 pub trait PhysPageMapper {
 /// Map the given [`PhysPageArray`] into virtually contiguous address space with the given
 /// [`PhysPageMapPermissions`] while returning [`PhysPageMapInfo`].
+ ///
 /// This function is analogous to Linux kernel's `vmap()`.
 ///
 /// # Safety
 ///
- /// The caller must ensure that `pages` are not in active use. LiteBox itself cannot fully guarantee this
- /// and it needs some helps from the caller, hypervisor, or hardware.
+ /// The caller should ensure that `pages` are not in active use by other entities. LiteBox
+ /// itself cannot fully guarantee this and it needs some help from the caller, hypervisor,
+ /// or hardware.
+ /// Multiple LiteBox threads might concurrently call this function with overlapping physical
+ /// pages, so the implementation should safely handle such cases.
 unsafe fn vmap(
 pages: PhysPageArray,
 perms: PhysPageMapPermissions,
 ) -> Result, PhysPointerError>;
 /// Unmap the previously mapped virtually contiguous address space ([`PhysPageMapInfo`]).
+ ///
 /// This function is analogous to Linux kernel's `vunmap()`.
 ///
 /// # Safety
 ///
- /// The caller must ensure that the virtual addresses belonging to `vmap_info` are not in active use.
- /// Like `vmap()`, LiteBox itself cannot fully guarantee this and it needs some helps from other parties.
+ /// The caller should ensure that the virtual addresses belonging to `vmap_info` are not in
+ /// active use by other entities. Like `vmap()`, LiteBox itself cannot fully guarantee this
+ /// and it needs some help from other parties.
+ /// Multiple LiteBox threads might concurrently call this function with overlapping physical
+ /// pages, so the implementation should safely handle such cases.
 unsafe fn vunmap(
 vmap_info: PhysPageMapInfo,
 ) -> Result<(), PhysPointerError>;
}
@@ -222,6 +252,9 @@ impl
 PhysMutPtr
{
 /// Create a new `PhysMutPtr` from the given physical page array and offset.
+ ///
+ /// All addresses in `pages` must be valid and aligned to `ALIGN`, and `offset` must be smaller than `ALIGN`.
+ /// Also, `pages` must contain enough pages to cover at least one object of type `T` starting from `offset`.
 pub fn try_from_page_array(
 pages: PhysPageArray,
 offset: usize,
@@ -258,6 +291,8 @@ impl
 })
 }
 /// Create a new `PhysMutPtr` from the given contiguous physical address and length.
+ ///
+ /// This is a shortcut for `try_from_page_array([align_down(pa), ..., align_up(align_down(pa) + bytes)], pa % ALIGN)`.
 /// The caller must ensure that `pa`, ..., `pa+bytes` are both physically and virtually contiguous.
 pub fn try_from_contiguous_pages(pa: usize, bytes: usize) -> Result {
 if bytes < core::mem::size_of::() {
@@ -281,7 +316,10 @@ impl
 Self::try_from_page_array(PhysPageArray::try_from_slice(&pages)?, pa - start_page)
 }
 /// Create a new `PhysMutPtr` from the given physical address for a single object.
+ ///
 /// This is a shortcut for `try_from_contiguous_pages(pa, size_of::())`.
+ ///
+ /// Note: This module doesn't provide `as_usize` because LiteBox should not dereference physical addresses directly.
 pub fn try_from_usize(pa: usize) -> Result {
 Self::try_from_contiguous_pages(pa, core::mem::size_of::())
 }
 /// Read the value at the given offset from the physical pointer.
 ///
 /// # Safety
 ///
 /// The caller should be aware that the given physical address might be concurrently accessed by
 /// other entities (e.g., the normal world kernel) if there is no extra security mechanism
- /// in place (e.g., by the hypervisor or hardware).
+ /// in place (e.g., by the hypervisor or hardware). That is, it might read corrupt data.
 pub unsafe fn read_at_offset(
 &mut self,
 count: usize,
 ) -> Result, PhysPointerError> {
 if count >= self.count {
 return Err(PhysPointerError::IndexOutOfBounds(count, self.count));
 }
- self.map_all(PhysPageMapPermissions::READ)?;
+ let skip = self
+ .offset
+ .checked_add(
+ count
+ .checked_mul(core::mem::size_of::())
+ .ok_or(PhysPointerError::Overflow)?,
+ )
+ .ok_or(PhysPointerError::Overflow)?;
+ let start = skip / ALIGN;
+ let end = (skip + core::mem::size_of::()).div_ceil(ALIGN);
+ unsafe {
+ self.map_range(start, end, PhysPageMapPermissions::READ)?;
+ }
+ // Don't forget to call unmap() before returning to the caller
 let Some(map_info) = &self.map_info else {
+ unsafe {
+ self.unmap()?;
+ }
 return Err(PhysPointerError::NoMappingInfo);
 };
 let addr = unsafe { map_info.base.add(self.offset) }
 .cast::()
 .wrapping_add(count);
 let val = {
 let mut buffer = core::mem::MaybeUninit::::uninit();
 if (addr as usize).is_multiple_of(core::mem::align_of::()) {
 unsafe {
 core::ptr::copy_nonoverlapping(addr, buffer.as_mut_ptr(), 1);
 }
 } else {
 unsafe {
 core::ptr::copy_nonoverlapping(
 addr.cast::(),
 buffer.as_mut_ptr().cast::(),
 core::mem::size_of::(),
 );
 }
 }
 unsafe { buffer.assume_init() }
 };
- self.unmap()?;
+ unsafe {
+ self.unmap()?;
+ }
 Ok(alloc::boxed::Box::new(val))
 }
 /// Read a slice of values at the given offset from the physical pointer.
 ///
 /// # Safety
 ///
 /// The caller should be aware that the given physical address might be concurrently accessed by
 /// other entities (e.g., the normal world kernel) if there is no extra security mechanism
- /// in place (e.g., by the hypervisor or hardware).
+ /// in place (e.g., by the hypervisor or hardware). That is, it might read corrupt data.
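+ ///
+ /// # Example
+ ///
+ /// An illustrative sketch reading a run that straddles the page boundary of a
+ /// scatter-gather list (page addresses are hypothetical):
+ ///
+ /// ```ignore
+ /// let pages = PhysPageArray::try_from_slice(&[0x8000_0000, 0x9000_0000])?;
+ /// let mut ptr = NormalWorldMutPtr::<u64>::try_from_page_array(pages, 0)?;
+ /// let mut buf = [0u64; 4];
+ /// unsafe { ptr.read_slice_at_offset(510, &mut buf)? }; // elements 510..514
+ /// ```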
pub unsafe fn read_slice_at_offset( &mut self, count: usize, @@ -344,8 +400,24 @@ impl { return Err(PhysPointerError::IndexOutOfBounds(count, self.count)); } - self.map_all(PhysPageMapPermissions::READ)?; + let skip = self + .offset + .checked_add( + count + .checked_mul(core::mem::size_of::()) + .ok_or(PhysPointerError::Overflow)?, + ) + .ok_or(PhysPointerError::Overflow)?; + let start = skip / ALIGN; + let end = (skip + core::mem::size_of_val(values)).div_ceil(ALIGN); + unsafe { + self.map_range(start, end, PhysPageMapPermissions::READ)?; + } + // Don't forget to call unmap() before returning to the caller let Some(map_info) = &self.map_info else { + unsafe { + self.unmap()?; + } return Err(PhysPointerError::NoMappingInfo); }; let addr = unsafe { map_info.base.add(self.offset) } @@ -364,7 +436,9 @@ impl ); } } - self.unmap()?; + unsafe { + self.unmap()?; + } Ok(()) } /// Write the value at the given offset to the physical pointer. @@ -373,7 +447,7 @@ impl /// /// The caller should be aware that the given physical address might be concurrently accessed by /// other entities (e.g., the normal world kernel) if there is no extra security mechanism - /// in place (e.g., by the hypervisor or hardware). + /// in place (e.g., by the hypervisor or hardware). That is, data it writes might be overwritten. pub unsafe fn write_at_offset( &mut self, count: usize, @@ -382,8 +456,28 @@ impl if count >= self.count { return Err(PhysPointerError::IndexOutOfBounds(count, self.count)); } - self.map_all(PhysPageMapPermissions::READ | PhysPageMapPermissions::WRITE)?; + let skip = self + .offset + .checked_add( + count + .checked_mul(core::mem::size_of::()) + .ok_or(PhysPointerError::Overflow)?, + ) + .ok_or(PhysPointerError::Overflow)?; + let start = skip / ALIGN; + let end = (skip + core::mem::size_of::()).div_ceil(ALIGN); + unsafe { + self.map_range( + start, + end, + PhysPageMapPermissions::READ | PhysPageMapPermissions::WRITE, + )?; + } + // Don't forget to call unmap() before returning to the caller let Some(map_info) = &self.map_info else { + unsafe { + self.unmap()?; + } return Err(PhysPointerError::NoMappingInfo); }; let addr = unsafe { map_info.base.add(self.offset) } @@ -394,7 +488,9 @@ impl } else { unsafe { core::ptr::write_unaligned(addr, value) }; } - self.unmap()?; + unsafe { + self.unmap()?; + } Ok(()) } /// Write a slice of values at the given offset to the physical pointer. @@ -403,7 +499,7 @@ impl /// /// The caller should be aware that the given physical address might be concurrently accessed by /// other entities (e.g., the normal world kernel) if there is no extra security mechanism - /// in place (e.g., by the hypervisor or hardware). + /// in place (e.g., by the hypervisor or hardware). That is, data it writes might be overwritten. 
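+ ///
+ /// # Example
+ ///
+ /// An illustrative sketch writing a small reply buffer back to the normal world
+ /// (`pa` is a hypothetical normal-world physical address):
+ ///
+ /// ```ignore
+ /// let reply = [0u8; 8];
+ /// let mut ptr = NormalWorldMutPtr::<u8>::try_from_contiguous_pages(pa, reply.len())?;
+ /// unsafe { ptr.write_slice_at_offset(0, &reply)? };
+ /// ```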
pub unsafe fn write_slice_at_offset( &mut self, count: usize, @@ -415,8 +511,28 @@ impl { return Err(PhysPointerError::IndexOutOfBounds(count, self.count)); } - self.map_all(PhysPageMapPermissions::READ | PhysPageMapPermissions::WRITE)?; + let skip = self + .offset + .checked_add( + count + .checked_mul(core::mem::size_of::()) + .ok_or(PhysPointerError::Overflow)?, + ) + .ok_or(PhysPointerError::Overflow)?; + let start = skip / ALIGN; + let end = (skip + core::mem::size_of_val(values)).div_ceil(ALIGN); + unsafe { + self.map_range( + start, + end, + PhysPageMapPermissions::READ | PhysPageMapPermissions::WRITE, + )?; + } + // Don't forget to call unmap() before returning to the caller let Some(map_info) = &self.map_info else { + unsafe { + self.unmap()?; + } return Err(PhysPointerError::NoMappingInfo); }; let addr = unsafe { map_info.base.add(self.offset) } @@ -435,24 +551,45 @@ impl ); } } - self.unmap()?; + unsafe { + self.unmap()?; + } Ok(()) } - /// Map the physical pages if not already mapped. - fn map_all(&mut self, perms: PhysPageMapPermissions) -> Result<(), PhysPointerError> { + /// Map the physical pages from `start` to `end` indexes. + /// + /// # Safety + /// + /// This function assumes that the underlying platform safely handles concurrent mapping/unmapping + /// requests for the same physical pages. + unsafe fn map_range( + &mut self, + start: usize, + end: usize, + perms: PhysPageMapPermissions, + ) -> Result<(), PhysPointerError> { + if start >= end || end > self.pages.len() { + return Err(PhysPointerError::IndexOutOfBounds(end, self.pages.len())); + } if self.map_info.is_none() { + let sub_pages = PhysPageArray::try_from_slice(&self.pages.deref()[start..end])?; unsafe { - self.map_info = Some(M::vmap(self.pages.clone(), perms)?); + self.map_info = Some(M::vmap(sub_pages, perms)?); } Ok(()) } else { Err(PhysPointerError::AlreadyMapped( - self.pages.iter().next().copied().unwrap_or(0), + self.pages.first().unwrap_or(0), )) } } /// Unmap the physical pages if mapped. - fn unmap(&mut self) -> Result<(), PhysPointerError> { + /// + /// # Safety + /// + /// This function assumes that the underlying platform safely handles concurrent mapping/unmapping + /// requests for the same physical pages. + unsafe fn unmap(&mut self) -> Result<(), PhysPointerError> { if let Some(map_info) = self.map_info.take() { unsafe { M::vunmap(map_info)?; @@ -460,9 +597,7 @@ impl self.map_info = None; Ok(()) } else { - Err(PhysPointerError::Unmapped( - self.pages.iter().next().copied().unwrap_or(0), - )) + Err(PhysPointerError::Unmapped(self.pages.first().unwrap_or(0))) } } } @@ -470,7 +605,7 @@ impl impl core::fmt::Debug for PhysMutPtr { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_struct("PhysMutPtr") - .field("pages[0]", &self.pages.iter().next().copied().unwrap_or(0)) + .field("pages[0]", &self.pages.first().unwrap_or(0)) .field("offset", &self.offset) .finish_non_exhaustive() } @@ -486,7 +621,10 @@ pub struct PhysConstPtr { impl PhysConstPtr { - /// Create a new `PhysConstPtr` from the given physical page array and offset. + /// Create a new `PhysMutPtr` from the given physical page array and offset. + /// + /// All addresses in `pages` must be valid and aligned to `ALIGN`, and `offset` must be smaller than `ALIGN`. + /// Also, `pages` must contain enough pages to cover at least one object of type `T` starting from `offset`. 
pub fn try_from_page_array( pages: PhysPageArray, offset: usize, @@ -495,15 +633,20 @@ impl inner: PhysMutPtr::try_from_page_array(pages, offset)?, }) } - /// Create a new `PhysConstPtr` from the given contiguous physical address and length. + /// Create a new `PhysMutPtr` from the given contiguous physical address and length. + /// + /// This is a shortcut for `try_from_page_array([align_down(pa), ..., align_up(align_down(pa) + bytes)], pa % ALIGN)`. /// The caller must ensure that `pa`, ..., `pa+bytes` are both physically and virtually contiguous. pub fn try_from_contiguous_pages(pa: usize, bytes: usize) -> Result { Ok(Self { inner: PhysMutPtr::try_from_contiguous_pages(pa, bytes)?, }) } - /// Create a new `PhysConstPtr` from the given physical address for a single object. + /// Create a new `PhysMutPtr` from the given physical address for a single object. + /// /// This is a shortcut for `try_from_contiguous_pages(pa, size_of::())`. + /// + /// Note: This module doesn't provide `as_usize` because LiteBox should not dereference physical addresses directly. pub fn try_from_usize(pa: usize) -> Result { Ok(Self { inner: PhysMutPtr::try_from_usize(pa)?, @@ -515,7 +658,7 @@ impl /// /// The caller should be aware that the given physical address might be concurrently accessed by /// other entities (e.g., the normal world kernel) if there is no extra security mechanism - /// in place (e.g., by the hypervisor or hardware). + /// in place (e.g., by the hypervisor or hardware). That is, it might read corrupt data. pub unsafe fn read_at_offset( &mut self, count: usize, @@ -528,7 +671,7 @@ impl /// /// The caller should be aware that the given physical address might be concurrently accessed by /// other entities (e.g., the normal world kernel) if there is no extra security mechanism - /// in place (e.g., by the hypervisor or hardware). + /// in place (e.g., by the hypervisor or hardware). That is, it might read corrupt data. pub unsafe fn read_slice_at_offset( &mut self, count: usize, @@ -541,17 +684,14 @@ impl impl core::fmt::Debug for PhysConstPtr { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_struct("PhysConstPtr") - .field( - "pages[0]", - &self.inner.pages.iter().next().copied().unwrap_or(0), - ) + .field("pages[0]", &self.inner.pages.first().unwrap_or(0)) .field("offset", &self.inner.offset) .finish_non_exhaustive() } } -// TODO: Sample no-op implementations to be removed. Implement a validation mechanism for -// VTL0 physical addresses (e.g., ensure this physical address does not belong to VTL1) +/// This is a mock implementation that does no validation. Each platform which supports +/// `PhysMutPtr` and `PhysConstPtr` should provide its `ValidateAccess` implementation. pub struct NoValidation; impl ValidateAccess for NoValidation { fn validate(pa: usize) -> Result { @@ -559,6 +699,8 @@ impl ValidateAccess for NoValidation { } } +/// This is a mock implementation that does no actual mapping. Each platform which supports +/// `PhysMutPtr` and `PhysConstPtr` should provide its `PhysPageMapper` implementation. 
pub struct MockPhysMemoryMapper; impl PhysPageMapper for MockPhysMemoryMapper { unsafe fn vmap( From 1e2b158570b761c25d387c4eabdf5fe00e4b1ded Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Tue, 23 Dec 2025 15:50:29 +0000 Subject: [PATCH 26/52] check page contiguity --- litebox_shim_optee/src/ptr.rs | 78 ++++++++++++++++++++++------------- 1 file changed, 49 insertions(+), 29 deletions(-) diff --git a/litebox_shim_optee/src/ptr.rs b/litebox_shim_optee/src/ptr.rs index 4988a656c..701c1f3bb 100644 --- a/litebox_shim_optee/src/ptr.rs +++ b/litebox_shim_optee/src/ptr.rs @@ -109,6 +109,9 @@ impl PhysPageArray { return Err(PhysPointerError::UnalignedPhysicalAddress(*addr, ALIGN)); } } + // TODO: Remove this check once our platform implementations support virtually + // contiguous non-contiguous physical page mapping. + Self::check_contiguity(addrs)?; Ok(Self { inner: alloc::boxed::Box::from(addrs), }) @@ -125,6 +128,21 @@ impl PhysPageArray { pub fn first(&self) -> Option { self.inner.first().copied() } + /// Checks whether the given physical addresses are contiguous with respect to ALIGN. + /// + /// Note: This is a temporary check to let this module work with our platform implementations + /// which map physical pages with a fixed offset (`MemoryProvider::GVA_OFFSET`) such that + /// do not support non-contiguous physical page mapping with contiguous virtual addresses. + fn check_contiguity(addrs: &[usize]) -> Result<(), PhysPointerError> { + for window in addrs.windows(2) { + let first = window[0]; + let second = window[1]; + if second != first.checked_add(ALIGN).ok_or(PhysPointerError::Overflow)? { + return Err(PhysPointerError::NonContiguousPages); + } + } + Ok(()) + } } impl core::iter::Iterator for PhysPageArray { type Item = usize; @@ -690,35 +708,6 @@ impl core::fmt::Debug for PhysConstPtr(pa: usize) -> Result { - Ok(pa) - } -} - -/// This is a mock implementation that does no actual mapping. Each platform which supports -/// `PhysMutPtr` and `PhysConstPtr` should provide its `PhysPageMapper` implementation. -pub struct MockPhysMemoryMapper; -impl PhysPageMapper for MockPhysMemoryMapper { - unsafe fn vmap( - pages: PhysPageArray, - _perms: PhysPageMapPermissions, - ) -> Result, PhysPointerError> { - Ok(PhysPageMapInfo { - base: core::ptr::null_mut(), - size: pages.iter().count() * ALIGN, - }) - } - unsafe fn vunmap( - _vmap_info: PhysPageMapInfo, - ) -> Result<(), PhysPointerError> { - Ok(()) - } -} - /// Possible errors for physical page access #[non_exhaustive] #[derive(Error, Debug)] @@ -745,6 +734,37 @@ pub enum PhysPointerError { NoMappingInfo, #[error("Overflow occurred during calculation")] Overflow, + #[error("Non-contiguous physical pages in the array")] + NonContiguousPages, +} + +/// This is a mock implementation that does no validation. Each platform which supports +/// `PhysMutPtr` and `PhysConstPtr` should provide its `ValidateAccess` implementation. +pub struct NoValidation; +impl ValidateAccess for NoValidation { + fn validate(pa: usize) -> Result { + Ok(pa) + } +} + +/// This is a mock implementation that does no actual mapping. Each platform which supports +/// `PhysMutPtr` and `PhysConstPtr` should provide its `PhysPageMapper` implementation. 
+pub struct MockPhysMemoryMapper; +impl PhysPageMapper for MockPhysMemoryMapper { + unsafe fn vmap( + _pages: PhysPageArray, + _perms: PhysPageMapPermissions, + ) -> Result, PhysPointerError> { + Ok(PhysPageMapInfo { + base: core::ptr::null_mut(), + size: 0, + }) + } + unsafe fn vunmap( + _vmap_info: PhysPageMapInfo, + ) -> Result<(), PhysPointerError> { + Ok(()) + } } /// Normal world constant pointer type using MockPhysMemoryMapper for testing purposes. From 27152aa1510613ae96a5c2221502581852cdfefd Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Tue, 23 Dec 2025 23:23:02 +0000 Subject: [PATCH 27/52] VmapProvider --- Cargo.lock | 1 + litebox/src/platform/mod.rs | 1 + litebox/src/platform/vmap.rs | 207 ++++++++++++++ litebox_common_optee/Cargo.toml | 1 + litebox_platform_linux_userland/src/lib.rs | 26 ++ litebox_platform_lvbs/src/lib.rs | 21 ++ litebox_shim_optee/src/lib.rs | 3 + litebox_shim_optee/src/msg_handler.rs | 5 +- litebox_shim_optee/src/ptr.rs | 300 ++------------------- 9 files changed, 292 insertions(+), 273 deletions(-) create mode 100644 litebox/src/platform/vmap.rs diff --git a/Cargo.lock b/Cargo.lock index 2e9714a7a..05e678aa8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -799,6 +799,7 @@ dependencies = [ "litebox_common_linux", "modular-bitfield", "num_enum", + "thiserror", ] [[package]] diff --git a/litebox/src/platform/mod.rs b/litebox/src/platform/mod.rs index b05783629..c12d42958 100644 --- a/litebox/src/platform/mod.rs +++ b/litebox/src/platform/mod.rs @@ -7,6 +7,7 @@ pub mod common_providers; pub mod page_mgmt; pub mod trivial_providers; +pub mod vmap; #[cfg(test)] pub(crate) mod mock; diff --git a/litebox/src/platform/vmap.rs b/litebox/src/platform/vmap.rs new file mode 100644 index 000000000..029a4292f --- /dev/null +++ b/litebox/src/platform/vmap.rs @@ -0,0 +1,207 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +use crate::platform::page_mgmt::MemoryRegionPermissions; +use thiserror::Error; + +/// A provider to map and unmap physical pages with virtually contiguous addresses. +/// +/// `ALIGN`: The page frame size. +/// +/// This provider is written to implement `litebox_shim_optee::ptr::PhysMutPtr` and +/// `litebox_shim_optee::ptr::PhysConstPtr`. It can benefit other modules which need +/// Linux kernel's `vmap()` and `vunmap()` functionalities (e.g., HVCI/HEKI, drivers). +pub trait VmapProvider { + /// Data structure for an array of physical pages which are virtually contiguous. + type PhysPageArray; + /// Data structure to maintain the mapping information returned by `vmap()`. + type PhysPageMapInfo; + /// Map the given [`PhysPageArray`] into virtually contiguous addresses with the given + /// [`PhysPageMapPermissions`] while returning [`PhysPageMapInfo`]. This function + /// expects that it can access and update the page table using `&self`. + /// + /// This function is analogous to Linux kernel's `vmap()`. + /// + /// # Safety + /// + /// The caller should ensure that `pages` are not in active use by other entities. + /// Unfortunately, LiteBox itself cannot fully guarantee this and it needs some helps + /// from the caller, hypervisor, or hardware. + /// Multiple LiteBox threads might concurrently call this function (and `vunmap()`) with + /// overlapping physical pages, so the implementation should safely handle such cases. + unsafe fn vmap( + &self, + pages: Self::PhysPageArray, + perms: PhysPageMapPermissions, + ) -> Result; + /// Unmap the previously mapped virtually contiguous addresses ([`PhysPageMapInfo`]). 
+ /// Use `&self` to access and update the page table. + /// + /// This function is analogous to Linux kernel's `vunmap()`. + /// + /// # Safety + /// + /// The caller should ensure that the virtual addresses in `vmap_info` are not in active + /// use by other entities. Like `vmap()`, LiteBox itself cannot fully guarantee this and + /// it needs some helps from other parties. + /// Multiple LiteBox threads might concurrently call this function (and `vmap()`) with + /// overlapping physical pages, so the implementation should safely handle such cases. + unsafe fn vunmap(&self, vmap_info: Self::PhysPageMapInfo) -> Result<(), PhysPointerError>; + /// Validate that the given physical address (with type) does not belong to LiteBox-managed + /// memory. Use `&self` to get the memory layout of the platform (i.e., the physical memory + /// range assigned to LiteBox). + /// + /// This function does not use `*const T` or `*mut T` because it deals with a physical address + /// which must not be dereferenced directly. + /// + /// Returns `Ok(pa)` if valid. If the address is not valid, returns `Err(PhysPointerError)`. + fn validate(&self, pa: usize) -> Result; +} + +/// Data structure for an array of physical pages. These physical pages should be virtually contiguous. +#[derive(Clone)] +pub struct PhysPageArray { + inner: alloc::boxed::Box<[usize]>, +} +impl PhysPageArray { + /// Create a new `PhysPageArray` from the given slice of physical addresses. + /// + /// All page addresses must be aligned to `ALIGN`. + pub fn try_from_slice(addrs: &[usize]) -> Result { + for addr in addrs { + if !addr.is_multiple_of(ALIGN) { + return Err(PhysPointerError::UnalignedPhysicalAddress(*addr, ALIGN)); + } + } + // TODO: Remove this check once our platform implementations support virtually + // contiguous non-contiguous physical page mapping. + Self::check_contiguity(addrs)?; + Ok(Self { + inner: alloc::boxed::Box::from(addrs), + }) + } + /// Check if the array is empty. + pub fn is_empty(&self) -> bool { + self.inner.is_empty() + } + /// Return the number of physical pages in the array. + pub fn len(&self) -> usize { + self.inner.len() + } + /// Return the first physical address in the array if exists. + pub fn first(&self) -> Option { + self.inner.first().copied() + } + /// Checks whether the given physical addresses are contiguous with respect to ALIGN. + /// + /// Note: This is a temporary check to let this module work with our platform implementations + /// which map physical pages with a fixed offset (`MemoryProvider::GVA_OFFSET`) such that + /// do not support non-contiguous physical page mapping with contiguous virtual addresses. + fn check_contiguity(addrs: &[usize]) -> Result<(), PhysPointerError> { + for window in addrs.windows(2) { + let first = window[0]; + let second = window[1]; + if second != first.checked_add(ALIGN).ok_or(PhysPointerError::Overflow)? { + return Err(PhysPointerError::NonContiguousPages); + } + } + Ok(()) + } +} +impl core::iter::Iterator for PhysPageArray { + type Item = usize; + fn next(&mut self) -> Option { + if self.inner.is_empty() { + None + } else { + Some(self.inner[0]) + } + } +} +impl core::ops::Deref for PhysPageArray { + type Target = [usize]; + fn deref(&self) -> &Self::Target { + &self.inner + } +} + +/// Data structure to maintain the mapping information returned by `vmap()`. +/// +/// `base` is the virtual address of the mapped region which is page aligned. +/// `size` is the size of the mapped region in bytes. 
+#[derive(Clone)] +pub struct PhysPageMapInfo { + pub base: *mut u8, + pub size: usize, +} + +bitflags::bitflags! { + /// Physical page map permissions which is a restricted version of + /// [`litebox::platform::page_mgmt::MemoryRegionPermissions`]. + /// + /// This module only supports READ and WRITE permissions. Both EXECUTE and SHARED + /// permissions are explicitly prohibited. + #[derive(Clone, Copy, Debug, PartialEq, Eq)] + pub struct PhysPageMapPermissions: u8 { + /// Readable + const READ = 1 << 0; + /// Writable + const WRITE = 1 << 1; + } +} +impl From for PhysPageMapPermissions { + fn from(perms: MemoryRegionPermissions) -> Self { + let mut phys_perms = PhysPageMapPermissions::empty(); + if perms.contains(MemoryRegionPermissions::READ) { + phys_perms |= PhysPageMapPermissions::READ; + } + if perms.contains(MemoryRegionPermissions::WRITE) { + phys_perms |= PhysPageMapPermissions::WRITE; + } + phys_perms + } +} +impl From for MemoryRegionPermissions { + fn from(perms: PhysPageMapPermissions) -> Self { + let mut mem_perms = MemoryRegionPermissions::empty(); + if perms.contains(PhysPageMapPermissions::READ) { + mem_perms |= MemoryRegionPermissions::READ; + } + if perms.contains(PhysPageMapPermissions::WRITE) { + mem_perms |= MemoryRegionPermissions::WRITE; + } + mem_perms + } +} + +/// Possible errors for physical pointer access with `VmapProvider` +#[non_exhaustive] +#[derive(Error, Debug)] +pub enum PhysPointerError { + #[error("Physical address {0:#x} is invalid to access")] + InvalidPhysicalAddress(usize), + #[error("Physical address {0:#x} is not aligned to {1} bytes")] + UnalignedPhysicalAddress(usize, usize), + #[error("Offset {0:#x} is not aligned to {1} bytes")] + UnalignedOffset(usize, usize), + #[error("Base offset {0:#x} is greater than or equal to alignment ({1} bytes)")] + InvalidBaseOffset(usize, usize), + #[error( + "The total size of the given pages ({0} bytes) is insufficient for the requested type ({1} bytes)" + )] + InsufficientPhysicalPages(usize, usize), + #[error("Index {0} is out of bounds (count: {1})")] + IndexOutOfBounds(usize, usize), + #[error("Physical address {0:#x} is already mapped")] + AlreadyMapped(usize), + #[error("Physical address {0:#x} is unmapped")] + Unmapped(usize), + #[error("No mapping information available")] + NoMappingInfo, + #[error("Overflow occurred during calculation")] + Overflow, + #[error("Non-contiguous physical pages in the array")] + NonContiguousPages, + #[error("The operation is unsupported on this platform")] + UnsupportedOperation, +} diff --git a/litebox_common_optee/Cargo.toml b/litebox_common_optee/Cargo.toml index 5b88e7c9f..901997b43 100644 --- a/litebox_common_optee/Cargo.toml +++ b/litebox_common_optee/Cargo.toml @@ -9,6 +9,7 @@ litebox = { path = "../litebox/", version = "0.1.0" } litebox_common_linux = { path = "../litebox_common_linux/", version = "0.1.0" } modular-bitfield = { version = "0.12.0", default-features = false } num_enum = { version = "0.7.3", default-features = false } +thiserror = { version = "2.0.6", default-features = false } [lints] workspace = true diff --git a/litebox_platform_linux_userland/src/lib.rs b/litebox_platform_linux_userland/src/lib.rs index 20ee472de..5b6901a12 100644 --- a/litebox_platform_linux_userland/src/lib.rs +++ b/litebox_platform_linux_userland/src/lib.rs @@ -12,6 +12,9 @@ use std::time::Duration; use litebox::fs::OFlags; use litebox::platform::UnblockedOrTimedOut; use litebox::platform::page_mgmt::{FixedAddressBehavior, MemoryRegionPermissions}; +use 
litebox::platform::vmap::{ + PhysPageArray, PhysPageMapInfo, PhysPageMapPermissions, PhysPointerError, VmapProvider, +}; use litebox::platform::{ImmediatelyWokenUp, RawConstPointer as _}; use litebox::shim::ContinueOperation; use litebox::utils::{ReinterpretSignedExt, ReinterpretUnsignedExt as _, TruncateExt}; @@ -2143,6 +2146,29 @@ impl litebox::platform::CrngProvider for LinuxUserland { } } +/// Dummy `VmapProvider`. +/// +/// In general, userland platforms do not support `vmap` and `vunmap` (which are kernel functions). +/// We might need to emulate these functions' behaviors using virtual addresses for development or +/// testing, or use a kernel module to provide this functionality (if needed). +impl VmapProvider for LinuxUserland { + type PhysPageArray = PhysPageArray; + type PhysPageMapInfo = PhysPageMapInfo; + unsafe fn vmap( + &self, + _pages: Self::PhysPageArray, + _perms: PhysPageMapPermissions, + ) -> Result { + Err(PhysPointerError::UnsupportedOperation) + } + unsafe fn vunmap(&self, _vmap_info: Self::PhysPageMapInfo) -> Result<(), PhysPointerError> { + Err(PhysPointerError::UnsupportedOperation) + } + fn validate(&self, _pa: usize) -> Result { + Err(PhysPointerError::UnsupportedOperation) + } +} + #[cfg(test)] mod tests { use core::sync::atomic::AtomicU32; diff --git a/litebox_platform_lvbs/src/lib.rs b/litebox_platform_lvbs/src/lib.rs index 7498447da..55408f574 100644 --- a/litebox_platform_lvbs/src/lib.rs +++ b/litebox_platform_lvbs/src/lib.rs @@ -14,6 +14,9 @@ use core::{ sync::atomic::{AtomicU32, AtomicU64}, }; use litebox::platform::page_mgmt::DeallocationError; +use litebox::platform::vmap::{ + PhysPageArray, PhysPageMapInfo, PhysPageMapPermissions, PhysPointerError, VmapProvider, +}; use litebox::platform::{ DebugLogProvider, IPInterfaceProvider, ImmediatelyWokenUp, PageManagementProvider, Punchthrough, RawMutexProvider, StdioProvider, TimeProvider, UnblockedOrTimedOut, @@ -755,6 +758,24 @@ impl StdioProvider for LinuxKernel { } } +impl VmapProvider for LinuxKernel { + type PhysPageArray = PhysPageArray; + type PhysPageMapInfo = PhysPageMapInfo; + unsafe fn vmap( + &self, + _pages: Self::PhysPageArray, + _perms: PhysPageMapPermissions, + ) -> Result { + todo!("use map_vtl0_phys_range()") + } + unsafe fn vunmap(&self, _vmap_info: Self::PhysPageMapInfo) -> Result<(), PhysPointerError> { + todo!("use unmap_vtl0_pages()") + } + fn validate(&self, _pa: usize) -> Result { + todo!("use vtl1_phys_frame_range to validate") + } +} + // NOTE: The below code is a naive workaround to let LVBS code to access the platform. // Rather than doing this, we should implement LVBS interface/provider for the platform. diff --git a/litebox_shim_optee/src/lib.rs b/litebox_shim_optee/src/lib.rs index 57743a6e0..3a721a1c6 100644 --- a/litebox_shim_optee/src/lib.rs +++ b/litebox_shim_optee/src/lib.rs @@ -844,3 +844,6 @@ pub fn session_id_pool<'a>() -> &'a SessionIdPool { static SESSION_ID_POOL: OnceBox = OnceBox::new(); SESSION_ID_POOL.get_or_init(|| alloc::boxed::Box::new(SessionIdPool::new())) } + +pub type NormalWorldConstPtr = crate::ptr::PhysConstPtr; +pub type NormalWorldMutPtr = crate::ptr::PhysMutPtr; diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index 89533a1ac..5147c6286 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + //! 
OP-TEE's message passing is a bit complex because it involves multiple actors
 //! (normal world: client app and driver; secure world: OP-TEE OS and TAs),
 //! consists of multiple layers, and relies on shared memory references (i.e., no serialization).
@@ -12,7 +15,7 @@
 //! world physical addresses to exchange a large amount of data. Also, like the OP-TEE
 //! SMC call, a certain OP-TEE message/command does not involve any TA (e.g., register
 //! shared memory).
-use crate::ptr::NormalWorldConstPtr;
+use crate::NormalWorldConstPtr;
 use alloc::{boxed::Box, vec::Vec};
 use hashbrown::HashMap;
 use litebox::mm::linux::PAGE_SIZE;
diff --git a/litebox_shim_optee/src/ptr.rs b/litebox_shim_optee/src/ptr.rs
index 701c1f3bb..4a00ee9fd 100644
--- a/litebox_shim_optee/src/ptr.rs
+++ b/litebox_shim_optee/src/ptr.rs
@@ -1,7 +1,10 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
 //! Physical Pointer Abstraction with On-demand Mapping
 //!
-//! This module implements types and traits to support accessing physical addresses
-//! (e.g., VTL0 or normal-world physical memory) from LiteBox with on-demand mapping.
+//! This module adds support for accessing physical addresses (e.g., VTL0 or
+//! normal-world physical memory) from LiteBox with on-demand mapping.
 //! In the context of LVBS and OP-TEE, accessing physical memory is necessary
 //! because VTL0 and VTL1 as well as normal world and secure world do not share
 //! the same virtual address space, but they still have to share data through memory.
@@ -10,12 +13,10 @@
 //!
 //! To simplify all these, we could persistently map the entire VTL0/normal-world
 //! physical memory into VTL1/secure-world address space at once and just access them
-//! through corresponding virtual addresses. Also, we could define some APIs to let
-//! LiteBox (shim) map/unmap arbitrary physical addresses (i.e., implementing and
-//! exposing APIs like Linux kernel's `vmap()` and `vunmap()`). However, this module
-//! does not take these approaches due to scalability (e.g., how to deal with a system
-//! with terabytes of physical memory?) and security concerns (e.g., data corruption or
-//! information leakage due to concurrent or persistent access).
+//! through corresponding virtual addresses. However, this module does not take these
+//! approaches due to scalability (e.g., how to deal with a system with terabytes of
+//! physical memory?) and security concerns (e.g., data corruption or information
+//! leakage due to concurrent or persistent access).
 //!
 //! Instead, the approach this module takes is to map the required physical memory
 //! region on-demand when accessing them while using a LiteBox-managed buffer to copy
@@ -57,9 +58,14 @@
 //! if this module always requires a list of physical addresses, the caller might
 //! provide a wrong list by mistake or intentionally).

+// TODO: Since the below `PhysMutPtr` and `PhysConstPtr` are not OP-TEE specific,
+// we can move them to a different crate (e.g., `litebox`) if needed.
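To make the on-demand access pattern above concrete, here is a minimal caller-side sketch (not part of the patch series). The quoted diffs elide generic parameters, so the `PhysMutPtr<T, const ALIGN: usize>` shape, the 4 KiB page size, and the `MsgHeader` type are assumptions for illustration; only `try_from_usize` and `read_at_offset` come from the series itself.

```rust
use litebox::platform::vmap::PhysPointerError;
use litebox_shim_optee::ptr::PhysMutPtr;

/// Hypothetical normal-world message header, for illustration only.
#[repr(C)]
#[derive(Clone, Copy)]
struct MsgHeader {
    cmd: u32,
    num_params: u32,
}

/// Copy one header out of normal-world memory at physical address `pa`.
fn read_header(pa: usize) -> Result<MsgHeader, PhysPointerError> {
    // Construction validates alignment and page coverage; nothing is mapped yet.
    let mut ptr: PhysMutPtr<MsgHeader, 4096> = PhysMutPtr::try_from_usize(pa)?;
    // SAFETY: the normal world may write these pages concurrently; we only use
    // the copied-out value, never a live reference into the shared pages.
    let header = unsafe { ptr.read_at_offset(0)? };
    Ok(*header)
}
```

Writes follow the same bracket: `write_at_offset` maps the covering pages writable, copies the value in, and unmaps before returning.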
+ use core::ops::Deref; -use litebox::platform::page_mgmt::MemoryRegionPermissions; -use thiserror::Error; +use litebox::platform::vmap::{ + PhysPageArray, PhysPageMapInfo, PhysPageMapPermissions, PhysPointerError, VmapProvider, +}; +use litebox_platform_multiplex::{Platform, platform}; #[inline] fn align_down(address: usize, align: usize) -> usize { @@ -71,204 +77,25 @@ fn align_up(len: usize, align: usize) -> usize { len.next_multiple_of(align) } -/// Trait to validate that a physical pointer does not belong to LiteBox-managed memory -/// (including both kernel and userspace memory). -/// -/// This validation is mainly to deal with the Boomerang attack where a normal-world client -/// tricks the secure-world kernel (i.e., LiteBox) to access the secure-world memory. -/// However, even if there is no such threat (e.g., no normal/secure world separation), -/// this validation is still beneficial to ensure the memory safety by doing not access -/// LiteBox-managed memory without going through its memory allocator. -/// -/// Succeeding these operations does not guarantee that the physical pointer is valid to -/// access, just that it is outside of LiteBox-managed memory and won't be used to access -/// it as an unmanaged channel. -pub trait ValidateAccess { - /// Validate that the given physical pointer does not belong to LiteBox-managed memory. - /// - /// Here, we do not use `*const T` or `*mut T` because this is a physical pointer which - /// must not be dereferenced directly. - /// - /// Returns `Ok(pa)` if valid. If the pointer is not valid, returns `Err(PhysPointerError)`. - fn validate(pa: usize) -> Result; -} - -/// Data structure for an array of physical pages. These physical pages should be -/// virtually contiguous in the source address space. -#[derive(Clone)] -pub struct PhysPageArray { - inner: alloc::boxed::Box<[usize]>, -} -impl PhysPageArray { - /// Create a new `PhysPageArray` from the given slice of physical addresses. - /// - /// All page addresses must be aligned to `ALIGN`. - pub fn try_from_slice(addrs: &[usize]) -> Result { - for addr in addrs { - if !addr.is_multiple_of(ALIGN) { - return Err(PhysPointerError::UnalignedPhysicalAddress(*addr, ALIGN)); - } - } - // TODO: Remove this check once our platform implementations support virtually - // contiguous non-contiguous physical page mapping. - Self::check_contiguity(addrs)?; - Ok(Self { - inner: alloc::boxed::Box::from(addrs), - }) - } - /// Check if the array is empty. - pub fn is_empty(&self) -> bool { - self.inner.is_empty() - } - /// Return the number of physical pages in the array. - pub fn len(&self) -> usize { - self.inner.len() - } - /// Return the first physical address in the array if exists. - pub fn first(&self) -> Option { - self.inner.first().copied() - } - /// Checks whether the given physical addresses are contiguous with respect to ALIGN. - /// - /// Note: This is a temporary check to let this module work with our platform implementations - /// which map physical pages with a fixed offset (`MemoryProvider::GVA_OFFSET`) such that - /// do not support non-contiguous physical page mapping with contiguous virtual addresses. - fn check_contiguity(addrs: &[usize]) -> Result<(), PhysPointerError> { - for window in addrs.windows(2) { - let first = window[0]; - let second = window[1]; - if second != first.checked_add(ALIGN).ok_or(PhysPointerError::Overflow)? 
{ - return Err(PhysPointerError::NonContiguousPages); - } - } - Ok(()) - } -} -impl core::iter::Iterator for PhysPageArray { - type Item = usize; - fn next(&mut self) -> Option { - if self.inner.is_empty() { - None - } else { - Some(self.inner[0]) - } - } -} -impl core::ops::Deref for PhysPageArray { - type Target = [usize]; - fn deref(&self) -> &Self::Target { - &self.inner - } -} - -/// Data structure to maintain the mapping information returned by `vmap()`. -/// `base` is the virtual address of the mapped region which is page aligned. -/// `size` is the size of the mapped region in bytes. -#[derive(Clone)] -pub struct PhysPageMapInfo { - pub base: *mut u8, - pub size: usize, -} - -bitflags::bitflags! { - /// Physical page map permissions which is a restricted version of - /// [`litebox::platform::page_mgmt::MemoryRegionPermissions`]. - /// - /// This module only supports READ and WRITE permissions. Both EXECUTE and SHARED - /// permissions are explicitly prohibited. - #[derive(Clone, Copy, Debug, PartialEq, Eq)] - pub struct PhysPageMapPermissions: u8 { - /// Readable - const READ = 1 << 0; - /// Writable - const WRITE = 1 << 1; - } -} -impl From for PhysPageMapPermissions { - fn from(perms: MemoryRegionPermissions) -> Self { - let mut phys_perms = PhysPageMapPermissions::empty(); - if perms.contains(MemoryRegionPermissions::READ) { - phys_perms |= PhysPageMapPermissions::READ; - } - if perms.contains(MemoryRegionPermissions::WRITE) { - phys_perms |= PhysPageMapPermissions::WRITE; - } - phys_perms - } -} -impl From for MemoryRegionPermissions { - fn from(perms: PhysPageMapPermissions) -> Self { - let mut mem_perms = MemoryRegionPermissions::empty(); - if perms.contains(PhysPageMapPermissions::READ) { - mem_perms |= MemoryRegionPermissions::READ; - } - if perms.contains(PhysPageMapPermissions::WRITE) { - mem_perms |= MemoryRegionPermissions::WRITE; - } - mem_perms - } -} - -/// Trait to map and unmap physical pages into virtually contiguous address space. -/// -/// The implementation of this trait is platform-specific because it depends on how -/// the underlying platform manages page tables and memory regions. -pub trait PhysPageMapper { - /// Map the given [`PhysPageArray`] into virtually contiguous address space with the given - /// [`PhysPageMapPermissions`] while returning [`PhysPageMapInfo`]. - /// - /// This function is analogous to Linux kernel's `vmap()`. - /// - /// # Safety - /// - /// The caller should ensure that `pages` are not in active use by other entities. LiteBox - /// itself cannot fully guarantee this and it needs some helps from the caller, hypervisor, - /// or hardware. - /// Multiple LiteBox threads might concurrently call this function with overlapping physical - /// pages, so the implementation should safely handle such cases. - unsafe fn vmap( - pages: PhysPageArray, - perms: PhysPageMapPermissions, - ) -> Result, PhysPointerError>; - /// Unmap the previously mapped virtually contiguous address space ([`PhysPageMapInfo`]). - /// - /// This function is analogous to Linux kernel's `vunmap()`. - /// - /// # Safety - /// - /// The caller should ensure that the virtual addresses belonging to `vmap_info` are not in - /// active use by other entities. Like `vmap()`, LiteBox itself cannot fully guarantee this - /// and it needs some helps from other parties. - /// Multiple LiteBox threads might concurrently call this function with overlapping physical - /// pages, so the implementation should safely handle such cases. 
- unsafe fn vunmap( - vmap_info: PhysPageMapInfo, - ) -> Result<(), PhysPointerError>; -} - /// Represent a physical pointer to an object with on-demand mapping. /// - `pages`: An array of page-aligned physical addresses ([`PhysPageArray`]). Physical addresses in /// this array should be virtually contiguous. /// - `offset`: The offset within `pages[0]` where the object starts. It should be smaller than `ALIGN`. /// - `count`: The number of objects of type `T` that can be accessed from this pointer. +/// - `map_info`: The mapping information of the currently mapped physical pages, if any. /// - `T`: The type of the object being pointed to. `pages` with respect to `offset` should cover enough /// memory for an object of type `T`. -/// - `V`: The validator type implementing [`ValidateAccess`] trait to validate the physical addresses #[derive(Clone)] #[repr(C)] -pub struct PhysMutPtr { +pub struct PhysMutPtr { pages: PhysPageArray, offset: usize, count: usize, map_info: Option>, _type: core::marker::PhantomData, - _mapper: core::marker::PhantomData, - _validator: core::marker::PhantomData, } -impl - PhysMutPtr -{ +impl PhysMutPtr { /// Create a new `PhysMutPtr` from the given physical page array and offset. /// /// All addresses in `pages` must be valid and aligned to `ALIGN`, and `offset` must be smaller than `ALIGN`. @@ -296,7 +123,7 @@ impl )); } for pa in pages.iter() { - V::validate::(*pa)?; + >::validate::(platform(), *pa)?; } Ok(Self { pages, @@ -304,8 +131,6 @@ impl count: size / core::mem::size_of::(), map_info: None, _type: core::marker::PhantomData, - _mapper: core::marker::PhantomData, - _validator: core::marker::PhantomData, }) } /// Create a new `PhysMutPtr` from the given contiguous physical address and length. @@ -327,7 +152,7 @@ impl let mut pages = alloc::vec::Vec::with_capacity((end_page - start_page) / ALIGN); let mut current_page = start_page; while current_page < end_page { - V::validate::(current_page)?; + >::validate::(platform(), current_page)?; pages.push(current_page); current_page += ALIGN; } @@ -592,7 +417,7 @@ impl if self.map_info.is_none() { let sub_pages = PhysPageArray::try_from_slice(&self.pages.deref()[start..end])?; unsafe { - self.map_info = Some(M::vmap(sub_pages, perms)?); + self.map_info = Some(platform().vmap(sub_pages, perms)?); } Ok(()) } else { @@ -610,7 +435,7 @@ impl unsafe fn unmap(&mut self) -> Result<(), PhysPointerError> { if let Some(map_info) = self.map_info.take() { unsafe { - M::vunmap(map_info)?; + platform().vunmap(map_info)?; } self.map_info = None; Ok(()) @@ -620,7 +445,7 @@ impl } } -impl core::fmt::Debug for PhysMutPtr { +impl core::fmt::Debug for PhysMutPtr { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_struct("PhysMutPtr") .field("pages[0]", &self.pages.first().unwrap_or(0)) @@ -633,12 +458,10 @@ impl core::fmt::Debug for PhysMutPtr { - inner: PhysMutPtr, +pub struct PhysConstPtr { + inner: PhysMutPtr, } -impl - PhysConstPtr -{ +impl PhysConstPtr { /// Create a new `PhysMutPtr` from the given physical page array and offset. /// /// All addresses in `pages` must be valid and aligned to `ALIGN`, and `offset` must be smaller than `ALIGN`. 
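As a worked example of the page-range arithmetic used by the accessors above (all concrete numbers are assumed for illustration): with a 4 KiB `ALIGN`, `T = u64`, a base `offset` of 4000 bytes, and element index `count = 20`, only the second page has to be mapped.

```rust
fn main() {
    // Assumed values: ALIGN = 4096, size_of::<u64>() = 8, offset = 4000, count = 20.
    let (align, size_of_t, offset, count) = (4096usize, 8usize, 4000usize, 20usize);
    let skip = offset + count * size_of_t; // 4160: byte offset of element 20
    let start = skip / align; // 1: index of the first page to map
    let end = (skip + size_of_t).div_ceil(align); // 2: one past the last page to map
    assert_eq!((start, end), (1, 2)); // pages[1] is mapped; pages[0] is not touched
}
```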
@@ -699,7 +522,7 @@ impl } } -impl core::fmt::Debug for PhysConstPtr { +impl core::fmt::Debug for PhysConstPtr { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_struct("PhysConstPtr") .field("pages[0]", &self.inner.pages.first().unwrap_or(0)) @@ -707,70 +530,3 @@ impl core::fmt::Debug for PhysConstPtr(pa: usize) -> Result { - Ok(pa) - } -} - -/// This is a mock implementation that does no actual mapping. Each platform which supports -/// `PhysMutPtr` and `PhysConstPtr` should provide its `PhysPageMapper` implementation. -pub struct MockPhysMemoryMapper; -impl PhysPageMapper for MockPhysMemoryMapper { - unsafe fn vmap( - _pages: PhysPageArray, - _perms: PhysPageMapPermissions, - ) -> Result, PhysPointerError> { - Ok(PhysPageMapInfo { - base: core::ptr::null_mut(), - size: 0, - }) - } - unsafe fn vunmap( - _vmap_info: PhysPageMapInfo, - ) -> Result<(), PhysPointerError> { - Ok(()) - } -} - -/// Normal world constant pointer type using MockPhysMemoryMapper for testing purposes. -pub type NormalWorldConstPtr = - PhysConstPtr; - -/// Normal world mutable pointer type using MockPhysMemoryMapper for testing purposes. -pub type NormalWorldMutPtr = - PhysMutPtr; From 0c32c92aa012b7976705237eae4796f91e2645fc Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Wed, 24 Dec 2025 03:59:35 +0000 Subject: [PATCH 28/52] addressed comments --- litebox/src/platform/vmap.rs | 20 +++++++-- litebox_platform_linux_userland/src/lib.rs | 4 ++ litebox_platform_lvbs/src/lib.rs | 4 ++ litebox_shim_optee/src/ptr.rs | 50 +++++++++++++++------- 4 files changed, 59 insertions(+), 19 deletions(-) diff --git a/litebox/src/platform/vmap.rs b/litebox/src/platform/vmap.rs index 029a4292f..adc55ba23 100644 --- a/litebox/src/platform/vmap.rs +++ b/litebox/src/platform/vmap.rs @@ -14,8 +14,10 @@ use thiserror::Error; pub trait VmapProvider { /// Data structure for an array of physical pages which are virtually contiguous. type PhysPageArray; + /// Data structure to maintain the mapping information returned by `vmap()`. type PhysPageMapInfo; + /// Map the given [`PhysPageArray`] into virtually contiguous addresses with the given /// [`PhysPageMapPermissions`] while returning [`PhysPageMapInfo`]. This function /// expects that it can access and update the page table using `&self`. @@ -24,7 +26,8 @@ pub trait VmapProvider { /// /// # Safety /// - /// The caller should ensure that `pages` are not in active use by other entities. + /// The caller should ensure that `pages` are not in active use by other entities + /// (especially, there should be no read/write or write/write conflicts). /// Unfortunately, LiteBox itself cannot fully guarantee this and it needs some helps /// from the caller, hypervisor, or hardware. /// Multiple LiteBox threads might concurrently call this function (and `vunmap()`) with @@ -34,6 +37,7 @@ pub trait VmapProvider { pages: Self::PhysPageArray, perms: PhysPageMapPermissions, ) -> Result; + /// Unmap the previously mapped virtually contiguous addresses ([`PhysPageMapInfo`]). /// Use `&self` to access and update the page table. /// @@ -47,12 +51,13 @@ pub trait VmapProvider { /// Multiple LiteBox threads might concurrently call this function (and `vmap()`) with /// overlapping physical pages, so the implementation should safely handle such cases. unsafe fn vunmap(&self, vmap_info: Self::PhysPageMapInfo) -> Result<(), PhysPointerError>; + /// Validate that the given physical address (with type) does not belong to LiteBox-managed /// memory. 
Use `&self` to get the memory layout of the platform (i.e., the physical memory /// range assigned to LiteBox). /// /// This function does not use `*const T` or `*mut T` because it deals with a physical address - /// which must not be dereferenced directly. + /// which should not be dereferenced directly. /// /// Returns `Ok(pa)` if valid. If the address is not valid, returns `Err(PhysPointerError)`. fn validate(&self, pa: usize) -> Result; @@ -63,10 +68,11 @@ pub trait VmapProvider { pub struct PhysPageArray { inner: alloc::boxed::Box<[usize]>, } + impl PhysPageArray { /// Create a new `PhysPageArray` from the given slice of physical addresses. /// - /// All page addresses must be aligned to `ALIGN`. + /// All page addresses should be aligned to `ALIGN`. pub fn try_from_slice(addrs: &[usize]) -> Result { for addr in addrs { if !addr.is_multiple_of(ALIGN) { @@ -80,18 +86,22 @@ impl PhysPageArray { inner: alloc::boxed::Box::from(addrs), }) } + /// Check if the array is empty. pub fn is_empty(&self) -> bool { self.inner.is_empty() } + /// Return the number of physical pages in the array. pub fn len(&self) -> usize { self.inner.len() } + /// Return the first physical address in the array if exists. pub fn first(&self) -> Option { self.inner.first().copied() } + /// Checks whether the given physical addresses are contiguous with respect to ALIGN. /// /// Note: This is a temporary check to let this module work with our platform implementations @@ -108,6 +118,7 @@ impl PhysPageArray { Ok(()) } } + impl core::iter::Iterator for PhysPageArray { type Item = usize; fn next(&mut self) -> Option { @@ -118,6 +129,7 @@ impl core::iter::Iterator for PhysPageArray { } } } + impl core::ops::Deref for PhysPageArray { type Target = [usize]; fn deref(&self) -> &Self::Target { @@ -149,6 +161,7 @@ bitflags::bitflags! { const WRITE = 1 << 1; } } + impl From for PhysPageMapPermissions { fn from(perms: MemoryRegionPermissions) -> Self { let mut phys_perms = PhysPageMapPermissions::empty(); @@ -161,6 +174,7 @@ impl From for PhysPageMapPermissions { phys_perms } } + impl From for MemoryRegionPermissions { fn from(perms: PhysPageMapPermissions) -> Self { let mut mem_perms = MemoryRegionPermissions::empty(); diff --git a/litebox_platform_linux_userland/src/lib.rs b/litebox_platform_linux_userland/src/lib.rs index 5b6901a12..4f403a74a 100644 --- a/litebox_platform_linux_userland/src/lib.rs +++ b/litebox_platform_linux_userland/src/lib.rs @@ -2153,7 +2153,9 @@ impl litebox::platform::CrngProvider for LinuxUserland { /// testing, or use a kernel module to provide this functionality (if needed). 
impl VmapProvider for LinuxUserland { type PhysPageArray = PhysPageArray; + type PhysPageMapInfo = PhysPageMapInfo; + unsafe fn vmap( &self, _pages: Self::PhysPageArray, @@ -2161,9 +2163,11 @@ impl VmapProvider for LinuxUserland { ) -> Result { Err(PhysPointerError::UnsupportedOperation) } + unsafe fn vunmap(&self, _vmap_info: Self::PhysPageMapInfo) -> Result<(), PhysPointerError> { Err(PhysPointerError::UnsupportedOperation) } + fn validate(&self, _pa: usize) -> Result { Err(PhysPointerError::UnsupportedOperation) } diff --git a/litebox_platform_lvbs/src/lib.rs b/litebox_platform_lvbs/src/lib.rs index 55408f574..721ea39d5 100644 --- a/litebox_platform_lvbs/src/lib.rs +++ b/litebox_platform_lvbs/src/lib.rs @@ -760,7 +760,9 @@ impl StdioProvider for LinuxKernel { impl VmapProvider for LinuxKernel { type PhysPageArray = PhysPageArray; + type PhysPageMapInfo = PhysPageMapInfo; + unsafe fn vmap( &self, _pages: Self::PhysPageArray, @@ -768,9 +770,11 @@ impl VmapProvider for LinuxKerne ) -> Result { todo!("use map_vtl0_phys_range()") } + unsafe fn vunmap(&self, _vmap_info: Self::PhysPageMapInfo) -> Result<(), PhysPointerError> { todo!("use unmap_vtl0_pages()") } + fn validate(&self, _pa: usize) -> Result { todo!("use vtl1_phys_frame_range to validate") } diff --git a/litebox_shim_optee/src/ptr.rs b/litebox_shim_optee/src/ptr.rs index 4a00ee9fd..0f97e1c15 100644 --- a/litebox_shim_optee/src/ptr.rs +++ b/litebox_shim_optee/src/ptr.rs @@ -98,8 +98,10 @@ pub struct PhysMutPtr { impl PhysMutPtr { /// Create a new `PhysMutPtr` from the given physical page array and offset. /// - /// All addresses in `pages` must be valid and aligned to `ALIGN`, and `offset` must be smaller than `ALIGN`. - /// Also, `pages` must contain enough pages to cover at least one object of type `T` starting from `offset`. + /// All addresses in `pages` should be valid and aligned to `ALIGN`, and `offset` should be smaller + /// than `ALIGN`. Also, `pages` should contain enough pages to cover at least one object of + /// type `T` starting from `offset`. If these conditions are not met, this function returns + /// `Err(PhysPointerError)`. pub fn try_from_page_array( pages: PhysPageArray, offset: usize, @@ -133,10 +135,12 @@ impl PhysMutPtr { _type: core::marker::PhantomData, }) } + /// Create a new `PhysMutPtr` from the given contiguous physical address and length. /// /// This is a shortcut for `try_from_page_array([align_down(pa), ..., align_up(align_down(pa) + bytes)], pa % ALIGN)`. - /// The caller must ensure that `pa`, ..., `pa+bytes` are both physically and virtually contiguous. + /// This function assumes that `pa`, ..., `pa+bytes` are both physically and virtually contiguous. If not, + /// later accesses through `PhysMutPtr` may read/write incorrect data. pub fn try_from_contiguous_pages(pa: usize, bytes: usize) -> Result { if bytes < core::mem::size_of::() { return Err(PhysPointerError::InsufficientPhysicalPages( @@ -158,6 +162,7 @@ impl PhysMutPtr { } Self::try_from_page_array(PhysPageArray::try_from_slice(&pages)?, pa - start_page) } + /// Create a new `PhysMutPtr` from the given physical address for a single object. /// /// This is a shortcut for `try_from_contiguous_pages(pa, size_of::())`. @@ -166,13 +171,14 @@ impl PhysMutPtr { pub fn try_from_usize(pa: usize) -> Result { Self::try_from_contiguous_pages(pa, core::mem::size_of::()) } + /// Read the value at the given offset from the physical pointer. 
///
 /// # Safety
 ///
- /// The caller should be aware that the given physical address might be concurrently accessed by
+ /// The caller should be aware that the given physical address might be concurrently written by
 /// other entities (e.g., the normal world kernel) if there is no extra security mechanism
 /// in place (e.g., by the hypervisor or hardware). That is, it might read corrupt data.
 pub unsafe fn read_at_offset(
 &mut self,
 count: usize,
@@ -225,11 +231,12 @@ impl PhysMutPtr {
 }
 Ok(alloc::boxed::Box::new(val))
 }
+
 /// Read a slice of values at the given offset from the physical pointer.
 ///
 /// # Safety
 ///
- /// The caller should be aware that the given physical address might be concurrently accessed by
+ /// The caller should be aware that the given physical address might be concurrently written by
 /// other entities (e.g., the normal world kernel) if there is no extra security mechanism
 /// in place (e.g., by the hypervisor or hardware). That is, it might read corrupt data.
 pub unsafe fn read_slice_at_offset(
@@ -284,11 +291,12 @@ impl PhysMutPtr {
 }
 Ok(())
 }
+
 /// Write the value at the given offset to the physical pointer.
 ///
 /// # Safety
 ///
- /// The caller should be aware that the given physical address might be concurrently accessed by
+ /// The caller should be aware that the given physical address might be concurrently written by
 /// other entities (e.g., the normal world kernel) if there is no extra security mechanism
 /// in place (e.g., by the hypervisor or hardware). That is, data it writes might be overwritten.
 pub unsafe fn write_at_offset(
@@ -336,11 +344,12 @@ impl PhysMutPtr {
 }
 Ok(())
 }
+
 /// Write a slice of values at the given offset to the physical pointer.
 ///
 /// # Safety
 ///
- /// The caller should be aware that the given physical address might be concurrently accessed by
+ /// The caller should be aware that the given physical address might be concurrently written by
 /// other entities (e.g., the normal world kernel) if there is no extra security mechanism
 /// in place (e.g., by the hypervisor or hardware). That is, data it writes might be overwritten.
 pub unsafe fn write_slice_at_offset(
@@ -399,6 +408,7 @@ impl PhysMutPtr {
 }
 Ok(())
 }
+
 /// Map the physical pages from `start` to `end` indexes.
 ///
 /// # Safety
 ///
@@ -426,6 +436,7 @@ impl PhysMutPtr {
 ))
 }
 }
+
 /// Unmap the physical pages if mapped.
 ///
 /// # Safety
 ///
@@ -462,10 +473,12 @@ pub struct PhysConstPtr {
 inner: PhysMutPtr,
 }
 impl PhysConstPtr {
- /// Create a new `PhysMutPtr` from the given physical page array and offset.
+ /// Create a new `PhysConstPtr` from the given physical page array and offset.
 ///
- /// All addresses in `pages` must be valid and aligned to `ALIGN`, and `offset` must be smaller than `ALIGN`.
- /// Also, `pages` must contain enough pages to cover at least one object of type `T` starting from `offset`.
+ /// All addresses in `pages` should be valid and aligned to `ALIGN`, and `offset` should be smaller
+ /// than `ALIGN`. Also, `pages` should contain enough pages to cover at least one object of
+ /// type `T` starting from `offset`. If these conditions are not met, this function returns
+ /// `Err(PhysPointerError)`.
 pub fn try_from_page_array(
 pages: PhysPageArray,
 offset: usize,
@@ -474,16 +487,19 @@ impl PhysConstPtr {
 inner: PhysMutPtr::try_from_page_array(pages, offset)?,
 })
 }
- /// Create a new `PhysMutPtr` from the given contiguous physical address and length.
+ + /// Create a new `PhysConstPtr` from the given contiguous physical address and length. /// /// This is a shortcut for `try_from_page_array([align_down(pa), ..., align_up(align_down(pa) + bytes)], pa % ALIGN)`. - /// The caller must ensure that `pa`, ..., `pa+bytes` are both physically and virtually contiguous. + /// This function assumes that `pa`, ..., `pa+bytes` are both physically and virtually contiguous. If not, + /// later accesses through `PhysConstPtr` may read incorrect data. pub fn try_from_contiguous_pages(pa: usize, bytes: usize) -> Result { Ok(Self { inner: PhysMutPtr::try_from_contiguous_pages(pa, bytes)?, }) } - /// Create a new `PhysMutPtr` from the given physical address for a single object. + + /// Create a new `PhysConstPtr` from the given physical address for a single object. /// /// This is a shortcut for `try_from_contiguous_pages(pa, size_of::())`. /// @@ -493,11 +509,12 @@ impl PhysConstPtr { inner: PhysMutPtr::try_from_usize(pa)?, }) } + /// Read the value at the given offset from the physical pointer. /// /// # Safety /// - /// The caller should be aware that the given physical address might be concurrently accessed by + /// The caller should be aware that the given physical address might be concurrently written by /// other entities (e.g., the normal world kernel) if there is no extra security mechanism /// in place (e.g., by the hypervisor or hardware). That is, it might read corrupt data. pub unsafe fn read_at_offset( @@ -506,11 +523,12 @@ impl PhysConstPtr { ) -> Result, PhysPointerError> { unsafe { self.inner.read_at_offset(count) } } + /// Read a slice of values at the given offset from the physical pointer. /// /// # Safety /// - /// The caller should be aware that the given physical address might be concurrently accessed by + /// The caller should be aware that the given physical address might be concurrently written by /// other entities (e.g., the normal world kernel) if there is no extra security mechanism /// in place (e.g., by the hypervisor or hardware). That is, it might read corrupt data. pub unsafe fn read_slice_at_offset( From b40a151ed36f629282717d911d719cd447bce329 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Wed, 24 Dec 2025 05:29:11 +0000 Subject: [PATCH 29/52] impl Drop for PhysPtrs --- litebox_shim_optee/src/ptr.rs | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/litebox_shim_optee/src/ptr.rs b/litebox_shim_optee/src/ptr.rs index 0f97e1c15..2ad25c694 100644 --- a/litebox_shim_optee/src/ptr.rs +++ b/litebox_shim_optee/src/ptr.rs @@ -87,7 +87,7 @@ fn align_up(len: usize, align: usize) -> usize { /// memory for an object of type `T`. #[derive(Clone)] #[repr(C)] -pub struct PhysMutPtr { +pub struct PhysMutPtr { pages: PhysPageArray, offset: usize, count: usize, @@ -456,6 +456,12 @@ impl PhysMutPtr { } } +impl Drop for PhysMutPtr { + fn drop(&mut self) { + let _ = unsafe { self.unmap() }; + } +} + impl core::fmt::Debug for PhysMutPtr { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_struct("PhysMutPtr") @@ -469,9 +475,10 @@ impl core::fmt::Debug for PhysMutPtr { /// exposes only read access. #[derive(Clone)] #[repr(C)] -pub struct PhysConstPtr { +pub struct PhysConstPtr { inner: PhysMutPtr, } + impl PhysConstPtr { /// Create a new `PhysConstPtr` from the given physical page array and offset. 
/// @@ -540,6 +547,12 @@ impl PhysConstPtr { } } +impl Drop for PhysConstPtr { + fn drop(&mut self) { + let _ = unsafe { self.inner.unmap() }; + } +} + impl core::fmt::Debug for PhysConstPtr { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_struct("PhysConstPtr") From 5063f728c117ba688324bedb9f5f3d4835eb2d64 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Wed, 24 Dec 2025 15:44:04 +0000 Subject: [PATCH 30/52] VmapProvider validate and protect --- litebox/src/platform/vmap.rs | 31 +++++++++++++++++----- litebox_platform_linux_userland/src/lib.rs | 10 ++++++- litebox_platform_lvbs/src/lib.rs | 10 ++++++- litebox_shim_optee/src/ptr.rs | 5 +--- 4 files changed, 44 insertions(+), 12 deletions(-) diff --git a/litebox/src/platform/vmap.rs b/litebox/src/platform/vmap.rs index adc55ba23..fe14496cb 100644 --- a/litebox/src/platform/vmap.rs +++ b/litebox/src/platform/vmap.rs @@ -52,15 +52,34 @@ pub trait VmapProvider { /// overlapping physical pages, so the implementation should safely handle such cases. unsafe fn vunmap(&self, vmap_info: Self::PhysPageMapInfo) -> Result<(), PhysPointerError>; - /// Validate that the given physical address (with type) does not belong to LiteBox-managed - /// memory. Use `&self` to get the memory layout of the platform (i.e., the physical memory + /// Validate that the given physical pages do not belong to LiteBox-managed memory. + /// Use `&self` to get the memory layout of the platform (i.e., the physical memory /// range assigned to LiteBox). /// - /// This function does not use `*const T` or `*mut T` because it deals with a physical address - /// which should not be dereferenced directly. + /// This function is a no-op if there is no other world or VM sharing the physical memory. /// - /// Returns `Ok(pa)` if valid. If the address is not valid, returns `Err(PhysPointerError)`. - fn validate(&self, pa: usize) -> Result; + /// Returns `Ok(())` if valid. If the pages are not valid, returns `Err(PhysPointerError)`. + fn validate(&self, pages: Self::PhysPageArray) -> Result<(), PhysPointerError>; + + /// Protect the given physical pages to ensure concurrent read or exclusive write access. + /// Read protection prevents others from modifying the pages. Read/write protection prevents + /// others from accessing the pages. + /// This can be implemented using EPT/NPT, TZASC, PMP, or some other hardware mechanisms. + /// + /// This function is a no-op if there is no other world or VM sharing the physical memory. + /// + /// Returns `Ok(())` if it successfully protects the pages. If it fails, returns + /// `Err(PhysPointerError)`. + /// + /// # Safety + /// + /// Since this function is expected to use hypercalls or other privileged hardware features, + /// the caller must ensure that it is safe to perform such operations at the time of the call. + unsafe fn protect( + &self, + pages: Self::PhysPageArray, + perms: PhysPageMapPermissions, + ) -> Result<(), PhysPointerError>; } /// Data structure for an array of physical pages. These physical pages should be virtually contiguous. 
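Putting the two new hooks together with `vmap()`/`vunmap()`, the intended call order looks roughly like the sketch below. It is not part of the patch: the helper name is invented, the generic parameters elided by the quoted diffs are omitted, and restoring the pages' original protections is left out (both LVBS-side implementations are still `todo!()` at this point in the series).

```rust
use litebox::platform::vmap::{PhysPageMapPermissions, PhysPointerError, VmapProvider};

/// Illustrative helper: validate, protect, map, copy, unmap.
///
/// # Safety
///
/// Follows the `VmapProvider` contracts above: the pages must not be in
/// conflicting use, and hypercalls must be safe to issue at this point.
unsafe fn with_guarded_read<P: VmapProvider>(
    platform: &P,
    pages: P::PhysPageArray,
) -> Result<(), PhysPointerError>
where
    P::PhysPageArray: Clone,
{
    // Reject pages that overlap LiteBox-managed memory (Boomerang defense).
    platform.validate(pages.clone())?;
    // Ask the hypervisor/hardware to block normal-world writers while we read.
    unsafe { platform.protect(pages.clone(), PhysPageMapPermissions::READ)? };
    let map_info = unsafe { platform.vmap(pages, PhysPageMapPermissions::READ)? };
    // ... copy data out through the returned mapping here ...
    // Restoring the original page protections is elided in this sketch.
    unsafe { platform.vunmap(map_info) }
}
```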
diff --git a/litebox_platform_linux_userland/src/lib.rs b/litebox_platform_linux_userland/src/lib.rs index 4f403a74a..02967afba 100644 --- a/litebox_platform_linux_userland/src/lib.rs +++ b/litebox_platform_linux_userland/src/lib.rs @@ -2168,7 +2168,15 @@ impl VmapProvider for LinuxUserland { Err(PhysPointerError::UnsupportedOperation) } - fn validate(&self, _pa: usize) -> Result { + fn validate(&self, _pages: Self::PhysPageArray) -> Result<(), PhysPointerError> { + Err(PhysPointerError::UnsupportedOperation) + } + + unsafe fn protect( + &self, + _pages: Self::PhysPageArray, + _perms: PhysPageMapPermissions, + ) -> Result<(), PhysPointerError> { Err(PhysPointerError::UnsupportedOperation) } } diff --git a/litebox_platform_lvbs/src/lib.rs b/litebox_platform_lvbs/src/lib.rs index 721ea39d5..7b3e49366 100644 --- a/litebox_platform_lvbs/src/lib.rs +++ b/litebox_platform_lvbs/src/lib.rs @@ -775,9 +775,17 @@ impl VmapProvider for LinuxKerne todo!("use unmap_vtl0_pages()") } - fn validate(&self, _pa: usize) -> Result { + fn validate(&self, _pages: Self::PhysPageArray) -> Result<(), PhysPointerError> { todo!("use vtl1_phys_frame_range to validate") } + + unsafe fn protect( + &self, + _pages: Self::PhysPageArray, + _perms: PhysPageMapPermissions, + ) -> Result<(), PhysPointerError> { + todo!("use hypercall to protect/unprotect physical pages") + } } // NOTE: The below code is a naive workaround to let LVBS code to access the platform. diff --git a/litebox_shim_optee/src/ptr.rs b/litebox_shim_optee/src/ptr.rs index 2ad25c694..32ea9f8de 100644 --- a/litebox_shim_optee/src/ptr.rs +++ b/litebox_shim_optee/src/ptr.rs @@ -124,9 +124,7 @@ impl PhysMutPtr { core::mem::size_of::(), )); } - for pa in pages.iter() { - >::validate::(platform(), *pa)?; - } + >::validate(platform(), pages.clone())?; Ok(Self { pages, offset, @@ -156,7 +154,6 @@ impl PhysMutPtr { let mut pages = alloc::vec::Vec::with_capacity((end_page - start_page) / ALIGN); let mut current_page = start_page; while current_page < end_page { - >::validate::(platform(), current_page)?; pages.push(current_page); current_page += ALIGN; } From b4ab389697cbb6b18a1695da6c7eaf6aa641d9d0 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Fri, 26 Dec 2025 18:39:09 +0000 Subject: [PATCH 31/52] use existing NonZeroAddress --- litebox/src/mm/linux.rs | 1 + litebox/src/platform/vmap.rs | 96 ++++------------------ litebox_platform_linux_userland/src/lib.rs | 10 +-- litebox_platform_lvbs/src/lib.rs | 31 +++++-- litebox_shim_optee/src/ptr.rs | 49 +++++++---- 5 files changed, 81 insertions(+), 106 deletions(-) diff --git a/litebox/src/mm/linux.rs b/litebox/src/mm/linux.rs index fa56b151e..c19408d0e 100644 --- a/litebox/src/mm/linux.rs +++ b/litebox/src/mm/linux.rs @@ -213,6 +213,7 @@ impl core::ops::Add for NonZeroPageSize { } /// A non-zero address that is `ALIGN`-aligned. +#[derive(Clone, Copy)] pub struct NonZeroAddress(usize); impl NonZeroAddress { diff --git a/litebox/src/platform/vmap.rs b/litebox/src/platform/vmap.rs index fe14496cb..b152508fb 100644 --- a/litebox/src/platform/vmap.rs +++ b/litebox/src/platform/vmap.rs @@ -12,13 +12,13 @@ use thiserror::Error; /// `litebox_shim_optee::ptr::PhysConstPtr`. It can benefit other modules which need /// Linux kernel's `vmap()` and `vunmap()` functionalities (e.g., HVCI/HEKI, drivers). pub trait VmapProvider { - /// Data structure for an array of physical pages which are virtually contiguous. - type PhysPageArray; + /// Data structure for an array of physical page addresses which are virtually contiguous. 
+ type PhysPageAddrArray; /// Data structure to maintain the mapping information returned by `vmap()`. type PhysPageMapInfo; - /// Map the given [`PhysPageArray`] into virtually contiguous addresses with the given + /// Map the given `PhysPageAddrArray` into virtually contiguous addresses with the given /// [`PhysPageMapPermissions`] while returning [`PhysPageMapInfo`]. This function /// expects that it can access and update the page table using `&self`. /// @@ -34,7 +34,7 @@ pub trait VmapProvider { /// overlapping physical pages, so the implementation should safely handle such cases. unsafe fn vmap( &self, - pages: Self::PhysPageArray, + pages: Self::PhysPageAddrArray, perms: PhysPageMapPermissions, ) -> Result; @@ -59,7 +59,7 @@ pub trait VmapProvider { /// This function is a no-op if there is no other world or VM sharing the physical memory. /// /// Returns `Ok(())` if valid. If the pages are not valid, returns `Err(PhysPointerError)`. - fn validate(&self, pages: Self::PhysPageArray) -> Result<(), PhysPointerError>; + fn validate(&self, pages: Self::PhysPageAddrArray) -> Result<(), PhysPointerError>; /// Protect the given physical pages to ensure concurrent read or exclusive write access. /// Read protection prevents others from modifying the pages. Read/write protection prevents @@ -77,84 +77,18 @@ pub trait VmapProvider { /// the caller must ensure that it is safe to perform such operations at the time of the call. unsafe fn protect( &self, - pages: Self::PhysPageArray, + pages: Self::PhysPageAddrArray, perms: PhysPageMapPermissions, ) -> Result<(), PhysPointerError>; } -/// Data structure for an array of physical pages. These physical pages should be virtually contiguous. -#[derive(Clone)] -pub struct PhysPageArray { - inner: alloc::boxed::Box<[usize]>, -} - -impl PhysPageArray { - /// Create a new `PhysPageArray` from the given slice of physical addresses. - /// - /// All page addresses should be aligned to `ALIGN`. - pub fn try_from_slice(addrs: &[usize]) -> Result { - for addr in addrs { - if !addr.is_multiple_of(ALIGN) { - return Err(PhysPointerError::UnalignedPhysicalAddress(*addr, ALIGN)); - } - } - // TODO: Remove this check once our platform implementations support virtually - // contiguous non-contiguous physical page mapping. - Self::check_contiguity(addrs)?; - Ok(Self { - inner: alloc::boxed::Box::from(addrs), - }) - } - - /// Check if the array is empty. - pub fn is_empty(&self) -> bool { - self.inner.is_empty() - } - - /// Return the number of physical pages in the array. - pub fn len(&self) -> usize { - self.inner.len() - } - - /// Return the first physical address in the array if exists. - pub fn first(&self) -> Option { - self.inner.first().copied() - } - - /// Checks whether the given physical addresses are contiguous with respect to ALIGN. - /// - /// Note: This is a temporary check to let this module work with our platform implementations - /// which map physical pages with a fixed offset (`MemoryProvider::GVA_OFFSET`) such that - /// do not support non-contiguous physical page mapping with contiguous virtual addresses. - fn check_contiguity(addrs: &[usize]) -> Result<(), PhysPointerError> { - for window in addrs.windows(2) { - let first = window[0]; - let second = window[1]; - if second != first.checked_add(ALIGN).ok_or(PhysPointerError::Overflow)? 
{ - return Err(PhysPointerError::NonContiguousPages); - } - } - Ok(()) - } -} - -impl core::iter::Iterator for PhysPageArray { - type Item = usize; - fn next(&mut self) -> Option { - if self.inner.is_empty() { - None - } else { - Some(self.inner[0]) - } - } -} - -impl core::ops::Deref for PhysPageArray { - type Target = [usize]; - fn deref(&self) -> &Self::Target { - &self.inner - } -} +/// Data structure representing a physical address with page alignment. +/// +/// Currently, this is an alias to `crate::mm::linux::NonZeroAddress`. This might change if +/// we selectively conduct sanity checks based on whether an address is virtual or physical +/// (e.g., whether a virtual address is canonical, whether a physical address is tagged with +/// a valid key ID, etc.). +pub type PhysPageAddr = crate::mm::linux::NonZeroAddress; /// Data structure to maintain the mapping information returned by `vmap()`. /// @@ -172,12 +106,14 @@ bitflags::bitflags! { /// /// This module only supports READ and WRITE permissions. Both EXECUTE and SHARED /// permissions are explicitly prohibited. + #[non_exhaustive] #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub struct PhysPageMapPermissions: u8 { /// Readable const READ = 1 << 0; /// Writable const WRITE = 1 << 1; + const _ = !0; } } @@ -237,4 +173,6 @@ pub enum PhysPointerError { NonContiguousPages, #[error("The operation is unsupported on this platform")] UnsupportedOperation, + #[error("Unsupported permissions: {0:#x}")] + UnsupportedPermissions(u8), } diff --git a/litebox_platform_linux_userland/src/lib.rs b/litebox_platform_linux_userland/src/lib.rs index 02967afba..0abb52655 100644 --- a/litebox_platform_linux_userland/src/lib.rs +++ b/litebox_platform_linux_userland/src/lib.rs @@ -13,7 +13,7 @@ use litebox::fs::OFlags; use litebox::platform::UnblockedOrTimedOut; use litebox::platform::page_mgmt::{FixedAddressBehavior, MemoryRegionPermissions}; use litebox::platform::vmap::{ - PhysPageArray, PhysPageMapInfo, PhysPageMapPermissions, PhysPointerError, VmapProvider, + PhysPageAddr, PhysPageMapInfo, PhysPageMapPermissions, PhysPointerError, VmapProvider, }; use litebox::platform::{ImmediatelyWokenUp, RawConstPointer as _}; use litebox::shim::ContinueOperation; @@ -2152,13 +2152,13 @@ impl litebox::platform::CrngProvider for LinuxUserland { /// We might need to emulate these functions' behaviors using virtual addresses for development or /// testing, or use a kernel module to provide this functionality (if needed). 
impl VmapProvider for LinuxUserland { - type PhysPageArray = PhysPageArray; + type PhysPageAddrArray = alloc::boxed::Box<[PhysPageAddr]>; type PhysPageMapInfo = PhysPageMapInfo; unsafe fn vmap( &self, - _pages: Self::PhysPageArray, + _pages: Self::PhysPageAddrArray, _perms: PhysPageMapPermissions, ) -> Result { Err(PhysPointerError::UnsupportedOperation) @@ -2168,13 +2168,13 @@ impl VmapProvider for LinuxUserland { Err(PhysPointerError::UnsupportedOperation) } - fn validate(&self, _pages: Self::PhysPageArray) -> Result<(), PhysPointerError> { + fn validate(&self, _pages: Self::PhysPageAddrArray) -> Result<(), PhysPointerError> { Err(PhysPointerError::UnsupportedOperation) } unsafe fn protect( &self, - _pages: Self::PhysPageArray, + _pages: Self::PhysPageAddrArray, _perms: PhysPageMapPermissions, ) -> Result<(), PhysPointerError> { Err(PhysPointerError::UnsupportedOperation) diff --git a/litebox_platform_lvbs/src/lib.rs b/litebox_platform_lvbs/src/lib.rs index 7b3e49366..068c93889 100644 --- a/litebox_platform_lvbs/src/lib.rs +++ b/litebox_platform_lvbs/src/lib.rs @@ -15,7 +15,7 @@ use core::{ }; use litebox::platform::page_mgmt::DeallocationError; use litebox::platform::vmap::{ - PhysPageArray, PhysPageMapInfo, PhysPageMapPermissions, PhysPointerError, VmapProvider, + PhysPageAddr, PhysPageMapInfo, PhysPageMapPermissions, PhysPointerError, VmapProvider, }; use litebox::platform::{ DebugLogProvider, IPInterfaceProvider, ImmediatelyWokenUp, PageManagementProvider, @@ -758,16 +758,37 @@ impl StdioProvider for LinuxKernel { } } +/// Checks whether the given physical addresses are contiguous with respect to ALIGN. +/// +/// Note: This is a temporary check to let `VmapProvider` work with this platform +/// which maps physical pages with a fixed offset (`MemoryProvider::GVA_OFFSET`) such that +/// does not support non-contiguous physical page mapping with contiguous virtual addresses. +fn check_contiguity( + addrs: &[PhysPageAddr], +) -> Result<(), PhysPointerError> { + for window in addrs.windows(2) { + let first = window[0].as_usize(); + let second = window[1].as_usize(); + if second != first.checked_add(ALIGN).ok_or(PhysPointerError::Overflow)? { + return Err(PhysPointerError::NonContiguousPages); + } + } + Ok(()) +} + impl VmapProvider for LinuxKernel { - type PhysPageArray = PhysPageArray; + type PhysPageAddrArray = alloc::boxed::Box<[PhysPageAddr]>; type PhysPageMapInfo = PhysPageMapInfo; unsafe fn vmap( &self, - _pages: Self::PhysPageArray, + pages: Self::PhysPageAddrArray, _perms: PhysPageMapPermissions, ) -> Result { + // TODO: Remove this check once this platform supports virtually contiguous + // non-contiguous physical page mapping. 
+ check_contiguity(&pages)?; todo!("use map_vtl0_phys_range()") } @@ -775,13 +796,13 @@ impl VmapProvider for LinuxKerne todo!("use unmap_vtl0_pages()") } - fn validate(&self, _pages: Self::PhysPageArray) -> Result<(), PhysPointerError> { + fn validate(&self, _pages: Self::PhysPageAddrArray) -> Result<(), PhysPointerError> { todo!("use vtl1_phys_frame_range to validate") } unsafe fn protect( &self, - _pages: Self::PhysPageArray, + _pages: Self::PhysPageAddrArray, _perms: PhysPageMapPermissions, ) -> Result<(), PhysPointerError> { todo!("use hypercall to protect/unprotect physical pages") diff --git a/litebox_shim_optee/src/ptr.rs b/litebox_shim_optee/src/ptr.rs index 32ea9f8de..3f3007f69 100644 --- a/litebox_shim_optee/src/ptr.rs +++ b/litebox_shim_optee/src/ptr.rs @@ -61,9 +61,8 @@ // TODO: Since the below `PhysMutPtr` and `PhysConstPtr` are not OP-TEE specific, // we can move them to a different crate (e.g., `litebox`) if needed. -use core::ops::Deref; use litebox::platform::vmap::{ - PhysPageArray, PhysPageMapInfo, PhysPageMapPermissions, PhysPointerError, VmapProvider, + PhysPageAddr, PhysPageMapInfo, PhysPageMapPermissions, PhysPointerError, VmapProvider, }; use litebox_platform_multiplex::{Platform, platform}; @@ -78,8 +77,8 @@ fn align_up(len: usize, align: usize) -> usize { } /// Represent a physical pointer to an object with on-demand mapping. -/// - `pages`: An array of page-aligned physical addresses ([`PhysPageArray`]). Physical addresses in -/// this array should be virtually contiguous. +/// - `pages`: An array of page-aligned physical addresses. Physical addresses in this array should be +/// virtually contiguous. /// - `offset`: The offset within `pages[0]` where the object starts. It should be smaller than `ALIGN`. /// - `count`: The number of objects of type `T` that can be accessed from this pointer. /// - `map_info`: The mapping information of the currently mapped physical pages, if any. @@ -88,7 +87,7 @@ fn align_up(len: usize, align: usize) -> usize { #[derive(Clone)] #[repr(C)] pub struct PhysMutPtr { - pages: PhysPageArray, + pages: alloc::boxed::Box<[PhysPageAddr]>, offset: usize, count: usize, map_info: Option>, @@ -103,7 +102,7 @@ impl PhysMutPtr { /// type `T` starting from `offset`. If these conditions are not met, this function returns /// `Err(PhysPointerError)`. pub fn try_from_page_array( - pages: PhysPageArray, + pages: &[PhysPageAddr], offset: usize, ) -> Result { if offset >= ALIGN { @@ -124,9 +123,9 @@ impl PhysMutPtr { core::mem::size_of::(), )); } - >::validate(platform(), pages.clone())?; + >::validate(platform(), pages.into())?; Ok(Self { - pages, + pages: pages.into(), offset, count: size / core::mem::size_of::(), map_info: None, @@ -154,10 +153,13 @@ impl PhysMutPtr { let mut pages = alloc::vec::Vec::with_capacity((end_page - start_page) / ALIGN); let mut current_page = start_page; while current_page < end_page { - pages.push(current_page); + pages.push( + PhysPageAddr::::new(current_page) + .ok_or(PhysPointerError::InvalidPhysicalAddress(current_page))?, + ); current_page += ALIGN; } - Self::try_from_page_array(PhysPageArray::try_from_slice(&pages)?, pa - start_page) + Self::try_from_page_array(&pages, pa - start_page) } /// Create a new `PhysMutPtr` from the given physical address for a single object. 
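As a usage sketch for the constructors above (illustrative only; the address is hypothetical, `PAGE_SIZE` stands in for `ALIGN`, and errors are simply propagated):

```rust
use litebox::mm::linux::PAGE_SIZE;
use litebox::platform::vmap::PhysPointerError;

// Hypothetical example; `pa` is a page-aligned normal-world physical address.
fn example(pa: usize) -> Result<(), PhysPointerError> {
    // A single u64 object located exactly at `pa`:
    let single = PhysMutPtr::<u64, PAGE_SIZE>::try_from_usize(pa)?;

    // A buffer of `3 * PAGE_SIZE` bytes starting 16 bytes into its first page;
    // the constructor derives the page list and the sub-page offset itself,
    // so this mapping actually spans four physical pages:
    let buffer = PhysMutPtr::<u8, PAGE_SIZE>::try_from_contiguous_pages(pa + 16, 3 * PAGE_SIZE)?;

    let _ = (single, buffer);
    Ok(())
}
```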
@@ -421,15 +423,23 @@ impl PhysMutPtr { if start >= end || end > self.pages.len() { return Err(PhysPointerError::IndexOutOfBounds(end, self.pages.len())); } + let accept_perms = PhysPageMapPermissions::READ | PhysPageMapPermissions::WRITE; + if perms.bits() & !accept_perms.bits() != 0 { + return Err(PhysPointerError::UnsupportedPermissions(perms.bits())); + } if self.map_info.is_none() { - let sub_pages = PhysPageArray::try_from_slice(&self.pages.deref()[start..end])?; + let sub_pages = &self.pages[start..end]; unsafe { - self.map_info = Some(platform().vmap(sub_pages, perms)?); + self.map_info = Some(>::vmap( + platform(), + sub_pages.into(), + perms, + )?); } Ok(()) } else { Err(PhysPointerError::AlreadyMapped( - self.pages.first().unwrap_or(0), + self.pages.first().map_or(0, |p| p.as_usize()), )) } } @@ -448,7 +458,9 @@ impl PhysMutPtr { self.map_info = None; Ok(()) } else { - Err(PhysPointerError::Unmapped(self.pages.first().unwrap_or(0))) + Err(PhysPointerError::Unmapped( + self.pages.first().map_or(0, |p| p.as_usize()), + )) } } } @@ -462,7 +474,7 @@ impl Drop for PhysMutPtr { impl core::fmt::Debug for PhysMutPtr { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_struct("PhysMutPtr") - .field("pages[0]", &self.pages.first().unwrap_or(0)) + .field("pages[0]", &self.pages.first().map_or(0, |p| p.as_usize())) .field("offset", &self.offset) .finish_non_exhaustive() } @@ -484,7 +496,7 @@ impl PhysConstPtr { /// type `T` starting from `offset`. If these conditions are not met, this function returns /// `Err(PhysPointerError)`. pub fn try_from_page_array( - pages: PhysPageArray, + pages: &[PhysPageAddr], offset: usize, ) -> Result { Ok(Self { @@ -553,7 +565,10 @@ impl Drop for PhysConstPtr { impl core::fmt::Debug for PhysConstPtr { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_struct("PhysConstPtr") - .field("pages[0]", &self.inner.pages.first().unwrap_or(0)) + .field( + "pages[0]", + &self.inner.pages.first().map_or(0, |p| p.as_usize()), + ) .field("offset", &self.inner.offset) .finish_non_exhaustive() } From 3bb2fc2e3ef82dcf9df3aa54e29839ecc892f67b Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Mon, 29 Dec 2025 17:54:12 +0000 Subject: [PATCH 32/52] rename --- litebox_shim_optee/src/msg_handler.rs | 7 +- litebox_shim_optee/src/ptr.rs | 111 +++++++++++++------------- 2 files changed, 58 insertions(+), 60 deletions(-) diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index 5147c6286..27027113f 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -75,9 +75,8 @@ pub fn handle_optee_smc_args( | OpteeSmcFunction::CallWithRegdArg => { let msg_arg_addr = smc.optee_msg_arg_phys_addr()?; let msg_arg_addr = usize::try_from(msg_arg_addr).unwrap(); - let mut ptr = - NormalWorldConstPtr::::try_from_usize(msg_arg_addr) - .map_err(|_| OpteeSmcReturn::EBadAddr)?; + let mut ptr = NormalWorldConstPtr::::with_usize(msg_arg_addr) + .map_err(|_| OpteeSmcReturn::EBadAddr)?; let msg_arg = unsafe { ptr.read_at_offset(0) }.map_err(|_| OpteeSmcReturn::EBadAddr)?; Ok(( OpteeSmcResult::Generic { @@ -281,7 +280,7 @@ impl ShmRefMap { let mut cur_addr = usize::try_from(shm_ref_pages_data_phys_addr).unwrap(); loop { let mut cur_ptr = - NormalWorldConstPtr::::try_from_usize(cur_addr) + NormalWorldConstPtr::::with_usize(cur_addr) .map_err(|_| OpteeSmcReturn::EBadAddr)?; let pages_data = unsafe { cur_ptr.read_at_offset(0) }.map_err(|_| OpteeSmcReturn::EBadAddr)?; diff --git 
a/litebox_shim_optee/src/ptr.rs b/litebox_shim_optee/src/ptr.rs index 3f3007f69..a2fe3f9cf 100644 --- a/litebox_shim_optee/src/ptr.rs +++ b/litebox_shim_optee/src/ptr.rs @@ -64,7 +64,7 @@ use litebox::platform::vmap::{ PhysPageAddr, PhysPageMapInfo, PhysPageMapPermissions, PhysPointerError, VmapProvider, }; -use litebox_platform_multiplex::{Platform, platform}; +use litebox_platform_multiplex::platform; #[inline] fn align_down(address: usize, align: usize) -> usize { @@ -101,10 +101,7 @@ impl PhysMutPtr { /// than `ALIGN`. Also, `pages` should contain enough pages to cover at least one object of /// type `T` starting from `offset`. If these conditions are not met, this function returns /// `Err(PhysPointerError)`. - pub fn try_from_page_array( - pages: &[PhysPageAddr], - offset: usize, - ) -> Result { + pub fn new(pages: &[PhysPageAddr], offset: usize) -> Result { if offset >= ALIGN { return Err(PhysPointerError::InvalidBaseOffset(offset, ALIGN)); } @@ -123,7 +120,7 @@ impl PhysMutPtr { core::mem::size_of::(), )); } - >::validate(platform(), pages.into())?; + platform().validate(pages.into())?; Ok(Self { pages: pages.into(), offset, @@ -135,10 +132,11 @@ impl PhysMutPtr { /// Create a new `PhysMutPtr` from the given contiguous physical address and length. /// - /// This is a shortcut for `try_from_page_array([align_down(pa), ..., align_up(align_down(pa) + bytes)], pa % ALIGN)`. + /// This is a shortcut for + /// `PhysMutPtr::new([align_down(pa), align_down(pa) + ALIGN, ..., align_up(align_down(pa) + bytes)], pa % ALIGN)`. /// This function assumes that `pa`, ..., `pa+bytes` are both physically and virtually contiguous. If not, /// later accesses through `PhysMutPtr` may read/write incorrect data. - pub fn try_from_contiguous_pages(pa: usize, bytes: usize) -> Result { + pub fn with_contiguous_pages(pa: usize, bytes: usize) -> Result { if bytes < core::mem::size_of::() { return Err(PhysPointerError::InsufficientPhysicalPages( bytes, @@ -159,16 +157,16 @@ impl PhysMutPtr { ); current_page += ALIGN; } - Self::try_from_page_array(&pages, pa - start_page) + Self::new(&pages, pa - start_page) } /// Create a new `PhysMutPtr` from the given physical address for a single object. /// - /// This is a shortcut for `try_from_contiguous_pages(pa, size_of::())`. + /// This is a shortcut for `PhysMutPtr::with_contiguous_pages(pa, size_of::())`. /// /// Note: This module doesn't provide `as_usize` because LiteBox should not dereference physical addresses directly. - pub fn try_from_usize(pa: usize) -> Result { - Self::try_from_contiguous_pages(pa, core::mem::size_of::()) + pub fn with_usize(pa: usize) -> Result { + Self::with_contiguous_pages(pa, core::mem::size_of::()) } /// Read the value at the given offset from the physical pointer. 
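The renamed constructors are used the same way as before; a short sketch, mirroring the call sites in `msg_handler.rs` (it assumes `NormalWorldConstPtr<T>` is the shim's page-aligned alias of `PhysConstPtr<T, ALIGN>` and that `OpteeMsgArg` is `Copy`):

```rust
use litebox::platform::vmap::PhysPointerError;
use litebox_common_optee::OpteeMsgArg;

// Read one OpteeMsgArg out of normal-world memory at physical address `pa`.
fn peek_msg_arg(pa: usize) -> Result<OpteeMsgArg, PhysPointerError> {
    let mut ptr = NormalWorldConstPtr::<OpteeMsgArg>::with_usize(pa)?;
    // read_at_offset() maps the backing pages on demand, copies the value into
    // LiteBox-owned memory, and unmaps again before returning.
    let msg_arg = unsafe { ptr.read_at_offset(0) }?;
    Ok(*msg_arg)
}
```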
@@ -199,25 +197,23 @@ impl PhysMutPtr { self.map_range(start, end, PhysPageMapPermissions::READ)?; } // Don't forget to call unmap() before returning to the caller - let Some(map_info) = &self.map_info else { + let Some(src) = (unsafe { self.base_ptr() }) else { unsafe { self.unmap()?; } return Err(PhysPointerError::NoMappingInfo); }; - let addr = unsafe { map_info.base.add(self.offset) } - .cast::() - .wrapping_add(count); + let src = src.wrapping_add(count); let val = { let mut buffer = core::mem::MaybeUninit::::uninit(); - if (addr as usize).is_multiple_of(core::mem::align_of::()) { + if (src as usize).is_multiple_of(core::mem::align_of::()) { unsafe { - core::ptr::copy_nonoverlapping(addr, buffer.as_mut_ptr(), 1); + core::ptr::copy_nonoverlapping(src, buffer.as_mut_ptr(), 1); } } else { unsafe { core::ptr::copy_nonoverlapping( - addr.cast::(), + src.cast::(), buffer.as_mut_ptr().cast::(), core::mem::size_of::(), ); @@ -263,23 +259,21 @@ impl PhysMutPtr { self.map_range(start, end, PhysPageMapPermissions::READ)?; } // Don't forget to call unmap() before returning to the caller - let Some(map_info) = &self.map_info else { + let Some(src) = (unsafe { self.base_ptr() }) else { unsafe { self.unmap()?; } return Err(PhysPointerError::NoMappingInfo); }; - let addr = unsafe { map_info.base.add(self.offset) } - .cast::() - .wrapping_add(count); - if (addr as usize).is_multiple_of(core::mem::align_of::()) { + let src = src.wrapping_add(count); + if (src as usize).is_multiple_of(core::mem::align_of::()) { unsafe { - core::ptr::copy_nonoverlapping(addr, values.as_mut_ptr(), values.len()); + core::ptr::copy_nonoverlapping(src, values.as_mut_ptr(), values.len()); } } else { unsafe { core::ptr::copy_nonoverlapping( - addr.cast::(), + src.cast::(), values.as_mut_ptr().cast::(), core::mem::size_of_val(values), ); @@ -324,19 +318,17 @@ impl PhysMutPtr { )?; } // Don't forget to call unmap() before returning to the caller - let Some(map_info) = &self.map_info else { + let Some(dst) = (unsafe { self.base_ptr() }) else { unsafe { self.unmap()?; } return Err(PhysPointerError::NoMappingInfo); }; - let addr = unsafe { map_info.base.add(self.offset) } - .cast::() - .wrapping_add(count); - if (addr as usize).is_multiple_of(core::mem::align_of::()) { - unsafe { core::ptr::write(addr, value) }; + let dst = dst.wrapping_add(count); + if (dst as usize).is_multiple_of(core::mem::align_of::()) { + unsafe { core::ptr::write(dst, value) }; } else { - unsafe { core::ptr::write_unaligned(addr, value) }; + unsafe { core::ptr::write_unaligned(dst, value) }; } unsafe { self.unmap()?; @@ -380,24 +372,22 @@ impl PhysMutPtr { )?; } // Don't forget to call unmap() before returning to the caller - let Some(map_info) = &self.map_info else { + let Some(dst) = (unsafe { self.base_ptr() }) else { unsafe { self.unmap()?; } return Err(PhysPointerError::NoMappingInfo); }; - let addr = unsafe { map_info.base.add(self.offset) } - .cast::() - .wrapping_add(count); - if (addr as usize).is_multiple_of(core::mem::align_of::()) { + let dst = dst.wrapping_add(count); + if (dst as usize).is_multiple_of(core::mem::align_of::()) { unsafe { - core::ptr::copy_nonoverlapping(values.as_ptr(), addr, values.len()); + core::ptr::copy_nonoverlapping(values.as_ptr(), dst, values.len()); } } else { unsafe { core::ptr::copy_nonoverlapping( values.as_ptr().cast::(), - addr.cast::(), + dst.cast::(), core::mem::size_of_val(values), ); } @@ -430,11 +420,9 @@ impl PhysMutPtr { if self.map_info.is_none() { let sub_pages = &self.pages[start..end]; unsafe { - 
self.map_info = Some(>::vmap( - platform(), - sub_pages.into(), - perms, - )?); + platform().vmap(sub_pages.into(), perms).map(|info| { + self.map_info = Some(info); + })?; } Ok(()) } else { @@ -463,6 +451,19 @@ impl PhysMutPtr { )) } } + + /// Get the base virtual pointer if mapped. + /// + /// # Safety + /// + /// This function performs pointer arithmetic on the mapped base pointer. + #[inline] + unsafe fn base_ptr(&self) -> Option<*mut T> { + let Some(map_info) = &self.map_info else { + return None; + }; + Some(unsafe { map_info.base.add(self.offset) }.cast::()) + } } impl Drop for PhysMutPtr { @@ -495,34 +496,32 @@ impl PhysConstPtr { /// than `ALIGN`. Also, `pages` should contain enough pages to cover at least one object of /// type `T` starting from `offset`. If these conditions are not met, this function returns /// `Err(PhysPointerError)`. - pub fn try_from_page_array( - pages: &[PhysPageAddr], - offset: usize, - ) -> Result { + pub fn new(pages: &[PhysPageAddr], offset: usize) -> Result { Ok(Self { - inner: PhysMutPtr::try_from_page_array(pages, offset)?, + inner: PhysMutPtr::new(pages, offset)?, }) } /// Create a new `PhysConstPtr` from the given contiguous physical address and length. /// - /// This is a shortcut for `try_from_page_array([align_down(pa), ..., align_up(align_down(pa) + bytes)], pa % ALIGN)`. + /// This is a shortcut for + /// `PhysConstPtr::new([align_down(pa), align_down(pa) + ALIGN, ..., align_up(align_down(pa) + bytes)], pa % ALIGN)`. /// This function assumes that `pa`, ..., `pa+bytes` are both physically and virtually contiguous. If not, /// later accesses through `PhysConstPtr` may read incorrect data. - pub fn try_from_contiguous_pages(pa: usize, bytes: usize) -> Result { + pub fn with_contiguous_pages(pa: usize, bytes: usize) -> Result { Ok(Self { - inner: PhysMutPtr::try_from_contiguous_pages(pa, bytes)?, + inner: PhysMutPtr::with_contiguous_pages(pa, bytes)?, }) } /// Create a new `PhysConstPtr` from the given physical address for a single object. /// - /// This is a shortcut for `try_from_contiguous_pages(pa, size_of::())`. + /// This is a shortcut for `PhysConstPtr::with_contiguous_pages(pa, size_of::())`. /// /// Note: This module doesn't provide `as_usize` because LiteBox should not dereference physical addresses directly. 
- pub fn try_from_usize(pa: usize) -> Result { + pub fn with_usize(pa: usize) -> Result { Ok(Self { - inner: PhysMutPtr::try_from_usize(pa)?, + inner: PhysMutPtr::with_usize(pa)?, }) } From fadf32da948b4a6f8540db030920ae055384e34c Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Mon, 29 Dec 2025 22:38:56 +0000 Subject: [PATCH 33/52] use PhysPageAddr for ShmRefMap --- litebox_shim_optee/src/msg_handler.rs | 56 ++++++++++++++++----------- 1 file changed, 33 insertions(+), 23 deletions(-) diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index 27027113f..8d3a34529 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -19,6 +19,7 @@ use crate::NormalWorldConstPtr; use alloc::{boxed::Box, vec::Vec}; use hashbrown::HashMap; use litebox::mm::linux::PAGE_SIZE; +use litebox::platform::vmap::PhysPageAddr; use litebox_common_optee::{ OpteeMessageCommand, OpteeMsgArg, OpteeSecureWorldCapabilities, OpteeSmcArgs, OpteeSmcFunction, OpteeSmcResult, OpteeSmcReturn, @@ -202,19 +203,14 @@ pub fn handle_ta_request(_msg_arg: &OpteeMsgArg) -> Result, - pub page_offset: u64, -} - /// A scatter-gather list of OP-TEE physical page addresses in the normal world (VTL0) to /// share with the secure world (VTL1). Each [`ShmRefPagesData`] occupies one memory page /// where `pages_list` contains a list of physical page addresses and `next_page_data` /// contains the physical address of the next [`ShmRefPagesData`] if any. Entries of `pages_list` /// and `next_page_data` contain zero if the list ends. These physical page addresses are /// virtually contiguous in the normal world. All these address values must be page aligned. +/// +/// `pages_data` from [Linux](https://elixir.bootlin.com/linux/v6.18.2/source/drivers/tee/optee/smc_abi.c#L409) #[derive(Clone, Copy)] #[repr(C)] struct ShmRefPagesData { @@ -226,22 +222,33 @@ impl ShmRefPagesData { PAGE_SIZE / core::mem::size_of::() - core::mem::size_of::(); } +/// Data structure to maintain the information of OP-TEE shared memory in VTL0 referenced by `shm_ref`. +/// `pages` contains an array of physical page addresses. +/// `page_offset` indicates the page offset of the first page (i.e., `pages[0]`) which should be +/// smaller than `ALIGN`. +#[expect(unused)] +#[derive(Clone)] +struct ShmRefInfo { + pub pages: Box<[PhysPageAddr]>, + pub page_offset: usize, +} + /// Maintain the information of OP-TEE shared memory in VTL0 referenced by `shm_ref`. /// This data structure is for registering shared memory regions before they are /// used during OP-TEE calls with parameters referencing shared memory. /// Any normal memory references without this registration will be rejected. 
-struct ShmRefMap { - inner: spin::mutex::SpinMutex>, +struct ShmRefMap { + inner: spin::mutex::SpinMutex>>, } -impl ShmRefMap { +impl ShmRefMap { pub fn new() -> Self { Self { inner: spin::mutex::SpinMutex::new(HashMap::new()), } } - pub fn insert(&self, shm_ref: u64, info: ShmRefInfo) -> Result<(), OpteeSmcReturn> { + pub fn insert(&self, shm_ref: u64, info: ShmRefInfo) -> Result<(), OpteeSmcReturn> { let mut guard = self.inner.lock(); if guard.contains_key(&shm_ref) { Err(OpteeSmcReturn::ENotAvail) @@ -251,13 +258,13 @@ impl ShmRefMap { } } - pub fn remove(&self, shm_ref: u64) -> Option { + pub fn remove(&self, shm_ref: u64) -> Option> { let mut guard = self.inner.lock(); guard.remove(&shm_ref) } #[expect(unused)] - pub fn get(&self, shm_ref: u64) -> Option { + pub fn get(&self, shm_ref: u64) -> Option> { let guard = self.inner.lock(); guard.get(&shm_ref).cloned() } @@ -275,22 +282,25 @@ impl ShmRefMap { aligned_size: u64, shm_ref: u64, ) -> Result<(), OpteeSmcReturn> { - let num_pages = usize::try_from(aligned_size).unwrap() / PAGE_SIZE; + if page_offset >= ALIGN as u64 || aligned_size == 0 { + return Err(OpteeSmcReturn::EBadAddr); + } + let num_pages = usize::try_from(aligned_size).unwrap() / ALIGN; let mut pages = Vec::with_capacity(num_pages); let mut cur_addr = usize::try_from(shm_ref_pages_data_phys_addr).unwrap(); loop { - let mut cur_ptr = - NormalWorldConstPtr::::with_usize(cur_addr) - .map_err(|_| OpteeSmcReturn::EBadAddr)?; + let mut cur_ptr = NormalWorldConstPtr::::with_usize(cur_addr) + .map_err(|_| OpteeSmcReturn::EBadAddr)?; let pages_data = unsafe { cur_ptr.read_at_offset(0) }.map_err(|_| OpteeSmcReturn::EBadAddr)?; for page in &pages_data.pages_list { if *page == 0 || pages.len() == num_pages { break; - } else if !page.is_multiple_of(u64::try_from(PAGE_SIZE).unwrap()) { - return Err(OpteeSmcReturn::EBadAddr); } else { - pages.push(*page); + pages.push( + PhysPageAddr::new(usize::try_from(*page).unwrap()) + .ok_or(OpteeSmcReturn::EBadAddr)?, + ); } } if pages_data.next_page_data == 0 || pages.len() == num_pages { @@ -304,14 +314,14 @@ impl ShmRefMap { shm_ref, ShmRefInfo { pages: pages.into_boxed_slice(), - page_offset, + page_offset: usize::try_from(page_offset).unwrap(), }, )?; Ok(()) } } -fn shm_ref_map() -> &'static ShmRefMap { - static SHM_REF_MAP: OnceBox = OnceBox::new(); +fn shm_ref_map() -> &'static ShmRefMap { + static SHM_REF_MAP: OnceBox> = OnceBox::new(); SHM_REF_MAP.get_or_init(|| Box::new(ShmRefMap::new())) } From 7c387a9d415c9e765603011e54f869c362d6d096 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Thu, 8 Jan 2026 05:53:33 +0000 Subject: [PATCH 34/52] clarification --- litebox/src/platform/vmap.rs | 23 +++++---- litebox_common_optee/src/lib.rs | 1 - litebox_platform_lvbs/src/lib.rs | 4 +- litebox_shim_optee/src/msg_handler.rs | 70 +++++++++++++++------------ litebox_shim_optee/src/ptr.rs | 38 +++++++-------- 5 files changed, 69 insertions(+), 67 deletions(-) diff --git a/litebox/src/platform/vmap.rs b/litebox/src/platform/vmap.rs index b152508fb..a5e676846 100644 --- a/litebox/src/platform/vmap.rs +++ b/litebox/src/platform/vmap.rs @@ -30,8 +30,8 @@ pub trait VmapProvider { /// (especially, there should be no read/write or write/write conflicts). /// Unfortunately, LiteBox itself cannot fully guarantee this and it needs some helps /// from the caller, hypervisor, or hardware. - /// Multiple LiteBox threads might concurrently call this function (and `vunmap()`) with - /// overlapping physical pages, so the implementation should safely handle such cases. 
+ /// Multiple LiteBox threads might concurrently call this function with overlapping + /// physical pages, so the implementation should safely handle such cases. unsafe fn vmap( &self, pages: Self::PhysPageAddrArray, @@ -46,13 +46,10 @@ pub trait VmapProvider { /// # Safety /// /// The caller should ensure that the virtual addresses in `vmap_info` are not in active - /// use by other entities. Like `vmap()`, LiteBox itself cannot fully guarantee this and - /// it needs some helps from other parties. - /// Multiple LiteBox threads might concurrently call this function (and `vmap()`) with - /// overlapping physical pages, so the implementation should safely handle such cases. + /// use by other entities. unsafe fn vunmap(&self, vmap_info: Self::PhysPageMapInfo) -> Result<(), PhysPointerError>; - /// Validate that the given physical pages do not belong to LiteBox-managed memory. + /// Validate that the given physical pages do not belong to LiteBox-owned memory. /// Use `&self` to get the memory layout of the platform (i.e., the physical memory /// range assigned to LiteBox). /// @@ -61,12 +58,13 @@ pub trait VmapProvider { /// Returns `Ok(())` if valid. If the pages are not valid, returns `Err(PhysPointerError)`. fn validate(&self, pages: Self::PhysPageAddrArray) -> Result<(), PhysPointerError>; - /// Protect the given physical pages to ensure concurrent read or exclusive write access. - /// Read protection prevents others from modifying the pages. Read/write protection prevents - /// others from accessing the pages. - /// This can be implemented using EPT/NPT, TZASC, PMP, or some other hardware mechanisms. + /// Protect the given physical pages to ensure concurrent read or exclusive write access: + /// - Read protection: prevent others from writing to the pages. + /// - Read/write protection: prevent others from reading or writing to the pages. + /// - No protection: allow others to read and write the pages. /// - /// This function is a no-op if there is no other world or VM sharing the physical memory. + /// This function can be implemented using EPT/NPT, TZASC, PMP, or some other hardware mechanisms. + /// It is a no-op if there is no other world or VM sharing the physical memory. /// /// Returns `Ok(())` if it successfully protects the pages. If it fails, returns /// `Err(PhysPointerError)`. @@ -75,6 +73,7 @@ pub trait VmapProvider { /// /// Since this function is expected to use hypercalls or other privileged hardware features, /// the caller must ensure that it is safe to perform such operations at the time of the call. + /// Also, the caller should unprotect the pages when they are no longer needed to be protected. unsafe fn protect( &self, pages: Self::PhysPageAddrArray, diff --git a/litebox_common_optee/src/lib.rs b/litebox_common_optee/src/lib.rs index 0af2e652c..26ae7cf2d 100644 --- a/litebox_common_optee/src/lib.rs +++ b/litebox_common_optee/src/lib.rs @@ -1412,7 +1412,6 @@ impl OpteeSmcArgs { /// Get the physical address of `OpteeMsgArg`. The secure world is expected to map and copy /// this structure. - #[cfg(target_pointer_width = "64")] pub fn optee_msg_arg_phys_addr(&self) -> Result { // To avoid potential sign extension and overflow issues, OP-TEE stores the low and // high 32 bits of a 64-bit address in `args[2]` and `args[1]`, respectively. 
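For reference, the split encoding described above recombines as follows (a sketch of the arithmetic only, not the exact body of `optee_msg_arg_phys_addr`):

```rust
// args[1] holds the high 32 bits and args[2] the low 32 bits of the address.
fn recombine_phys_addr(args: &[usize]) -> u64 {
    let high = args[1] as u64;
    let low = args[2] as u64;
    (high << 32) | (low & 0xffff_ffff)
}
```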
diff --git a/litebox_platform_lvbs/src/lib.rs b/litebox_platform_lvbs/src/lib.rs
index 068c93889..ac326e994 100644
--- a/litebox_platform_lvbs/src/lib.rs
+++ b/litebox_platform_lvbs/src/lib.rs
@@ -761,8 +761,8 @@ impl StdioProvider for LinuxKernel {
 /// Checks whether the given physical addresses are contiguous with respect to ALIGN.
 ///
 /// Note: This is a temporary check to let `VmapProvider` work with this platform
-/// which maps physical pages with a fixed offset (`MemoryProvider::GVA_OFFSET`) such that
-/// does not support non-contiguous physical page mapping with contiguous virtual addresses.
+/// which does not yet support virtually contiguous mapping of non-contiguous physical pages
+/// (for now, it maps physical pages with a fixed offset).
 fn check_contiguity<const ALIGN: usize>(
     addrs: &[PhysPageAddr<ALIGN>],
 ) -> Result<(), PhysPointerError> {
diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs
index 8d3a34529..c3bbcfdc4 100644
--- a/litebox_shim_optee/src/msg_handler.rs
+++ b/litebox_shim_optee/src/msg_handler.rs
@@ -60,15 +60,23 @@ fn page_align_up(len: u64) -> u64 {
     len.next_multiple_of(PAGE_SIZE as u64)
 }
 
+/// The result of handling an OP-TEE SMC call along with an extracted OP-TEE message argument to handle.
+pub struct OpteeSmcHandled<'a> {
+    pub result: OpteeSmcResult<'a>,
+    pub msg_to_handle: Option<OpteeMsgArg>,
+}
+
 /// This function handles `OpteeSmcArgs` passed from the normal world (VTL0) via an OP-TEE SMC call.
-/// It returns an `OpteeSmcResult` representing the result of the SMC call and
-/// an optional `OpteeMsgArg` if the SMC call involves with an OP-TEE messagewhich should be handled by
-/// `handle_optee_msg_arg` or `handle_ta_request`.
+/// It returns an [`OpteeSmcHandled`] carrying the result of the SMC call and
+/// an optional `OpteeMsgArg` if the SMC call involves an OP-TEE message, which should be handled by
+/// `handle_optee_msg_arg` or `handle_ta_request`.
+///
 /// # Panics
+///
 /// Panics if the normal world physical address in `smc` cannot be converted to `usize`.
 pub fn handle_optee_smc_args(
     smc: &mut OpteeSmcArgs,
-) -> Result<(OpteeSmcResult<'_>, Option<OpteeMsgArg>), OpteeSmcReturn> {
+) -> Result<OpteeSmcHandled<'_>, OpteeSmcReturn> {
     let func_id = smc.func_id()?;
     match func_id {
         OpteeSmcFunction::CallWithArg
@@ -79,41 +87,41 @@ pub fn handle_optee_smc_args(
             let mut ptr = NormalWorldConstPtr::<OpteeMsgArg>::with_usize(msg_arg_addr)
                 .map_err(|_| OpteeSmcReturn::EBadAddr)?;
             let msg_arg = unsafe { ptr.read_at_offset(0) }.map_err(|_| OpteeSmcReturn::EBadAddr)?;
-            Ok((
-                OpteeSmcResult::Generic {
+            Ok(OpteeSmcHandled {
+                result: OpteeSmcResult::Generic {
                     status: OpteeSmcReturn::Ok,
                 },
-                Some(*msg_arg),
-            ))
+                msg_to_handle: Some(*msg_arg),
+            })
         }
         OpteeSmcFunction::ExchangeCapabilities => {
             // TODO: update the below when we support more features
             let default_cap = OpteeSecureWorldCapabilities::DYNAMIC_SHM
                 | OpteeSecureWorldCapabilities::MEMREF_NULL
                 | OpteeSecureWorldCapabilities::RPC_ARG;
-            Ok((
-                OpteeSmcResult::ExchangeCapabilities {
+            Ok(OpteeSmcHandled {
+                result: OpteeSmcResult::ExchangeCapabilities {
                     status: OpteeSmcReturn::Ok,
                     capabilities: default_cap,
                     max_notif_value: MAX_NOTIF_VALUE,
                     data: NUM_RPC_PARMS,
                 },
-                None,
-            ))
+                msg_to_handle: None,
+            })
         }
         OpteeSmcFunction::DisableShmCache => {
             // Currently, we do not support this feature.
- Ok(( - OpteeSmcResult::DisableShmCache { + Ok(OpteeSmcHandled { + result: OpteeSmcResult::DisableShmCache { status: OpteeSmcReturn::ENotAvail, shm_upper32: 0, shm_lower32: 0, }, - None, - )) + msg_to_handle: None, + }) } - OpteeSmcFunction::GetOsUuid => Ok(( - OpteeSmcResult::Uuid { + OpteeSmcFunction::GetOsUuid => Ok(OpteeSmcHandled { + result: OpteeSmcResult::Uuid { data: &[ OPTEE_MSG_OS_OPTEE_UUID_0, OPTEE_MSG_OS_OPTEE_UUID_1, @@ -121,10 +129,10 @@ pub fn handle_optee_smc_args( OPTEE_MSG_OS_OPTEE_UUID_3, ], }, - None, - )), - OpteeSmcFunction::CallsUid => Ok(( - OpteeSmcResult::Uuid { + msg_to_handle: None, + }), + OpteeSmcFunction::CallsUid => Ok(OpteeSmcHandled { + result: OpteeSmcResult::Uuid { data: &[ OPTEE_MSG_UID_0, OPTEE_MSG_UID_1, @@ -132,29 +140,29 @@ pub fn handle_optee_smc_args( OPTEE_MSG_UID_3, ], }, - None, - )), - OpteeSmcFunction::GetOsRevision => Ok(( - OpteeSmcResult::OsRevision { + msg_to_handle: None, + }), + OpteeSmcFunction::GetOsRevision => Ok(OpteeSmcHandled { + result: OpteeSmcResult::OsRevision { major: OPTEE_MSG_REVISION_MAJOR, minor: OPTEE_MSG_REVISION_MINOR, build_id: OPTEE_MSG_BUILD_ID, }, - None, - )), - OpteeSmcFunction::CallsRevision => Ok(( - OpteeSmcResult::Revision { + msg_to_handle: None, + }), + OpteeSmcFunction::CallsRevision => Ok(OpteeSmcHandled { + result: OpteeSmcResult::Revision { major: OPTEE_MSG_REVISION_MAJOR, minor: OPTEE_MSG_REVISION_MINOR, }, - None, - )), + msg_to_handle: None, + }), _ => Err(OpteeSmcReturn::UnknownFunction), } } /// This function handles an OP-TEE message contained in `OpteeMsgArg`. -/// Currently, it only handles share memory registration and unregistration. +/// Currently, it only handles shared memory registration and unregistration. /// If an OP-TEE message involves with a TA request, it simply returns /// `Err(OpteeSmcReturn::Ok)` while expecting that the caller will handle /// the message with `handle_ta_request`. @@ -166,7 +174,7 @@ pub fn handle_optee_msg_arg(msg_arg: &OpteeMsgArg) -> Result<(), OpteeSmcReturn> if tmem.buf_ptr == 0 || tmem.size == 0 || tmem.shm_ref == 0 { return Err(OpteeSmcReturn::EBadAddr); } - // `tmem.buf_ptr` embeds two different information: + // `tmem.buf_ptr` encodes two different information: // - The physical page address of the first `ShmRefPagesData` // - The page offset of the first shared memory page (`pages_list[0]`) let shm_ref_pages_data_phys_addr = page_align_down(tmem.buf_ptr); diff --git a/litebox_shim_optee/src/ptr.rs b/litebox_shim_optee/src/ptr.rs index a2fe3f9cf..0625804bb 100644 --- a/litebox_shim_optee/src/ptr.rs +++ b/litebox_shim_optee/src/ptr.rs @@ -19,9 +19,9 @@ //! leakage due to concurrent or persistent access). //! //! Instead, the approach this module takes is to map the required physical memory -//! region on-demand when accessing them while using a LiteBox-managed buffer to copy +//! region on-demand when accessing them while using a LiteBox-owned buffer to copy //! data to/from those regions. This way, this module can ensure that data must be -//! copied into LiteBox-managed memory before being used while avoiding any unknown +//! copied into LiteBox-owned memory before being used while avoiding any unknown //! side effects due to persistent memory mapping. //! //! Considerations: @@ -97,10 +97,10 @@ pub struct PhysMutPtr { impl PhysMutPtr { /// Create a new `PhysMutPtr` from the given physical page array and offset. /// - /// All addresses in `pages` should be valid and aligned to `ALIGN`, and `offset` should be smaller - /// than `ALIGN`. 
Also, `pages` should contain enough pages to cover at least one object of - /// type `T` starting from `offset`. If these conditions are not met, this function returns - /// `Err(PhysPointerError)`. + /// All addresses in `pages` should be valid and aligned to `ALIGN`, and `offset` should be + /// smaller than `ALIGN`. Also, `pages` should contain enough pages to cover at least one + /// object of type `T` starting from `offset`. If these conditions are not met, this function + /// returns `Err(PhysPointerError)`. pub fn new(pages: &[PhysPageAddr], offset: usize) -> Result { if offset >= ALIGN { return Err(PhysPointerError::InvalidBaseOffset(offset, ALIGN)); @@ -133,9 +133,9 @@ impl PhysMutPtr { /// Create a new `PhysMutPtr` from the given contiguous physical address and length. /// /// This is a shortcut for - /// `PhysMutPtr::new([align_down(pa), align_down(pa) + ALIGN, ..., align_up(align_down(pa) + bytes)], pa % ALIGN)`. + /// `PhysMutPtr::new([align_down(pa), align_down(pa) + ALIGN, ..., align_up(pa + bytes) - ALIGN], pa % ALIGN)`. /// This function assumes that `pa`, ..., `pa+bytes` are both physically and virtually contiguous. If not, - /// later accesses through `PhysMutPtr` may read/write incorrect data. + /// later accesses through `PhysMutPtr` may read/write data in a wrong order. pub fn with_contiguous_pages(pa: usize, bytes: usize) -> Result { if bytes < core::mem::size_of::() { return Err(PhysPointerError::InsufficientPhysicalPages( @@ -197,7 +197,7 @@ impl PhysMutPtr { self.map_range(start, end, PhysPageMapPermissions::READ)?; } // Don't forget to call unmap() before returning to the caller - let Some(src) = (unsafe { self.base_ptr() }) else { + let Some(src) = self.base_ptr() else { unsafe { self.unmap()?; } @@ -259,7 +259,7 @@ impl PhysMutPtr { self.map_range(start, end, PhysPageMapPermissions::READ)?; } // Don't forget to call unmap() before returning to the caller - let Some(src) = (unsafe { self.base_ptr() }) else { + let Some(src) = self.base_ptr() else { unsafe { self.unmap()?; } @@ -289,7 +289,7 @@ impl PhysMutPtr { /// /// # Safety /// - /// The caller should be aware that the given physical address might be concurrently writtenby + /// The caller should be aware that the given physical address might be concurrently written by /// other entities (e.g., the normal world kernel) if there is no extra security mechanism /// in place (e.g., by the hypervisor or hardware). That is, data it writes might be overwritten. pub unsafe fn write_at_offset( @@ -318,7 +318,7 @@ impl PhysMutPtr { )?; } // Don't forget to call unmap() before returning to the caller - let Some(dst) = (unsafe { self.base_ptr() }) else { + let Some(dst) = self.base_ptr() else { unsafe { self.unmap()?; } @@ -372,7 +372,7 @@ impl PhysMutPtr { )?; } // Don't forget to call unmap() before returning to the caller - let Some(dst) = (unsafe { self.base_ptr() }) else { + let Some(dst) = self.base_ptr() else { unsafe { self.unmap()?; } @@ -453,16 +453,12 @@ impl PhysMutPtr { } /// Get the base virtual pointer if mapped. - /// - /// # Safety - /// - /// This function performs pointer arithmetic on the mapped base pointer. 
#[inline] - unsafe fn base_ptr(&self) -> Option<*mut T> { + fn base_ptr(&self) -> Option<*mut T> { let Some(map_info) = &self.map_info else { return None; }; - Some(unsafe { map_info.base.add(self.offset) }.cast::()) + Some(map_info.base.wrapping_add(self.offset).cast::()) } } @@ -505,9 +501,9 @@ impl PhysConstPtr { /// Create a new `PhysConstPtr` from the given contiguous physical address and length. /// /// This is a shortcut for - /// `PhysConstPtr::new([align_down(pa), align_down(pa) + ALIGN, ..., align_up(align_down(pa) + bytes)], pa % ALIGN)`. + /// `PhysConstPtr::new([align_down(pa), align_down(pa) + ALIGN, ..., align_up(pa + bytes) - ALIGN], pa % ALIGN)`. /// This function assumes that `pa`, ..., `pa+bytes` are both physically and virtually contiguous. If not, - /// later accesses through `PhysConstPtr` may read incorrect data. + /// later accesses through `PhysConstPtr` may read data in a wrong order. pub fn with_contiguous_pages(pa: usize, bytes: usize) -> Result { Ok(Self { inner: PhysMutPtr::with_contiguous_pages(pa, bytes)?, From 9a88e082a775cacbb96a82666206c32098902b4b Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Mon, 15 Dec 2025 23:40:56 +0000 Subject: [PATCH 35/52] handle TA request (wip) --- litebox_shim_optee/src/msg_handler.rs | 191 +++++++++++++++++++++++++- 1 file changed, 187 insertions(+), 4 deletions(-) diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index c3bbcfdc4..c040c16ab 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -21,8 +21,9 @@ use hashbrown::HashMap; use litebox::mm::linux::PAGE_SIZE; use litebox::platform::vmap::PhysPageAddr; use litebox_common_optee::{ - OpteeMessageCommand, OpteeMsgArg, OpteeSecureWorldCapabilities, OpteeSmcArgs, OpteeSmcFunction, - OpteeSmcResult, OpteeSmcReturn, + OpteeMessageCommand, OpteeMsgArg, OpteeMsgAttrType, OpteeMsgParamRmem, OpteeMsgParamTmem, + OpteeSecureWorldCapabilities, OpteeSmcArgs, OpteeSmcFunction, OpteeSmcResult, OpteeSmcReturn, + UteeEntryFunc, UteeParamOwned, }; use once_cell::race::OnceBox; @@ -207,8 +208,133 @@ pub fn handle_optee_msg_arg(msg_arg: &OpteeMsgArg) -> Result<(), OpteeSmcReturn> } /// This function handles a TA request contained in `OpteeMsgArg` -pub fn handle_ta_request(_msg_arg: &OpteeMsgArg) -> Result { - todo!() +pub fn handle_ta_request(msg_arg: &OpteeMsgArg) -> Result { + let ta_entry_func: UteeEntryFunc = msg_arg.cmd.try_into()?; + let skip: usize = if ta_entry_func == UteeEntryFunc::OpenSession { + // TODO: load a TA using its UUID + 2 + } else { + 0 + }; + let mut ta_params = [const { UteeParamOwned::None }; UteeParamOwned::TEE_NUM_PARAMS]; + let num_params: usize = msg_arg.num_params.try_into().unwrap(); + for (i, param) in msg_arg + .params + .iter() + .take(num_params) + .skip(skip) + .enumerate() + { + ta_params[i] = match param.attr_type() { + OpteeMsgAttrType::None => UteeParamOwned::None, + OpteeMsgAttrType::ValueInput => { + let value = param.get_param_value().ok_or(OpteeSmcReturn::EBadCmd)?; + UteeParamOwned::ValueInput { + value_a: value.a, + value_b: value.b, + } + } + OpteeMsgAttrType::ValueOutput => UteeParamOwned::ValueOutput { out_address: None }, + OpteeMsgAttrType::ValueInout => { + let value = param.get_param_value().ok_or(OpteeSmcReturn::EBadCmd)?; + UteeParamOwned::ValueInout { + value_a: value.a, + value_b: value.b, + out_address: None, + } + } + OpteeMsgAttrType::TmemInput | OpteeMsgAttrType::RmemInput => { + if let (Ok(phys_addrs), data_size) = { + match param.attr_type() { 
+                        OpteeMsgAttrType::TmemInput => {
+                            let tmem = param.get_param_tmem().ok_or(OpteeSmcReturn::EBadCmd)?;
+                            (
+                                get_shm_phys_addrs_from_optee_msg_param_tmem(tmem),
+                                usize::try_from(tmem.size).unwrap(),
+                            )
+                        }
+                        OpteeMsgAttrType::RmemInput => {
+                            let rmem = param.get_param_rmem().ok_or(OpteeSmcReturn::EBadCmd)?;
+                            (
+                                get_shm_phys_addrs_from_optee_msg_param_rmem(rmem),
+                                usize::try_from(rmem.size).unwrap(),
+                            )
+                        }
+                        _ => unreachable!(),
+                    }
+                } {
+                    let slice = alloc::vec![0u8; data_size];
+                    // TODO: walk the scatter-gather list to populate `slice`
+                    UteeParamOwned::MemrefInput { data: slice.into() }
+                } else {
+                    UteeParamOwned::None
+                }
+            }
+            OpteeMsgAttrType::TmemOutput | OpteeMsgAttrType::RmemOutput => {
+                if let (Ok(phys_addrs), buffer_size) = {
+                    match param.attr_type() {
+                        OpteeMsgAttrType::TmemOutput => {
+                            let tmem = param.get_param_tmem().ok_or(OpteeSmcReturn::EBadCmd)?;
+                            (
+                                get_shm_phys_addrs_from_optee_msg_param_tmem(tmem),
+                                usize::try_from(tmem.size).unwrap(),
+                            )
+                        }
+                        OpteeMsgAttrType::RmemOutput => {
+                            let rmem = param.get_param_rmem().ok_or(OpteeSmcReturn::EBadCmd)?;
+                            (
+                                get_shm_phys_addrs_from_optee_msg_param_rmem(rmem),
+                                usize::try_from(rmem.size).unwrap(),
+                            )
+                        }
+                        _ => unreachable!(),
+                    }
+                } {
+                    UteeParamOwned::MemrefOutput {
+                        buffer_size,
+                        out_addresses: Some(phys_addrs),
+                    }
+                } else {
+                    UteeParamOwned::None
+                }
+            }
+            OpteeMsgAttrType::TmemInout | OpteeMsgAttrType::RmemInout => {
+                if let (Ok(phys_addrs), buffer_size) = {
+                    match param.attr_type() {
+                        OpteeMsgAttrType::TmemInout => {
+                            let tmem = param.get_param_tmem().ok_or(OpteeSmcReturn::EBadCmd)?;
+                            (
+                                get_shm_phys_addrs_from_optee_msg_param_tmem(tmem),
+                                usize::try_from(tmem.size).unwrap(),
+                            )
+                        }
+                        OpteeMsgAttrType::RmemInout => {
+                            let rmem = param.get_param_rmem().ok_or(OpteeSmcReturn::EBadCmd)?;
+                            (
+                                get_shm_phys_addrs_from_optee_msg_param_rmem(rmem),
+                                usize::try_from(rmem.size).unwrap(),
+                            )
+                        }
+                        _ => unreachable!(),
+                    }
+                } {
+                    let slice = alloc::vec![0u8; buffer_size];
+                    // TODO: walk the scatter-gather list to populate `slice`
+                    UteeParamOwned::MemrefInout {
+                        data: slice.into(),
+                        buffer_size,
+                        out_addresses: Some(phys_addrs),
+                    }
+                } else {
+                    UteeParamOwned::None
+                }
+            }
+            _ => todo!(),
+        };
+    }
+
+    // let ta_cmd_id = msg_arg.func;
+    Ok(*msg_arg)
 }
 
 /// A scatter-gather list of OP-TEE physical page addresses in the normal world (VTL0) to
@@ -333,3 +459,60 @@ fn shm_ref_map() -> &'static ShmRefMap<PAGE_SIZE> {
     static SHM_REF_MAP: OnceBox<ShmRefMap<PAGE_SIZE>> = OnceBox::new();
     SHM_REF_MAP.get_or_init(|| Box::new(ShmRefMap::new()))
 }
+
+/// Get the normal world physical addresses of OP-TEE shared memory from `OpteeMsgParamTmem`.
+/// Note that we use this function for handling TA requests, and in this context
+/// `OpteeMsgParamTmem` and `OpteeMsgParamRmem` are equivalent because every shared memory
+/// reference accessible by TAs must be registered in advance.
+/// `OpteeMsgParamTmem` only matters for the registration of shared memory regions.
+fn get_shm_phys_addrs_from_optee_msg_param_tmem(
+    tmem: OpteeMsgParamTmem,
+) -> Result<Box<[usize]>, OpteeSmcReturn> {
+    let rmem = OpteeMsgParamRmem {
+        offs: tmem.buf_ptr,
+        size: tmem.size,
+        shm_ref: tmem.shm_ref,
+    };
+    get_shm_phys_addrs_from_optee_msg_param_rmem(rmem)
+}
+
+/// Get a list of the normal world physical addresses of OP-TEE shared memory from `OpteeMsgParamRmem`.
+/// Specifically, `rmem.offs` must be an offset within the shared memory region registered with +/// `rmem.shm_ref` before and `rmem.offs + rmem.size` must not exceed the size of the registered +/// shared memory region. +/// All addresses this function returns are page-aligned except the first one whose page offset is +/// in `ShmRefInfo`. These addresses are virtually contiguous within the normal world, but not +/// necessarily physically contiguous. +fn get_shm_phys_addrs_from_optee_msg_param_rmem( + rmem: OpteeMsgParamRmem, +) -> Result, OpteeSmcReturn> { + let Some(shm_ref_info) = shm_ref_map().get(rmem.shm_ref) else { + return Err(OpteeSmcReturn::ENotAvail); + }; + let offset = shm_ref_info + .page_offset + .checked_add(rmem.offs) + .ok_or(OpteeSmcReturn::EBadAddr)?; + let end = offset + .checked_add(rmem.size) + .ok_or(OpteeSmcReturn::EBadAddr)?; + let start_index = usize::try_from(page_align_down(offset)).unwrap() / PAGE_SIZE; + let end_index = usize::try_from(page_align_up(end)).unwrap() / PAGE_SIZE; + if start_index >= shm_ref_info.pages.len() || end_index > shm_ref_info.pages.len() { + return Err(OpteeSmcReturn::EBadAddr); + } + + let mut pages = Vec::with_capacity(end_index - start_index); + let page_offset = offset - page_align_down(offset); + pages[0] = usize::try_from(shm_ref_info.pages[start_index] + page_offset).unwrap(); + for (i, page) in shm_ref_info + .pages + .iter() + .take(end_index) + .skip(start_index) + .enumerate() + { + pages[i] = usize::try_from(*page).unwrap(); + } + Ok(pages.into_boxed_slice()) +} From bc469ff55d7bbb7931f49eb8e21fbf438cd65b49 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Tue, 16 Dec 2025 04:38:52 +0000 Subject: [PATCH 36/52] read_data_from_shm_phys_addrs --- litebox_shim_optee/src/msg_handler.rs | 86 ++++++++++++++++++--------- 1 file changed, 59 insertions(+), 27 deletions(-) diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index c040c16ab..f494fb7f1 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -52,12 +52,17 @@ const MAX_NOTIF_VALUE: usize = 0; const NUM_RPC_PARMS: usize = 4; #[inline] -fn page_align_down(address: u64) -> u64 { +fn page_align_down_u64(address: u64) -> u64 { address & !(PAGE_SIZE as u64 - 1) } #[inline] -fn page_align_up(len: u64) -> u64 { +fn page_align_down(address: usize) -> usize { + address & !(PAGE_SIZE - 1) +} + +#[inline] +fn page_align_up_u64(len: u64) -> u64 { len.next_multiple_of(PAGE_SIZE as u64) } @@ -178,9 +183,9 @@ pub fn handle_optee_msg_arg(msg_arg: &OpteeMsgArg) -> Result<(), OpteeSmcReturn> // `tmem.buf_ptr` encodes two different information: // - The physical page address of the first `ShmRefPagesData` // - The page offset of the first shared memory page (`pages_list[0]`) - let shm_ref_pages_data_phys_addr = page_align_down(tmem.buf_ptr); + let shm_ref_pages_data_phys_addr = page_align_down_u64(tmem.buf_ptr); let page_offset = tmem.buf_ptr - shm_ref_pages_data_phys_addr; - let aligned_size = page_align_up(page_offset + tmem.size); + let aligned_size = page_align_up_u64(page_offset + tmem.size); shm_ref_map().register_shm( shm_ref_pages_data_phys_addr, page_offset, @@ -208,6 +213,8 @@ pub fn handle_optee_msg_arg(msg_arg: &OpteeMsgArg) -> Result<(), OpteeSmcReturn> } /// This function handles a TA request contained in `OpteeMsgArg` +/// # Panics +/// Panics if any conversion from `u32` to `usize` fails. 
pub fn handle_ta_request(msg_arg: &OpteeMsgArg) -> Result { let ta_entry_func: UteeEntryFunc = msg_arg.cmd.try_into()?; let skip: usize = if ta_entry_func == UteeEntryFunc::OpenSession { @@ -263,9 +270,9 @@ pub fn handle_ta_request(msg_arg: &OpteeMsgArg) -> Result unreachable!(), } } { - let slice = alloc::vec![0u8; data_size]; - // TODO: walk the scatter-gather list to populate `slice` - UteeParamOwned::MemrefInput { data: slice.into() } + let mut data = alloc::vec![0u8; data_size]; + read_data_from_shm_phys_addrs(&phys_addrs, &mut data)?; + UteeParamOwned::MemrefInput { data: data.into() } } else { UteeParamOwned::None } @@ -318,10 +325,10 @@ pub fn handle_ta_request(msg_arg: &OpteeMsgArg) -> Result unreachable!(), } } { - let slice = alloc::vec![0u8; buffer_size]; - // TODO: walk the scatter-gather list to populate `slice` + let mut buffer = alloc::vec![0u8; buffer_size]; + read_data_from_shm_phys_addrs(&phys_addrs, &mut buffer)?; UteeParamOwned::MemrefInout { - data: slice.into(), + data: buffer.into(), buffer_size, out_addresses: Some(phys_addrs), } @@ -477,42 +484,67 @@ fn get_shm_phys_addrs_from_optee_msg_param_tmem( } /// Get a list of the normal world physical addresses of OP-TEE shared memory from `OpteeMsgParamRmem`. -/// Specifically, `rmem.offs` must be an offset within the shared memory region registered with -/// `rmem.shm_ref` before and `rmem.offs + rmem.size` must not exceed the size of the registered -/// shared memory region. -/// All addresses this function returns are page-aligned except the first one whose page offset is -/// in `ShmRefInfo`. These addresses are virtually contiguous within the normal world, but not -/// necessarily physically contiguous. +/// `rmem.offs` must be an offset within the shared memory region registered with `rmem.shm_ref` before +/// and `rmem.offs + rmem.size` must not exceed the size of the registered shared memory region. +/// All addresses this function returns are page-aligned except the first one. These addresses are +/// virtually contiguous within the normal world, but not necessarily physically contiguous. 
 fn get_shm_phys_addrs_from_optee_msg_param_rmem(
     rmem: OpteeMsgParamRmem,
 ) -> Result<Box<[usize]>, OpteeSmcReturn> {
     let Some(shm_ref_info) = shm_ref_map().get(rmem.shm_ref) else {
         return Err(OpteeSmcReturn::ENotAvail);
     };
-    let offset = shm_ref_info
+    let start = shm_ref_info
         .page_offset
         .checked_add(rmem.offs)
         .ok_or(OpteeSmcReturn::EBadAddr)?;
-    let end = offset
+    let end = start
         .checked_add(rmem.size)
         .ok_or(OpteeSmcReturn::EBadAddr)?;
-    let start_index = usize::try_from(page_align_down(offset)).unwrap() / PAGE_SIZE;
-    let end_index = usize::try_from(page_align_up(end)).unwrap() / PAGE_SIZE;
-    if start_index >= shm_ref_info.pages.len() || end_index > shm_ref_info.pages.len() {
+    let start_page_index = usize::try_from(page_align_down_u64(start)).unwrap() / PAGE_SIZE;
+    let end_page_index = usize::try_from(page_align_up_u64(end)).unwrap() / PAGE_SIZE;
+    if start_page_index >= shm_ref_info.pages.len() || end_page_index > shm_ref_info.pages.len() {
         return Err(OpteeSmcReturn::EBadAddr);
     }
-
-    let mut pages = Vec::with_capacity(end_index - start_index);
-    let page_offset = offset - page_align_down(offset);
-    pages[0] = usize::try_from(shm_ref_info.pages[start_index] + page_offset).unwrap();
-    for (i, page) in shm_ref_info
-        .pages
-        .iter()
-        .take(end_index)
-        .skip(start_index)
-        .enumerate()
-    {
-        pages[i] = usize::try_from(*page).unwrap();
-    }
+    let mut pages = Vec::with_capacity(end_page_index - start_page_index);
+    let page_offset = usize::try_from(start - page_align_down_u64(start)).unwrap();
+    for (i, page) in shm_ref_info.pages[start_page_index..end_page_index]
+        .iter()
+        .enumerate()
+    {
+        // `pages` starts empty, so push rather than index into it; only the
+        // first address keeps its sub-page offset, the rest stay page-aligned.
+        if i == 0 {
+            pages.push(page.as_usize() + page_offset);
+        } else {
+            pages.push(page.as_usize());
+        }
+    }
     Ok(pages.into_boxed_slice())
 }
+
+/// Read data from the normal world shared memory pages whose physical addresses are given in
+/// `phys_addrs` into `buffer`. The size of `buffer` indicates how many bytes to read.
+/// Currently, this function reads data page by page (i.e., it does not map multiple physical
+/// pages at once). All physical addresses in `phys_addrs` are page-aligned except the first one.
+fn read_data_from_shm_phys_addrs( + phys_addrs: &[usize], + buffer: &mut [u8], +) -> Result<(), OpteeSmcReturn> { + let ptr = NormalWorldConstPtr::<[u8; PAGE_SIZE]>::from_usize(page_align_down(phys_addrs[0])); + let page = unsafe { ptr.read_at_offset(0) }.ok_or(OpteeSmcReturn::EBadAddr)?; + let page_offset = phys_addrs[0] - page_align_down(phys_addrs[0]); + let to_copy = core::cmp::min(PAGE_SIZE - page_offset, buffer.len()); + buffer.copy_from_slice(&page[page_offset..page_offset + to_copy]); + let mut copied = to_copy; + + for phys_addr in phys_addrs.iter().skip(1) { + if copied >= buffer.len() { + break; + } + let ptr = NormalWorldConstPtr::<[u8; PAGE_SIZE]>::from_usize(*phys_addr); + let page = unsafe { ptr.read_at_offset(0) }.ok_or(OpteeSmcReturn::EBadAddr)?; + let to_copy = core::cmp::min(PAGE_SIZE, buffer.len() - copied); + buffer[copied..copied + to_copy].copy_from_slice(&page[..to_copy]); + copied += to_copy; + } + Ok(()) +} From 368cdf48df517e4040919b64ae2cf4515a346b34 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Tue, 16 Dec 2025 17:30:25 +0000 Subject: [PATCH 37/52] ta uuid --- litebox_common_optee/src/lib.rs | 16 ++++++++++++++++ litebox_shim_optee/src/msg_handler.rs | 26 +++++++++++++++++--------- 2 files changed, 33 insertions(+), 9 deletions(-) diff --git a/litebox_common_optee/src/lib.rs b/litebox_common_optee/src/lib.rs index 26ae7cf2d..12174cef1 100644 --- a/litebox_common_optee/src/lib.rs +++ b/litebox_common_optee/src/lib.rs @@ -522,6 +522,22 @@ pub struct TeeUuid { pub time_hi_and_version: u16, pub clock_seq_and_node: [u8; 8], } +impl TeeUuid { + pub fn new_from_u32s(data: [u32; 4]) -> Self { + let time_low = data[0]; + let time_mid = (data[1] >> 16) as u16; + let time_hi_and_version = (data[1] & 0xffff) as u16; + let mut clock_seq_and_node = [0u8; 8]; + clock_seq_and_node[0..4].copy_from_slice(&data[2].to_be_bytes()); + clock_seq_and_node[4..8].copy_from_slice(&data[3].to_be_bytes()); + TeeUuid { + time_low, + time_mid, + time_hi_and_version, + clock_seq_and_node, + } + } +} /// `TEE_Identity` from `optee_os/lib/libutee/include/tee_api_types.h`. #[derive(Clone, Copy, PartialEq)] diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index f494fb7f1..a9a152bf5 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -23,7 +23,7 @@ use litebox::platform::vmap::PhysPageAddr; use litebox_common_optee::{ OpteeMessageCommand, OpteeMsgArg, OpteeMsgAttrType, OpteeMsgParamRmem, OpteeMsgParamTmem, OpteeSecureWorldCapabilities, OpteeSmcArgs, OpteeSmcFunction, OpteeSmcResult, OpteeSmcReturn, - UteeEntryFunc, UteeParamOwned, + TeeUuid, UteeEntryFunc, UteeParamOwned, }; use once_cell::race::OnceBox; @@ -214,17 +214,25 @@ pub fn handle_optee_msg_arg(msg_arg: &OpteeMsgArg) -> Result<(), OpteeSmcReturn> /// This function handles a TA request contained in `OpteeMsgArg` /// # Panics -/// Panics if any conversion from `u32` to `usize` fails. +/// Panics if any conversion from `u64` to `usize` fails. OP-TEE shim doesn't support a 32-bit environment. 
pub fn handle_ta_request(msg_arg: &OpteeMsgArg) -> Result { let ta_entry_func: UteeEntryFunc = msg_arg.cmd.try_into()?; - let skip: usize = if ta_entry_func == UteeEntryFunc::OpenSession { - // TODO: load a TA using its UUID - 2 + let (ta_uuid, skip): (Option, usize) = if ta_entry_func == UteeEntryFunc::OpenSession { + // If it is an OpenSession request, extract the TA UUID from the first two parameters + let mut data = [0u32; 4]; + data[0] = (msg_arg.get_param_value(0)?.a).truncate(); + data[1] = (msg_arg.get_param_value(0)?.b).truncate(); + data[2] = (msg_arg.get_param_value(1)?.a).truncate(); + data[3] = (msg_arg.get_param_value(1)?.b).truncate(); + (Some(TeeUuid::new_from_u32s(data)), 2) } else { - 0 + (None, 0) }; + + let ta_cmd_id = msg_arg.func; + let mut ta_params = [const { UteeParamOwned::None }; UteeParamOwned::TEE_NUM_PARAMS]; - let num_params: usize = msg_arg.num_params.try_into().unwrap(); + let num_params = msg_arg.num_params as usize; for (i, param) in msg_arg .params .iter() @@ -234,6 +242,7 @@ pub fn handle_ta_request(msg_arg: &OpteeMsgArg) -> Result UteeParamOwned::None, + // TODO: drop `out_address`. We'll revise the call-by-value handling. OpteeMsgAttrType::ValueInput => { let value = param.get_param_value().ok_or(OpteeSmcReturn::EBadCmd)?; UteeParamOwned::ValueInput { @@ -336,11 +345,10 @@ pub fn handle_ta_request(msg_arg: &OpteeMsgArg) -> Result todo!(), + _ => return Err(OpteeSmcReturn::EBadCmd), }; } - // let ta_cmd_id = msg_arg.func; Ok(*msg_arg) } From cc24af96577247517ad1df2ee4c807ed14e42d79 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Tue, 16 Dec 2025 22:19:43 +0000 Subject: [PATCH 38/52] prepare for return to normal world --- litebox_common_optee/src/lib.rs | 12 +++ litebox_shim_optee/src/msg_handler.rs | 140 +++++++++++++++++++++++--- 2 files changed, 139 insertions(+), 13 deletions(-) diff --git a/litebox_common_optee/src/lib.rs b/litebox_common_optee/src/lib.rs index 12174cef1..6b9d52894 100644 --- a/litebox_common_optee/src/lib.rs +++ b/litebox_common_optee/src/lib.rs @@ -1381,6 +1381,18 @@ impl OpteeMsgArg { .ok_or(OpteeSmcReturn::EBadCmd)?) } } + pub fn set_param_value( + &mut self, + index: usize, + value: OpteeMsgParamValue, + ) -> Result<(), OpteeSmcReturn> { + if index >= self.num_params as usize { + Err(OpteeSmcReturn::ENotAvail) + } else { + self.params[index].u.value = value; + Ok(()) + } + } } /// A memory page to exchange OP-TEE SMC call arguments. diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index a9a152bf5..7be192491 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -22,8 +22,9 @@ use litebox::mm::linux::PAGE_SIZE; use litebox::platform::vmap::PhysPageAddr; use litebox_common_optee::{ OpteeMessageCommand, OpteeMsgArg, OpteeMsgAttrType, OpteeMsgParamRmem, OpteeMsgParamTmem, - OpteeSecureWorldCapabilities, OpteeSmcArgs, OpteeSmcFunction, OpteeSmcResult, OpteeSmcReturn, - TeeUuid, UteeEntryFunc, UteeParamOwned, + OpteeMsgParamValue, OpteeSecureWorldCapabilities, OpteeSmcArgs, OpteeSmcFunction, + OpteeSmcResult, OpteeSmcReturn, TeeParamType, TeeUuid, UteeEntryFunc, UteeParamOwned, + UteeParams, }; use once_cell::race::OnceBox; @@ -75,7 +76,7 @@ pub struct OpteeSmcHandled<'a> { /// This function handles `OpteeSmcArgs` passed from the normal world (VTL0) via an OP-TEE SMC call. 
/// It returns an `OpteeSmcResult` representing the result of the SMC call and /// an optional `OpteeMsgArg` if the SMC call involves with an OP-TEE messagewhich should be handled by -/// `handle_optee_msg_arg` or `handle_ta_request`. +/// `handle_optee_msg_arg` or `decode_ta_request`. /// /// # Panics /// @@ -171,7 +172,7 @@ pub fn handle_optee_smc_args( /// Currently, it only handles shared memory registration and unregistration. /// If an OP-TEE message involves with a TA request, it simply returns /// `Err(OpteeSmcReturn::Ok)` while expecting that the caller will handle -/// the message with `handle_ta_request`. +/// the message with `decode_ta_request`. pub fn handle_optee_msg_arg(msg_arg: &OpteeMsgArg) -> Result<(), OpteeSmcReturn> { msg_arg.validate()?; match msg_arg.cmd { @@ -212,10 +213,27 @@ pub fn handle_optee_msg_arg(msg_arg: &OpteeMsgArg) -> Result<(), OpteeSmcReturn> Ok(()) } -/// This function handles a TA request contained in `OpteeMsgArg` +/// TA request information extracted from an OP-TEE message. +/// In addition to standard TA information (i.e., TA UUID, session ID, command ID, +/// and parameters), it contains shared memory addresses (`out_phys_addrs`) to +/// write back output data to the normal world once the TA execution is done. +pub struct TaRequestInfo { + pub uuid: Option, + pub session: u32, + pub entry_func: UteeEntryFunc, + pub cmd_id: u32, + pub params: [UteeParamOwned; UteeParamOwned::TEE_NUM_PARAMS], + pub out_phys_addrs: [Option>; UteeParamOwned::TEE_NUM_PARAMS], +} + +/// This function decodes a TA request contained in `OpteeMsgArg`. +/// Currently, this function copies the entire parameter data from the normal world +/// shared memory into LiteBox's memory to create `UteeParamOwned` structures. +/// Clearly, this approach is infficient and we need to revise it to avoid unnecessary +/// data copies (while maintaining the security). /// # Panics /// Panics if any conversion from `u64` to `usize` fails. OP-TEE shim doesn't support a 32-bit environment. -pub fn handle_ta_request(msg_arg: &OpteeMsgArg) -> Result { +pub fn decode_ta_request(msg_arg: &OpteeMsgArg) -> Result { let ta_entry_func: UteeEntryFunc = msg_arg.cmd.try_into()?; let (ta_uuid, skip): (Option, usize) = if ta_entry_func == UteeEntryFunc::OpenSession { // If it is an OpenSession request, extract the TA UUID from the first two parameters @@ -229,9 +247,15 @@ pub fn handle_ta_request(msg_arg: &OpteeMsgArg) -> Result Result UteeParamOwned::None, - // TODO: drop `out_address`. We'll revise the call-by-value handling. + // TODO: drop `out_address(es)`. We have revised the way to return back output data. 
OpteeMsgAttrType::ValueInput => { let value = param.get_param_value().ok_or(OpteeSmcReturn::EBadCmd)?; UteeParamOwned::ValueInput { @@ -306,9 +330,10 @@ pub fn handle_ta_request(msg_arg: &OpteeMsgArg) -> Result unreachable!(), } } { + ta_req_info.out_phys_addrs[i] = Some(phys_addrs); UteeParamOwned::MemrefOutput { buffer_size, - out_addresses: Some(phys_addrs), + out_addresses: None, } } else { UteeParamOwned::None @@ -336,10 +361,11 @@ pub fn handle_ta_request(msg_arg: &OpteeMsgArg) -> Result Result Result<(), OpteeSmcReturn> { + for index in 0..UteeParams::TEE_NUM_PARAMS { + let param_type = ta_params + .get_type(index) + .map_err(|_| OpteeSmcReturn::EBadAddr)?; + match param_type { + TeeParamType::ValueOutput | TeeParamType::ValueInout => { + if let Ok(Some((value_a, value_b))) = ta_params.get_values(index) { + msg_arg.set_param_value( + index, + OpteeMsgParamValue { + a: value_a, + b: value_b, + c: 0, + }, + )?; + } + } + TeeParamType::MemrefOutput | TeeParamType::MemrefInout => { + if let Ok(Some((addr, len))) = ta_params.get_values(index) { + // SAFETY + // `addr` is expected to be a valid address of a TA and `addr + len` does not + // exceed the TA's memory region. + let slice = unsafe { + &*core::ptr::slice_from_raw_parts( + addr as *const u8, + usize::try_from(len).unwrap_or(0), + ) + }; + if slice.is_empty() { + continue; + } + if let Some(out_addrs) = &ta_req_info.out_phys_addrs[index] { + write_data_to_shm_phys_addrs(out_addrs, slice)?; + } + } + } + _ => {} + } + } + Ok(()) } /// A scatter-gather list of OP-TEE physical page addresses in the normal world (VTL0) to @@ -556,3 +635,38 @@ fn read_data_from_shm_phys_addrs( } Ok(()) } + +/// Write data in `buffer` to the normal world shared memory pages whose physical addresses +/// are given in `phys_addrs`. The size of `buffer` indicates how many bytes to write. +/// Currently, this function writes data page by page (i.e., it does not map multiple physical +/// pages at once). All physical addresses in `phys_addrs` are page-aligned except the first one. +fn write_data_to_shm_phys_addrs(phys_addrs: &[usize], buffer: &[u8]) -> Result<(), OpteeSmcReturn> { + let ptr = NormalWorldMutPtr::<[u8; PAGE_SIZE]>::from_usize(page_align_down(phys_addrs[0])); + let mut page = unsafe { ptr.read_at_offset(0) } + .ok_or(OpteeSmcReturn::EBadAddr)? 
+ .into_owned(); + let page_offset = phys_addrs[0] - page_align_down(phys_addrs[0]); + let to_copy = core::cmp::min(PAGE_SIZE - page_offset, buffer.len()); + page[page_offset..page_offset + to_copy].copy_from_slice(&buffer[..to_copy]); + unsafe { + ptr.write_at_offset(0, page) + .ok_or(OpteeSmcReturn::EBadAddr)?; + } + let mut written = to_copy; + + for phys_addr in phys_addrs.iter().skip(1) { + if written >= buffer.len() { + break; + } + let ptr = NormalWorldMutPtr::<[u8; PAGE_SIZE]>::from_usize(*phys_addr); + let mut page = [0u8; PAGE_SIZE]; + let to_copy = core::cmp::min(PAGE_SIZE, buffer.len() - written); + page[..to_copy].copy_from_slice(&buffer[written..written + to_copy]); + unsafe { + ptr.write_at_offset(0, page) + .ok_or(OpteeSmcReturn::EBadAddr)?; + } + written += to_copy; + } + Ok(()) +} From 2fcbde40dad74a1a8475f2cca81d9396d8f0983d Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Tue, 16 Dec 2025 22:27:56 +0000 Subject: [PATCH 39/52] drop out_addresses from utee_params --- litebox_common_optee/src/lib.rs | 32 ++++--------------- .../src/tests.rs | 7 +--- litebox_shim_optee/src/loader/ta_stack.rs | 19 +++-------- litebox_shim_optee/src/msg_handler.rs | 16 +++------- 4 files changed, 16 insertions(+), 58 deletions(-) diff --git a/litebox_common_optee/src/lib.rs b/litebox_common_optee/src/lib.rs index 6b9d52894..0eb76d44d 100644 --- a/litebox_common_optee/src/lib.rs +++ b/litebox_common_optee/src/lib.rs @@ -441,35 +441,15 @@ impl UteeParams { /// Each parameter for TA invocation with copied content/buffer for safer operations. /// This is our representation of `utee_params` and not for directly /// interacting with OP-TEE TAs and clients (which expect pointers/references). -/// `out_address(es)`: VTL0 physical address(es) to write output data to. They are virtually -/// contiguous but may not be physically contiguous. 
#[derive(Clone)] pub enum UteeParamOwned { None, - ValueInput { - value_a: u64, - value_b: u64, - }, - ValueOutput { - out_address: Option, - }, - ValueInout { - value_a: u64, - value_b: u64, - out_address: Option, - }, - MemrefInput { - data: Box<[u8]>, - }, - MemrefOutput { - buffer_size: usize, - out_addresses: Option>, - }, - MemrefInout { - data: Box<[u8]>, - buffer_size: usize, - out_addresses: Option>, - }, + ValueInput { value_a: u64, value_b: u64 }, + ValueOutput {}, + ValueInout { value_a: u64, value_b: u64 }, + MemrefInput { data: Box<[u8]> }, + MemrefOutput { buffer_size: usize }, + MemrefInout { data: Box<[u8]>, buffer_size: usize }, } impl UteeParamOwned { diff --git a/litebox_runner_optee_on_linux_userland/src/tests.rs b/litebox_runner_optee_on_linux_userland/src/tests.rs index 8438610d2..404251696 100644 --- a/litebox_runner_optee_on_linux_userland/src/tests.rs +++ b/litebox_runner_optee_on_linux_userland/src/tests.rs @@ -259,20 +259,16 @@ impl TaCommandParamsBase64 { value_a: *value_a, value_b: *value_b, }, - TaCommandParamsBase64::ValueOutput {} => { - UteeParamOwned::ValueOutput { out_address: None } - } + TaCommandParamsBase64::ValueOutput {} => UteeParamOwned::ValueOutput {}, TaCommandParamsBase64::ValueInout { value_a, value_b } => UteeParamOwned::ValueInout { value_a: *value_a, value_b: *value_b, - out_address: None, }, TaCommandParamsBase64::MemrefInput { data_base64 } => UteeParamOwned::MemrefInput { data: Self::decode_base64(data_base64).into_boxed_slice(), }, TaCommandParamsBase64::MemrefOutput { buffer_size } => UteeParamOwned::MemrefOutput { buffer_size: usize::try_from(*buffer_size).unwrap(), - out_addresses: None, }, TaCommandParamsBase64::MemrefInout { data_base64, @@ -287,7 +283,6 @@ impl TaCommandParamsBase64 { UteeParamOwned::MemrefInout { data: decoded_data.into_boxed_slice(), buffer_size, - out_addresses: None, } } } diff --git a/litebox_shim_optee/src/loader/ta_stack.rs b/litebox_shim_optee/src/loader/ta_stack.rs index d77743eb8..299919510 100644 --- a/litebox_shim_optee/src/loader/ta_stack.rs +++ b/litebox_shim_optee/src/loader/ta_stack.rs @@ -213,30 +213,19 @@ impl TaStack { UteeParamOwned::ValueInput { value_a, value_b } => { self.push_param_values(TeeParamType::ValueInput, Some((*value_a, *value_b)))?; } - UteeParamOwned::ValueOutput { out_address: _ } => { + UteeParamOwned::ValueOutput {} => { self.push_param_values(TeeParamType::ValueOutput, None)?; } - UteeParamOwned::ValueInout { - value_a, - value_b, - out_address: _, - } => { + UteeParamOwned::ValueInout { value_a, value_b } => { self.push_param_values(TeeParamType::ValueInout, Some((*value_a, *value_b)))?; } UteeParamOwned::MemrefInput { data } => { self.push_param_memref(TeeParamType::MemrefInput, Some(data), data.len())?; } - UteeParamOwned::MemrefInout { - data, - buffer_size, - out_addresses: _, - } => { + UteeParamOwned::MemrefInout { data, buffer_size } => { self.push_param_memref(TeeParamType::MemrefInout, Some(data), *buffer_size)?; } - UteeParamOwned::MemrefOutput { - buffer_size, - out_addresses: _, - } => { + UteeParamOwned::MemrefOutput { buffer_size } => { self.push_param_memref(TeeParamType::MemrefOutput, None, *buffer_size)?; } UteeParamOwned::None => self.push_param_none()?, diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index 7be192491..558cb0331 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -75,7 +75,7 @@ pub struct OpteeSmcHandled<'a> { /// This function handles `OpteeSmcArgs` 
passed from the normal world (VTL0) via an OP-TEE SMC call. /// It returns an `OpteeSmcResult` representing the result of the SMC call and -/// an optional `OpteeMsgArg` if the SMC call involves with an OP-TEE messagewhich should be handled by +/// an optional `OpteeMsgArg` if the SMC call involves with an OP-TEE message which should be handled by /// `handle_optee_msg_arg` or `decode_ta_request`. /// /// # Panics @@ -229,7 +229,7 @@ pub struct TaRequestInfo { /// This function decodes a TA request contained in `OpteeMsgArg`. /// Currently, this function copies the entire parameter data from the normal world /// shared memory into LiteBox's memory to create `UteeParamOwned` structures. -/// Clearly, this approach is infficient and we need to revise it to avoid unnecessary +/// Clearly, this approach is inefficient and we need to revise it to avoid unnecessary /// data copies (while maintaining the security). /// # Panics /// Panics if any conversion from `u64` to `usize` fails. OP-TEE shim doesn't support a 32-bit environment. @@ -266,7 +266,6 @@ pub fn decode_ta_request(msg_arg: &OpteeMsgArg) -> Result UteeParamOwned::None, - // TODO: drop `out_address(es)`. We have revised the way to return back output data. OpteeMsgAttrType::ValueInput => { let value = param.get_param_value().ok_or(OpteeSmcReturn::EBadCmd)?; UteeParamOwned::ValueInput { @@ -274,13 +273,12 @@ pub fn decode_ta_request(msg_arg: &OpteeMsgArg) -> Result UteeParamOwned::ValueOutput { out_address: None }, + OpteeMsgAttrType::ValueOutput => UteeParamOwned::ValueOutput {}, OpteeMsgAttrType::ValueInout => { let value = param.get_param_value().ok_or(OpteeSmcReturn::EBadCmd)?; UteeParamOwned::ValueInout { value_a: value.a, value_b: value.b, - out_address: None, } } OpteeMsgAttrType::TmemInput | OpteeMsgAttrType::RmemInput => { @@ -331,10 +329,7 @@ pub fn decode_ta_request(msg_arg: &OpteeMsgArg) -> Result Result Result Date: Wed, 17 Dec 2025 00:20:54 +0000 Subject: [PATCH 40/52] optee_msg_handler upcall (wip) --- Cargo.lock | 2 + litebox_runner_lvbs/Cargo.toml | 3 +- litebox_runner_lvbs/src/lib.rs | 125 +++++++++++++++++++++++++++++++++ 3 files changed, 129 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 05e678aa8..32a550f40 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -931,6 +931,8 @@ name = "litebox_runner_lvbs" version = "0.1.0" dependencies = [ "litebox", + "litebox_common_linux", + "litebox_common_optee", "litebox_platform_lvbs", "litebox_platform_multiplex", "litebox_shim_optee", diff --git a/litebox_runner_lvbs/Cargo.toml b/litebox_runner_lvbs/Cargo.toml index 30a916484..e35ac8fe4 100644 --- a/litebox_runner_lvbs/Cargo.toml +++ b/litebox_runner_lvbs/Cargo.toml @@ -7,9 +7,10 @@ edition = "2024" litebox = { version = "0.1.0", path = "../litebox" } litebox_platform_lvbs = { version = "0.1.0", path = "../litebox_platform_lvbs", default-features = false, features = ["interrupt"] } litebox_platform_multiplex = { version = "0.1.0", path = "../litebox_platform_multiplex", default-features = false, features = ["platform_lvbs"] } +litebox_common_optee = { path = "../litebox_common_optee/", version = "0.1.0" } +litebox_common_linux = { path = "../litebox_common_linux/", version = "0.1.0" } litebox_shim_optee = { path = "../litebox_shim_optee/", version = "0.1.0" } - [target.'cfg(target_arch = "x86_64")'.dependencies] x86_64 = { version = "0.15.2", default-features = false, features = ["instructions"] } diff --git a/litebox_runner_lvbs/src/lib.rs b/litebox_runner_lvbs/src/lib.rs index ca136b3f0..99e3815c2 100644 
--- a/litebox_runner_lvbs/src/lib.rs +++ b/litebox_runner_lvbs/src/lib.rs @@ -99,6 +99,131 @@ pub fn run(platform: Option<&'static Platform>) -> ! { vtl_switch_loop_entry(platform) } +// Tentative OP-TEE message handler upcall implementation. +// This will be revised once the upcall interface is finalized. +// NOTE: This function doesn't work because `run_thread` is not ready. +// It is okay to remove this function in this PR and add it in a follow-up PR. +use litebox::platform::{RawConstPointer, RawMutPointer}; +use litebox_common_optee::{ + LdelfArg, OpteeMessageCommand, OpteeMsgArg, OpteeSmcArgs, OpteeSmcReturn, TeeIdentity, + TeeLogin, TeeUuid, UteeEntryFunc, UteeParamOwned, UteeParams, +}; +use litebox_shim_optee::loader::ElfLoadInfo; +use litebox_shim_optee::msg_handler::{ + decode_ta_request, handle_optee_msg_arg, handle_optee_smc_args, + prepare_for_return_to_normal_world, +}; +use litebox_shim_optee::ptr::{NormalWorldConstPtr, NormalWorldMutPtr}; +#[expect(dead_code)] +fn optee_msg_handler_upcall(smc_args_addr: usize) -> Result { + let smc_args_ptr = NormalWorldConstPtr::::from_usize(smc_args_addr); + let mut smc_args = unsafe { smc_args_ptr.read_at_offset(0) } + .unwrap() + .into_owned(); + let msg_arg_phys_addr = smc_args.optee_msg_arg_phys_addr()?; + let (res, msg_arg) = handle_optee_smc_args(&mut smc_args)?; + if let Some(mut msg_arg) = msg_arg { + match msg_arg.cmd { + OpteeMessageCommand::OpenSession + | OpteeMessageCommand::InvokeCommand + | OpteeMessageCommand::CloseSession => { + let Ok(ta_req_info) = decode_ta_request(&msg_arg) else { + return Err(OpteeSmcReturn::EBadCmd); + }; + + let params = [const { UteeParamOwned::None }; UteeParamOwned::TEE_NUM_PARAMS]; + if ta_req_info.entry_func == UteeEntryFunc::OpenSession { + let _litebox = litebox_shim_optee::init_session( + &TeeUuid::default(), + &TeeIdentity { + login: TeeLogin::User, + uuid: TeeUuid::default(), + }, + Some(TA_BINARY), // TODO: replace this with UUID-based TA loading + ); + + let ldelf_info = litebox_shim_optee::loader::load_elf_buffer(LDELF_BINARY) + .expect("Failed to load ldelf"); + let Some(ldelf_arg_address) = ldelf_info.ldelf_arg_address else { + panic!("ldelf_arg_address not found"); + }; + let ldelf_arg = LdelfArg::new(); // TODO: set TA UUID + + let stack = litebox_shim_optee::loader::init_ldelf_stack( + Some(ldelf_info.stack_base), + &ldelf_arg, + ) + .expect("Failed to initialize stack for ldelf"); + let mut _pt_regs = + litebox_shim_optee::loader::prepare_ldelf_registers(&ldelf_info, &stack); + // TODO: run_thread + + // Note: `ldelf` allocates stack (returned via `stack_ptr`) but we don't use it here. + // Need to revisit this to see whether the stack is large enough for our use cases (e.g., + // copy owned data through stack to minimize TOCTTOU threats). + let ldelf_arg_out = unsafe { &*(ldelf_arg_address as *const LdelfArg) }; + let entry_func = usize::try_from(ldelf_arg_out.entry_func).unwrap(); + + litebox_shim_optee::set_ta_loaded(); + + litebox_shim_optee::loader::allocate_guest_tls(None) + .expect("Failed to allocate TLS"); + + // TODO: maintain this ta load info in a global data structure + let ta_info = ElfLoadInfo { + entry_point: entry_func, + stack_base: ldelf_info.stack_base, + params_address: ldelf_info.params_address, + ldelf_arg_address: None, + }; + + // In OP-TEE TA, each command invocation is like (re)starting the TA with a new stack with + // loaded binary and heap. In that sense, we can create (and destroy) a stack + // for each command freely. 
+ let stack = litebox_shim_optee::loader::init_stack( + Some(ta_info.stack_base), + params.as_slice(), + ) + .expect("Failed to initialize stack with parameters"); + let mut _pt_regs = litebox_shim_optee::loader::prepare_registers( + &ta_info, + &stack, + litebox_shim_optee::get_session_id(), + ta_req_info.entry_func as u32, + None, + ); + + // TODO: run_thread + + // SAFETY + // We assume that `ta_info.params_address` is a valid pointer to `UteeParams`. + let ta_params = unsafe { *(ta_info.params_address as *const UteeParams) }; + + prepare_for_return_to_normal_world(&ta_params, &ta_req_info, &mut msg_arg)?; + + let ptr = NormalWorldMutPtr::::from_usize( + usize::try_from(msg_arg_phys_addr).unwrap(), + ); + let _ = unsafe { ptr.write_at_offset(0, msg_arg) }; + } else { + // retrieve `ta_info` from global data structure + todo!() + } + Ok(res.into()) + } + _ => { + handle_optee_msg_arg(&msg_arg)?; + Ok(res.into()) + } + } + } else { + Ok(res.into()) + } +} + +const TA_BINARY: &[u8] = &[0u8; 0]; +const LDELF_BINARY: &[u8] = &[0u8; 0]; + #[panic_handler] fn panic(info: &PanicInfo) -> ! { serial_println!("{}", info); From 64ca1b83caa86aeaeef8b7f051fb4f64b5440655 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Tue, 23 Dec 2025 05:04:41 +0000 Subject: [PATCH 41/52] rebase --- litebox_shim_optee/src/msg_handler.rs | 30 ++++++++++++++++----------- 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index 558cb0331..5420a7672 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -610,8 +610,11 @@ fn read_data_from_shm_phys_addrs( phys_addrs: &[usize], buffer: &mut [u8], ) -> Result<(), OpteeSmcReturn> { - let ptr = NormalWorldConstPtr::<[u8; PAGE_SIZE]>::from_usize(page_align_down(phys_addrs[0])); - let page = unsafe { ptr.read_at_offset(0) }.ok_or(OpteeSmcReturn::EBadAddr)?; + let mut ptr = NormalWorldConstPtr::<[u8; PAGE_SIZE], PAGE_SIZE>::try_from_usize( + page_align_down(phys_addrs[0]), + ) + .map_err(|_| OpteeSmcReturn::EBadAddr)?; + let page = unsafe { ptr.read_at_offset(0) }.map_err(|_| OpteeSmcReturn::EBadAddr)?; let page_offset = phys_addrs[0] - page_align_down(phys_addrs[0]); let to_copy = core::cmp::min(PAGE_SIZE - page_offset, buffer.len()); buffer.copy_from_slice(&page[page_offset..page_offset + to_copy]); @@ -621,8 +624,9 @@ fn read_data_from_shm_phys_addrs( if copied >= buffer.len() { break; } - let ptr = NormalWorldConstPtr::<[u8; PAGE_SIZE]>::from_usize(*phys_addr); - let page = unsafe { ptr.read_at_offset(0) }.ok_or(OpteeSmcReturn::EBadAddr)?; + let mut ptr = NormalWorldConstPtr::<[u8; PAGE_SIZE], PAGE_SIZE>::try_from_usize(*phys_addr) + .map_err(|_| OpteeSmcReturn::EBadAddr)?; + let page = unsafe { ptr.read_at_offset(0) }.map_err(|_| OpteeSmcReturn::EBadAddr)?; let to_copy = core::cmp::min(PAGE_SIZE, buffer.len() - copied); buffer[copied..copied + to_copy].copy_from_slice(&page[..to_copy]); copied += to_copy; @@ -635,16 +639,17 @@ fn read_data_from_shm_phys_addrs( /// Currently, this function writes data page by page (i.e., it does not map multiple physical /// pages at once). All physical addresses in `phys_addrs` are page-aligned except the first one. fn write_data_to_shm_phys_addrs(phys_addrs: &[usize], buffer: &[u8]) -> Result<(), OpteeSmcReturn> { - let ptr = NormalWorldMutPtr::<[u8; PAGE_SIZE]>::from_usize(page_align_down(phys_addrs[0])); - let mut page = unsafe { ptr.read_at_offset(0) } - .ok_or(OpteeSmcReturn::EBadAddr)? 
- .into_owned(); + let mut ptr = NormalWorldMutPtr::<[u8; PAGE_SIZE], PAGE_SIZE>::try_from_usize(page_align_down( + phys_addrs[0], + )) + .map_err(|_| OpteeSmcReturn::EBadAddr)?; + let mut page = unsafe { ptr.read_at_offset(0) }.map_err(|_| OpteeSmcReturn::EBadAddr)?; let page_offset = phys_addrs[0] - page_align_down(phys_addrs[0]); let to_copy = core::cmp::min(PAGE_SIZE - page_offset, buffer.len()); page[page_offset..page_offset + to_copy].copy_from_slice(&buffer[..to_copy]); unsafe { - ptr.write_at_offset(0, page) - .ok_or(OpteeSmcReturn::EBadAddr)?; + ptr.write_at_offset(0, *page) + .map_err(|_| OpteeSmcReturn::EBadAddr)?; } let mut written = to_copy; @@ -652,13 +657,14 @@ fn write_data_to_shm_phys_addrs(phys_addrs: &[usize], buffer: &[u8]) -> Result<( if written >= buffer.len() { break; } - let ptr = NormalWorldMutPtr::<[u8; PAGE_SIZE]>::from_usize(*phys_addr); + let mut ptr = NormalWorldMutPtr::<[u8; PAGE_SIZE], PAGE_SIZE>::try_from_usize(*phys_addr) + .map_err(|_| OpteeSmcReturn::EBadAddr)?; let mut page = [0u8; PAGE_SIZE]; let to_copy = core::cmp::min(PAGE_SIZE, buffer.len() - written); page[..to_copy].copy_from_slice(&buffer[written..written + to_copy]); unsafe { ptr.write_at_offset(0, page) - .ok_or(OpteeSmcReturn::EBadAddr)?; + .map_err(|_| OpteeSmcReturn::EBadAddr)?; } written += to_copy; } From dcd3023c6613ea86c4ad49a8072c18801f90f6c5 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Tue, 23 Dec 2025 05:44:36 +0000 Subject: [PATCH 42/52] revision with new APIs --- litebox_runner_lvbs/src/lib.rs | 17 +++--- litebox_shim_optee/src/msg_handler.rs | 77 ++++++++++----------------- 2 files changed, 37 insertions(+), 57 deletions(-) diff --git a/litebox_runner_lvbs/src/lib.rs b/litebox_runner_lvbs/src/lib.rs index 99e3815c2..461e4fcf3 100644 --- a/litebox_runner_lvbs/src/lib.rs +++ b/litebox_runner_lvbs/src/lib.rs @@ -1,6 +1,7 @@ #![no_std] use core::panic::PanicInfo; +use litebox::mm::linux::PAGE_SIZE; use litebox_platform_lvbs::{ arch::{gdt, get_core_id, instrs::hlt_loop, interrupts}, debug_serial_println, @@ -10,7 +11,7 @@ use litebox_platform_lvbs::{ hvcall, vtl_switch::vtl_switch_loop_entry, vtl1_mem_layout::{ - PAGE_SIZE, VTL1_INIT_HEAP_SIZE, VTL1_INIT_HEAP_START_PAGE, VTL1_PML4E_PAGE, + VTL1_INIT_HEAP_SIZE, VTL1_INIT_HEAP_START_PAGE, VTL1_PML4E_PAGE, VTL1_PRE_POPULATED_MEMORY_SIZE, get_heap_start_address, }, }, @@ -103,7 +104,6 @@ pub fn run(platform: Option<&'static Platform>) -> ! { // This will be revised once the upcall interface is finalized. // NOTE: This function doesn't work because `run_thread` is not ready. // It is okay to remove this function in this PR and add it in a follow-up PR. 
-use litebox::platform::{RawConstPointer, RawMutPointer}; use litebox_common_optee::{ LdelfArg, OpteeMessageCommand, OpteeMsgArg, OpteeSmcArgs, OpteeSmcReturn, TeeIdentity, TeeLogin, TeeUuid, UteeEntryFunc, UteeParamOwned, UteeParams, @@ -116,10 +116,9 @@ use litebox_shim_optee::msg_handler::{ use litebox_shim_optee::ptr::{NormalWorldConstPtr, NormalWorldMutPtr}; #[expect(dead_code)] fn optee_msg_handler_upcall(smc_args_addr: usize) -> Result { - let smc_args_ptr = NormalWorldConstPtr::::from_usize(smc_args_addr); - let mut smc_args = unsafe { smc_args_ptr.read_at_offset(0) } - .unwrap() - .into_owned(); + let mut smc_args_ptr = + NormalWorldConstPtr::::try_from_usize(smc_args_addr)?; + let mut smc_args = unsafe { smc_args_ptr.read_at_offset(0) }?; let msg_arg_phys_addr = smc_args.optee_msg_arg_phys_addr()?; let (res, msg_arg) = handle_optee_smc_args(&mut smc_args)?; if let Some(mut msg_arg) = msg_arg { @@ -201,10 +200,10 @@ fn optee_msg_handler_upcall(smc_args_addr: usize) -> Result::from_usize( + let mut ptr = NormalWorldMutPtr::::try_from_usize( usize::try_from(msg_arg_phys_addr).unwrap(), - ); - let _ = unsafe { ptr.write_at_offset(0, msg_arg) }; + )?; + unsafe { ptr.write_at_offset(0, msg_arg) }?; } else { // retrieve `ta_info` from global data structure todo!() diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index 5420a7672..3ba1208ab 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -604,69 +604,50 @@ fn get_shm_phys_addrs_from_optee_msg_param_rmem( /// Read data from the normal world shared memory pages whose physical addresses are given in /// `phys_addrs` into `buffer`. The size of `buffer` indicates how many bytes to read. -/// Currently, this function reads data page by page (i.e., it does not map multiple physical -/// pages at once). All physical addresses in `phys_addrs` are page-aligned except the first one. +/// All physical addresses in `phys_addrs` are page-aligned except the first one. 
fn read_data_from_shm_phys_addrs( phys_addrs: &[usize], buffer: &mut [u8], ) -> Result<(), OpteeSmcReturn> { - let mut ptr = NormalWorldConstPtr::<[u8; PAGE_SIZE], PAGE_SIZE>::try_from_usize( - page_align_down(phys_addrs[0]), - ) - .map_err(|_| OpteeSmcReturn::EBadAddr)?; - let page = unsafe { ptr.read_at_offset(0) }.map_err(|_| OpteeSmcReturn::EBadAddr)?; - let page_offset = phys_addrs[0] - page_align_down(phys_addrs[0]); - let to_copy = core::cmp::min(PAGE_SIZE - page_offset, buffer.len()); - buffer.copy_from_slice(&page[page_offset..page_offset + to_copy]); - let mut copied = to_copy; - - for phys_addr in phys_addrs.iter().skip(1) { - if copied >= buffer.len() { - break; - } - let mut ptr = NormalWorldConstPtr::<[u8; PAGE_SIZE], PAGE_SIZE>::try_from_usize(*phys_addr) - .map_err(|_| OpteeSmcReturn::EBadAddr)?; - let page = unsafe { ptr.read_at_offset(0) }.map_err(|_| OpteeSmcReturn::EBadAddr)?; - let to_copy = core::cmp::min(PAGE_SIZE, buffer.len() - copied); - buffer[copied..copied + to_copy].copy_from_slice(&page[..to_copy]); - copied += to_copy; + let mut array: Vec = Vec::with_capacity(phys_addrs.len()); + array.push(page_align_down(phys_addrs[0])); + array.extend_from_slice(&phys_addrs[1..]); + let page_array = PhysPageArray::::try_from_slice(array.as_slice())?; + let mut ptr = NormalWorldConstPtr::::try_from_page_array( + page_array, + phys_addrs[0] - array[0], + )?; + unsafe { + ptr.read_slice_at_offset(0, buffer)?; } Ok(()) } /// Write data in `buffer` to the normal world shared memory pages whose physical addresses /// are given in `phys_addrs`. The size of `buffer` indicates how many bytes to write. -/// Currently, this function writes data page by page (i.e., it does not map multiple physical -/// pages at once). All physical addresses in `phys_addrs` are page-aligned except the first one. +/// All physical addresses in `phys_addrs` are page-aligned except the first one. 
fn write_data_to_shm_phys_addrs(phys_addrs: &[usize], buffer: &[u8]) -> Result<(), OpteeSmcReturn> { - let mut ptr = NormalWorldMutPtr::<[u8; PAGE_SIZE], PAGE_SIZE>::try_from_usize(page_align_down( - phys_addrs[0], - )) + let mut array: Vec = Vec::with_capacity(phys_addrs.len()); + array.push(page_align_down(phys_addrs[0])); + array.extend_from_slice(&phys_addrs[1..]); + let page_array = PhysPageArray::::try_from_slice(array.as_slice())?; + let mut ptr = NormalWorldMutPtr::::try_from_page_array( + page_array, + phys_addrs[0] - array[0], + ) .map_err(|_| OpteeSmcReturn::EBadAddr)?; - let mut page = unsafe { ptr.read_at_offset(0) }.map_err(|_| OpteeSmcReturn::EBadAddr)?; - let page_offset = phys_addrs[0] - page_align_down(phys_addrs[0]); - let to_copy = core::cmp::min(PAGE_SIZE - page_offset, buffer.len()); - page[page_offset..page_offset + to_copy].copy_from_slice(&buffer[..to_copy]); unsafe { - ptr.write_at_offset(0, *page) - .map_err(|_| OpteeSmcReturn::EBadAddr)?; + ptr.write_slice_at_offset(0, buffer)?; } - let mut written = to_copy; + Ok(()) +} - for phys_addr in phys_addrs.iter().skip(1) { - if written >= buffer.len() { - break; - } - let mut ptr = NormalWorldMutPtr::<[u8; PAGE_SIZE], PAGE_SIZE>::try_from_usize(*phys_addr) - .map_err(|_| OpteeSmcReturn::EBadAddr)?; - let mut page = [0u8; PAGE_SIZE]; - let to_copy = core::cmp::min(PAGE_SIZE, buffer.len() - written); - page[..to_copy].copy_from_slice(&buffer[written..written + to_copy]); - unsafe { - ptr.write_at_offset(0, page) - .map_err(|_| OpteeSmcReturn::EBadAddr)?; +impl From for OpteeSmcReturn { + fn from(err: PhysPointerError) -> Self { + match err { + PhysPointerError::AlreadyMapped(_) => OpteeSmcReturn::EBusy, + PhysPointerError::NoMappingInfo => OpteeSmcReturn::ENomem, + _ => OpteeSmcReturn::EBadAddr, } - written += to_copy; } - Ok(()) } From 5a9fca570ad553abc71f86e67307345d692884f8 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Fri, 26 Dec 2025 23:39:22 +0000 Subject: [PATCH 43/52] rebase --- litebox_common_optee/src/lib.rs | 12 +++++++- litebox_runner_lvbs/src/lib.rs | 2 +- litebox_shim_optee/src/msg_handler.rs | 43 +++++++++++++-------------- 3 files changed, 33 insertions(+), 24 deletions(-) diff --git a/litebox_common_optee/src/lib.rs b/litebox_common_optee/src/lib.rs index 0eb76d44d..08a8247fe 100644 --- a/litebox_common_optee/src/lib.rs +++ b/litebox_common_optee/src/lib.rs @@ -6,7 +6,7 @@ extern crate alloc; use alloc::boxed::Box; -use litebox::platform::RawConstPointer as _; +use litebox::platform::{RawConstPointer as _, vmap::PhysPointerError}; use litebox_common_linux::{PtRegs, errno::Errno}; use modular_bitfield::prelude::*; use modular_bitfield::specifiers::{B4, B8, B48, B54}; @@ -1593,3 +1593,13 @@ pub enum OpteeSmcReturn { ENotAvail = OPTEE_SMC_RETURN_ENOTAVAIL, UnknownFunction = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION, } + +impl From for OpteeSmcReturn { + fn from(err: PhysPointerError) -> Self { + match err { + PhysPointerError::AlreadyMapped(_) => OpteeSmcReturn::EBusy, + PhysPointerError::NoMappingInfo => OpteeSmcReturn::ENomem, + _ => OpteeSmcReturn::EBadAddr, + } + } +} diff --git a/litebox_runner_lvbs/src/lib.rs b/litebox_runner_lvbs/src/lib.rs index 461e4fcf3..077a86465 100644 --- a/litebox_runner_lvbs/src/lib.rs +++ b/litebox_runner_lvbs/src/lib.rs @@ -113,7 +113,7 @@ use litebox_shim_optee::msg_handler::{ decode_ta_request, handle_optee_msg_arg, handle_optee_smc_args, prepare_for_return_to_normal_world, }; -use litebox_shim_optee::ptr::{NormalWorldConstPtr, NormalWorldMutPtr}; +use 
litebox_shim_optee::{NormalWorldConstPtr, NormalWorldMutPtr}; #[expect(dead_code)] fn optee_msg_handler_upcall(smc_args_addr: usize) -> Result { let mut smc_args_ptr = diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index 3ba1208ab..09af5abc0 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -15,7 +15,7 @@ //! world physical addresses to exchange a large amount of data. Also, like the OP-TEE //! SMC call, a certain OP-TEE message/command does not involve with any TA (e.g., register //! shared memory). -use crate::NormalWorldConstPtr; +use crate::{NormalWorldConstPtr, NormalWorldMutPtr}; use alloc::{boxed::Box, vec::Vec}; use hashbrown::HashMap; use litebox::mm::linux::PAGE_SIZE; @@ -604,18 +604,15 @@ fn get_shm_phys_addrs_from_optee_msg_param_rmem( /// Read data from the normal world shared memory pages whose physical addresses are given in /// `phys_addrs` into `buffer`. The size of `buffer` indicates how many bytes to read. -/// All physical addresses in `phys_addrs` are page-aligned except the first one. +/// All physical addresses in `phys_addrs` are page aligned except the first one. fn read_data_from_shm_phys_addrs( phys_addrs: &[usize], buffer: &mut [u8], ) -> Result<(), OpteeSmcReturn> { - let mut array: Vec = Vec::with_capacity(phys_addrs.len()); - array.push(page_align_down(phys_addrs[0])); - array.extend_from_slice(&phys_addrs[1..]); - let page_array = PhysPageArray::::try_from_slice(array.as_slice())?; + let phys_page_addrs = phys_addrs_to_page_addrs::(phys_addrs)?; let mut ptr = NormalWorldConstPtr::::try_from_page_array( - page_array, - phys_addrs[0] - array[0], + &phys_page_addrs, + phys_addrs[0] - phys_page_addrs[0].as_usize(), )?; unsafe { ptr.read_slice_at_offset(0, buffer)?; @@ -625,15 +622,12 @@ fn read_data_from_shm_phys_addrs( /// Write data in `buffer` to the normal world shared memory pages whose physical addresses /// are given in `phys_addrs`. The size of `buffer` indicates how many bytes to write. -/// All physical addresses in `phys_addrs` are page-aligned except the first one. +/// All physical addresses in `phys_addrs` are page aligned except the first one. fn write_data_to_shm_phys_addrs(phys_addrs: &[usize], buffer: &[u8]) -> Result<(), OpteeSmcReturn> { - let mut array: Vec = Vec::with_capacity(phys_addrs.len()); - array.push(page_align_down(phys_addrs[0])); - array.extend_from_slice(&phys_addrs[1..]); - let page_array = PhysPageArray::::try_from_slice(array.as_slice())?; + let phys_page_addrs = phys_addrs_to_page_addrs::(phys_addrs)?; let mut ptr = NormalWorldMutPtr::::try_from_page_array( - page_array, - phys_addrs[0] - array[0], + &phys_page_addrs, + phys_addrs[0] - phys_page_addrs[0].as_usize(), ) .map_err(|_| OpteeSmcReturn::EBadAddr)?; unsafe { @@ -642,12 +636,17 @@ fn write_data_to_shm_phys_addrs(phys_addrs: &[usize], buffer: &[u8]) -> Result<( Ok(()) } -impl From for OpteeSmcReturn { - fn from(err: PhysPointerError) -> Self { - match err { - PhysPointerError::AlreadyMapped(_) => OpteeSmcReturn::EBusy, - PhysPointerError::NoMappingInfo => OpteeSmcReturn::ENomem, - _ => OpteeSmcReturn::EBadAddr, - } +#[inline] +fn phys_addrs_to_page_addrs( + phys_addrs: &[usize], +) -> Result>, OpteeSmcReturn> { + let mut page_addrs: Vec> = Vec::with_capacity(phys_addrs.len()); + page_addrs.push( + PhysPageAddr::::new(page_align_down(phys_addrs[0])) + .ok_or(OpteeSmcReturn::EBadAddr)?, + ); + for addr in &phys_addrs[1..] 
{ + page_addrs.push(PhysPageAddr::::new(*addr).ok_or(OpteeSmcReturn::EBadAddr)?); } + Ok(page_addrs) } From f60ac6afdf07b56cd36113e0ff1694b8ea7a3d3e Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Mon, 29 Dec 2025 17:58:39 +0000 Subject: [PATCH 44/52] rebase --- litebox_runner_lvbs/src/lib.rs | 4 ++-- litebox_shim_optee/src/msg_handler.rs | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/litebox_runner_lvbs/src/lib.rs b/litebox_runner_lvbs/src/lib.rs index 077a86465..275a75eb8 100644 --- a/litebox_runner_lvbs/src/lib.rs +++ b/litebox_runner_lvbs/src/lib.rs @@ -117,7 +117,7 @@ use litebox_shim_optee::{NormalWorldConstPtr, NormalWorldMutPtr}; #[expect(dead_code)] fn optee_msg_handler_upcall(smc_args_addr: usize) -> Result { let mut smc_args_ptr = - NormalWorldConstPtr::::try_from_usize(smc_args_addr)?; + NormalWorldConstPtr::::with_usize(smc_args_addr)?; let mut smc_args = unsafe { smc_args_ptr.read_at_offset(0) }?; let msg_arg_phys_addr = smc_args.optee_msg_arg_phys_addr()?; let (res, msg_arg) = handle_optee_smc_args(&mut smc_args)?; @@ -200,7 +200,7 @@ fn optee_msg_handler_upcall(smc_args_addr: usize) -> Result::try_from_usize( + let mut ptr = NormalWorldMutPtr::::with_usize( usize::try_from(msg_arg_phys_addr).unwrap(), )?; unsafe { ptr.write_at_offset(0, msg_arg) }?; diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index 09af5abc0..1169474b0 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -610,7 +610,7 @@ fn read_data_from_shm_phys_addrs( buffer: &mut [u8], ) -> Result<(), OpteeSmcReturn> { let phys_page_addrs = phys_addrs_to_page_addrs::(phys_addrs)?; - let mut ptr = NormalWorldConstPtr::::try_from_page_array( + let mut ptr = NormalWorldConstPtr::::new( &phys_page_addrs, phys_addrs[0] - phys_page_addrs[0].as_usize(), )?; @@ -625,7 +625,7 @@ fn read_data_from_shm_phys_addrs( /// All physical addresses in `phys_addrs` are page aligned except the first one. fn write_data_to_shm_phys_addrs(phys_addrs: &[usize], buffer: &[u8]) -> Result<(), OpteeSmcReturn> { let phys_page_addrs = phys_addrs_to_page_addrs::(phys_addrs)?; - let mut ptr = NormalWorldMutPtr::::try_from_page_array( + let mut ptr = NormalWorldMutPtr::::new( &phys_page_addrs, phys_addrs[0] - phys_page_addrs[0].as_usize(), ) From 7f7bb824d63667439ab04bc3b9811c8aac8c2b87 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Mon, 29 Dec 2025 21:05:45 +0000 Subject: [PATCH 45/52] remove vsm_optee_smc from the LVBS platform --- litebox_platform_lvbs/src/mshv/mod.rs | 1 - .../src/mshv/vsm_optee_smc.rs | 30 ------------------- litebox_platform_lvbs/src/mshv/vtl_switch.rs | 17 +++++++++-- 3 files changed, 15 insertions(+), 33 deletions(-) delete mode 100644 litebox_platform_lvbs/src/mshv/vsm_optee_smc.rs diff --git a/litebox_platform_lvbs/src/mshv/mod.rs b/litebox_platform_lvbs/src/mshv/mod.rs index b95c15b4d..f89e85bc3 100644 --- a/litebox_platform_lvbs/src/mshv/mod.rs +++ b/litebox_platform_lvbs/src/mshv/mod.rs @@ -7,7 +7,6 @@ mod hvcall_vp; mod mem_integrity; pub(crate) mod vsm; mod vsm_intercept; -mod vsm_optee_smc; pub mod vtl1_mem_layout; pub mod vtl_switch; diff --git a/litebox_platform_lvbs/src/mshv/vsm_optee_smc.rs b/litebox_platform_lvbs/src/mshv/vsm_optee_smc.rs deleted file mode 100644 index 0ac94059b..000000000 --- a/litebox_platform_lvbs/src/mshv/vsm_optee_smc.rs +++ /dev/null @@ -1,30 +0,0 @@ -//! 
VSM OP-TEE SMC functions - -use crate::{ - debug_serial_println, host::per_cpu_variables::with_per_cpu_variables_mut, mshv::HV_VTL_SECURE, -}; -use litebox_common_linux::errno::Errno; -use litebox_common_optee::OpteeSmcArgs; -use x86_64::PhysAddr; - -pub(crate) fn optee_smc_dispatch(optee_smc_args_pfn: u64) -> i64 { - if let Ok(optee_smc_args_page_addr) = PhysAddr::try_new(optee_smc_args_pfn << 12) - && let Some(mut _optee_smc_args) = unsafe { - crate::platform_low().copy_from_vtl0_phys::(optee_smc_args_page_addr) - } - { - // Since we do not know whether an OP-TEE TA uses extended states, we conservatively - // save and restore extended states before and after running any OP-TEE TA. - with_per_cpu_variables_mut(|per_cpu_variables| { - per_cpu_variables.save_extended_states(HV_VTL_SECURE); - }); - // TODO: Implement OP-TEE SMC for TA command invocation here. - debug_serial_println!("VSM function call for OP-TEE message"); - with_per_cpu_variables_mut(|per_cpu_variables| { - per_cpu_variables.restore_extended_states(HV_VTL_SECURE); - }); - 0 - } else { - Errno::EINVAL.as_neg().into() - } -} diff --git a/litebox_platform_lvbs/src/mshv/vtl_switch.rs b/litebox_platform_lvbs/src/mshv/vtl_switch.rs index 2855160b4..718d36bce 100644 --- a/litebox_platform_lvbs/src/mshv/vtl_switch.rs +++ b/litebox_platform_lvbs/src/mshv/vtl_switch.rs @@ -9,7 +9,7 @@ use crate::{ HV_REGISTER_VSM_CODEPAGE_OFFSETS, HV_VTL_NORMAL, HV_VTL_SECURE, HvRegisterVsmCodePageOffsets, NUM_VTLCALL_PARAMS, VTL_ENTRY_REASON_INTERRUPT, VTL_ENTRY_REASON_LOWER_VTL_CALL, VsmFunction, hvcall_vp::hvcall_get_vp_registers, - vsm::vsm_dispatch, vsm_intercept::vsm_handle_intercept, vsm_optee_smc, + vsm::vsm_dispatch, vsm_intercept::vsm_handle_intercept, }, }; use core::arch::{asm, naked_asm}; @@ -313,7 +313,20 @@ fn vtlcall_dispatch(params: &[u64; NUM_VTLCALL_PARAMS]) -> i64 { .unwrap_or(VsmFunction::Unknown); match func_id { VsmFunction::Unknown => Errno::EINVAL.as_neg().into(), - VsmFunction::OpteeMessage => vsm_optee_smc::optee_smc_dispatch(params[1]), + VsmFunction::OpteeMessage => { + // Since we do not know whether an upcall handler uses extended states, we conservatively + // save and restore extended states before and after invoking the upcall handler. + with_per_cpu_variables_mut(|per_cpu_variables| { + per_cpu_variables.save_extended_states(HV_VTL_SECURE); + }); + + // TODO: invoke the OP-TEE upcall once it is merged. 
+ + with_per_cpu_variables_mut(|per_cpu_variables| { + per_cpu_variables.restore_extended_states(HV_VTL_SECURE); + }); + 0 + } _ => vsm_dispatch(func_id, ¶ms[1..]), } } From fe1171d57f448704f0d2a57955cc5dcbd4aa8b3a Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Mon, 29 Dec 2025 21:54:15 +0000 Subject: [PATCH 46/52] fix TeeUuid --- litebox_common_optee/src/lib.rs | 23 ++++++++++++++++------- litebox_shim_optee/src/msg_handler.rs | 2 +- 2 files changed, 17 insertions(+), 8 deletions(-) diff --git a/litebox_common_optee/src/lib.rs b/litebox_common_optee/src/lib.rs index 08a8247fe..b4251ff29 100644 --- a/litebox_common_optee/src/lib.rs +++ b/litebox_common_optee/src/lib.rs @@ -503,20 +503,29 @@ pub struct TeeUuid { pub clock_seq_and_node: [u8; 8], } impl TeeUuid { - pub fn new_from_u32s(data: [u32; 4]) -> Self { - let time_low = data[0]; - let time_mid = (data[1] >> 16) as u16; - let time_hi_and_version = (data[1] & 0xffff) as u16; + #[allow(clippy::missing_panics_doc)] + pub fn from_bytes(data: [u8; 16]) -> Self { + let time_low = u32::from_le_bytes(data[0..4].try_into().unwrap()); + let time_mid = u16::from_le_bytes(data[4..6].try_into().unwrap()); + let time_hi_and_version = u16::from_le_bytes(data[6..8].try_into().unwrap()); let mut clock_seq_and_node = [0u8; 8]; - clock_seq_and_node[0..4].copy_from_slice(&data[2].to_be_bytes()); - clock_seq_and_node[4..8].copy_from_slice(&data[3].to_be_bytes()); - TeeUuid { + clock_seq_and_node.copy_from_slice(&data[8..16]); + Self { time_low, time_mid, time_hi_and_version, clock_seq_and_node, } } + + pub fn with_u32_array(data: [u32; 4]) -> Self { + let mut buffer = [0u8; 16]; + buffer[0..4].copy_from_slice(&data[0].to_le_bytes()); + buffer[4..8].copy_from_slice(&data[1].to_le_bytes()); + buffer[8..12].copy_from_slice(&data[2].to_le_bytes()); + buffer[12..16].copy_from_slice(&data[3].to_le_bytes()); + Self::from_bytes(buffer) + } } /// `TEE_Identity` from `optee_os/lib/libutee/include/tee_api_types.h`. 
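
For reference, the conversion `from_bytes`/`with_u32_array` implements can be exercised in
isolation. The sketch below mirrors the decoding with a locally declared struct so it runs
standalone; the `Uuid` type and `uuid_from_u32_words` helper are illustrative stand-ins for
`TeeUuid` and its constructors, not part of this series. The point of the fix is visible here:
every field round-trips through a little-endian byte buffer, whereas the previous
`new_from_u32s` took `time_mid` from the high half of `data[1]` and wrote the node bytes
big-endian.

    // Editorial sketch (assumed names, not part of the patch): mirrors the
    // `TeeUuid::from_bytes` byte-order convention with a local struct.
    #[derive(Debug, PartialEq, Eq)]
    struct Uuid {
        time_low: u32,
        time_mid: u16,
        time_hi_and_version: u16,
        clock_seq_and_node: [u8; 8],
    }

    fn uuid_from_u32_words(words: [u32; 4]) -> Uuid {
        // Re-serialize the four 32-bit words into the 16-byte wire form first...
        let mut bytes = [0u8; 16];
        for (chunk, word) in bytes.chunks_exact_mut(4).zip(words) {
            chunk.copy_from_slice(&word.to_le_bytes());
        }
        // ...then decode the leading fields as little-endian integers and keep
        // the trailing eight bytes verbatim, as `from_bytes` does.
        let mut clock_seq_and_node = [0u8; 8];
        clock_seq_and_node.copy_from_slice(&bytes[8..16]);
        Uuid {
            time_low: u32::from_le_bytes(bytes[0..4].try_into().unwrap()),
            time_mid: u16::from_le_bytes(bytes[4..6].try_into().unwrap()),
            time_hi_and_version: u16::from_le_bytes(bytes[6..8].try_into().unwrap()),
            clock_seq_and_node,
        }
    }

Splitting the register-word packing from the field byte order keeps the two concerns separately
testable, which is useful when comparing against UUIDs printed by the normal world client.
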
diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index 1169474b0..e0a4761ec 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -242,7 +242,7 @@ pub fn decode_ta_request(msg_arg: &OpteeMsgArg) -> Result Date: Mon, 29 Dec 2025 23:42:43 +0000 Subject: [PATCH 47/52] explictly use page_offset --- litebox_common_optee/src/lib.rs | 2 +- litebox_shim_optee/src/msg_handler.rs | 270 ++++++++++++-------------- 2 files changed, 123 insertions(+), 149 deletions(-) diff --git a/litebox_common_optee/src/lib.rs b/litebox_common_optee/src/lib.rs index b4251ff29..c63d0990e 100644 --- a/litebox_common_optee/src/lib.rs +++ b/litebox_common_optee/src/lib.rs @@ -518,7 +518,7 @@ impl TeeUuid { } } - pub fn with_u32_array(data: [u32; 4]) -> Self { + pub fn from_u32_array(data: [u32; 4]) -> Self { let mut buffer = [0u8; 16]; buffer[0..4].copy_from_slice(&data[0].to_le_bytes()); buffer[4..8].copy_from_slice(&data[1].to_le_bytes()); diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index e0a4761ec..c1cf028e3 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -20,6 +20,7 @@ use alloc::{boxed::Box, vec::Vec}; use hashbrown::HashMap; use litebox::mm::linux::PAGE_SIZE; use litebox::platform::vmap::PhysPageAddr; +use litebox::utils::TruncateExt; use litebox_common_optee::{ OpteeMessageCommand, OpteeMsgArg, OpteeMsgAttrType, OpteeMsgParamRmem, OpteeMsgParamTmem, OpteeMsgParamValue, OpteeSecureWorldCapabilities, OpteeSmcArgs, OpteeSmcFunction, @@ -57,11 +58,6 @@ fn page_align_down_u64(address: u64) -> u64 { address & !(PAGE_SIZE as u64 - 1) } -#[inline] -fn page_align_down(address: usize) -> usize { - address & !(PAGE_SIZE - 1) -} - #[inline] fn page_align_up_u64(len: u64) -> u64 { len.next_multiple_of(PAGE_SIZE as u64) @@ -214,8 +210,9 @@ pub fn handle_optee_msg_arg(msg_arg: &OpteeMsgArg) -> Result<(), OpteeSmcReturn> } /// TA request information extracted from an OP-TEE message. +/// /// In addition to standard TA information (i.e., TA UUID, session ID, command ID, -/// and parameters), it contains shared memory addresses (`out_phys_addrs`) to +/// and parameters), it contains shared memory addresses (`out_phys_addrs` and `page_offsets`) to /// write back output data to the normal world once the TA execution is done. pub struct TaRequestInfo { pub uuid: Option, @@ -223,15 +220,17 @@ pub struct TaRequestInfo { pub entry_func: UteeEntryFunc, pub cmd_id: u32, pub params: [UteeParamOwned; UteeParamOwned::TEE_NUM_PARAMS], - pub out_phys_addrs: [Option>; UteeParamOwned::TEE_NUM_PARAMS], + pub out_phys_addrs: [Option]>>; UteeParamOwned::TEE_NUM_PARAMS], + pub page_offsets: [Option; UteeParamOwned::TEE_NUM_PARAMS], } /// This function decodes a TA request contained in `OpteeMsgArg`. -/// Currently, this function copies the entire parameter data from the normal world -/// shared memory into LiteBox's memory to create `UteeParamOwned` structures. -/// Clearly, this approach is inefficient and we need to revise it to avoid unnecessary -/// data copies (while maintaining the security). +/// +/// It copies the entire parameter data from the normal world shared memory into LiteBox's memory +/// to create `UteeParamOwned` structures to avoid potential data corruption during TA execution. +/// /// # Panics +/// /// Panics if any conversion from `u64` to `usize` fails. OP-TEE shim doesn't support a 32-bit environment. 
pub fn decode_ta_request(msg_arg: &OpteeMsgArg) -> Result { let ta_entry_func: UteeEntryFunc = msg_arg.cmd.try_into()?; @@ -242,7 +241,7 @@ pub fn decode_ta_request(msg_arg: &OpteeMsgArg) -> Result Result Result { - if let (Ok(phys_addrs), data_size) = { - match param.attr_type() { - OpteeMsgAttrType::TmemInput => { - let tmem = param.get_param_tmem().ok_or(OpteeSmcReturn::EBadCmd)?; - ( - get_shm_phys_addrs_from_optee_msg_param_tmem(tmem), - usize::try_from(tmem.size).unwrap(), - ) - } - OpteeMsgAttrType::RmemInput => { - let rmem = param.get_param_rmem().ok_or(OpteeSmcReturn::EBadCmd)?; - ( - get_shm_phys_addrs_from_optee_msg_param_rmem(rmem), - usize::try_from(rmem.size).unwrap(), - ) - } - _ => unreachable!(), + let (phys_addrs, page_offset, data_size) = match param.attr_type() { + OpteeMsgAttrType::TmemInput => { + let tmem = param.get_param_tmem().ok_or(OpteeSmcReturn::EBadCmd)?; + let shm_phys_addrs = get_shm_phys_addrs_from_optee_msg_param_tmem(tmem)?; + ( + shm_phys_addrs.0, + shm_phys_addrs.1, + usize::try_from(tmem.size).unwrap(), + ) } - } { - let mut data = alloc::vec![0u8; data_size]; - read_data_from_shm_phys_addrs(&phys_addrs, &mut data)?; - UteeParamOwned::MemrefInput { data: data.into() } - } else { - UteeParamOwned::None - } + OpteeMsgAttrType::RmemInput => { + let rmem = param.get_param_rmem().ok_or(OpteeSmcReturn::EBadCmd)?; + let shm_phys_addrs = get_shm_phys_addrs_from_optee_msg_param_rmem(rmem)?; + ( + shm_phys_addrs.0, + shm_phys_addrs.1, + usize::try_from(rmem.size).unwrap(), + ) + } + _ => unreachable!(), + }; + let mut data = alloc::vec![0u8; data_size]; + read_data_from_shm_phys_addrs(&phys_addrs, page_offset, &mut data)?; + UteeParamOwned::MemrefInput { data: data.into() } } OpteeMsgAttrType::TmemOutput | OpteeMsgAttrType::RmemOutput => { - if let (Ok(phys_addrs), buffer_size) = { - match param.attr_type() { - OpteeMsgAttrType::TmemOutput => { - let tmem = param.get_param_tmem().ok_or(OpteeSmcReturn::EBadCmd)?; - ( - get_shm_phys_addrs_from_optee_msg_param_tmem(tmem), - usize::try_from(tmem.size).unwrap(), - ) - } - OpteeMsgAttrType::RmemOutput => { - let rmem = param.get_param_rmem().ok_or(OpteeSmcReturn::EBadCmd)?; - ( - get_shm_phys_addrs_from_optee_msg_param_rmem(rmem), - usize::try_from(rmem.size).unwrap(), - ) - } - _ => unreachable!(), + let (phys_addrs, page_offset, buffer_size) = match param.attr_type() { + OpteeMsgAttrType::TmemOutput => { + let tmem = param.get_param_tmem().ok_or(OpteeSmcReturn::EBadCmd)?; + let shm_phys_addrs = get_shm_phys_addrs_from_optee_msg_param_tmem(tmem)?; + ( + shm_phys_addrs.0, + shm_phys_addrs.1, + usize::try_from(tmem.size).unwrap(), + ) } - } { - ta_req_info.out_phys_addrs[i] = Some(phys_addrs); - UteeParamOwned::MemrefOutput { buffer_size } - } else { - UteeParamOwned::None - } + OpteeMsgAttrType::RmemOutput => { + let rmem = param.get_param_rmem().ok_or(OpteeSmcReturn::EBadCmd)?; + let shm_phys_addrs = get_shm_phys_addrs_from_optee_msg_param_rmem(rmem)?; + ( + shm_phys_addrs.0, + shm_phys_addrs.1, + usize::try_from(rmem.size).unwrap(), + ) + } + _ => unreachable!(), + }; + ta_req_info.out_phys_addrs[i] = Some(phys_addrs.into_boxed_slice()); + ta_req_info.page_offsets[i] = Some(page_offset); + UteeParamOwned::MemrefOutput { buffer_size } } OpteeMsgAttrType::TmemInout | OpteeMsgAttrType::RmemInout => { - if let (Ok(phys_addrs), buffer_size) = { - match param.attr_type() { - OpteeMsgAttrType::TmemInout => { - let tmem = param.get_param_tmem().ok_or(OpteeSmcReturn::EBadCmd)?; - ( - 
get_shm_phys_addrs_from_optee_msg_param_tmem(tmem), - usize::try_from(tmem.size).unwrap(), - ) - } - OpteeMsgAttrType::RmemInout => { - let rmem = param.get_param_rmem().ok_or(OpteeSmcReturn::EBadCmd)?; - ( - get_shm_phys_addrs_from_optee_msg_param_rmem(rmem), - usize::try_from(rmem.size).unwrap(), - ) - } - _ => unreachable!(), + let (phys_addrs, page_offset, buffer_size) = match param.attr_type() { + OpteeMsgAttrType::TmemInout => { + let tmem = param.get_param_tmem().ok_or(OpteeSmcReturn::EBadCmd)?; + let shm_phys_addrs = get_shm_phys_addrs_from_optee_msg_param_tmem(tmem)?; + ( + shm_phys_addrs.0, + shm_phys_addrs.1, + usize::try_from(tmem.size).unwrap(), + ) } - } { - let mut buffer = alloc::vec![0u8; buffer_size]; - read_data_from_shm_phys_addrs(&phys_addrs, &mut buffer)?; - ta_req_info.out_phys_addrs[i] = Some(phys_addrs); - UteeParamOwned::MemrefInout { - data: buffer.into(), - buffer_size, + OpteeMsgAttrType::RmemInout => { + let rmem = param.get_param_rmem().ok_or(OpteeSmcReturn::EBadCmd)?; + let shm_phys_addrs = get_shm_phys_addrs_from_optee_msg_param_rmem(rmem)?; + ( + shm_phys_addrs.0, + shm_phys_addrs.1, + usize::try_from(rmem.size).unwrap(), + ) } - } else { - UteeParamOwned::None + _ => unreachable!(), + }; + let mut buffer = alloc::vec![0u8; buffer_size]; + read_data_from_shm_phys_addrs(&phys_addrs, page_offset, &mut buffer)?; + ta_req_info.out_phys_addrs[i] = Some(phys_addrs.into_boxed_slice()); + ta_req_info.page_offsets[i] = Some(page_offset); + UteeParamOwned::MemrefInout { + data: buffer.into(), + buffer_size, } } _ => return Err(OpteeSmcReturn::EBadCmd), @@ -373,8 +372,9 @@ pub fn decode_ta_request(msg_arg: &OpteeMsgArg) -> Result { pub pages: Box<[PhysPageAddr]>, @@ -485,7 +488,6 @@ impl ShmRefMap { guard.remove(&shm_ref) } - #[expect(unused)] pub fn get(&self, shm_ref: u64) -> Option> { let guard = self.inner.lock(); guard.get(&shm_ref).cloned() @@ -548,14 +550,15 @@ fn shm_ref_map() -> &'static ShmRefMap { SHM_REF_MAP.get_or_init(|| Box::new(ShmRefMap::new())) } -/// Get the normal world physical addresses of OP-TEE shared memory from `OpteeMsgParamTmem`. +/// Get the normal world physical addresses and page offset of OP-TEE shared memory from `OpteeMsgParamTmem`. +/// /// Note that we use this function for handing TA requests and in this context /// `OpteeMsgParamTmem` and `OpteeMsgParamRmem` are equivalent because every shared memory /// reference accessible by TAs must be registered in advance. /// `OpteeMsgParamTmem` is matter for the registration of shared memory regions. fn get_shm_phys_addrs_from_optee_msg_param_tmem( tmem: OpteeMsgParamTmem, -) -> Result, OpteeSmcReturn> { +) -> Result<(Vec>, usize), OpteeSmcReturn> { let rmem = OpteeMsgParamRmem { offs: tmem.buf_ptr, size: tmem.size, @@ -564,89 +567,60 @@ fn get_shm_phys_addrs_from_optee_msg_param_tmem( get_shm_phys_addrs_from_optee_msg_param_rmem(rmem) } -/// Get a list of the normal world physical addresses of OP-TEE shared memory from `OpteeMsgParamRmem`. +/// Get a list of the normal world physical addresses and page offset of OP-TEE shared memory from `OpteeMsgParamRmem`. +/// /// `rmem.offs` must be an offset within the shared memory region registered with `rmem.shm_ref` before /// and `rmem.offs + rmem.size` must not exceed the size of the registered shared memory region. -/// All addresses this function returns are page-aligned except the first one. These addresses are -/// virtually contiguous within the normal world, but not necessarily physically contiguous. 
+/// All addresses this function returns are page aligned and virtually contiguous within the normal world but
+/// not necessarily physically contiguous.
 fn get_shm_phys_addrs_from_optee_msg_param_rmem(
     rmem: OpteeMsgParamRmem,
-) -> Result, OpteeSmcReturn> {
+) -> Result<(Vec>, usize), OpteeSmcReturn> {
     let Some(shm_ref_info) = shm_ref_map().get(rmem.shm_ref) else {
         return Err(OpteeSmcReturn::ENotAvail);
     };
-    let start = shm_ref_info
-        .page_offset
-        .checked_add(rmem.offs)
+    let page_offset = shm_ref_info.page_offset;
+    let start = page_offset
+        .checked_add(usize::try_from(rmem.offs).unwrap())
         .ok_or(OpteeSmcReturn::EBadAddr)?;
     let end = start
-        .checked_add(rmem.size)
+        .checked_add(usize::try_from(rmem.size).unwrap())
         .ok_or(OpteeSmcReturn::EBadAddr)?;
-    let start_page_index = usize::try_from(page_align_down_u64(start)).unwrap() / PAGE_SIZE;
-    let end_page_index = usize::try_from(page_align_up_u64(end)).unwrap() / PAGE_SIZE;
+    let start_page_index = start / PAGE_SIZE;
+    let end_page_index = end.div_ceil(PAGE_SIZE);
     if start_page_index >= shm_ref_info.pages.len() || end_page_index > shm_ref_info.pages.len() {
         return Err(OpteeSmcReturn::EBadAddr);
     }
     let mut pages = Vec::with_capacity(end_page_index - start_page_index);
-    let page_offset = start - page_align_down_u64(start);
-    pages[0] = usize::try_from(shm_ref_info.pages[start_page_index] + page_offset).unwrap();
-    for (i, page) in shm_ref_info
-        .pages
-        .iter()
-        .take(end_page_index)
-        .skip(start_page_index)
-        .enumerate()
-    {
-        pages[i] = usize::try_from(*page).unwrap();
-    }
-    Ok(pages.into_boxed_slice())
+    pages.extend_from_slice(&shm_ref_info.pages[start_page_index..end_page_index]);
+    Ok((pages, page_offset))
 }
 
 /// Read data from the normal world shared memory pages whose physical addresses are given in
-/// `phys_addrs` into `buffer`. The size of `buffer` indicates how many bytes to read.
-/// All physical addresses in `phys_addrs` are page aligned except the first one.
-fn read_data_from_shm_phys_addrs(
-    phys_addrs: &[usize],
+/// `phys_addrs` and `page_offset` into `buffer`. The size of `buffer` indicates how many bytes to read.
+fn read_data_from_shm_phys_addrs(
+    phys_addrs: &[PhysPageAddr],
+    page_offset: usize,
     buffer: &mut [u8],
 ) -> Result<(), OpteeSmcReturn> {
-    let phys_page_addrs = phys_addrs_to_page_addrs::(phys_addrs)?;
-    let mut ptr = NormalWorldConstPtr::::new(
-        &phys_page_addrs,
-        phys_addrs[0] - phys_page_addrs[0].as_usize(),
-    )?;
+    let mut ptr = NormalWorldConstPtr::::new(phys_addrs, page_offset)?;
     unsafe {
         ptr.read_slice_at_offset(0, buffer)?;
     }
     Ok(())
 }
 
-/// Write data in `buffer` to the normal world shared memory pages whose physical addresses
-/// are given in `phys_addrs`. The size of `buffer` indicates how many bytes to write.
-/// All physical addresses in `phys_addrs` are page aligned except the first one.
-fn write_data_to_shm_phys_addrs(phys_addrs: &[usize], buffer: &[u8]) -> Result<(), OpteeSmcReturn> {
-    let phys_page_addrs = phys_addrs_to_page_addrs::(phys_addrs)?;
-    let mut ptr = NormalWorldMutPtr::::new(
-        &phys_page_addrs,
-        phys_addrs[0] - phys_page_addrs[0].as_usize(),
-    )
-    .map_err(|_| OpteeSmcReturn::EBadAddr)?;
+/// Write data in `buffer` to the normal world shared memory pages whose physical addresses are given
+/// in `phys_addrs` and `page_offset`. The size of `buffer` indicates how many bytes to write.
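
The read helper above and the write helper below share one convention: every entry of `phys_addrs` is page aligned, and `page_offset` locates the first byte within the first page. A self-contained sketch of that traversal, assuming 4 KiB pages and plain byte arrays in place of the mapped normal world pages (an illustration only, not the NormalWorldConstPtr/NormalWorldMutPtr implementation):

    const PAGE_SIZE: usize = 4096; // assumed page size for this example

    /// Copy `buffer.len()` bytes out of `pages`, starting `page_offset` bytes
    /// into the first page; only the first page is entered mid-page.
    fn read_across_pages(pages: &[[u8; PAGE_SIZE]], page_offset: usize, buffer: &mut [u8]) {
        let mut copied = 0;
        let mut offset = page_offset;
        for page in pages {
            if copied == buffer.len() {
                break;
            }
            let n = (buffer.len() - copied).min(PAGE_SIZE - offset);
            buffer[copied..copied + n].copy_from_slice(&page[offset..offset + n]);
            copied += n;
            offset = 0; // every subsequent page is read from its start
        }
    }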
+fn write_data_to_shm_phys_addrs( + phys_addrs: &[PhysPageAddr], + page_offset: usize, + buffer: &[u8], +) -> Result<(), OpteeSmcReturn> { + let mut ptr = NormalWorldMutPtr::::new(phys_addrs, page_offset) + .map_err(|_| OpteeSmcReturn::EBadAddr)?; unsafe { ptr.write_slice_at_offset(0, buffer)?; } Ok(()) } - -#[inline] -fn phys_addrs_to_page_addrs( - phys_addrs: &[usize], -) -> Result>, OpteeSmcReturn> { - let mut page_addrs: Vec> = Vec::with_capacity(phys_addrs.len()); - page_addrs.push( - PhysPageAddr::::new(page_align_down(phys_addrs[0])) - .ok_or(OpteeSmcReturn::EBadAddr)?, - ); - for addr in &phys_addrs[1..] { - page_addrs.push(PhysPageAddr::::new(*addr).ok_or(OpteeSmcReturn::EBadAddr)?); - } - Ok(page_addrs) -} From 5b4fcf9f8d668e09e4cf741578da9b4a6ab774d2 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Tue, 30 Dec 2025 01:04:00 +0000 Subject: [PATCH 48/52] xsave --- .../src/host/per_cpu_variables.rs | 12 ++++++++++-- litebox_platform_lvbs/src/mshv/vtl_switch.rs | 10 ---------- litebox_runner_lvbs/src/lib.rs | 18 ++++++++++++++++-- 3 files changed, 26 insertions(+), 14 deletions(-) diff --git a/litebox_platform_lvbs/src/host/per_cpu_variables.rs b/litebox_platform_lvbs/src/host/per_cpu_variables.rs index 1c2fa28c5..bc695639f 100644 --- a/litebox_platform_lvbs/src/host/per_cpu_variables.rs +++ b/litebox_platform_lvbs/src/host/per_cpu_variables.rs @@ -110,7 +110,11 @@ impl PerCpuVariables { } /// Save the extended states of each core (VTL0 or VTL1). - pub(crate) fn save_extended_states(&self, vtl: u8) { + /// + /// # Panics + /// + /// Panics if XSAVE areas are not allocated or if an invalid VTL value is provided. + pub fn save_extended_states(&self, vtl: u8) { if self.vtl0_xsave_area_addr.is_null() || self.vtl1_xsave_area_addr.is_null() { panic!("XSAVE areas are not allocated"); } else { @@ -132,7 +136,11 @@ impl PerCpuVariables { } /// Restore the extended states of each core (VTL0 or VTL1). - pub(crate) fn restore_extended_states(&self, vtl: u8) { + /// + /// # Panics + /// + /// Panics if XSAVE areas are not allocated or if an invalid VTL value is provided. + pub fn restore_extended_states(&self, vtl: u8) { if self.vtl0_xsave_area_addr.is_null() || self.vtl1_xsave_area_addr.is_null() { panic!("XSAVE areas are not allocated"); } else { diff --git a/litebox_platform_lvbs/src/mshv/vtl_switch.rs b/litebox_platform_lvbs/src/mshv/vtl_switch.rs index 718d36bce..dd6aad7d6 100644 --- a/litebox_platform_lvbs/src/mshv/vtl_switch.rs +++ b/litebox_platform_lvbs/src/mshv/vtl_switch.rs @@ -314,17 +314,7 @@ fn vtlcall_dispatch(params: &[u64; NUM_VTLCALL_PARAMS]) -> i64 { match func_id { VsmFunction::Unknown => Errno::EINVAL.as_neg().into(), VsmFunction::OpteeMessage => { - // Since we do not know whether an upcall handler uses extended states, we conservatively - // save and restore extended states before and after invoking the upcall handler. - with_per_cpu_variables_mut(|per_cpu_variables| { - per_cpu_variables.save_extended_states(HV_VTL_SECURE); - }); - // TODO: invoke the OP-TEE upcall once it is merged. 
- - with_per_cpu_variables_mut(|per_cpu_variables| { - per_cpu_variables.restore_extended_states(HV_VTL_SECURE); - }); 0 } _ => vsm_dispatch(func_id, ¶ms[1..]), diff --git a/litebox_runner_lvbs/src/lib.rs b/litebox_runner_lvbs/src/lib.rs index 275a75eb8..4830345e0 100644 --- a/litebox_runner_lvbs/src/lib.rs +++ b/litebox_runner_lvbs/src/lib.rs @@ -5,10 +5,13 @@ use litebox::mm::linux::PAGE_SIZE; use litebox_platform_lvbs::{ arch::{gdt, get_core_id, instrs::hlt_loop, interrupts}, debug_serial_println, - host::{bootparam::get_vtl1_memory_info, per_cpu_variables::allocate_per_cpu_variables}, + host::{ + bootparam::get_vtl1_memory_info, + per_cpu_variables::{allocate_per_cpu_variables, with_per_cpu_variables_mut}, + }, mm::MemoryProvider, mshv::{ - hvcall, + HV_VTL_SECURE, hvcall, vtl_switch::vtl_switch_loop_entry, vtl1_mem_layout::{ VTL1_INIT_HEAP_SIZE, VTL1_INIT_HEAP_START_PAGE, VTL1_PML4E_PAGE, @@ -192,14 +195,25 @@ fn optee_msg_handler_upcall(smc_args_addr: usize) -> Result::with_usize( usize::try_from(msg_arg_phys_addr).unwrap(), )?; From 1293f241b7005407875a9edb0fe8f8fce18ba859 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Fri, 9 Jan 2026 04:48:42 +0000 Subject: [PATCH 49/52] clarify --- litebox_runner_lvbs/src/lib.rs | 10 +-- litebox_shim_optee/src/msg_handler.rs | 124 ++++++++++++-------------- 2 files changed, 63 insertions(+), 71 deletions(-) diff --git a/litebox_runner_lvbs/src/lib.rs b/litebox_runner_lvbs/src/lib.rs index 4830345e0..4bb491d10 100644 --- a/litebox_runner_lvbs/src/lib.rs +++ b/litebox_runner_lvbs/src/lib.rs @@ -123,8 +123,8 @@ fn optee_msg_handler_upcall(smc_args_addr: usize) -> Result::with_usize(smc_args_addr)?; let mut smc_args = unsafe { smc_args_ptr.read_at_offset(0) }?; let msg_arg_phys_addr = smc_args.optee_msg_arg_phys_addr()?; - let (res, msg_arg) = handle_optee_smc_args(&mut smc_args)?; - if let Some(mut msg_arg) = msg_arg { + let smc_handled = handle_optee_smc_args(&mut smc_args)?; + if let Some(mut msg_arg) = smc_handled.msg_to_handle { match msg_arg.cmd { OpteeMessageCommand::OpenSession | OpteeMessageCommand::InvokeCommand @@ -222,15 +222,15 @@ fn optee_msg_handler_upcall(smc_args_addr: usize) -> Result { handle_optee_msg_arg(&msg_arg)?; - Ok(res.into()) + Ok(smc_handled.result.into()) } } } else { - Ok(res.into()) + Ok(smc_handled.result.into()) } } diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index c1cf028e3..6eb773596 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -212,7 +212,7 @@ pub fn handle_optee_msg_arg(msg_arg: &OpteeMsgArg) -> Result<(), OpteeSmcReturn> /// TA request information extracted from an OP-TEE message. /// /// In addition to standard TA information (i.e., TA UUID, session ID, command ID, -/// and parameters), it contains shared memory addresses (`out_phys_addrs` and `page_offsets`) to +/// and parameters), it contains shared memory addresses (`out_page_addrs` and `page_offsets`) to /// write back output data to the normal world once the TA execution is done. 
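
For orientation, the intended round trip for this type looks roughly as follows. `run_ta` is a hypothetical stand-in for the TA execution path (not part of this series), and the exact argument list of `prepare_for_return_to_normal_world` is elided, so this is a sketch under those assumptions rather than working code:

    // Hypothetical driver, for illustration only.
    fn serve_ta_request(msg_arg: &mut OpteeMsgArg) -> Result<(), OpteeSmcReturn> {
        let ta_req_info = decode_ta_request(msg_arg)?; // pull UUID, params, shm refs
        let out_params = run_ta(&ta_req_info)?;        // assumed TA execution path
        // writes MemrefOutput/MemrefInout data back through the saved references
        prepare_for_return_to_normal_world(&ta_req_info, &out_params, msg_arg)
    }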
pub struct TaRequestInfo { pub uuid: Option, @@ -220,7 +220,7 @@ pub struct TaRequestInfo { pub entry_func: UteeEntryFunc, pub cmd_id: u32, pub params: [UteeParamOwned; UteeParamOwned::TEE_NUM_PARAMS], - pub out_phys_addrs: [Option]>>; UteeParamOwned::TEE_NUM_PARAMS], + pub out_page_addrs: [Option]>>; UteeParamOwned::TEE_NUM_PARAMS], pub page_offsets: [Option; UteeParamOwned::TEE_NUM_PARAMS], } @@ -252,7 +252,7 @@ pub fn decode_ta_request(msg_arg: &OpteeMsgArg) -> Result Result { - let (phys_addrs, page_offset, data_size) = match param.attr_type() { + let (shm_info, data_size) = match param.attr_type() { OpteeMsgAttrType::TmemInput => { let tmem = param.get_param_tmem().ok_or(OpteeSmcReturn::EBadCmd)?; - let shm_phys_addrs = get_shm_phys_addrs_from_optee_msg_param_tmem(tmem)?; ( - shm_phys_addrs.0, - shm_phys_addrs.1, + get_shm_info_from_optee_msg_param_tmem(tmem)?, usize::try_from(tmem.size).unwrap(), ) } OpteeMsgAttrType::RmemInput => { let rmem = param.get_param_rmem().ok_or(OpteeSmcReturn::EBadCmd)?; - let shm_phys_addrs = get_shm_phys_addrs_from_optee_msg_param_rmem(rmem)?; ( - shm_phys_addrs.0, - shm_phys_addrs.1, + get_shm_info_from_optee_msg_param_rmem(rmem)?, usize::try_from(rmem.size).unwrap(), ) } _ => unreachable!(), }; let mut data = alloc::vec![0u8; data_size]; - read_data_from_shm_phys_addrs(&phys_addrs, page_offset, &mut data)?; + read_data_from_shm_info(&shm_info, &mut data)?; UteeParamOwned::MemrefInput { data: data.into() } } OpteeMsgAttrType::TmemOutput | OpteeMsgAttrType::RmemOutput => { - let (phys_addrs, page_offset, buffer_size) = match param.attr_type() { + let (shm_info, buffer_size) = match param.attr_type() { OpteeMsgAttrType::TmemOutput => { let tmem = param.get_param_tmem().ok_or(OpteeSmcReturn::EBadCmd)?; - let shm_phys_addrs = get_shm_phys_addrs_from_optee_msg_param_tmem(tmem)?; ( - shm_phys_addrs.0, - shm_phys_addrs.1, + get_shm_info_from_optee_msg_param_tmem(tmem)?, usize::try_from(tmem.size).unwrap(), ) } OpteeMsgAttrType::RmemOutput => { let rmem = param.get_param_rmem().ok_or(OpteeSmcReturn::EBadCmd)?; - let shm_phys_addrs = get_shm_phys_addrs_from_optee_msg_param_rmem(rmem)?; ( - shm_phys_addrs.0, - shm_phys_addrs.1, + get_shm_info_from_optee_msg_param_rmem(rmem)?, usize::try_from(rmem.size).unwrap(), ) } _ => unreachable!(), }; - ta_req_info.out_phys_addrs[i] = Some(phys_addrs.into_boxed_slice()); - ta_req_info.page_offsets[i] = Some(page_offset); + ta_req_info.out_page_addrs[i] = Some(shm_info.page_addrs); + ta_req_info.page_offsets[i] = Some(shm_info.page_offset); UteeParamOwned::MemrefOutput { buffer_size } } OpteeMsgAttrType::TmemInout | OpteeMsgAttrType::RmemInout => { - let (phys_addrs, page_offset, buffer_size) = match param.attr_type() { + let (shm_info, buffer_size) = match param.attr_type() { OpteeMsgAttrType::TmemInout => { let tmem = param.get_param_tmem().ok_or(OpteeSmcReturn::EBadCmd)?; - let shm_phys_addrs = get_shm_phys_addrs_from_optee_msg_param_tmem(tmem)?; ( - shm_phys_addrs.0, - shm_phys_addrs.1, + get_shm_info_from_optee_msg_param_tmem(tmem)?, usize::try_from(tmem.size).unwrap(), ) } OpteeMsgAttrType::RmemInout => { let rmem = param.get_param_rmem().ok_or(OpteeSmcReturn::EBadCmd)?; - let shm_phys_addrs = get_shm_phys_addrs_from_optee_msg_param_rmem(rmem)?; ( - shm_phys_addrs.0, - shm_phys_addrs.1, + get_shm_info_from_optee_msg_param_rmem(rmem)?, usize::try_from(rmem.size).unwrap(), ) } _ => unreachable!(), }; let mut buffer = alloc::vec![0u8; buffer_size]; - read_data_from_shm_phys_addrs(&phys_addrs, page_offset, &mut buffer)?; - 
ta_req_info.out_phys_addrs[i] = Some(phys_addrs.into_boxed_slice()); - ta_req_info.page_offsets[i] = Some(page_offset); + read_data_from_shm_info(&shm_info, &mut buffer)?; + ta_req_info.out_page_addrs[i] = Some(shm_info.page_addrs); + ta_req_info.page_offsets[i] = Some(shm_info.page_offset); UteeParamOwned::MemrefInout { data: buffer.into(), buffer_size, @@ -414,10 +402,12 @@ pub fn prepare_for_return_to_normal_world( if slice.is_empty() { continue; } - if let Some(out_addrs) = &ta_req_info.out_phys_addrs[index] { - write_data_to_shm_phys_addrs( - out_addrs, - ta_req_info.page_offsets[index].unwrap_or(0), + if let Some(out_addrs) = &ta_req_info.out_page_addrs[index] { + write_data_to_shm_info( + &ShmInfo { + page_addrs: out_addrs.clone(), + page_offset: ta_req_info.page_offsets[index].unwrap_or(0), + }, slice, )?; } @@ -449,12 +439,12 @@ impl ShmRefPagesData { } /// Data structure to maintain the information of OP-TEE shared memory in VTL0 referenced by `shm_ref`. -/// `pages` contains an array of physical page addresses. +/// `page_addrs` contains an array of physical page addresses. /// `page_offset` indicates the page offset of the first page (i.e., `pages[0]`) which should be /// smaller than `ALIGN`. #[derive(Clone)] -struct ShmRefInfo { - pub pages: Box<[PhysPageAddr]>, +struct ShmInfo { + pub page_addrs: Box<[PhysPageAddr]>, pub page_offset: usize, } @@ -463,7 +453,7 @@ struct ShmRefInfo { /// used during OP-TEE calls with parameters referencing shared memory. /// Any normal memory references without this registration will be rejected. struct ShmRefMap { - inner: spin::mutex::SpinMutex>>, + inner: spin::mutex::SpinMutex>>, } impl ShmRefMap { @@ -473,7 +463,7 @@ impl ShmRefMap { } } - pub fn insert(&self, shm_ref: u64, info: ShmRefInfo) -> Result<(), OpteeSmcReturn> { + pub fn insert(&self, shm_ref: u64, info: ShmInfo) -> Result<(), OpteeSmcReturn> { let mut guard = self.inner.lock(); if guard.contains_key(&shm_ref) { Err(OpteeSmcReturn::ENotAvail) @@ -483,12 +473,12 @@ impl ShmRefMap { } } - pub fn remove(&self, shm_ref: u64) -> Option> { + pub fn remove(&self, shm_ref: u64) -> Option> { let mut guard = self.inner.lock(); guard.remove(&shm_ref) } - pub fn get(&self, shm_ref: u64) -> Option> { + pub fn get(&self, shm_ref: u64) -> Option> { let guard = self.inner.lock(); guard.get(&shm_ref).cloned() } @@ -536,8 +526,8 @@ impl ShmRefMap { self.insert( shm_ref, - ShmRefInfo { - pages: pages.into_boxed_slice(), + ShmInfo { + page_addrs: pages.into_boxed_slice(), page_offset: usize::try_from(page_offset).unwrap(), }, )?; @@ -550,36 +540,36 @@ fn shm_ref_map() -> &'static ShmRefMap { SHM_REF_MAP.get_or_init(|| Box::new(ShmRefMap::new())) } -/// Get the normal world physical addresses and page offset of OP-TEE shared memory from `OpteeMsgParamTmem`. +/// Get the normal world shared memory information (physical addresses and page offset) from `OpteeMsgParamTmem`. /// /// Note that we use this function for handing TA requests and in this context /// `OpteeMsgParamTmem` and `OpteeMsgParamRmem` are equivalent because every shared memory /// reference accessible by TAs must be registered in advance. /// `OpteeMsgParamTmem` is matter for the registration of shared memory regions. 
-fn get_shm_phys_addrs_from_optee_msg_param_tmem(
+fn get_shm_info_from_optee_msg_param_tmem(
     tmem: OpteeMsgParamTmem,
-) -> Result<(Vec>, usize), OpteeSmcReturn> {
+) -> Result, OpteeSmcReturn> {
     let rmem = OpteeMsgParamRmem {
         offs: tmem.buf_ptr,
         size: tmem.size,
         shm_ref: tmem.shm_ref,
     };
-    get_shm_phys_addrs_from_optee_msg_param_rmem(rmem)
+    get_shm_info_from_optee_msg_param_rmem(rmem)
 }
 
-/// Get a list of the normal world physical addresses and page offset of OP-TEE shared memory from `OpteeMsgParamRmem`.
+/// Get the normal world shared memory information (physical addresses and page offset) from `OpteeMsgParamRmem`.
 ///
 /// `rmem.offs` must be an offset within the shared memory region registered with `rmem.shm_ref` before
 /// and `rmem.offs + rmem.size` must not exceed the size of the registered shared memory region.
 /// All addresses this function returns are page aligned and virtually contiguous within the normal world but
 /// not necessarily physically contiguous.
-fn get_shm_phys_addrs_from_optee_msg_param_rmem(
+fn get_shm_info_from_optee_msg_param_rmem(
     rmem: OpteeMsgParamRmem,
-) -> Result<(Vec>, usize), OpteeSmcReturn> {
-    let Some(shm_ref_info) = shm_ref_map().get(rmem.shm_ref) else {
+) -> Result, OpteeSmcReturn> {
+    let Some(shm_info) = shm_ref_map().get(rmem.shm_ref) else {
         return Err(OpteeSmcReturn::ENotAvail);
     };
-    let page_offset = shm_ref_info.page_offset;
+    let page_offset = shm_info.page_offset;
     let start = page_offset
         .checked_add(usize::try_from(rmem.offs).unwrap())
         .ok_or(OpteeSmcReturn::EBadAddr)?;
@@ -588,22 +578,25 @@ fn get_shm_phys_addrs_from_optee_msg_param_rmem(
         .ok_or(OpteeSmcReturn::EBadAddr)?;
     let start_page_index = start / PAGE_SIZE;
     let end_page_index = end.div_ceil(PAGE_SIZE);
-    if start_page_index >= shm_ref_info.pages.len() || end_page_index > shm_ref_info.pages.len() {
+    if start_page_index >= shm_info.page_addrs.len() || end_page_index > shm_info.page_addrs.len() {
         return Err(OpteeSmcReturn::EBadAddr);
     }
-    let mut pages = Vec::with_capacity(end_page_index - start_page_index);
-    pages.extend_from_slice(&shm_ref_info.pages[start_page_index..end_page_index]);
-    Ok((pages, page_offset))
+    let mut page_addrs = Vec::with_capacity(end_page_index - start_page_index);
+    page_addrs.extend_from_slice(&shm_info.page_addrs[start_page_index..end_page_index]);
+    Ok(ShmInfo {
+        page_addrs: page_addrs.into_boxed_slice(),
+        page_offset,
+    })
 }
 
 /// Read data from the normal world shared memory pages whose physical addresses are given in
-/// `phys_addrs` and `page_offset` into `buffer`. The size of `buffer` indicates how many bytes to read.
-fn read_data_from_shm_phys_addrs(
-    phys_addrs: &[PhysPageAddr],
-    page_offset: usize,
+/// `shm_info` into `buffer`. The size of `buffer` indicates the number of bytes to read.
+fn read_data_from_shm_info(
+    shm_info: &ShmInfo,
     buffer: &mut [u8],
 ) -> Result<(), OpteeSmcReturn> {
-    let mut ptr = NormalWorldConstPtr::::new(phys_addrs, page_offset)?;
+    let mut ptr =
+        NormalWorldConstPtr::::new(&shm_info.page_addrs, shm_info.page_offset)?;
     unsafe {
         ptr.read_slice_at_offset(0, buffer)?;
     }
@@ -611,13 +604,12 @@ fn read_data_from_shm_phys_addrs(
 }
 
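To make the page-range arithmetic in `get_shm_info_from_optee_msg_param_rmem` concrete, assuming 4 KiB pages: a region whose first byte sits at page offset 0x800, queried with offs = 0x1000 and size = 0x2000, covers bytes [0x1800, 0x3800) of the registered page list, i.e. pages 1 through 3:

    const PAGE_SIZE: usize = 4096; // assumed page size

    // Same index computation as above, extracted for illustration.
    fn covered_pages(page_offset: usize, offs: usize, size: usize) -> (usize, usize) {
        let start = page_offset + offs; // byte offset of the first requested byte
        let end = start + size;         // one past the last requested byte
        (start / PAGE_SIZE, end.div_ceil(PAGE_SIZE))
    }
    // covered_pages(0x800, 0x1000, 0x2000) == (1, 4): pages 1, 2, and 3.
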
 /// Write data in `buffer` to the normal world shared memory pages whose physical addresses are given
-/// in `phys_addrs` and `page_offset`. The size of `buffer` indicates how many bytes to write.
-fn write_data_to_shm_phys_addrs(
-    phys_addrs: &[PhysPageAddr],
-    page_offset: usize,
+/// in `shm_info`. The size of `buffer` indicates the number of bytes to write.
+fn write_data_to_shm_info(
+    shm_info: &ShmInfo,
     buffer: &[u8],
 ) -> Result<(), OpteeSmcReturn> {
-    let mut ptr = NormalWorldMutPtr::::new(phys_addrs, page_offset)
-        .map_err(|_| OpteeSmcReturn::EBadAddr)?;
+    let mut ptr = NormalWorldMutPtr::::new(&shm_info.page_addrs, shm_info.page_offset)
+        .map_err(|_| OpteeSmcReturn::EBadAddr)?;
     unsafe {
         ptr.write_slice_at_offset(0, buffer)?;
     }

From 8700931bade98fe2ca47958a3ad9c9f54fd8733a Mon Sep 17 00:00:00 2001
From: Sangho Lee 
Date: Fri, 9 Jan 2026 18:10:50 +0000
Subject: [PATCH 50/52] refactoring

---
 litebox_common_optee/src/lib.rs       |  15 +++
 litebox_runner_lvbs/src/lib.rs        |  31 ++++++-
 litebox_shim_optee/src/msg_handler.rs | 128 +++++++++++++--------
 3 files changed, 116 insertions(+), 58 deletions(-)

diff --git a/litebox_common_optee/src/lib.rs b/litebox_common_optee/src/lib.rs
index c63d0990e..a4826c6cc 100644
--- a/litebox_common_optee/src/lib.rs
+++ b/litebox_common_optee/src/lib.rs
@@ -1612,3 +1612,18 @@ impl From for OpteeSmcReturn {
         }
     }
 }
+
+impl From for litebox_common_linux::errno::Errno {
+    fn from(ret: OpteeSmcReturn) -> Self {
+        match ret {
+            OpteeSmcReturn::EBusy | OpteeSmcReturn::EThreadLimit => {
+                litebox_common_linux::errno::Errno::EBUSY
+            }
+            OpteeSmcReturn::EResume => litebox_common_linux::errno::Errno::EAGAIN,
+            OpteeSmcReturn::EBadAddr => litebox_common_linux::errno::Errno::EFAULT,
+            OpteeSmcReturn::ENomem => litebox_common_linux::errno::Errno::ENOMEM,
+            OpteeSmcReturn::ENotAvail => litebox_common_linux::errno::Errno::ENOENT,
+            _ => litebox_common_linux::errno::Errno::EINVAL,
+        }
+    }
+}
diff --git a/litebox_runner_lvbs/src/lib.rs b/litebox_runner_lvbs/src/lib.rs
index 4bb491d10..1f9cf102a 100644
--- a/litebox_runner_lvbs/src/lib.rs
+++ b/litebox_runner_lvbs/src/lib.rs
@@ -1,7 +1,7 @@
 #![no_std]
 
 use core::panic::PanicInfo;
-use litebox::mm::linux::PAGE_SIZE;
+use litebox::{mm::linux::PAGE_SIZE, utils::TruncateExt};
 use litebox_platform_lvbs::{
     arch::{gdt, get_core_id, instrs::hlt_loop, interrupts},
     debug_serial_println,
@@ -117,8 +117,31 @@ use litebox_shim_optee::msg_handler::{
     prepare_for_return_to_normal_world,
 };
 use litebox_shim_optee::{NormalWorldConstPtr, NormalWorldMutPtr};
+
+/// An entry point function for OP-TEE message handler upcall/callback.
+///
+/// This entry point function is intended to be called from the LVBS platform which is unaware of
+/// OP-TEE semantics. Thus, we align this function's signature with other VSM/HVCI functions (i.e.,
+/// up to three u64 arguments and returning Result).
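
The `From` impl added in this patch gives upcall plumbing a one-line way to surface SMC-level failures as POSIX-style errors; a minimal usage sketch, with types as in the hunk above:

    // E.g. ENomem maps to ENOMEM; codes without a natural POSIX
    // counterpart fall back to EINVAL (see the match arms above).
    fn to_errno(ret: OpteeSmcReturn) -> litebox_common_linux::errno::Errno {
        ret.into()
    }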
#[expect(dead_code)] -fn optee_msg_handler_upcall(smc_args_addr: usize) -> Result { +fn optee_msg_handler_upcall_entry( + smc_args_addr: u64, +) -> Result { + let smc_args_addr: usize = smc_args_addr.truncate(); + match optee_msg_handler(smc_args_addr) { + Ok(smc_arg) => { + let mut smc_args_ptr = + NormalWorldMutPtr::::with_usize(smc_args_addr) + .map_err(|_| litebox_common_linux::errno::Errno::EINVAL)?; + unsafe { smc_args_ptr.write_at_offset(0, smc_arg) } + .map_err(|_| litebox_common_linux::errno::Errno::EFAULT)?; + Ok(0) + } + Err(smc_ret) => Err(smc_ret.into()), + } +} + +fn optee_msg_handler(smc_args_addr: usize) -> Result { let mut smc_args_ptr = NormalWorldConstPtr::::with_usize(smc_args_addr)?; let mut smc_args = unsafe { smc_args_ptr.read_at_offset(0) }?; @@ -164,7 +187,7 @@ fn optee_msg_handler_upcall(smc_args_addr: usize) -> Result Result::with_usize( - usize::try_from(msg_arg_phys_addr).unwrap(), + msg_arg_phys_addr.truncate(), )?; unsafe { ptr.write_at_offset(0, msg_arg) }?; } else { diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index 6eb773596..556145c01 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -19,7 +19,8 @@ use crate::{NormalWorldConstPtr, NormalWorldMutPtr}; use alloc::{boxed::Box, vec::Vec}; use hashbrown::HashMap; use litebox::mm::linux::PAGE_SIZE; -use litebox::platform::vmap::PhysPageAddr; +use litebox::platform::RawConstPointer; +use litebox::platform::vmap::{PhysPageAddr, PhysPointerError}; use litebox::utils::TruncateExt; use litebox_common_optee::{ OpteeMessageCommand, OpteeMsgArg, OpteeMsgAttrType, OpteeMsgParamRmem, OpteeMsgParamTmem, @@ -212,16 +213,15 @@ pub fn handle_optee_msg_arg(msg_arg: &OpteeMsgArg) -> Result<(), OpteeSmcReturn> /// TA request information extracted from an OP-TEE message. /// /// In addition to standard TA information (i.e., TA UUID, session ID, command ID, -/// and parameters), it contains shared memory addresses (`out_page_addrs` and `page_offsets`) to +/// and parameters), it contains shared memory information (`out_shm_info`) to /// write back output data to the normal world once the TA execution is done. -pub struct TaRequestInfo { +pub struct TaRequestInfo { pub uuid: Option, pub session: u32, pub entry_func: UteeEntryFunc, pub cmd_id: u32, pub params: [UteeParamOwned; UteeParamOwned::TEE_NUM_PARAMS], - pub out_page_addrs: [Option]>>; UteeParamOwned::TEE_NUM_PARAMS], - pub page_offsets: [Option; UteeParamOwned::TEE_NUM_PARAMS], + pub out_shm_info: [Option>; UteeParamOwned::TEE_NUM_PARAMS], } /// This function decodes a TA request contained in `OpteeMsgArg`. @@ -232,7 +232,9 @@ pub struct TaRequestInfo { /// # Panics /// /// Panics if any conversion from `u64` to `usize` fails. OP-TEE shim doesn't support a 32-bit environment. 
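
The `.truncate()` calls introduced in this patch encode the same 64-bit-only assumption stated in the doc comment above. A minimal stand-in that illustrates the intent; the real `TruncateExt` trait lives in `litebox::utils` and its exact definition is not shown here:

    // Illustrative stand-in only: on the 64-bit targets the shim supports,
    // u64 -> usize is lossless, so the conversion is a plain cast.
    trait Truncate<T> {
        fn truncate(self) -> T;
    }
    impl Truncate<usize> for u64 {
        fn truncate(self) -> usize {
            self as usize
        }
    }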
-pub fn decode_ta_request(msg_arg: &OpteeMsgArg) -> Result { +pub fn decode_ta_request( + msg_arg: &OpteeMsgArg, +) -> Result, OpteeSmcReturn> { let ta_entry_func: UteeEntryFunc = msg_arg.cmd.try_into()?; let (ta_uuid, skip): (Option, usize) = if ta_entry_func == UteeEntryFunc::OpenSession { // If it is an OpenSession request, extract the TA UUID from the first two parameters @@ -252,8 +254,7 @@ pub fn decode_ta_request(msg_arg: &OpteeMsgArg) -> Result Result { let rmem = param.get_param_rmem().ok_or(OpteeSmcReturn::EBadCmd)?; ( get_shm_info_from_optee_msg_param_rmem(rmem)?, - usize::try_from(rmem.size).unwrap(), + rmem.size.truncate(), ) } _ => unreachable!(), }; let mut data = alloc::vec![0u8; data_size]; - read_data_from_shm_info(&shm_info, &mut data)?; + read_data_from_shm(&shm_info, &mut data)?; UteeParamOwned::MemrefInput { data: data.into() } } OpteeMsgAttrType::TmemOutput | OpteeMsgAttrType::RmemOutput => { @@ -309,20 +310,19 @@ pub fn decode_ta_request(msg_arg: &OpteeMsgArg) -> Result { let rmem = param.get_param_rmem().ok_or(OpteeSmcReturn::EBadCmd)?; ( get_shm_info_from_optee_msg_param_rmem(rmem)?, - usize::try_from(rmem.size).unwrap(), + rmem.size.truncate(), ) } _ => unreachable!(), }; - ta_req_info.out_page_addrs[i] = Some(shm_info.page_addrs); - ta_req_info.page_offsets[i] = Some(shm_info.page_offset); + ta_req_info.out_shm_info[i] = Some(shm_info); UteeParamOwned::MemrefOutput { buffer_size } } OpteeMsgAttrType::TmemInout | OpteeMsgAttrType::RmemInout => { @@ -331,22 +331,21 @@ pub fn decode_ta_request(msg_arg: &OpteeMsgArg) -> Result { let rmem = param.get_param_rmem().ok_or(OpteeSmcReturn::EBadCmd)?; ( get_shm_info_from_optee_msg_param_rmem(rmem)?, - usize::try_from(rmem.size).unwrap(), + rmem.size.truncate(), ) } _ => unreachable!(), }; let mut buffer = alloc::vec![0u8; buffer_size]; - read_data_from_shm_info(&shm_info, &mut buffer)?; - ta_req_info.out_page_addrs[i] = Some(shm_info.page_addrs); - ta_req_info.page_offsets[i] = Some(shm_info.page_offset); + read_data_from_shm(&shm_info, &mut buffer)?; + ta_req_info.out_shm_info[i] = Some(shm_info); UteeParamOwned::MemrefInout { data: buffer.into(), buffer_size, @@ -368,7 +367,7 @@ pub fn decode_ta_request(msg_arg: &OpteeMsgArg) -> Result, msg_arg: &mut OpteeMsgArg, ) -> Result<(), OpteeSmcReturn> { for index in 0..UteeParams::TEE_NUM_PARAMS { @@ -393,23 +392,15 @@ pub fn prepare_for_return_to_normal_world( // SAFETY // `addr` is expected to be a valid address of a TA and `addr + len` does not // exceed the TA's memory region. - let slice = unsafe { - &*core::ptr::slice_from_raw_parts( - addr as *const u8, - usize::try_from(len).unwrap_or(0), - ) - }; + let ptr = crate::UserConstPtr::::from_usize(addr.truncate()); + let slice = unsafe { ptr.to_cow_slice(len.truncate()) } + .ok_or(OpteeSmcReturn::EBadAddr)?; + if slice.is_empty() { continue; } - if let Some(out_addrs) = &ta_req_info.out_page_addrs[index] { - write_data_to_shm_info( - &ShmInfo { - page_addrs: out_addrs.clone(), - page_offset: ta_req_info.page_offsets[index].unwrap_or(0), - }, - slice, - )?; + if let Some(out_shm_info) = &ta_req_info.out_shm_info[index] { + write_data_to_shm(out_shm_info, slice.as_ref())?; } } } @@ -443,9 +434,43 @@ impl ShmRefPagesData { /// `page_offset` indicates the page offset of the first page (i.e., `pages[0]`) which should be /// smaller than `ALIGN`. 
 #[derive(Clone)]
-struct ShmInfo {
-    pub page_addrs: Box<[PhysPageAddr]>,
-    pub page_offset: usize,
+pub struct ShmInfo {
+    page_addrs: Box<[PhysPageAddr]>,
+    page_offset: usize,
+}
+
+impl ShmInfo {
+    pub fn new(
+        page_addrs: Box<[PhysPageAddr]>,
+        page_offset: usize,
+    ) -> Result {
+        if page_offset >= ALIGN {
+            return Err(OpteeSmcReturn::EBadAddr);
+        }
+        Ok(Self {
+            page_addrs,
+            page_offset,
+        })
+    }
+}
+
+/// Conversion from `ShmInfo` to `NormalWorldConstPtr` and `NormalWorldMutPtr`.
+///
+/// OP-TEE shared memory regions are untyped, so we use `u8` as the base type.
+impl TryFrom> for NormalWorldConstPtr {
+    type Error = PhysPointerError;
+
+    fn try_from(shm_info: ShmInfo) -> Result {
+        NormalWorldConstPtr::new(&shm_info.page_addrs, shm_info.page_offset)
+    }
+}
+
+impl TryFrom> for NormalWorldMutPtr {
+    type Error = PhysPointerError;
+
+    fn try_from(shm_info: ShmInfo) -> Result {
+        NormalWorldMutPtr::new(&shm_info.page_addrs, shm_info.page_offset)
+    }
 }
 
 /// Maintain the information of OP-TEE shared memory in VTL0 referenced by `shm_ref`.
@@ -526,10 +551,10 @@ impl ShmRefMap {
 
         self.insert(
             shm_ref,
-            ShmInfo {
-                page_addrs: pages.into_boxed_slice(),
-                page_offset: usize::try_from(page_offset).unwrap(),
-            },
+            ShmInfo::new(
+                pages.into_boxed_slice(),
+                usize::try_from(page_offset).unwrap(),
+            )?,
         )?;
         Ok(())
     }
@@ -571,10 +596,10 @@ fn get_shm_info_from_optee_msg_param_rmem(
     };
     let page_offset = shm_info.page_offset;
     let start = page_offset
-        .checked_add(usize::try_from(rmem.offs).unwrap())
+        .checked_add(rmem.offs.truncate())
         .ok_or(OpteeSmcReturn::EBadAddr)?;
     let end = start
-        .checked_add(usize::try_from(rmem.size).unwrap())
+        .checked_add(rmem.size.truncate())
         .ok_or(OpteeSmcReturn::EBadAddr)?;
     let start_page_index = start / PAGE_SIZE;
     let end_page_index = end.div_ceil(PAGE_SIZE);
@@ -583,20 +608,16 @@ fn get_shm_info_from_optee_msg_param_rmem(
     }
     let mut page_addrs = Vec::with_capacity(end_page_index - start_page_index);
     page_addrs.extend_from_slice(&shm_info.page_addrs[start_page_index..end_page_index]);
-    Ok(ShmInfo {
-        page_addrs: page_addrs.into_boxed_slice(),
-        page_offset,
-    })
+    ShmInfo::new(page_addrs.into_boxed_slice(), page_offset)
 }
 
 /// Read data from the normal world shared memory pages whose physical addresses are given in
 /// `shm_info` into `buffer`. The size of `buffer` indicates the number of bytes to read.
-fn read_data_from_shm_info(
+fn read_data_from_shm(
     shm_info: &ShmInfo,
     buffer: &mut [u8],
 ) -> Result<(), OpteeSmcReturn> {
-    let mut ptr =
-        NormalWorldConstPtr::::new(&shm_info.page_addrs, shm_info.page_offset)?;
+    let mut ptr: NormalWorldConstPtr = shm_info.clone().try_into()?;
     unsafe {
         ptr.read_slice_at_offset(0, buffer)?;
     }
@@ -605,12 +626,11 @@ fn read_data_from_shm_info(
 
 /// Write data in `buffer` to the normal world shared memory pages whose physical addresses are given
 /// in `shm_info`. The size of `buffer` indicates the number of bytes to write.
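
A quick illustration of the invariant `ShmInfo::new` enforces above, with `ALIGN` playing the role of the page size (example values chosen here for illustration):

    // Mirrors ShmInfo::new: offsets at or past ALIGN are rejected with EBadAddr.
    fn offset_fits_first_page<const ALIGN: usize>(page_offset: usize) -> bool {
        page_offset < ALIGN
    }
    // offset_fits_first_page::<4096>(0x800) == true
    // offset_fits_first_page::<4096>(4096)  == false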
-fn write_data_to_shm_info( +fn write_data_to_shm( shm_info: &ShmInfo, buffer: &[u8], ) -> Result<(), OpteeSmcReturn> { - let mut ptr = NormalWorldMutPtr::::new(&shm_info.page_addrs, shm_info.page_offset) - .map_err(|_| OpteeSmcReturn::EBadAddr)?; + let mut ptr: NormalWorldMutPtr = shm_info.clone().try_into()?; unsafe { ptr.write_slice_at_offset(0, buffer)?; } From 3b85f3547e82a5d8a805b7a95fb23c5b1289d29c Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Fri, 23 Jan 2026 19:01:24 +0000 Subject: [PATCH 51/52] feedbacks --- litebox_runner_lvbs/src/lib.rs | 32 ++++---- litebox_shim_optee/src/msg_handler.rs | 112 ++++++++++++-------------- 2 files changed, 66 insertions(+), 78 deletions(-) diff --git a/litebox_runner_lvbs/src/lib.rs b/litebox_runner_lvbs/src/lib.rs index 1f9cf102a..83edc1e4f 100644 --- a/litebox_runner_lvbs/src/lib.rs +++ b/litebox_runner_lvbs/src/lib.rs @@ -2,6 +2,10 @@ use core::panic::PanicInfo; use litebox::{mm::linux::PAGE_SIZE, utils::TruncateExt}; +use litebox_common_optee::{ + LdelfArg, OpteeMessageCommand, OpteeMsgArg, OpteeSmcArgs, OpteeSmcReturn, TeeIdentity, + TeeLogin, TeeUuid, UteeEntryFunc, UteeParamOwned, UteeParams, +}; use litebox_platform_lvbs::{ arch::{gdt, get_core_id, instrs::hlt_loop, interrupts}, debug_serial_println, @@ -21,6 +25,14 @@ use litebox_platform_lvbs::{ serial_println, }; use litebox_platform_multiplex::Platform; +use litebox_shim_optee::{NormalWorldConstPtr, NormalWorldMutPtr}; +use litebox_shim_optee::{ + loader::ElfLoadInfo, + msg_handler::{ + decode_ta_request, handle_optee_msg_arg, handle_optee_smc_args, + prepare_for_return_to_normal_world, + }, +}; /// # Panics /// @@ -65,6 +77,7 @@ pub fn init() -> Option<&'static Platform> { let pml4_table_addr = vtl1_start + u64::try_from(PAGE_SIZE * VTL1_PML4E_PAGE).unwrap(); let platform = Platform::new(pml4_table_addr, vtl1_start, vtl1_end); ret = Some(platform); + litebox_platform_multiplex::set_platform(platform); // Add the rest of the VTL1 memory to the global allocator once they are mapped to the kernel page table. let mem_fill_start = mem_fill_start + mem_fill_size; @@ -103,28 +116,13 @@ pub fn run(platform: Option<&'static Platform>) -> ! { vtl_switch_loop_entry(platform) } -// Tentative OP-TEE message handler upcall implementation. -// This will be revised once the upcall interface is finalized. -// NOTE: This function doesn't work because `run_thread` is not ready. -// It is okay to remove this function in this PR and add it in a follow-up PR. -use litebox_common_optee::{ - LdelfArg, OpteeMessageCommand, OpteeMsgArg, OpteeSmcArgs, OpteeSmcReturn, TeeIdentity, - TeeLogin, TeeUuid, UteeEntryFunc, UteeParamOwned, UteeParams, -}; -use litebox_shim_optee::loader::ElfLoadInfo; -use litebox_shim_optee::msg_handler::{ - decode_ta_request, handle_optee_msg_arg, handle_optee_smc_args, - prepare_for_return_to_normal_world, -}; -use litebox_shim_optee::{NormalWorldConstPtr, NormalWorldMutPtr}; - -/// An entry point function for OP-TEE message handler upcall/callback. +/// A tentative entry point function for OP-TEE message handler upcall/callback. /// /// This entry point function is intended to be called from the LVBS platform which is unware of /// OP-TEE semantics. Thus, we align this function's signature with other VSM/HVCI functions (i.e., /// up to three u64 arguments and returning Result). 
#[expect(dead_code)] -fn optee_msg_handler_upcall_entry( +fn optee_smc_handler_upcall_entry( smc_args_addr: u64, ) -> Result { let smc_args_addr: usize = smc_args_addr.truncate(); diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index 556145c01..d48e020c5 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -55,12 +55,12 @@ const MAX_NOTIF_VALUE: usize = 0; const NUM_RPC_PARMS: usize = 4; #[inline] -fn page_align_down_u64(address: u64) -> u64 { +fn page_align_down(address: u64) -> u64 { address & !(PAGE_SIZE as u64 - 1) } #[inline] -fn page_align_up_u64(len: u64) -> u64 { +fn page_align_up(len: u64) -> u64 { len.next_multiple_of(PAGE_SIZE as u64) } @@ -181,9 +181,9 @@ pub fn handle_optee_msg_arg(msg_arg: &OpteeMsgArg) -> Result<(), OpteeSmcReturn> // `tmem.buf_ptr` encodes two different information: // - The physical page address of the first `ShmRefPagesData` // - The page offset of the first shared memory page (`pages_list[0]`) - let shm_ref_pages_data_phys_addr = page_align_down_u64(tmem.buf_ptr); + let shm_ref_pages_data_phys_addr = page_align_down(tmem.buf_ptr); let page_offset = tmem.buf_ptr - shm_ref_pages_data_phys_addr; - let aligned_size = page_align_up_u64(page_offset + tmem.size); + let aligned_size = page_align_up(page_offset + tmem.size); shm_ref_map().register_shm( shm_ref_pages_data_phys_addr, page_offset, @@ -243,6 +243,7 @@ pub fn decode_ta_request( data[1] = (msg_arg.get_param_value(0)?.b).truncate(); data[2] = (msg_arg.get_param_value(1)?.a).truncate(); data[3] = (msg_arg.get_param_value(1)?.b).truncate(); + // Skip the first two parameters as they convey the TA UUID (Some(TeeUuid::from_u32_array(data)), 2) } else { (None, 0) @@ -282,67 +283,58 @@ pub fn decode_ta_request( value_b: value.b, } } - OpteeMsgAttrType::TmemInput | OpteeMsgAttrType::RmemInput => { - let (shm_info, data_size) = match param.attr_type() { - OpteeMsgAttrType::TmemInput => { - let tmem = param.get_param_tmem().ok_or(OpteeSmcReturn::EBadCmd)?; - ( - get_shm_info_from_optee_msg_param_tmem(tmem)?, - tmem.size.truncate(), - ) - } - OpteeMsgAttrType::RmemInput => { - let rmem = param.get_param_rmem().ok_or(OpteeSmcReturn::EBadCmd)?; - ( - get_shm_info_from_optee_msg_param_rmem(rmem)?, - rmem.size.truncate(), - ) - } - _ => unreachable!(), - }; + OpteeMsgAttrType::TmemInput => { + let tmem = param.get_param_tmem().ok_or(OpteeSmcReturn::EBadCmd)?; + let shm_info = get_shm_info_from_optee_msg_param_tmem(tmem)?; + let data_size = tmem.size.truncate(); + let mut data = alloc::vec![0u8; data_size]; read_data_from_shm(&shm_info, &mut data)?; UteeParamOwned::MemrefInput { data: data.into() } } - OpteeMsgAttrType::TmemOutput | OpteeMsgAttrType::RmemOutput => { - let (shm_info, buffer_size) = match param.attr_type() { - OpteeMsgAttrType::TmemOutput => { - let tmem = param.get_param_tmem().ok_or(OpteeSmcReturn::EBadCmd)?; - ( - get_shm_info_from_optee_msg_param_tmem(tmem)?, - tmem.size.truncate(), - ) - } - OpteeMsgAttrType::RmemOutput => { - let rmem = param.get_param_rmem().ok_or(OpteeSmcReturn::EBadCmd)?; - ( - get_shm_info_from_optee_msg_param_rmem(rmem)?, - rmem.size.truncate(), - ) - } - _ => unreachable!(), - }; + OpteeMsgAttrType::RmemInput => { + let rmem = param.get_param_rmem().ok_or(OpteeSmcReturn::EBadCmd)?; + let shm_info = get_shm_info_from_optee_msg_param_rmem(rmem)?; + let data_size = rmem.size.truncate(); + + let mut data = alloc::vec![0u8; data_size]; + read_data_from_shm(&shm_info, &mut data)?; + 
UteeParamOwned::MemrefInput { data: data.into() } + } + OpteeMsgAttrType::TmemOutput => { + let tmem = param.get_param_tmem().ok_or(OpteeSmcReturn::EBadCmd)?; + let shm_info = get_shm_info_from_optee_msg_param_tmem(tmem)?; + let buffer_size = tmem.size.truncate(); + ta_req_info.out_shm_info[i] = Some(shm_info); UteeParamOwned::MemrefOutput { buffer_size } } - OpteeMsgAttrType::TmemInout | OpteeMsgAttrType::RmemInout => { - let (shm_info, buffer_size) = match param.attr_type() { - OpteeMsgAttrType::TmemInout => { - let tmem = param.get_param_tmem().ok_or(OpteeSmcReturn::EBadCmd)?; - ( - get_shm_info_from_optee_msg_param_tmem(tmem)?, - tmem.size.truncate(), - ) - } - OpteeMsgAttrType::RmemInout => { - let rmem = param.get_param_rmem().ok_or(OpteeSmcReturn::EBadCmd)?; - ( - get_shm_info_from_optee_msg_param_rmem(rmem)?, - rmem.size.truncate(), - ) - } - _ => unreachable!(), - }; + OpteeMsgAttrType::RmemOutput => { + let rmem = param.get_param_rmem().ok_or(OpteeSmcReturn::EBadCmd)?; + let shm_info = get_shm_info_from_optee_msg_param_rmem(rmem)?; + let buffer_size = rmem.size.truncate(); + + ta_req_info.out_shm_info[i] = Some(shm_info); + UteeParamOwned::MemrefOutput { buffer_size } + } + OpteeMsgAttrType::TmemInout => { + let tmem = param.get_param_tmem().ok_or(OpteeSmcReturn::EBadCmd)?; + let shm_info = get_shm_info_from_optee_msg_param_tmem(tmem)?; + let buffer_size = tmem.size.truncate(); + + let mut buffer = alloc::vec![0u8; buffer_size]; + read_data_from_shm(&shm_info, &mut buffer)?; + ta_req_info.out_shm_info[i] = Some(shm_info); + UteeParamOwned::MemrefInout { + data: buffer.into(), + buffer_size, + } + } + OpteeMsgAttrType::RmemInout => { + let rmem = param.get_param_rmem().ok_or(OpteeSmcReturn::EBadCmd)?; + let shm_info = get_shm_info_from_optee_msg_param_rmem(rmem)?; + let buffer_size = rmem.size.truncate(); + let mut buffer = alloc::vec![0u8; buffer_size]; read_data_from_shm(&shm_info, &mut buffer)?; ta_req_info.out_shm_info[i] = Some(shm_info); @@ -570,7 +562,7 @@ fn shm_ref_map() -> &'static ShmRefMap { /// Note that we use this function for handing TA requests and in this context /// `OpteeMsgParamTmem` and `OpteeMsgParamRmem` are equivalent because every shared memory /// reference accessible by TAs must be registered in advance. -/// `OpteeMsgParamTmem` is matter for the registration of shared memory regions. +/// `OpteeMsgParamTmem` is needed when we register shared memory regions (rmem is not allowed for this purpose). fn get_shm_info_from_optee_msg_param_tmem( tmem: OpteeMsgParamTmem, ) -> Result, OpteeSmcReturn> { @@ -586,8 +578,6 @@ fn get_shm_info_from_optee_msg_param_tmem( /// /// `rmem.offs` must be an offset within the shared memory region registered with `rmem.shm_ref` before /// and `rmem.offs + rmem.size` must not exceed the size of the registered shared memory region. -/// All addresses this function returns are page aligned and virtually contiguous within the normal world but -/// not necessarily physically contiguous. 
fn get_shm_info_from_optee_msg_param_rmem( rmem: OpteeMsgParamRmem, ) -> Result, OpteeSmcReturn> { From eee03805dc554a3b138a305cded75f2857357ab0 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Fri, 23 Jan 2026 19:16:34 +0000 Subject: [PATCH 52/52] refactor --- litebox_shim_optee/src/msg_handler.rs | 51 +++++++++++++++------------ 1 file changed, 29 insertions(+), 22 deletions(-) diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs index d48e020c5..2b6eaeed5 100644 --- a/litebox_shim_optee/src/msg_handler.rs +++ b/litebox_shim_optee/src/msg_handler.rs @@ -287,19 +287,13 @@ pub fn decode_ta_request( let tmem = param.get_param_tmem().ok_or(OpteeSmcReturn::EBadCmd)?; let shm_info = get_shm_info_from_optee_msg_param_tmem(tmem)?; let data_size = tmem.size.truncate(); - - let mut data = alloc::vec![0u8; data_size]; - read_data_from_shm(&shm_info, &mut data)?; - UteeParamOwned::MemrefInput { data: data.into() } + build_memref_input(&shm_info, data_size)? } OpteeMsgAttrType::RmemInput => { let rmem = param.get_param_rmem().ok_or(OpteeSmcReturn::EBadCmd)?; let shm_info = get_shm_info_from_optee_msg_param_rmem(rmem)?; let data_size = rmem.size.truncate(); - - let mut data = alloc::vec![0u8; data_size]; - read_data_from_shm(&shm_info, &mut data)?; - UteeParamOwned::MemrefInput { data: data.into() } + build_memref_input(&shm_info, data_size)? } OpteeMsgAttrType::TmemOutput => { let tmem = param.get_param_tmem().ok_or(OpteeSmcReturn::EBadCmd)?; @@ -322,26 +316,16 @@ pub fn decode_ta_request( let shm_info = get_shm_info_from_optee_msg_param_tmem(tmem)?; let buffer_size = tmem.size.truncate(); - let mut buffer = alloc::vec![0u8; buffer_size]; - read_data_from_shm(&shm_info, &mut buffer)?; - ta_req_info.out_shm_info[i] = Some(shm_info); - UteeParamOwned::MemrefInout { - data: buffer.into(), - buffer_size, - } + ta_req_info.out_shm_info[i] = Some(shm_info.clone()); + build_memref_inout(&shm_info, buffer_size)? } OpteeMsgAttrType::RmemInout => { let rmem = param.get_param_rmem().ok_or(OpteeSmcReturn::EBadCmd)?; let shm_info = get_shm_info_from_optee_msg_param_rmem(rmem)?; let buffer_size = rmem.size.truncate(); - let mut buffer = alloc::vec![0u8; buffer_size]; - read_data_from_shm(&shm_info, &mut buffer)?; - ta_req_info.out_shm_info[i] = Some(shm_info); - UteeParamOwned::MemrefInout { - data: buffer.into(), - buffer_size, - } + ta_req_info.out_shm_info[i] = Some(shm_info.clone()); + build_memref_inout(&shm_info, buffer_size)? } _ => return Err(OpteeSmcReturn::EBadCmd), }; @@ -350,6 +334,29 @@ pub fn decode_ta_request( Ok(ta_req_info) } +#[inline] +fn build_memref_input( + shm_info: &ShmInfo, + data_size: usize, +) -> Result { + let mut data = alloc::vec![0u8; data_size]; + read_data_from_shm(shm_info, &mut data)?; + Ok(UteeParamOwned::MemrefInput { data: data.into() }) +} + +#[inline] +fn build_memref_inout( + shm_info: &ShmInfo, + buffer_size: usize, +) -> Result { + let mut buffer = alloc::vec![0u8; buffer_size]; + read_data_from_shm(shm_info, &mut buffer)?; + Ok(UteeParamOwned::MemrefInout { + data: buffer.into(), + buffer_size, + }) +} + /// This function prepares for returning from OP-TEE secure world to the normal world. /// /// It writes back TA execution outputs associated with shared memory references and updates