From d4626b2555d66eba238d919305742a84feb6e1e7 Mon Sep 17 00:00:00 2001 From: Femi Adeyemi Date: Tue, 3 Feb 2026 00:57:50 +0000 Subject: [PATCH] Parse attributes in extended VTL0 APIs VTL0 sends data to the secure kernel during and after initial boot. Use the new attributes parameter to extend load_kdata, allowing it to process data after boot and use the improved format for sending data from VTL0. Small, simple data buffers like certificates can be sent efficiently with the same APIs used for larger aggregated data like module info. The new data format is leveraged to reduce the size of the kernel string table passed from ~10MB (all of rodata) to ~250KB. The validate_module and validate_kexec APIs are extended to use the new attributes. --- litebox_platform_lvbs/src/mshv/error.rs | 22 +- litebox_platform_lvbs/src/mshv/heki.rs | 218 +----- litebox_platform_lvbs/src/mshv/heki_data.rs | 180 +++++ litebox_platform_lvbs/src/mshv/mod.rs | 1 + litebox_platform_lvbs/src/mshv/vsm.rs | 740 +++++++++++--------- 5 files changed, 626 insertions(+), 535 deletions(-) create mode 100644 litebox_platform_lvbs/src/mshv/heki_data.rs diff --git a/litebox_platform_lvbs/src/mshv/error.rs b/litebox_platform_lvbs/src/mshv/error.rs index 5d6274be5..ddfe51056 100644 --- a/litebox_platform_lvbs/src/mshv/error.rs +++ b/litebox_platform_lvbs/src/mshv/error.rs @@ -152,6 +152,21 @@ pub enum VsmError { #[error("symbol name contains invalid UTF-8")] SymbolNameInvalidUtf8, + + #[error("invalid API attribute")] + ApiAttrInvalid, + + #[error("invalid symbol info type")] + SymbolInfoTypeInvalid, + + #[error("invalid permissions info type")] + PermInfoTypeInvalid, + + #[error("invalid patch type")] + PatchTypeInvalid, + + #[error("invalid data page")] + DataPageInvalid, } impl From for VsmError { @@ -217,7 +232,12 @@ impl From for Errno { | VsmError::SymbolNameInvalidUtf8 | VsmError::SymbolNameNoTerminator | VsmError::CertificateDerLengthInvalid { .. } - | VsmError::CertificateParseFailed => Errno::EINVAL, + | VsmError::CertificateParseFailed + | VsmError::ApiAttrInvalid + | VsmError::SymbolInfoTypeInvalid + | VsmError::PermInfoTypeInvalid + | VsmError::DataPageInvalid + | VsmError::PatchTypeInvalid => Errno::EINVAL, // Signature verification failures delegate to VerificationError's Errno mapping VsmError::SignatureVerificationFailed(e) => Errno::from(e), diff --git a/litebox_platform_lvbs/src/mshv/heki.rs b/litebox_platform_lvbs/src/mshv/heki.rs index 5314992c8..8106c92ef 100644 --- a/litebox_platform_lvbs/src/mshv/heki.rs +++ b/litebox_platform_lvbs/src/mshv/heki.rs @@ -7,11 +7,13 @@ use crate::{ }; use core::mem; use litebox::utils::TruncateExt; +use modular_bitfield::Specifier; use num_enum::TryFromPrimitive; use x86_64::{ - PhysAddr, VirtAddr, + PhysAddr, structures::paging::{PageSize, Size4KiB}, }; +use zerocopy::{FromBytes, Immutable, KnownLayout}; bitflags::bitflags! 
{ #[derive(Clone, Copy, Debug, PartialEq)] @@ -42,8 +44,9 @@ pub(crate) fn mem_attr_to_hv_page_prot_flags(attr: MemAttr) -> HvPageProtFlags { flags } -#[derive(Default, Debug, TryFromPrimitive, PartialEq)] -#[repr(u64)] +#[derive(Default, Debug, TryFromPrimitive, PartialEq, Specifier)] +#[bits = 16] +#[repr(u16)] pub enum HekiKdataType { SystemCerts = 0, RevocationCerts = 1, @@ -52,22 +55,36 @@ pub enum HekiKdataType { KernelData = 4, PatchInfo = 5, KexecTrampoline = 6, + SymbolInfo = 7, + ModuleInfo = 8, + PermInfo = 9, + KexecInfo = 10, + DataPage = 0xff, #[default] - Unknown = 0xffff_ffff_ffff_ffff, + Unknown = 0xffff, +} + +#[derive(Debug, TryFromPrimitive, PartialEq)] +#[repr(u16)] +pub enum HekiSymbolInfoType { + SymbolTable = 0, + GplSymbolTable = 1, + SymbolStringTable = 2, + Unknown = 0xffff, } #[derive(Default, Debug, TryFromPrimitive, PartialEq)] -#[repr(u64)] +#[repr(u16)] pub enum HekiKexecType { KexecImage = 0, KexecKernelBlob = 1, KexecPages = 2, #[default] - Unknown = 0xffff_ffff_ffff_ffff, + Unknown = 0xffff, } #[derive(Clone, Copy, Default, Debug, TryFromPrimitive, PartialEq)] -#[repr(u64)] +#[repr(u16)] pub enum ModMemType { Text = 0, Data = 1, @@ -79,7 +96,7 @@ pub enum ModMemType { ElfBuffer = 7, Patch = 8, #[default] - Unknown = 0xffff_ffff_ffff_ffff, + Unknown = 0xffff, } pub(crate) fn mod_mem_type_to_mem_attr(mod_mem_type: ModMemType) -> MemAttr { @@ -103,150 +120,6 @@ pub(crate) fn mod_mem_type_to_mem_attr(mod_mem_type: ModMemType) -> MemAttr { mem_attr } -/// `HekiRange` is a generic container for various types of memory ranges. -/// It has an `attributes` field which can be interpreted differently based on the context like -/// `MemAttr`, `KdataType`, `ModMemType`, or `KexecType`. -#[derive(Default, Clone, Copy)] -#[repr(C, packed)] -pub struct HekiRange { - pub va: u64, - pub pa: u64, - pub epa: u64, - pub attributes: u64, -} - -impl HekiRange { - #[inline] - pub fn is_aligned(&self, align: U) -> bool - where - U: Into + Copy, - { - let va = self.va; - let pa = self.pa; - let epa = self.epa; - - VirtAddr::new(va).is_aligned(align) - && PhysAddr::new(pa).is_aligned(align) - && PhysAddr::new(epa).is_aligned(align) - } - - #[inline] - pub fn mem_attr(&self) -> Option { - let attr = self.attributes; - MemAttr::from_bits(attr) - } - - #[inline] - pub fn mod_mem_type(&self) -> ModMemType { - let attr = self.attributes; - ModMemType::try_from(attr).unwrap_or(ModMemType::Unknown) - } - - #[inline] - pub fn heki_kdata_type(&self) -> HekiKdataType { - let attr = self.attributes; - HekiKdataType::try_from(attr).unwrap_or(HekiKdataType::Unknown) - } - - #[inline] - pub fn heki_kexec_type(&self) -> HekiKexecType { - let attr = self.attributes; - HekiKexecType::try_from(attr).unwrap_or(HekiKexecType::Unknown) - } - - pub fn is_valid(&self) -> bool { - let va = self.va; - let pa = self.pa; - let epa = self.epa; - let Ok(pa) = PhysAddr::try_new(pa) else { - return false; - }; - let Ok(epa) = PhysAddr::try_new(epa) else { - return false; - }; - !(VirtAddr::try_new(va).is_err() - || epa < pa - || (self.mem_attr().is_none() - && self.heki_kdata_type() == HekiKdataType::Unknown - && self.heki_kexec_type() == HekiKexecType::Unknown - && self.mod_mem_type() == ModMemType::Unknown)) - } -} - -impl core::fmt::Debug for HekiRange { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - let va = self.va; - let pa = self.pa; - let epa = self.epa; - let attr = self.attributes; - f.debug_struct("HekiRange") - .field("va", &format_args!("{va:#x}")) - .field("pa", 
&format_args!("{pa:#x}")) - .field("epa", &format_args!("{epa:#x}")) - .field("attr", &format_args!("{attr:#x}")) - .field("type", &format_args!("{:?}", self.heki_kdata_type())) - .field("size", &format_args!("{:?}", self.epa - self.pa)) - .finish() - } -} - -#[expect(clippy::cast_possible_truncation)] -pub const HEKI_MAX_RANGES: usize = - ((PAGE_SIZE as u32 - u64::BITS * 3 / 8) / core::mem::size_of::() as u32) as usize; - -#[derive(Clone, Copy)] -#[repr(align(4096))] -#[repr(C)] -pub struct HekiPage { - pub next: *mut HekiPage, - pub next_pa: u64, - pub nranges: u64, - pub ranges: [HekiRange; HEKI_MAX_RANGES], - pad: u64, -} - -impl HekiPage { - pub fn new() -> Self { - HekiPage { - next: core::ptr::null_mut(), - ..Default::default() - } - } - - pub fn is_valid(&self) -> bool { - if PhysAddr::try_new(self.next_pa).is_err() { - return false; - } - let Some(nranges) = usize::try_from(self.nranges) - .ok() - .filter(|&n| n <= HEKI_MAX_RANGES) - else { - return false; - }; - for heki_range in &self.ranges[..nranges] { - if !heki_range.is_valid() { - return false; - } - } - true - } -} - -impl Default for HekiPage { - fn default() -> Self { - Self::new() - } -} - -impl<'a> IntoIterator for &'a HekiPage { - type Item = &'a HekiRange; - type IntoIter = core::slice::Iter<'a, HekiRange>; - - fn into_iter(self) -> Self::IntoIter { - self.ranges[..usize::try_from(self.nranges).unwrap_or(0)].iter() - } -} - #[derive(Default, Clone, Copy, Debug)] #[repr(C)] pub struct HekiPatch { @@ -304,12 +177,12 @@ impl HekiPatch { } } -#[derive(Default, Clone, Copy, Debug, PartialEq)] -#[repr(u32)] +#[derive(Default, Clone, Copy, Debug, PartialEq, TryFromPrimitive)] +#[repr(u16)] pub enum HekiPatchType { JumpLabel = 0, #[default] - Unknown = 0xffff_ffff, + Unknown = 0xffff, } #[derive(Clone, Copy, Debug)] @@ -348,6 +221,7 @@ impl HekiPatchInfo { } } +#[derive(FromBytes, KnownLayout, Immutable)] #[repr(C)] #[allow(clippy::struct_field_names)] // TODO: Account for kernel config changing the size and meaning of the field members @@ -380,37 +254,3 @@ impl HekiKernelSymbol { } } } - -#[repr(C)] -#[allow(clippy::struct_field_names)] -pub struct HekiKernelInfo { - pub ksymtab_start: *const HekiKernelSymbol, - pub ksymtab_end: *const HekiKernelSymbol, - pub ksymtab_gpl_start: *const HekiKernelSymbol, - pub ksymtab_gpl_end: *const HekiKernelSymbol, - // Skip unused arch info -} - -impl HekiKernelInfo { - const KINFO_LEN: usize = mem::size_of::(); - - pub fn from_bytes(bytes: &[u8]) -> Result { - if bytes.len() < Self::KINFO_LEN { - return Err(VsmError::BufferTooSmall("HekiKernelInfo")); - } - - #[allow(clippy::cast_ptr_alignment)] - let kinfo_ptr = bytes.as_ptr().cast::(); - assert!(kinfo_ptr.is_aligned(), "kinfo_ptr is not aligned"); - - // SAFETY: Casting from vtl0 buffer that contained the struct - unsafe { - Ok(HekiKernelInfo { - ksymtab_start: (*kinfo_ptr).ksymtab_start, - ksymtab_end: (*kinfo_ptr).ksymtab_end, - ksymtab_gpl_start: (*kinfo_ptr).ksymtab_gpl_start, - ksymtab_gpl_end: (*kinfo_ptr).ksymtab_gpl_end, - }) - } - } -} diff --git a/litebox_platform_lvbs/src/mshv/heki_data.rs b/litebox_platform_lvbs/src/mshv/heki_data.rs new file mode 100644 index 000000000..9e15659ec --- /dev/null +++ b/litebox_platform_lvbs/src/mshv/heki_data.rs @@ -0,0 +1,180 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// modular_bitfield generates warning. There is an updated crate +// with fix, but we are locked to current version. 
+#![allow(unused_parens)] + +use modular_bitfield::{ + bitfield, + prelude::{B8, B32}, +}; +use zerocopy::{FromBytes, Immutable, KnownLayout}; + +use crate::mshv::{ + error::VsmError, + heki::{HekiKdataType, HekiKexecType, HekiSymbolInfoType, ModMemType}, + vtl1_mem_layout::PAGE_SIZE, +}; + +#[bitfield(bits = 64)] +#[derive(Debug, Clone, Copy, Default)] +#[repr(u64)] +pub struct HekiDataAttr { + #[skip(setters)] + pub dtype: HekiKdataType, + #[skip(setters)] + pub size: B32, + pub dflags: B8, + #[skip] + __: B8, +} + +#[repr(C)] +#[derive(FromBytes, Immutable, KnownLayout)] +pub struct HekiDataRange { + pub va: u64, + pub pa: u64, + pub epa: u64, + data_type: u16, + pub attr: u16, + rsvd: u32, +} + +impl core::fmt::Debug for HekiDataRange { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let data_type = self.data_type; + let va = self.va; + let pa = self.pa; + let epa = self.epa; + let attr = self.attr; + f.debug_struct("HekiDataRange") + .field("va", &format_args!("{va:#x}")) + .field("pa", &format_args!("{pa:#x}")) + .field("epa", &format_args!("{epa:#x}")) + .field("attr", &format_args!("{attr:#x}")) + .field("type", &format_args!("{data_type}")) + .field("size", &format_args!("{:?}", self.epa - self.pa)) + .finish_non_exhaustive() + } +} + +/// `HekiDataRange` is a generic container for various types of memory ranges. +/// It has a context-specific `attributes` +impl HekiDataRange { + #[inline] + pub fn heki_symbol_info_type(&self) -> HekiSymbolInfoType { + HekiSymbolInfoType::try_from(self.data_type).unwrap_or(HekiSymbolInfoType::Unknown) + } + + #[inline] + pub fn heki_kdata_type(&self) -> HekiKdataType { + HekiKdataType::try_from(self.data_type).unwrap_or(HekiKdataType::Unknown) + } + + #[inline] + pub fn heki_mod_mem_type(&self) -> ModMemType { + ModMemType::try_from(self.data_type).unwrap_or(ModMemType::Unknown) + } + + #[inline] + pub fn heki_kexec_type(&self) -> HekiKexecType { + HekiKexecType::try_from(self.data_type).unwrap_or(HekiKexecType::Unknown) + } +} + +#[repr(C)] +#[derive(FromBytes, Immutable, KnownLayout)] +pub struct HekiDataHdr { + data_type: u16, + range_count: u16, + rsvd: u32, + next: u64, + next_pa: u64, +} + +#[repr(C)] +#[derive(FromBytes, Immutable, KnownLayout)] +pub struct HekiDataPage { + hdr: HekiDataHdr, + range: [HekiDataRange; Self::MAX_RANGE_COUNT], +} + +impl core::fmt::Debug for HekiDataPage { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let data_type = + HekiKdataType::try_from(self.hdr.data_type).unwrap_or(HekiKdataType::Unknown); + let count = self.hdr.range_count; + let next = self.hdr.next_pa; + f.debug_struct("HekiDataPage") + .field("type", &format_args!("{data_type:#?}")) + .field("ranges", &format_args!("{count}")) + .field("next", &format_args!("{next:#x}")) + .finish_non_exhaustive() + } +} + +impl HekiDataPage { + const SIZE: usize = PAGE_SIZE; + const MAX_RANGE_COUNT: usize = + (Self::SIZE - size_of::()) / size_of::(); + + pub fn from_bytes(bytes: &[u8]) -> Result<&Self, VsmError> { + let (data_page, _) = + HekiDataPage::ref_from_prefix(bytes).map_err(|_| VsmError::DataPageInvalid)?; + Ok(data_page) + } + + pub fn kdata_type(&self) -> HekiKdataType { + HekiKdataType::try_from(self.hdr.data_type).unwrap_or(HekiKdataType::Unknown) + } + + pub fn next(&self) -> Option<(u64, u64, usize)> { + if self.hdr.next_pa != 0 { + Some((self.hdr.next, self.hdr.next_pa, Self::SIZE)) + } else { + None + } + } +} + +impl<'a> IntoIterator for &'a HekiDataPage { + type Item = (u16, &'a 
[HekiDataRange]); + type IntoIter = HekiDataPageIter<'a>; + + fn into_iter(self) -> Self::IntoIter { + HekiDataPageIter { + range_index: 0, + range: &self.range[0..self.hdr.range_count as usize], + } + } +} + +pub struct HekiDataPageIter<'a> { + range_index: usize, + range: &'a [HekiDataRange], +} + +impl<'a> Iterator for HekiDataPageIter<'a> { + type Item = (u16, &'a [HekiDataRange]); + + fn next(&mut self) -> Option { + if self.range_index == self.range.len() { + None + } else { + let start_index = self.range_index; + let range_type = self.range[start_index].data_type; + let mut end_index: usize = start_index + 1; + + while end_index < self.range.len() { + if self.range[end_index].data_type == range_type { + end_index += 1; + } else { + break; + } + } + self.range_index = end_index; + Some((range_type, &self.range[start_index..end_index])) + } + } +} diff --git a/litebox_platform_lvbs/src/mshv/mod.rs b/litebox_platform_lvbs/src/mshv/mod.rs index 0acd5e29e..b75ac7844 100644 --- a/litebox_platform_lvbs/src/mshv/mod.rs +++ b/litebox_platform_lvbs/src/mshv/mod.rs @@ -5,6 +5,7 @@ pub mod error; pub(crate) mod heki; +pub(crate) mod heki_data; pub mod hvcall; mod hvcall_mm; mod hvcall_vp; diff --git a/litebox_platform_lvbs/src/mshv/vsm.rs b/litebox_platform_lvbs/src/mshv/vsm.rs index 51ee92afa..bd13975f5 100644 --- a/litebox_platform_lvbs/src/mshv/vsm.rs +++ b/litebox_platform_lvbs/src/mshv/vsm.rs @@ -3,6 +3,8 @@ //! VSM functions +use crate::mshv::heki::{HekiPatchType, HekiSymbolInfoType}; +use crate::mshv::heki_data::{HekiDataAttr, HekiDataPage, HekiDataRange}; #[cfg(debug_assertions)] use crate::mshv::mem_integrity::parse_modinfo; use crate::mshv::ringbuffer::set_ringbuffer; @@ -25,9 +27,8 @@ use crate::{ HvRegisterVsmVpSecureVtlConfig, VsmFunction, X86Cr0Flags, X86Cr4Flags, error::VsmError, heki::{ - HekiKdataType, HekiKernelInfo, HekiKernelSymbol, HekiKexecType, HekiPage, HekiPatch, - HekiPatchInfo, HekiRange, MemAttr, ModMemType, mem_attr_to_hv_page_prot_flags, - mod_mem_type_to_mem_attr, + HekiKdataType, HekiKernelSymbol, HekiKexecType, HekiPatch, HekiPatchInfo, MemAttr, + ModMemType, mem_attr_to_hv_page_prot_flags, mod_mem_type_to_mem_attr, }, hvcall::HypervCallError, hvcall_mm::hv_modify_vtl_protection_mask, @@ -40,7 +41,7 @@ use crate::{ vtl1_mem_layout::{PAGE_SHIFT, PAGE_SIZE}, }, }; -use alloc::{boxed::Box, ffi::CString, string::String, vec::Vec}; +use alloc::{boxed::Box, ffi::CString, string::String, vec, vec::Vec}; use core::{ mem, ops::Range, @@ -56,6 +57,7 @@ use x86_64::{ structures::paging::{PageSize, PhysFrame, Size4KiB, frame::PhysFrameRange}, }; use x509_cert::{Certificate, der::Decode}; +use zerocopy::{FromBytes, Immutable, KnownLayout}; #[derive(Copy, Clone)] #[repr(align(4096))] @@ -254,12 +256,14 @@ pub fn mshv_vsm_end_of_boot() -> i64 { /// VSM function for protecting certain memory ranges (e.g., kernel text, data, heap). /// `pa` and `nranges` specify a memory area containing the information about the memory ranges to protect. 
-pub fn mshv_vsm_protect_memory(pa: u64, nranges: u64) -> Result { +pub fn mshv_vsm_protect_memory(pa: u64, va: u64, attr: u64) -> Result { + if attr == 0 { + return Err(VsmError::ApiAttrInvalid); + } if PhysAddr::try_new(pa) .ok() .filter(|p| p.is_aligned(Size4KiB::SIZE)) .is_none() - || nranges == 0 { return Err(VsmError::InvalidInputAddress); } @@ -270,39 +274,36 @@ pub fn mshv_vsm_protect_memory(pa: u64, nranges: u64) -> Result { )); } - let heki_pages = copy_heki_pages_from_vtl0(pa, nranges).ok_or(VsmError::HekiPagesCopyFailed)?; - - for heki_page in heki_pages { - for heki_range in &heki_page { - let pa = heki_range.pa; - let epa = heki_range.epa; - let mem_attr = heki_range - .mem_attr() - .ok_or(VsmError::MemoryAttributeInvalid)?; + let attr = HekiDataAttr::from_bytes(attr.to_le_bytes()); + let size = attr.size_or_err().map_err(|_| VsmError::ApiAttrInvalid)? as usize; - if !heki_range.is_aligned(Size4KiB::SIZE) { - return Err(VsmError::AddressNotPageAligned); + let (mut va, mut pa, mut size) = (va, pa, size); + loop { + let mem = MemoryContainer::from_vtl0_addr(va, pa, size)?; + let data_page = HekiDataPage::from_bytes(&mem)?; + for (_, ranges) in data_page { + for range in ranges { + if range.heki_kdata_type() != HekiKdataType::PermInfo { + return Err(VsmError::KernelDataTypeInvalid); + } + let Some(mem_attr) = MemAttr::from_bits(u64::from(range.attr)) else { + return Err(VsmError::PermInfoTypeInvalid); + }; + debug_serial_println!("VSM: Protect memory: {range:?}"); + + protect_physical_memory_range( + PhysFrame::range( + PhysFrame::containing_address(PhysAddr::new(range.pa)), + PhysFrame::containing_address(PhysAddr::new(range.epa)), + ), + mem_attr, + )?; } - - #[cfg(debug_assertions)] - let va = heki_range.va; - debug_serial_println!( - "VSM: Protect memory: va {:#x} pa {:#x} epa {:#x} {:?} (size: {})", - va, - pa, - epa, - mem_attr, - epa - pa - ); - - protect_physical_memory_range( - PhysFrame::range( - PhysFrame::containing_address(PhysAddr::new(pa)), - PhysFrame::containing_address(PhysAddr::new(epa)), - ), - mem_attr, - )?; } + let Some(next_data_page_addr) = data_page.next() else { + break; + }; + (va, pa, size) = next_data_page_addr; } Ok(0) } @@ -332,128 +333,29 @@ fn parse_certs(mut buf: &[u8]) -> Result, VsmError> { /// VSM function for loading kernel data (e.g., certificates, blocklist, kernel symbols) into VTL1. /// `pa` and `nranges` specify memory areas containing the information about the memory ranges to load. 
-pub fn mshv_vsm_load_kdata(pa: u64, nranges: u64) -> Result { +pub fn mshv_vsm_load_kdata(pa: u64, va: u64, attr: u64) -> Result { + if attr == 0 { + return Err(VsmError::ApiAttrInvalid); + } if PhysAddr::try_new(pa) .ok() .filter(|p| p.is_aligned(Size4KiB::SIZE)) .is_none() - || nranges == 0 { return Err(VsmError::InvalidInputAddress); } - if crate::platform_low().vtl0_kernel_info.check_end_of_boot() { - return Err(VsmError::OperationAfterEndOfBoot("loading kernel data")); - } - - let vtl0_info = &crate::platform_low().vtl0_kernel_info; - - let mut system_certs_mem = MemoryContainer::new(); - let mut kexec_trampoline_metadata = KexecMemoryMetadata::new(); - let mut patch_info_mem = MemoryContainer::new(); - let mut kinfo_mem = MemoryContainer::new(); - let mut kdata_mem = MemoryContainer::new(); - - let heki_pages = copy_heki_pages_from_vtl0(pa, nranges).ok_or(VsmError::HekiPagesCopyFailed)?; - - for heki_page in &heki_pages { - for heki_range in heki_page { - debug_serial_println!("VSM: Load kernel data {heki_range:?}"); - match heki_range.heki_kdata_type() { - HekiKdataType::SystemCerts => system_certs_mem - .extend_range(heki_range) - .map_err(|_| VsmError::InvalidInputAddress)?, - HekiKdataType::KexecTrampoline => { - kexec_trampoline_metadata.insert_heki_range(heki_range); - } - HekiKdataType::PatchInfo => patch_info_mem - .extend_range(heki_range) - .map_err(|_| VsmError::InvalidInputAddress)?, - HekiKdataType::KernelInfo => kinfo_mem - .extend_range(heki_range) - .map_err(|_| VsmError::InvalidInputAddress)?, - HekiKdataType::KernelData => kdata_mem - .extend_range(heki_range) - .map_err(|_| VsmError::InvalidInputAddress)?, - HekiKdataType::Unknown => { - return Err(VsmError::KernelDataTypeInvalid); - } - _ => { - debug_serial_println!("VSM: Unsupported kernel data not loaded {heki_range:?}"); - } - } - } - } + let attr = HekiDataAttr::from_bytes(attr.to_le_bytes()); + let data_type = attr.dtype_or_err().unwrap_or(HekiKdataType::Unknown); + let size = attr.size_or_err().map_err(|_| VsmError::ApiAttrInvalid)? as usize; - system_certs_mem - .write_bytes_from_heki_range() - .map_err(|_| VsmError::Vtl0CopyFailed)?; - patch_info_mem - .write_bytes_from_heki_range() - .map_err(|_| VsmError::Vtl0CopyFailed)?; - kinfo_mem - .write_bytes_from_heki_range() - .map_err(|_| VsmError::Vtl0CopyFailed)?; - kdata_mem - .write_bytes_from_heki_range() - .map_err(|_| VsmError::Vtl0CopyFailed)?; + let mem = MemoryContainer::from_vtl0_addr(va, pa, size)?; - if system_certs_mem.is_empty() { - return Err(VsmError::SystemCertificatesNotFound); + match data_type { + HekiKdataType::DataPage => vsm_load_data_page(&mem), + HekiKdataType::Unknown => Err(VsmError::KernelDataTypeInvalid), + _ => vsm_load_data_buf(data_type, &mem), } - - let cert_buf = &system_certs_mem[..]; - let certs = parse_certs(cert_buf)?; - - if certs.is_empty() { - return Err(VsmError::SystemCertificatesInvalid); - } - - // The system certificate is loaded into VTL1 and locked down before `end_of_boot` is signaled. - // Its integrity depends on UEFI Secure Boot which ensures only trusted software is loaded during - // the boot process. 
- vtl0_info.set_system_certificates(certs.clone()); - debug_serial_println!("VSM: Loaded {} system certificate(s)", certs.len()); - - for kexec_trampoline_range in &kexec_trampoline_metadata { - protect_physical_memory_range( - kexec_trampoline_range.phys_frame_range, - MemAttr::MEM_ATTR_READ, - )?; - } - - // pre-computed patch data for the kernel text - if !patch_info_mem.is_empty() { - let patch_info_buf = &patch_info_mem[..]; - vtl0_info - .precomputed_patches - .insert_patch_data_from_bytes(patch_info_buf, None) - .map_err(|_| VsmError::Vtl0CopyFailed)?; - } - - if kinfo_mem.is_empty() || kdata_mem.is_empty() { - return Err(VsmError::KernelSymbolTableNotFound); - } - - let kinfo_buf = &kinfo_mem[..]; - let kdata_buf = &kdata_mem[..]; - let kinfo = HekiKernelInfo::from_bytes(kinfo_buf)?; - - vtl0_info.gpl_symbols.build_from_container( - VirtAddr::from_ptr(kinfo.ksymtab_gpl_start), - VirtAddr::from_ptr(kinfo.ksymtab_gpl_end), - &kdata_mem, - kdata_buf, - )?; - - vtl0_info.symbols.build_from_container( - VirtAddr::from_ptr(kinfo.ksymtab_start), - VirtAddr::from_ptr(kinfo.ksymtab_end), - &kdata_mem, - kdata_buf, - )?; - - Ok(0) // TODO: create blocklist keys // TODO: save blocklist hashes } @@ -462,21 +364,24 @@ pub fn mshv_vsm_load_kdata(pa: u64, nranges: u64) -> Result { /// `pa` and `nranges` specify a memory area containing the information about the kernel module to validate or protect. /// `flags` controls the validation process (unused for now). /// This function returns a unique `token` to VTL0, which is used to identify the module in subsequent calls. -pub fn mshv_vsm_validate_guest_module(pa: u64, nranges: u64, _flags: u64) -> Result { +pub fn mshv_vsm_validate_guest_module(pa: u64, va: u64, attr: u64) -> Result { + if attr == 0 { + return Err(VsmError::ApiAttrInvalid); + } if PhysAddr::try_new(pa) .ok() .filter(|p| p.is_aligned(Size4KiB::SIZE)) .is_none() - || nranges == 0 { return Err(VsmError::InvalidInputAddress); } - debug_serial_println!( - "VSM: Validate kernel module: pa {:#x} nranges {}", - pa, - nranges, - ); + let attr = HekiDataAttr::from_bytes(attr.to_le_bytes()); + let data_type = attr.dtype_or_err().unwrap_or(HekiKdataType::Unknown); + let size = attr.size_or_err().map_err(|_| VsmError::ApiAttrInvalid)? 
as usize; + if data_type != HekiKdataType::DataPage { + return Err(VsmError::DataPageInvalid); + } let certs = crate::platform_low() .vtl0_kernel_info @@ -493,32 +398,35 @@ pub fn mshv_vsm_validate_guest_module(pa: u64, nranges: u64, _flags: u64) -> Res // patch info for the kernel module let mut patch_info_for_module = MemoryContainer::new(); - let heki_pages = copy_heki_pages_from_vtl0(pa, nranges).ok_or(VsmError::HekiPagesCopyFailed)?; - - for heki_page in &heki_pages { - for heki_range in heki_page { - match heki_range.mod_mem_type() { - ModMemType::Unknown => { - return Err(VsmError::ModuleMemoryTypeInvalid); - } - ModMemType::ElfBuffer => module_as_elf - .extend_range(heki_range) - .map_err(|_| VsmError::InvalidInputAddress)?, - ModMemType::Patch => patch_info_for_module - .extend_range(heki_range) - .map_err(|_| VsmError::InvalidInputAddress)?, - _ => { - // if input memory range's type is neither `Unknown` nor `ElfBuffer`, its addresses must be page-aligned - if !heki_range.is_aligned(Size4KiB::SIZE) { - return Err(VsmError::AddressNotPageAligned); + let (mut va, mut pa, mut size) = (va, pa, size); + loop { + let mem = MemoryContainer::from_vtl0_addr(va, pa, size)?; + let data_page = HekiDataPage::from_bytes(&mem)?; + for (_, ranges) in data_page { + for range in ranges { + match range.heki_mod_mem_type() { + ModMemType::ElfBuffer => module_as_elf + .extend_data_range(range) + .map_err(|_| VsmError::InvalidInputAddress)?, + ModMemType::Patch => patch_info_for_module + .extend_data_range(range) + .map_err(|_| VsmError::InvalidInputAddress)?, + ModMemType::Unknown => { + return Err(VsmError::ModuleMemoryTypeInvalid); + } + mod_mem_type => { + module_memory_metadata.insert_data_range(mod_mem_type, range); + module_in_memory + .extend_data_range(mod_mem_type, range) + .map_err(|_| VsmError::InvalidInputAddress)?; } - module_memory_metadata.insert_heki_range(heki_range); - module_in_memory - .extend_range(heki_range.mod_mem_type(), heki_range) - .map_err(|_| VsmError::InvalidInputAddress)?; } } } + let Some(next_data_page_addr) = data_page.next() else { + break; + }; + (va, pa, size) = next_data_page_addr; } module_as_elf @@ -679,20 +587,21 @@ pub fn mshv_vsm_copy_secondary_key(_pa: u64, _nranges: u64) -> Result Result { - debug_serial_println!( - "VSM: Validate kexec pa {:#x} nranges {} crash {}", - pa, - nranges, - crash - ); +pub fn mshv_vsm_kexec_validate(pa: u64, va: u64, attr: u64) -> Result { + if attr == 0 { + return Err(VsmError::ApiAttrInvalid); + } + let attr = HekiDataAttr::from_bytes(attr.to_le_bytes()); + let size = attr.size_or_err().map_err(|_| VsmError::ApiAttrInvalid)? 
as usize; + + debug_serial_println!("VSM: Validate kexec pa {pa:#x} va {va:#x} attr {attr:?}"); let certs = crate::platform_low() .vtl0_kernel_info .get_system_certificates() .ok_or(VsmError::SystemCertificatesNotLoaded)?; - let is_crash = crash != 0; + let is_crash = attr.dflags() != 0; let kexec_metadata_ref = if is_crash { &crate::platform_low().vtl0_kernel_info.crash_kexec_metadata } else { @@ -717,31 +626,34 @@ pub fn mshv_vsm_kexec_validate(pa: u64, nranges: u64, crash: u64) -> Result { - kexec_memory_metadata.insert_heki_range(heki_range); - kexec_image - .extend_range(heki_range) - .map_err(|_| VsmError::InvalidInputAddress)?; - } - HekiKexecType::KexecKernelBlob => - // we do not protect kexec kernel blob memory - { - kexec_kernel_blob - .extend_range(heki_range) - .map_err(|_| VsmError::InvalidInputAddress)?; - } - - HekiKexecType::KexecPages => kexec_memory_metadata.insert_heki_range(heki_range), - HekiKexecType::Unknown => { - return Err(VsmError::KexecTypeInvalid); + let (mut va, mut pa, mut size) = (va, pa, size); + loop { + let mem = MemoryContainer::from_vtl0_addr(va, pa, size)?; + let data_page = HekiDataPage::from_bytes(&mem)?; + for (_, ranges) in data_page { + for range in ranges { + match range.heki_kexec_type() { + HekiKexecType::KexecImage => { + kexec_memory_metadata.insert_data_range(range); + kexec_image + .extend_data_range(range) + .map_err(|_| VsmError::InvalidInputAddress)?; + } + // we do not protect kexec kernel blob memory + HekiKexecType::KexecKernelBlob => kexec_kernel_blob + .extend_data_range(range) + .map_err(|_| VsmError::InvalidInputAddress)?, + HekiKexecType::KexecPages => kexec_memory_metadata.insert_data_range(range), + HekiKexecType::Unknown => { + return Err(VsmError::KexecTypeInvalid); + } } } } + let Some(next_data_page_addr) = data_page.next() else { + break; + }; + (va, pa, size) = next_data_page_addr; } kexec_image @@ -927,6 +839,155 @@ fn mshv_vsm_allocate_ringbuffer_memory(phys_addr: u64, size: usize) -> Result +fn vsm_set_system_certs(mem: &MemoryContainer) -> Result { + if crate::platform_low().vtl0_kernel_info.check_end_of_boot() { + return Err(VsmError::OperationAfterEndOfBoot( + "setting system certificate", + )); + } + + let certs = parse_certs(mem)?; + if certs.is_empty() { + return Err(VsmError::SystemCertificatesInvalid); + } + // The system certificate is loaded into VTL1 and locked down before `end_of_boot` is signaled. + // Its integrity depends on UEFI Secure Boot which ensures only trusted software is loaded during + // the boot process.
+ crate::platform_low() + .vtl0_kernel_info + .set_system_certificates(certs.clone()); + Ok(0) +} + +fn vsm_protect_kexec_tramp(data_page: &HekiDataPage) -> Result { + if crate::platform_low().vtl0_kernel_info.check_end_of_boot() { + return Err(VsmError::OperationAfterEndOfBoot( + "protect kexec trampoline", + )); + } + + let mut kexec_trampoline_metadata = KexecMemoryMetadata::new(); + + for (_, ranges) in data_page { + for range in ranges { + kexec_trampoline_metadata.insert_data_range(range); + } + } + + for kexec_trampoline_range in &kexec_trampoline_metadata { + protect_physical_memory_range( + kexec_trampoline_range.phys_frame_range, + MemAttr::MEM_ATTR_READ, + )?; + } + + Ok(0) +} + +fn vsm_load_symbol_table(data_page: &HekiDataPage) -> Result { + if crate::platform_low().vtl0_kernel_info.check_end_of_boot() { + return Err(VsmError::KernelDataTypeInvalid); + } + + let mut sym_mem = MemoryContainer::new(); + let mut sym_gpl_mem = MemoryContainer::new(); + let mut sym_string_mem = MemoryContainer::new(); + + for (_, ranges) in data_page { + for range in ranges { + match range.heki_symbol_info_type() { + HekiSymbolInfoType::SymbolTable => sym_mem.extend_data_range(range)?, + HekiSymbolInfoType::GplSymbolTable => sym_gpl_mem.extend_data_range(range)?, + HekiSymbolInfoType::SymbolStringTable => sym_string_mem.extend_data_range(range)?, + HekiSymbolInfoType::Unknown => return Err(VsmError::SymbolInfoTypeInvalid), + } + } + } + + sym_mem + .write_bytes_from_heki_range() + .map_err(|_| VsmError::Vtl0CopyFailed)?; + sym_gpl_mem + .write_bytes_from_heki_range() + .map_err(|_| VsmError::Vtl0CopyFailed)?; + sym_string_mem + .write_bytes_from_heki_range() + .map_err(|_| VsmError::Vtl0CopyFailed)?; + + let vtl0_info = &crate::platform_low().vtl0_kernel_info; + vtl0_info + .symbols + .build_from_container(&sym_mem, &sym_string_mem)?; + + vtl0_info + .gpl_symbols + .build_from_container(&sym_gpl_mem, &sym_string_mem)?; + + Ok(0) +} + +fn vsm_load_patches(data_page: &HekiDataPage) -> Result { + if crate::platform_low().vtl0_kernel_info.check_end_of_boot() { + return Err(VsmError::OperationAfterEndOfBoot("load kernel patches")); + } + + let mut jump_label_mem = MemoryContainer::new(); + + for (range_type, ranges) in data_page { + let data_type = HekiPatchType::try_from(range_type).unwrap_or(HekiPatchType::Unknown); + for range in ranges { + match data_type { + HekiPatchType::JumpLabel => jump_label_mem.extend_data_range(range)?, + HekiPatchType::Unknown => return Err(VsmError::PatchTypeInvalid), + } + } + } + + jump_label_mem + .write_bytes_from_heki_range() + .map_err(|_| VsmError::Vtl0CopyFailed)?; + + if jump_label_mem.is_empty() { + return Ok(0); + } + + let vtl0_info = &crate::platform_low().vtl0_kernel_info; + let patch_info_buf = &jump_label_mem[..]; + vtl0_info + .precomputed_patches + .insert_patch_data_from_bytes(patch_info_buf, None) + .map_err(|_| VsmError::Vtl0CopyFailed)?; + + Ok(0) +} + +fn vsm_load_data_page(mem: &MemoryContainer) -> Result { + let data_page = HekiDataPage::from_bytes(mem)?; + let data_type = data_page.kdata_type(); + + debug_serial_println!("VSM: Load {data_type:?} data page"); + + let err = match data_type { + HekiKdataType::KexecTrampoline => vsm_protect_kexec_tramp(data_page)?, + HekiKdataType::SymbolInfo => vsm_load_symbol_table(data_page)?, + HekiKdataType::PatchInfo => vsm_load_patches(data_page)?, + //HekiKdataType::ModuleInfo => vsm_validate_guest_module(&data_page)?, + _ => 0, // TODO: Some "small" data types may get big enough to use data page + }; + 
Ok(err) +} + +fn vsm_load_data_buf(data_type: HekiKdataType, mem: &MemoryContainer) -> Result { + match data_type { + HekiKdataType::SystemCerts => vsm_set_system_certs(mem), + HekiKdataType::RevocationCerts | HekiKdataType::BlocklistHashes => { + debug_serial_println!("Handler for {data_type:?} unimplemented"); + Ok(0) + } + _ => Ok(0), + } +} + /// VSM function dispatcher pub fn vsm_dispatch(func_id: VsmFunction, params: &[u64]) -> i64 { let result: Result = match func_id { @@ -934,8 +995,8 @@ pub fn vsm_dispatch(func_id: VsmFunction, params: &[u64]) -> i64 { VsmFunction::BootAPs => mshv_vsm_boot_aps(params[0], params[1]), VsmFunction::LockRegs => mshv_vsm_lock_regs(), VsmFunction::SignalEndOfBoot => Ok(mshv_vsm_end_of_boot()), - VsmFunction::ProtectMemory => mshv_vsm_protect_memory(params[0], params[1]), - VsmFunction::LoadKData => mshv_vsm_load_kdata(params[0], params[1]), + VsmFunction::ProtectMemory => mshv_vsm_protect_memory(params[0], params[1], params[2]), + VsmFunction::LoadKData => mshv_vsm_load_kdata(params[0], params[1], params[2]), VsmFunction::ValidateModule => { mshv_vsm_validate_guest_module(params[0], params[1], params[2]) } @@ -1128,21 +1189,15 @@ impl ModuleMemoryMetadata { } #[inline] - pub(crate) fn insert_heki_range(&mut self, heki_range: &HekiRange) { - let va = heki_range.va; - let pa = heki_range.pa; - let epa = heki_range.epa; - self.insert_memory_range(ModuleMemoryRange::new( - va, - pa, - epa, - heki_range.mod_mem_type(), - )); + pub(crate) fn insert_memory_range(&mut self, mem_range: ModuleMemoryRange) { + self.ranges.push(mem_range); } #[inline] - pub(crate) fn insert_memory_range(&mut self, mem_range: ModuleMemoryRange) { - self.ranges.push(mem_range); + pub(crate) fn insert_data_range(&mut self, mem_type: ModMemType, range: &HekiDataRange) { + self.insert_memory_range(ModuleMemoryRange::new( + range.va, range.pa, range.epa, mem_type, + )); } #[inline] @@ -1291,28 +1346,6 @@ impl<'a> ModuleMemoryMetadataIters<'a> { } } -/// This function copies `HekiPage` structures from VTL0 and returns a vector of them. -/// `pa` and `nranges` specify the physical address range containing one or more than one `HekiPage` structures. -fn copy_heki_pages_from_vtl0(pa: u64, nranges: u64) -> Option> { - let mut next_pa = PhysAddr::new(pa); - let mut heki_pages = Vec::with_capacity(nranges.truncate()); - let mut range: u64 = 0; - - while range < nranges { - let heki_page = - (unsafe { crate::platform_low().copy_from_vtl0_phys::(next_pa) })?; - if !heki_page.is_valid() { - return None; - } - - range += heki_page.nranges; - next_pa = PhysAddr::new(heki_page.next_pa); - heki_pages.push(*heki_page); - } - - Some(heki_pages) -} - /// This function protects a physical memory range. It is a safe wrapper for `hv_modify_vtl_protection_mask`. 
/// `phys_frame_range` specifies the physical frame range to protect /// `mem_attr` specifies the memory attributes to be applied to the range @@ -1373,15 +1406,15 @@ impl ModuleMemory { Ok(()) } - pub(crate) fn extend_range( + pub(crate) fn extend_data_range( &mut self, mod_mem_type: ModMemType, - heki_range: &HekiRange, + range: &HekiDataRange, ) -> Result<(), VsmError> { match mod_mem_type { - ModMemType::Text => self.text.extend_range(heki_range)?, - ModMemType::InitText => self.init_text.extend_range(heki_range)?, - ModMemType::InitRoData => self.init_rodata.extend_range(heki_range)?, + ModMemType::Text => self.text.extend_data_range(range)?, + ModMemType::InitText => self.init_text.extend_data_range(range)?, + ModMemType::InitRoData => self.init_rodata.extend_data_range(range)?, _ => {} } Ok(()) @@ -1438,26 +1471,6 @@ impl MemoryContainer { }) } - pub(crate) fn extend_range(&mut self, heki_range: &HekiRange) -> Result<(), VsmError> { - let addr = VirtAddr::try_new(heki_range.va).map_err(|_| VsmError::InvalidVirtualAddress)?; - let phys_addr = - PhysAddr::try_new(heki_range.pa).map_err(|_| VsmError::InvalidPhysicalAddress)?; - if let Some(last_range) = self.range.last() - && last_range.addr + last_range.len != addr - { - debug_serial_println!("Discontiguous address found {heki_range:?}"); - // NOTE: Intentionally not returning an error here. - // TODO: This should be an error once patch_info is fixed from VTL0 - // It will simplify patch_info and heki_range parsing as well - } - self.range.push(MemoryRange { - addr, - phys_addr, - len: heki_range.epa - heki_range.pa, - }); - Ok(()) - } - /// Write physical memory bytes from VTL0 specified in `HekiRange` at the specified virtual address #[inline] pub(crate) fn write_bytes_from_heki_range(&mut self) -> Result<(), MemoryContainerError> { @@ -1504,6 +1517,46 @@ impl MemoryContainer { } Ok(()) } + + pub(crate) fn from_vtl0_addr(va: u64, pa: u64, size: usize) -> Result { + let phys_start = PhysAddr::try_new(pa).map_err(|_| VsmError::InvalidPhysicalAddress)?; + if !phys_start.is_aligned(Size4KiB::SIZE) { + return Err(VsmError::InvalidPhysicalAddress); + } + let phys_end = + PhysAddr::try_new(pa + size as u64).map_err(|_| VsmError::InvalidPhysicalAddress)?; + let addr = VirtAddr::try_new(va).map_err(|_| VsmError::InvalidVirtualAddress)?; + let mut mem = Self { + range: vec![MemoryRange { + addr, + phys_addr: phys_start, + len: phys_end - phys_start, + }], + buf: Vec::new(), + }; + mem.write_vtl0_phys_bytes(phys_start, phys_end) + .map_err(|_| VsmError::Vtl0CopyFailed)?; + Ok(mem) + } + + pub(crate) fn extend_data_range(&mut self, range: &HekiDataRange) -> Result<(), VsmError> { + let addr = VirtAddr::try_new(range.va).map_err(|_| VsmError::InvalidVirtualAddress)?; + let phys_start = + PhysAddr::try_new(range.pa).map_err(|_| VsmError::InvalidPhysicalAddress)?; + + if let Some(last_range) = self.range.last() + && last_range.addr + last_range.len != addr + { + debug_serial_println!("Discontiguous address found {addr:#x}"); + // TODO: This should be an error once patch_info is fixed from VTL0 + } + self.range.push(MemoryRange { + addr, + phys_addr: phys_start, + len: range.epa - range.pa, + }); + Ok(()) + } } impl core::ops::Deref for MemoryContainer { @@ -1568,11 +1621,8 @@ impl KexecMemoryMetadata { } #[inline] - pub(crate) fn insert_heki_range(&mut self, heki_range: &HekiRange) { - let va = heki_range.va; - let pa = heki_range.pa; - let epa = heki_range.epa; - self.insert_memory_range(KexecMemoryRange::new(va, pa, epa)); + pub(crate) fn 
insert_data_range(&mut self, range: &HekiDataRange) { + self.insert_memory_range(KexecMemoryRange::new(range.va, range.pa, range.epa)); } #[inline] @@ -1773,57 +1823,6 @@ pub struct Symbol { _value: u64, } -impl Symbol { - /// Parse a symbol from a byte buffer. - pub fn from_bytes( - kinfo_start: usize, - start: VirtAddr, - bytes: &[u8], - ) -> Result<(String, Self), VsmError> { - let kinfo_bytes = &bytes[kinfo_start..]; - let ksym = HekiKernelSymbol::from_bytes(kinfo_bytes)?; - - let value_addr = start + mem::offset_of!(HekiKernelSymbol, value_offset) as u64; - let value = value_addr - .as_u64() - .wrapping_add_signed(i64::from(ksym.value_offset)); - - let name_offset = kinfo_start - + mem::offset_of!(HekiKernelSymbol, name_offset) - + usize::try_from(ksym.name_offset).map_err(|_| VsmError::SymbolNameOffsetInvalid)?; - - if name_offset >= bytes.len() { - return Err(VsmError::SymbolNameOffsetInvalid); - } - let name_len = bytes[name_offset..] - .iter() - .position(|&b| b == 0) - .ok_or(VsmError::SymbolNameNoTerminator)?; - if name_len >= HekiKernelSymbol::KSY_NAME_LEN { - return Err(VsmError::SymbolNameTooLong); - } - - // SAFETY: - // - offset is within bytes (checked above) - // - there is a NUL terminator within bytes[offset..] (checked above) - // - Length of name string is within spec range (checked above) - // - bytes is still valid for the duration of this function - let name_str = unsafe { - let name_ptr = bytes.as_ptr().add(name_offset).cast::(); - CStr::from_ptr(name_ptr) - }; - let name = CString::new( - name_str - .to_str() - .map_err(|_| VsmError::SymbolNameInvalidUtf8)?, - ) - .map_err(|_| VsmError::SymbolNameInvalidUtf8)?; - let name = name - .into_string() - .map_err(|_| VsmError::SymbolNameInvalidUtf8)?; - Ok((name, Symbol { _value: value })) - } -} pub struct SymbolTable { inner: spin::rwlock::RwLock>, } @@ -1842,41 +1841,92 @@ impl SymbolTable { } } - /// Build a symbol table from a memory container. 
pub fn build_from_container( &self, - start: VirtAddr, - end: VirtAddr, - mem: &MemoryContainer, - buf: &[u8], + sym_table: &MemoryContainer, + sym_str_table: &MemoryContainer, ) -> Result { - if mem.is_empty() { - return Err(VsmError::SymbolTableEmpty); + #[derive(FromBytes, KnownLayout, Immutable)] + #[repr(C)] + struct SymbolTable { + sym: [HekiKernelSymbol], } - let Some(range) = mem.get_range() else { - return Err(VsmError::SymbolTableEmpty); + let Some(sym_table_range) = sym_table.get_range() else { + return Err(VsmError::KernelSymbolTableNotFound); }; - if start < range.start || end > range.end { - return Err(VsmError::SymbolTableOutOfRange); - } - let kinfo_len: usize = (end - start).truncate(); - if !kinfo_len.is_multiple_of(HekiKernelSymbol::KSYM_LEN) { + if !sym_table.len().is_multiple_of(HekiKernelSymbol::KSYM_LEN) { return Err(VsmError::SymbolTableLengthInvalid); } - let mut kinfo_offset: usize = (start - range.start).truncate(); - let mut kinfo_addr = start; - let ksym_count = kinfo_len / HekiKernelSymbol::KSYM_LEN; + let ksym_count = sym_table.len() / HekiKernelSymbol::KSYM_LEN; let mut inner = self.inner.write(); inner.reserve(ksym_count); - for _ in 0..ksym_count { - let (name, sym) = Symbol::from_bytes(kinfo_offset, kinfo_addr, buf)?; - inner.insert(name, sym); - kinfo_offset += HekiKernelSymbol::KSYM_LEN; - kinfo_addr += HekiKernelSymbol::KSYM_LEN as u64; + let sym_table = SymbolTable::ref_from_bytes_with_elems(&sym_table[..], ksym_count) + .map_err(|_| VsmError::SymbolTableLengthInvalid)?; + let mut sym_table_addr = sym_table_range.start; + + for (i, ksym) in (sym_table.sym).iter().enumerate() { + let (name, sym) = Self::sym_from_bytes(ksym, sym_table_addr, sym_str_table)?; + if i < 2 { + debug_serial_println!("symbol:{name}"); + } + inner.insert(name, Symbol { _value: sym }); + sym_table_addr += HekiKernelSymbol::KSYM_LEN as u64; } Ok(0) } + + pub fn sym_from_bytes( + ksym: &HekiKernelSymbol, + addr: VirtAddr, + sym_str_table: &MemoryContainer, + ) -> Result<(String, u64), VsmError> { + let Some(sym_str_table_range) = sym_str_table.get_range() else { + return Err(VsmError::SymbolTableOutOfRange); + }; + + let bytes = &sym_str_table[..]; + let value_addr = addr + mem::offset_of!(HekiKernelSymbol, value_offset) as u64; + let value = value_addr + .as_u64() + .wrapping_add_signed(i64::from(ksym.value_offset)); + + let name_addr = addr + + mem::offset_of!(HekiKernelSymbol, name_offset) as u64 + + u64::try_from(ksym.name_offset).map_err(|_| VsmError::SymbolNameOffsetInvalid)?; + if name_addr < sym_str_table_range.start || name_addr >= sym_str_table_range.end { + return Err(VsmError::SymbolTableOutOfRange); + } + let name_offset = usize::try_from(name_addr - sym_str_table_range.start) + .map_err(|_| VsmError::SymbolNameOffsetInvalid)?; + let name_len = bytes[name_offset..] + .iter() + .position(|&b| b == 0) + .ok_or(VsmError::SymbolNameNoTerminator)?; + if name_len >= HekiKernelSymbol::KSY_NAME_LEN { + return Err(VsmError::SymbolNameTooLong); + } + + // SAFETY: + // - offset is within bytes (checked above) + // - there is a NUL terminator within bytes[offset..] 
(checked above) + // - Length of name string is within spec range (checked above) + // - bytes is still valid for the duration of this function + let name_str = unsafe { + let name_ptr = bytes.as_ptr().add(name_offset).cast::(); + CStr::from_ptr(name_ptr) + }; + let name = CString::new( + name_str + .to_str() + .map_err(|_| VsmError::SymbolNameInvalidUtf8)?, + ) + .map_err(|_| VsmError::SymbolNameInvalidUtf8)?; + let name = name + .into_string() + .map_err(|_| VsmError::SymbolNameInvalidUtf8)?; + Ok((name, value)) + } }
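
Reviewer note (illustrative only, not part of the diff): the sketch below mirrors the `HekiDataAttr` bitfield introduced in heki_data.rs (`dtype` in bits 0..16, `size` in bits 16..48, `dflags` in bits 48..56, top byte reserved) to show how a VTL0 caller would be expected to pack the `attr` word now passed to the extended ProtectMemory/LoadKData/ValidateModule/KexecValidate calls. The constant and helper names are invented for the example; only the bit positions and the `HekiKdataType` discriminants come from the patch.

// Illustrative sketch: pack the 64-bit `attr` word the way the VTL1 side
// decodes it via `HekiDataAttr::from_bytes(attr.to_le_bytes())`.

// `HekiKdataType::SystemCerts` raw discriminant (from heki.rs).
const KDATA_SYSTEM_CERTS: u64 = 0;
// `HekiKdataType::DataPage`: the buffer at (va, pa) is a chained
// `HekiDataPage` rather than a flat data buffer.
const KDATA_DATA_PAGE: u64 = 0xff;

// Pack (dtype, size, dflags) least-significant field first, matching the
// `#[bitfield(bits = 64)]` layout of `HekiDataAttr`.
fn pack_heki_data_attr(dtype: u64, size: u32, dflags: u8) -> u64 {
    (dtype & 0xffff) | (u64::from(size) << 16) | (u64::from(dflags) << 48)
}

fn main() {
    // A small flat buffer (e.g. the system certificates) is described by its
    // type and byte length alone; no data page is involved.
    let certs_attr = pack_heki_data_attr(KDATA_SYSTEM_CERTS, 0x1800, 0);
    assert_eq!(certs_attr & 0xffff, KDATA_SYSTEM_CERTS);
    assert_eq!((certs_attr >> 16) & 0xffff_ffff, 0x1800);

    // Aggregated payloads (symbol tables, module info, permissions) are sent
    // as a 4 KiB `HekiDataPage`; KexecValidate additionally reads the
    // `dflags` byte, treating a non-zero value as the crash-kexec flag.
    let page_attr = pack_heki_data_attr(KDATA_DATA_PAGE, 4096, 1);
    assert_eq!((page_attr >> 48) & 0xff, 1);
    println!("certs attr = {certs_attr:#018x}, data-page attr = {page_attr:#018x}");
}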
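
Reviewer note (illustrative only, not part of the diff): this second sketch reconstructs the on-page layout that `HekiDataPage::from_bytes` and its iterator consume, using plain byte arithmetic: a 24-byte `HekiDataHdr` (data_type u16, range_count u16, rsvd u32, next u64, next_pa u64) followed by 32-byte `HekiDataRange` entries, at most (4096 - 24) / 32 = 127 per page, chained through `next`/`next_pa`. The helper names and field values are made up; the offsets follow the `repr(C)` definitions in heki_data.rs.

const PAGE_SIZE: usize = 4096;
const HDR_SIZE: usize = 24;
const RANGE_SIZE: usize = 32;
const MAX_RANGE_COUNT: usize = (PAGE_SIZE - HDR_SIZE) / RANGE_SIZE; // 127

fn read_u16(buf: &[u8], off: usize) -> u16 {
    u16::from_le_bytes(buf[off..off + 2].try_into().unwrap())
}

fn read_u64(buf: &[u8], off: usize) -> u64 {
    u64::from_le_bytes(buf[off..off + 8].try_into().unwrap())
}

fn main() {
    let mut page = [0u8; PAGE_SIZE];
    // Header: data_type 9 (`HekiKdataType::PermInfo`), one range, and a zero
    // next/next_pa pair marking the last page of the chain.
    page[0..2].copy_from_slice(&9u16.to_le_bytes());
    page[2..4].copy_from_slice(&1u16.to_le_bytes());

    // First range entry: va/pa/epa for one 4 KiB region; `attr` would carry
    // `MemAttr` bits for PermInfo (the exact bit values are outside this diff).
    let r = HDR_SIZE;
    page[r..r + 8].copy_from_slice(&0xffff_8000_0010_0000u64.to_le_bytes()); // va
    page[r + 8..r + 16].copy_from_slice(&0x0010_0000u64.to_le_bytes()); // pa
    page[r + 16..r + 24].copy_from_slice(&0x0010_1000u64.to_le_bytes()); // epa
    page[r + 24..r + 26].copy_from_slice(&9u16.to_le_bytes()); // range data_type

    assert_eq!(read_u16(&page, 0), 9); // page-level kdata type
    assert!(usize::from(read_u16(&page, 2)) <= MAX_RANGE_COUNT);
    assert_eq!(read_u64(&page, 16), 0); // next_pa == 0: end of chain
    println!(
        "range pa..epa = {:#x}..{:#x}",
        read_u64(&page, r + 8),
        read_u64(&page, r + 16)
    );
}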