diff --git a/security-monitor/src/core/architecture/riscv/mmu/page_size.rs b/security-monitor/src/core/architecture/riscv/mmu/page_size.rs
index 3a550ed..8ac166c 100644
--- a/security-monitor/src/core/architecture/riscv/mmu/page_size.rs
+++ b/security-monitor/src/core/architecture/riscv/mmu/page_size.rs
@@ -8,6 +8,23 @@
 #[repr(u8)]
 #[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)]
 #[rr::refined_by("page_size")]
+// PEq
+#[rr::derive_instantiate("PEq" := "λ a b, bool_decide (a = b)")]
+// Eq
+#[rr::derive_instantiate("PEq_refl" := #proof "intros ??; solve_goal")]
+#[rr::derive_instantiate("PEq_sym" := #proof "intros ???; solve_goal")]
+#[rr::derive_instantiate("PEq_trans" := #proof "intros ????; solve_goal")]
+// POrd
+#[rr::derive_instantiate("POrd" := "λ a b, Some (page_size_cmp a b)")]
+#[rr::derive_instantiate("POrd_eq_cons" := #proof "intros ? [] []; simpl; done")]
+// Ord
+#[rr::derive_instantiate("Ord" := "page_size_cmp")]
+#[rr::derive_instantiate("Ord_POrd" := #proof "intros ???; solve_goal")]
+#[rr::derive_instantiate("Ord_lt_trans" := #proof "intros ????; solve_goal")]
+#[rr::derive_instantiate("Ord_eq_trans" := #proof "intros ????; solve_goal")]
+#[rr::derive_instantiate("Ord_gt_trans" := #proof "intros ????; solve_goal")]
+#[rr::derive_instantiate("Ord_leibniz" := #proof "intros ? [] []; simpl; done")]
+#[rr::derive_instantiate("Ord_antisym" := #proof "intros ???; solve_goal")]
 pub enum PageSize {
     #[rr::pattern("Size4KiB")]
     Size4KiB,
diff --git a/security-monitor/src/core/memory_layout/confidential_memory_address.rs b/security-monitor/src/core/memory_layout/confidential_memory_address.rs
index d9a6c8d..80681c9 100644
--- a/security-monitor/src/core/memory_layout/confidential_memory_address.rs
+++ b/security-monitor/src/core/memory_layout/confidential_memory_address.rs
@@ -6,7 +6,7 @@ use pointers_utility::{ptr_byte_add_mut, ptr_byte_offset};
 
 /// The wrapper over a raw pointer that is guaranteed to be an address located in the confidential memory region.
 #[repr(transparent)]
-#[derive(Debug, PartialEq)]
+#[derive(Debug, PartialEq, Clone, Copy)]
 /// Model: The memory address is represented by the location in memory.
 #[rr::refined_by("l" : "loc")]
 /// We require the ghost state for the global memory layout to be available.
@@ -70,12 +70,12 @@ impl ConfidentialMemoryAddress {
     /// Precondition: The offset address is in the given range.
     #[rr::requires("self.2 + offset_in_bytes < upper_bound.2")]
     /// Precondition: The maximum (and thus the offset address) is in the confidential memory range.
-    #[rr::requires("upper_bound.2 < MEMORY_CONFIG.(conf_end).2")]
+    #[rr::requires("upper_bound.2 ≤ MEMORY_CONFIG.(conf_end).2")]
     /// Postcondition: The offset pointer is in the confidential memory range.
     #[rr::ensures("ret = self +ₗ offset_in_bytes")]
     pub fn add(&self, offset_in_bytes: usize, upper_bound: *const usize) -> Result<ConfidentialMemoryAddress, Error> {
         let pointer = ptr_byte_add_mut(self.0, offset_in_bytes, upper_bound).map_err(|_| Error::AddressNotInConfidentialMemory())?;
-        Ok(ConfidentialMemoryAddress(pointer))
+        Ok(Self::new(pointer))
     }
 
     /// Reads usize-sized sequence of bytes from the confidential memory region.
diff --git a/security-monitor/src/core/memory_layout/mod.rs b/security-monitor/src/core/memory_layout/mod.rs
index ef16303..a66757b 100644
--- a/security-monitor/src/core/memory_layout/mod.rs
+++ b/security-monitor/src/core/memory_layout/mod.rs
@@ -149,10 +149,10 @@ impl MemoryLayout {
     /// Offsets an address in the confidential memory by a given number of bytes. Returns an error if the resulting
     /// address is outside the confidential memory region or exceeds the given upper bound.
-    #[rr::trust_me]
+    #[rr::only_spec]
     #[rr::ok]
     /// Precondition: The offset address is in confidential memory.
-    #[rr::requires("address.2 + offset_in_bytes < self.(conf_end).2")]
+    #[rr::requires("address.2 + offset_in_bytes < upper_bound.2")]
     /// Precondition: The bounds we are checking are within confidential memory.
     #[rr::requires("upper_bound.2 ≤ self.(conf_end).2")]
     /// Postcondition: Then we can correctly offset the address and ensure it is in confidential
@@ -162,7 +162,7 @@ impl MemoryLayout {
         &self, address: &ConfidentialMemoryAddress, offset_in_bytes: usize, upper_bound: *const usize,
     ) -> Result<ConfidentialMemoryAddress, Error> {
         ensure!(upper_bound <= self.confidential_memory_end, Error::AddressNotInConfidentialMemory())?;
-        Ok(self.confidential_address_at_offset(address, offset_in_bytes)?)
+        Ok(address.add(offset_in_bytes, upper_bound).map_err(|_| Error::AddressNotInConfidentialMemory())?)
     }
 
     /// Offsets an address in the non-confidential memory by given number of bytes. Returns an error if the resulting
@@ -199,8 +199,8 @@ impl MemoryLayout {
         // We can safely cast the below offset to usize because the constructor guarantees that the confidential memory
         // range is valid, and so the memory size must be a valid usize
         let memory_size = ptr_byte_offset(self.confidential_memory_end, self.confidential_memory_start) as usize;
-        let usize_alligned_offsets = (0..memory_size).step_by(core::mem::size_of::<usize>());
-        usize_alligned_offsets.for_each(|offset_in_bytes| {
+        let usize_aligned_offsets = (0..memory_size).step_by(core::mem::size_of::<usize>());
+        usize_aligned_offsets.for_each(|offset_in_bytes| {
             let _ = ptr_byte_add_mut(self.confidential_memory_start, offset_in_bytes, self.confidential_memory_end)
                 .and_then(|ptr| Ok(unsafe { ptr.write_volatile(0) }));
         });
diff --git a/security-monitor/src/core/page_allocator/allocator.rs b/security-monitor/src/core/page_allocator/allocator.rs
index c002abd..a663eb5 100644
--- a/security-monitor/src/core/page_allocator/allocator.rs
+++ b/security-monitor/src/core/page_allocator/allocator.rs
@@ -23,9 +23,8 @@ static PAGE_ALLOCATOR: Once<RwLock<PageAllocator>> = Once::new();
 /// token of the maximum size, and it will be then stored in the root node. It is reasonable as long as we do not support systems that have
 /// more memory than 128TiB. For such systems, we must add larger page sizes.
 /// Specification:
-/// We give a "ghost name" γ to the allocator, which is used to link up page tokens allocated
-/// with this allocator.
 #[rr::refined_by("()" : "unit")]
+#[rr::context("onceG Σ memory_layout")]
 /// Invariant: We abstract over the root node
 #[rr::exists("node" : "page_storage_node")]
 /// Invariant: the allocator tree covers the first 128TiB of memory
@@ -41,12 +40,12 @@ pub struct PageAllocator {
     root: PageStorageTreeNode,
 }
 
-#[rr::skip]
 #[rr::context("onceG Σ memory_layout")]
 #[rr::context("onceG Σ unit")]
 impl PageAllocator {
     const NOT_INITIALIZED: &'static str = "Bug. Page allocator not initialized.";
 
+    #[rr::only_spec]
     /// Initializes the global memory allocator with the given memory region as confidential memory. Must be called only once during the
     /// system initialization.
     ///
     /// # Safety
     ///
     /// Caller must pass the ownership of the memory region [memory_start, memory_end).
- #[rr::params("conf_start", "conf_end", "vs", "MEMORY_LAYOUT")] - #[rr::args("conf_start", "conf_end")] - - /// Precondition: The start address needs to be aligned to the minimum page size. - #[rr::requires("Hstart_aligned" : "conf_start `aligned_to` (page_size_in_bytes_nat Size4KiB)")] - /// Precondition: The minimum page size divides the memory size. - #[rr::requires("Hsz_div" : "(page_size_in_bytes_nat Size4KiB) | (conf_end.2 - conf_start.2)")] - /// Precondition: The memory range should not be negative. - #[rr::requires("Hstart_lt" : "conf_start.2 ≤ conf_end.2")] + #[rr::params("vs", "MEMORY_CONFIG")] + /// Precondition: The start and end addresses need to be aligned to the minimum page size. + #[rr::requires("memory_start `aligned_to` (page_size_in_bytes_nat Size4KiB)")] + #[rr::requires("memory_end `aligned_to` (page_size_in_bytes_nat Size4KiB)")] + /// Precondition: The memory range is positive. + #[rr::requires("memory_start.2 < memory_end.2")] - /// Precondition: The memory range does not exceed the maximum page size. - #[rr::requires("mend.2 ≤ page_size_in_bytes_Z Size128TiB")] + /// Precondition: The memory range is within the region covered by the page allocator. + #[rr::requires("memory_end.2 ≤ page_size_in_bytes_Z Size128TiB")] - /// Precondition: We have ownership of the memory range, having (conf_end - conf_start) bytes. - #[rr::requires(#type "conf_start" : "vs" @ "array_t (int u8) (Z.to_nat (conf_end.2 - conf_start.2))")] + /// Precondition: We have ownership of the memory range, having (memory_end - memory_start) bytes. + #[rr::requires(#type "memory_start" : "vs" @ "array_t (Z.to_nat (memory_end.2 - memory_start.2)) (int u8)")] /// Precondition: The memory needs to be in confidential memory #[rr::requires(#iris "once_status \"MEMORY_LAYOUT\" (Some MEMORY_CONFIG)")] @@ -80,10 +76,9 @@ impl PageAllocator { /// Precondition: The page allocator should be uninitialized. #[rr::requires(#iris "once_initialized π \"PAGE_ALLOCATOR\" None")] - /// Postcondition: The page allocator is initialized. #[rr::ensures(#iris "once_initialized π \"PAGE_ALLOCATOR\" (Some ())")] - #[rr::returns("Ok(#())")] + #[rr::returns("Ok(())")] pub unsafe fn initialize(memory_start: ConfidentialMemoryAddress, memory_end: *const usize) -> Result<(), Error> { ensure_not!(PAGE_ALLOCATOR.is_completed(), Error::Reinitialization())?; let mut page_allocator = Self::empty(); @@ -95,46 +90,92 @@ impl PageAllocator { /// Specification: /// Postcondition: an initialized memory allocator is returned - #[rr::returns("()")] + #[rr::verify] fn empty() -> Self { Self { root: PageStorageTreeNode::empty(), base_address: 0, page_size: PageSize::largest() } } - #[rr::params("mstart", "mend", "γ", "vs", "MEMORY_CONFIG", "mreg")] - #[rr::args("(#(), γ)", "mstart", "mend")] - - /// Precondition: The start address needs to be aligned to the minimum page size. - #[rr::requires("Hstart_aligned" : "mstart `aligned_to` (page_size_in_bytes_nat Size4KiB)")] + /// Find the largest page size we can create a page at. + /// Precondition: The current address is aligned to the given page size. + #[rr::requires("Haddr_aligned" : "address `aligned_to` page_size_in_bytes_nat page_size")] + /// Precondition: The current address is in bounds of the given memory region. + #[rr::requires("address.2 < memory_region_end.2")] + /// Precondition: The current address and the bound are in bounds of the global memory layout. 
+ #[rr::requires("memory_layout.(conf_start).2 ≤ address.2")] + #[rr::requires("memory_region_end.2 ≤ memory_layout.(conf_end).2")] + /// Postcondition: The current address is aligned to the returned page size. + #[rr::ensures("address `aligned_to` page_size_in_bytes_nat ret")] + /// Postcondition: Either the returned page size is the smallest page size, or a page at the + /// returned size will fit into the memory region. + #[rr::ensures("ret = Size4KiB ∨ address.2 + page_size_in_bytes_nat ret ≤ memory_region_end.2")] + fn find_largest_page_size( + memory_layout: &MemoryLayout, mut page_size: PageSize, address: ConfidentialMemoryAddress, memory_region_end: *const usize, + ) -> PageSize { + // Let's find the largest possible size of a page that could align to this address. + // According to the RISC-V spec, pages must be aligned to their size. + while let Some(larger_size) = page_size.larger().filter( + #[rr::trust_me] + #[rr::returns("bool_decide ({address} `aligned_to` page_size_in_bytes_nat larger_size)")] + |larger_size| address.is_aligned_to(larger_size.in_bytes()), + ) { + #[rr::inv_vars("page_size")] + #[rr::inv("address `aligned_to` page_size_in_bytes_nat page_size")] + #[rr::ignore] + #[allow(unused)] + || {}; + page_size = larger_size; + } + // After the loop, we have the largest possible `page_size` for which the `address` is + // well-aligned. + + // Now let's find the largest size of a page that really fits in the given memory region. We do not have to check the alignment, + // because the larger pages sizes are multiples of the smaller page sizes. + while memory_layout.confidential_address_at_offset_bounded(&address, page_size.in_bytes() - 1, memory_region_end).is_err() + && let Some(smaller_size) = page_size.smaller() + { + #[rr::inv_vars("page_size")] + #[rr::inv("address `aligned_to` page_size_in_bytes_nat page_size")] + #[rr::ignore] + #[allow(unused)] + || {}; + page_size = smaller_size; + } + // After the loop, we know that either no smaller page size exists, or a page at `page_size` fits in the memory region. - /// Precondition: The minimum page size divides the memory size. - #[rr::requires("Hsz_div" : "(page_size_in_bytes_nat Size4KiB) | (mend.2 - mstart.2)")] + page_size + } + #[rr::params("vs", "MEMORY_CONFIG")] + /// Precondition: The start and end addresses need to be aligned to the minimum page size. + #[rr::requires("Hstart_aligned" : "memory_region_start `aligned_to` (page_size_in_bytes_nat Size4KiB)")] + #[rr::requires("Hend_aligned" : "memory_region_end `aligned_to` (page_size_in_bytes_nat Size4KiB)")] /// Precondition: The memory range is positive. - #[rr::requires("Hstart_lt" : "mstart.2 < mend.2")] + #[rr::requires("Hstart_lt" : "memory_region_start.2 < memory_region_end.2")] /// Precondition: The memory range is within the region covered by the memory allocator. - #[rr::requires("mend.2 ≤ page_size_in_bytes_Z Size128TiB")] + #[rr::requires("memory_region_end.2 ≤ page_size_in_bytes_Z Size128TiB")] /// Precondition: We have ownership of the memory range, having (mend - mstart) bytes. 
- #[rr::requires(#type "mstart" : "vs" @ "array_t (int u8) (Z.to_nat (mend.2 - mstart.2))")] + #[rr::requires(#type "memory_region_start" : "<#> vs" @ "array_t (Z.to_nat (memory_region_end.2 - memory_region_start.2)) (int u8)")] /// Precondition: The memory layout needs to be initialized #[rr::requires(#iris "once_initialized π \"MEMORY_LAYOUT\" (Some MEMORY_CONFIG)")] /// Precondition: the whole memory region should be part of confidential memory - #[rr::requires("MEMORY_CONFIG.(conf_start).2 ≤ mstart.2")] - #[rr::requires("mend.2 ≤ MEMORY_CONFIG.(conf_end).2")] + #[rr::requires("MEMORY_CONFIG.(conf_start).2 ≤ memory_region_start.2")] + #[rr::requires("memory_region_end.2 ≤ MEMORY_CONFIG.(conf_end).2")] /// Postcondition: There exists some correctly initialized page assignment. - #[rr::observe("γ": "()")] + #[rr::observe("self.ghost": "()")] + #[rr::returns("Ok(())")] unsafe fn add_memory_region( &mut self, memory_region_start: ConfidentialMemoryAddress, memory_region_end: *const usize, ) -> Result<(), Error> { - debug!("Memory tracker: adding memory region: 0x{:x} - 0x{:x}", memory_region_start.as_usize(), memory_region_end as usize); + //debug!("Memory tracker: adding memory region: 0x{:x} - 0x{:x}", memory_region_start.as_usize(), memory_region_end as usize); assert!(memory_region_start.is_aligned_to(PageSize::smallest().in_bytes())); assert!(memory_region_end.is_aligned_to(PageSize::smallest().in_bytes())); assert!(memory_region_start.as_usize() < memory_region_end as usize); - // Page allocator supports maximum one page of largest size. - ensure_not!(memory_region_start.offset_from(memory_region_end) > self.page_size.in_bytes() as isize, Error::TooMuchMemory())?; + // The page allocator can only handle memory regions up to the largest page size. + ensure_not!(memory_region_end as usize > self.page_size.in_bytes(), Error::TooMuchMemory())?; // Our strategy is to create as few page tokens as possible to keep the memory overhead as low as possible. Therefore, we prefer to // create page tokens for the largest page size when possible. We use a greedy approach. We look for the largest possible page that @@ -142,7 +183,7 @@ impl PageAllocator { // keep increasing it until we find the largest possible page size. Then, we keep decreasing the page size until we reach the end of // the memory region. let memory_layout = MemoryLayout::read(); - let mut memory_address = Some(memory_region_start); + let mut address = memory_region_start; let mut page_size = PageSize::smallest(); // We might have to create a few tokens of 4KiB until we reach the address at which we can fit a 2MiB page. Then, we might have to @@ -162,53 +203,48 @@ impl PageAllocator { // || | | | | | | | || | | | | | | | || | | | | | | | || | | | | | | | || ... // ^1 GiB ^2 MiB ^2 MiB ^2 MiB ^memory_region_end - // According to the RISC-V spec, pages must be aligned to their size. - let is_address_page_aligned = - |address: &ConfidentialMemoryAddress, page_size: &PageSize| address.is_aligned_to(page_size.in_bytes()); - - // Page can be created only if all bytes are belonging to the given memory region - let can_create_page = |address: &ConfidentialMemoryAddress, page_size: &PageSize| { - let page_last_address = page_size.in_bytes() - 1; - memory_layout.confidential_address_at_offset_bounded(&address, page_last_address, memory_region_end).is_ok() - }; - - while let Some(address) = memory_address.take() { - // Let's find the largest possible size of a page that could align to this address. 
-            while let Some(larger_size) = page_size.larger().filter(|larger_size| is_address_page_aligned(&address, &larger_size)) {
-                page_size = larger_size;
-            }
-            // Now let's find the largest size of a page that really fits in the given memory region. We do not have to check the alignment,
-            // because the larger pages sizes are multiplies of the smaller page sizes.
-            while let Some(smaller_size) = page_size.smaller().filter(|smaller_size| !can_create_page(&address, &smaller_size)) {
-                page_size = smaller_size;
-            }
-            // The following line ensures that the while loop will complete because, regardless of whether we manage to create a page token
-            // or not, we will increment the `memory_address` in each loop so that it eventually passes the end of the given memory region.
-            memory_address = memory_layout.confidential_address_at_offset_bounded(&address, page_size.in_bytes(), memory_region_end).ok();
-            // If the next memory address (`memory_address`) is still in the memory range, then we are sure we can create the page token.
-            // Otherwise, we must check the boundary condition: Are we creating the last page token over a memory whose last byte
-            // (`address`+`page_size.in_bytes()`) is next to the end of the memory region (`memory_region_end`)?
-            if memory_address.is_some() || can_create_page(&address, &page_size) {
-                let new_page_token = unsafe { Page::<UnAllocated>::init(address, page_size.clone()) };
+        loop {
+            #[rr::inv_vars("page_size", "address")]
+            #[rr::inv("address `aligned_to` page_size_in_bytes_nat page_size")]
+            #[rr::inv("address.2 < memory_region_end.2")]
+            // Invariant: Remaining memory ownership
+            #[rr::exists("vs")]
+            #[rr::inv(#type "address" : "<#> vs" @ "array_t (Z.to_nat (memory_region_end.2 - address.2)) (int u8)")]
+            #[rr::ignore]
+            #[allow(unused)]
+            || {};
+
+            // Find the largest page size at which a well-aligned page fits into the memory region.
+            page_size = Self::find_largest_page_size(memory_layout, page_size, address, memory_region_end);
+
+            // Check that we can actually create the page
+            if memory_layout.confidential_address_at_offset_bounded(&address, page_size.in_bytes() - 1, memory_region_end).is_ok() {
+                let new_page_token = unsafe { Page::<UnAllocated>::init(address, page_size) };
                 // NOTE: We show that the page token is within the range of the allocator
                 self.root.store_page_token(self.base_address, self.page_size, new_page_token);
             }
+
+            // The following lines ensure that the loop makes progress: regardless of whether we manage to create a page token
+            // or not, we advance `address` in each iteration so that it eventually passes the end of the given memory region.
+            let Ok(new_addr) = memory_layout.confidential_address_at_offset_bounded(&address, page_size.in_bytes(), memory_region_end)
+            else {
+                // we passed beyond the memory region
+                break;
+            };
+            address = new_addr;
         }
         Ok(())
     }
 
+    #[rr::only_spec]
     /// Returns a page token that has ownership over an unallocated memory region of the requested size. Returns error if it could not
     /// obtain write access to the global instance of the page allocator or if there are not enough page tokens satisfying the requested
     /// criteria.
-    /// Specification:
-    #[rr::params("sz")]
-    #[rr::args("sz")]
     /// Precondition: We require the page allocator to be initialized.
     #[rr::requires(#iris "once_initialized π \"PAGE_ALLOCATOR\" (Some ())")]
     /// Postcondition: If a page is returned, it has the right size.
- #[rr::exists("res")] - #[rr::ensures("if_Ok (λ pg, pg.(page_sz) = sz)")] - #[rr::returns("<#>@{result} res")] + #[rr::ok] + #[rr::ensures("ret.(page_sz) = page_size_to_allocate")] pub fn acquire_page(page_size_to_allocate: PageSize) -> Result, Error> { Self::try_write(|page_allocator| { let base_address = page_allocator.base_address; @@ -217,25 +253,35 @@ impl PageAllocator { })? } + #[rr::only_spec] /// Consumes the page tokens given by the caller, allowing for their further acquisition. This is equivalent to deallocation of the /// physical memory region owned by the returned page tokens. Given vector of pages might contains pages of arbitrary sizes. - #[rr::params("pages")] - #[rr::args("<#> pages")] /// Precondition: We require the page allocator to be initialized. #[rr::requires(#iris "once_initialized π \"PAGE_ALLOCATOR\" (Some ())")] pub fn release_pages(released_pages: Vec>) { let _ = Self::try_write(|page_allocator| { let base_address = page_allocator.base_address; let page_size = page_allocator.page_size; - released_pages.into_iter().for_each(|page_token| { - // NOTE: we show that the token is within range of the allocator. - page_allocator.root.store_page_token(base_address, page_size, page_token); - }); + let root_node = &mut page_allocator.root; + for page_token in released_pages { + #[rr::params("γ")] + #[rr::inv_vars("root_node", "page_size", "base_address")] + #[rr::inv("base_address = 0%Z")] + #[rr::inv("page_size = Size128TiB")] + #[rr::inv("root_node.ghost = γ")] + #[rr::ignore] + #[allow(unused)] + || {}; + // NOTE: we show that the token is within range of the allocator, using the + // invariant of the page token. + root_node.store_page_token(base_address, page_size, page_token); + } Ok(()) }) .inspect_err(|_| debug!("Memory leak: failed to store released pages in the page allocator")); } + #[rr::skip] /// returns a mutable reference to the PageAllocator after obtaining a lock on the mutex fn try_write(op: O) -> Result where O: FnOnce(&mut RwLockWriteGuard<'static, PageAllocator>) -> Result { @@ -246,10 +292,7 @@ impl PageAllocator { /// A node of a tree data structure that stores page tokens and maintains additional metadata that simplifies acquisition and /// release of page tokens. /// Specification: -/// A node is refined by the size of this node, -/// its base address, -/// and the logical allocation state. -// TODO: consider using separate public and private interfaces +#[rr::context("onceG Σ memory_layout")] #[rr::refined_by("node" : "page_storage_node")] /// We abstract over the components stored here #[rr::exists("max_sz" : "option page_size")] @@ -257,22 +300,24 @@ impl PageAllocator { #[rr::exists("children" : "list page_storage_node")] /// Invariant: The representation invariant linking all these things together holds. #[rr::invariant("page_storage_node_invariant node max_sz maybe_page_token children")] +/// Invariant: The whole address space of this node is in addressable memory. +#[rr::invariant("INV_IN_RANGE": "node.(base_address) + (page_size_in_bytes_nat node.(max_node_size)) ≤ MaxInt usize")] struct PageStorageTreeNode { // Page token owned by this node. `None` means that this page token has already been allocated or that it has been divided into smaller // pages token that were stored in this node's children. - #[rr::field("<#>@{option} maybe_page_token")] + #[rr::field("<#>@{{ option }} maybe_page_token")] page_token: Option>, // Specifies what size of the page token can be allocated by exploring the tree starting at this node. 
     // Invariant: if page token exist, then the its size is the max allocable size. Otherwise, the max allocable page size is the max
     // allocable page size of children
-    #[rr::field("<#>@{option} max_sz")]
+    #[rr::field("<#>@{{ option }} max_sz")]
     max_allocable_page_size: Option<PageSize>,
     // Invariant: Children store page tokens smaller than the page token stored in the parent node
     #[rr::field("<#> children")]
     children: Vec<PageStorageTreeNode>,
 }
 
-#[rr::skip]
+#[rr::context("onceG Σ memory_layout")]
 impl PageStorageTreeNode {
     /// Creates a new empty node with no allocation.
     /// Specification:
@@ -280,8 +325,11 @@ impl PageStorageTreeNode {
     #[rr::params("node_size", "base_address")]
     /// Precondition: the base address needs to be suitably aligned.
     #[rr::requires("(page_size_align node_size | base_address)%Z")]
+    /// Precondition: the whole memory range is in addressable memory.
+    #[rr::requires("base_address + (page_size_in_bytes_nat node_size) ≤ MaxInt usize")]
+    /// Postcondition: we get a node with no available pages.
     #[rr::returns("mk_page_node node_size base_address PageTokenUnavailable false")]
-    pub fn empty() -> Self {
+    fn empty() -> Self {
         Self { page_token: None, max_allocable_page_size: None, children: vec![] }
     }
 
     /// Stores a page token in the tree and returns the size of the largest page
     /// token that can be allocated from this node. This method has the max depth of recusrive invocation equal to the number of
     /// PageSize variants. This method has an upper bounded computation complexity.
     ///
-    /// Invariant: page token's size must not be greater than the page size allocable at this node
-    #[rr::params("node", "tok", "γ")]
-    #[rr::args("(#node, γ)", "node.(base_address)", "node.(max_node_size)", "tok")]
-
-    /// Precondition: The token we store has a smaller size than the node.
-    #[rr::requires("tok.(page_sz) ≤ₚ node.(max_node_size)")]
-
+    /// Precondition: The size and base address arguments match the logical state.
+    #[rr::requires("this_node_base_address = self.cur.(base_address)")]
+    #[rr::requires("this_node_page_size = self.cur.(max_node_size)")]
+    /// Precondition: The token we store has a smaller-or-equal size than the node.
+    #[rr::requires("Hsz_le" : "page_token.(page_sz) ≤ₚ self.cur.(max_node_size)")]
     /// Precondition: The page token is within the range of the node.
-    #[rr::requires("page_within_range node.(base_address) node.(max_node_size) tok")]
-    #[rr::exists("available_sz" : "page_size")]
-    /// Postcondition: we obtain an available page size...
-    #[rr::returns("available_sz")]
-    /// Postcondition: ...which is at least the size that we stored.
-    #[rr::ensures("tok.(page_sz) ≤ₚ available_sz")]
+    #[rr::requires("Hrange" : "page_within_range this_node_base_address this_node_page_size page_token")]
+    /// Postcondition: The now available size is at least the size that we stored.
+    #[rr::ensures("page_token.(page_sz) ≤ₚ ret")]
     /// Postcondition: This node has been changed...
#[rr::exists("node'")] - #[rr::observe("γ": "node'")] - /// ...and now it can allocate a page of size `available_sz` - #[rr::ensures("page_node_can_allocate node' = Some available_sz")] + #[rr::observe("self.ghost": "node'")] + /// ...and now it can allocate a page of the size given by the return value + #[rr::ensures("page_node_can_allocate node' = Some ret")] + /// ...which is at least the size of the token that was stored + #[rr::ensures("page_token.(page_sz) ≤ₚ ret")] /// ...while the rest is unchanged - #[rr::ensures("node'.(max_node_size) = node.(max_node_size)")] - #[rr::ensures("node'.(base_address) = node.(base_address)")] - pub fn store_page_token( + #[rr::ensures("node'.(max_node_size) = self.cur.(max_node_size)")] + #[rr::ensures("node'.(base_address) = self.cur.(base_address)")] + fn store_page_token( &mut self, this_node_base_address: usize, this_node_page_size: PageSize, page_token: Page, ) -> PageSize { - assert!(this_node_page_size >= *page_token.size()); - if &this_node_page_size == page_token.size() { + assert!(this_node_page_size >= page_token.size()); + if this_node_page_size == page_token.size() { // End of recursion, we found the node that can store the page token. assert!(this_node_base_address == page_token.start_address()); - assert!(&this_node_page_size == page_token.size()); - assert!(self.page_token.is_none()); + assert!(this_node_page_size == page_token.size()); + self.store_page_token_in_this_node(page_token); } else { assert!(this_node_page_size.smaller().is_some()); self.initialize_children_if_needed(this_node_page_size); // Calculate which child should we invoke recursively. - let index = self.calculate_child_index(this_node_base_address, this_node_page_size, &page_token); + let index = Self::calculate_child_index(this_node_base_address, this_node_page_size, &page_token); // Let's go recursively to the node where this page belongs to. let (child_base_address, child_page_size) = self.child_address_and_size(this_node_base_address, this_node_page_size, index); - let allocable_page_size = self.children[index].store_page_token(child_base_address, child_page_size, page_token); + let allocable_page_size = + vec_index_mut(&mut self.children, index).store_page_token(child_base_address, child_page_size, page_token); // We are coming back from the recursion. - self.try_to_merge_page_tokens(this_node_page_size); - if Some(allocable_page_size) > self.max_allocable_page_size { - // Some children can allocate page tokens of a size larger than the page token we stored because they could have been - // merged. - self.max_allocable_page_size = Some(allocable_page_size); + // Update the size, in case the recursion increased the allocable size. + self.max_allocable_page_size = self.max_allocable_page_size.max(Some(allocable_page_size)); + // In case this token became mergeable, merge. + // Safety: The children are initialized. + unsafe { + self.try_to_merge_page_tokens(this_node_page_size); } } self.max_allocable_page_size.unwrap() } + #[rr::only_spec] /// Recursively traverses the tree to reach a node that contains the page token of the requested size and returns this page token. This /// function returns also a set of page size that are not allocable anymore at the node. This method has the max depth of recusrive /// invocation equal to the number of PageSize variants. This method has an upper bounded computation complexity. /// - /// Invariants: requested page size to acquire must not be greater than a page size allocable at this node. 
-    #[rr::trust_me]
-    #[rr::params("node", "γ", "sz_to_acquire")]
-    #[rr::args("(#node, γ)", "node.(base_address)", "node.(max_node_size)", "sz_to_acquire")]
-    /// Precondition: The desired size must be bounded by this node's size (otherwise something went wrong)
-    #[rr::requires("sz_to_acquire ≤ₚ node.(max_node_size)")]
-    /// Postcondition: the function can either succeed to allocate a page or fail
-    #[rr::exists("res" : "result page _")]
-    #[rr::returns("res")]
-    /// If it succeeds, the returned page has the desired size.
-    #[rr::ensures("if_Ok res (λ pg, pg.(page_sz) = sz_to_acquire)")]
-    pub fn acquire_page_token(
+    /// Precondition: The size and base address arguments match the logical state.
+    #[rr::requires("this_node_base_address = self.cur.(base_address)")]
+    #[rr::requires("this_node_page_size = self.cur.(max_node_size)")]
+    /// Postcondition: This node has been changed...
+    #[rr::exists("node'")]
+    #[rr::observe("self.ghost": "node'")]
+    /// Postcondition: the size and base address remain unchanged
+    #[rr::ensures("node'.(max_node_size) = self.cur.(max_node_size)")]
+    #[rr::ensures("node'.(base_address) = self.cur.(base_address)")]
+    /// Postcondition: The function succeeds and returns a page iff the allocable page size of the
+    /// node is at least the required page size.
+    #[rr::ok]
+    #[rr::requires("∃ sz, page_node_can_allocate self.cur = Some sz ∧ page_size_to_acquire ≤ₚ sz")]
+    /// Postcondition: if it succeeds, the returned page has the desired size.
+    #[rr::ensures("ret.(page_sz) = page_size_to_acquire")]
+    fn acquire_page_token(
         &mut self, this_node_base_address: usize, this_node_page_size: PageSize, page_size_to_acquire: PageSize,
     ) -> Result<Page<UnAllocated>, Error> {
         ensure!(self.max_allocable_page_size >= Some(page_size_to_acquire), Error::OutOfPages())?;
@@ -365,7 +417,7 @@
             assert!(self.page_token.is_some());
             let page_token = self.acquire_page_token_from_this_node();
             assert!(this_node_base_address == page_token.start_address());
-            assert!(&this_node_page_size == page_token.size());
+            assert!(this_node_page_size == page_token.size());
             Ok(page_token)
         } else {
             // We are too high in the tree, i.e., the current level represents page size allocations that are larger than the page size
@@ -379,70 +431,88 @@
             if self.page_token.is_some() {
                 self.divide_page_token(this_node_base_address, this_node_page_size);
             }
-            // Let's get the index of the first child that has requested allocation. It might be that there is no child that can has the
-            // required allocation. In such a case, we return an error.
-            let index = self
-                .children
-                .iter()
-                .position(|node| node.max_allocable_page_size >= Some(page_size_to_acquire))
-                .ok_or(Error::OutOfPages())?;
+
+            // Let's get the index of the first child that has requested allocation.
+            // This will succeed because we already checked above that the desired size can be acquired.
+            let index = vec_iter(&self.children)
+                .position(
+                    #[rr::skip]
+                    #[rr::returns("bool_decide (page_node_can_allocate )")]
+                    |node| node.max_allocable_page_size >= Some(page_size_to_acquire),
+                )
+                .unwrap();
+
             let (child_base_address, child_page_size) = self.child_address_and_size(this_node_base_address, this_node_page_size, index);
             // Invoke recursively this function to traverse to a node containing a page token of the requested size.
             // The below unwrap is ok because we found an index of a node that has requested allocation.
-            let page_token = self.children[index].acquire_page_token(child_base_address, child_page_size, page_size_to_acquire).unwrap();
+            let page_token = vec_index_mut(&mut self.children, index)
+                .acquire_page_token(child_base_address, child_page_size, page_size_to_acquire)
+                .unwrap();
             // Let's refresh information about the largest allocable page size available in children.
-            self.max_allocable_page_size = self.children.iter().map(|child| child.max_allocable_page_size).max().flatten();
+            self.max_allocable_page_size = vec_iter(&self.children)
+                .map(
+                    #[rr::skip]
+                    #[rr::returns("page_node_can_allocate child")]
+                    |child| child.max_allocable_page_size,
+                )
+                .max()
+                .flatten();
             Ok(page_token)
         }
     }
 
+    #[rr::only_spec]
     /// Creates children for the given node because the node gets created with an empty list of children, expecting that children will be
     /// created lazily with this function.
     ///
-    /// Invariant: This node has no children
-    /// Outcome:
-    /// - There is one child for every possible smaller page size that can be created for this node,
-    /// - Every child is empty, i.e., has no children, no page token, no possible allocation.
-    #[rr::params("node", "γ")]
-    #[rr::args("(#node, γ)", "node.(max_node_size)")]
-    /// Postcondition: leaves the page node unchanged except for initializing the
-    /// children if necessary
-    #[rr::observe("γ": "mk_page_node node.(max_node_size) node.(base_address) node.(allocation_state) true")]
+    /// Precondition: the page size argument has to match the node's logical state.
+    #[rr::requires("this_node_page_size = self.cur.(max_node_size)")]
+    /// Postcondition: leaves the page node unchanged except for initializing the children if necessary
+    #[rr::observe("self.ghost": "mk_page_node self.cur.(max_node_size) self.cur.(base_address) self.cur.(allocation_state) true")]
    fn initialize_children_if_needed(&mut self, this_node_page_size: PageSize) {
         if self.children.is_empty() {
-            self.children = (0..this_node_page_size.number_of_smaller_pages()).map(|_| Self::empty()).collect();
+            self.children = (0..this_node_page_size.number_of_smaller_pages())
+                .map(
+                    // I think to handle this well I'll need invariants on closures.
+                    // i.e., the address and so on need to become logical components of the type (even
+                    // though they don't have a physical representation)
+                    #[rr::skip]
+                    #[rr::params("base_address", "node_size")]
+                    #[rr::requires("(page_size_align node_size | base_address)%Z")]
+                    #[rr::returns("mk_page_node node_size base_address PageTokenUnavailable false")]
+                    |_| Self::empty(),
+                )
+                .collect();
         }
     }
 
     /// Stores the given page token in the current node.
     ///
     /// Invariant: if there is page token then all page size equal or lower than the page token are allocable from this node.
- #[rr::params("node", "γ", "tok")] - #[rr::args("(#node, γ)", "tok")] - /// Precondition: the node is in the unavailable state - #[rr::requires("node.(allocation_state) = PageTokenUnavailable")] + /// /// Precondition: the token's size is equal to this node's size - #[rr::requires("node.(max_node_size) = tok.(page_sz)")] + #[rr::requires("self.cur.(max_node_size) = page_token.(page_sz)")] /// Precondition: the token's address matches the node's address - #[rr::requires("node.(base_address) = tok.(page_loc).2")] + #[rr::requires("self.cur.(base_address) = page_token.(page_loc).2")] /// Postcondition: the node changes to the available state - #[rr::observe("γ": "mk_page_node node.(max_node_size) node.(base_address) PageTokenAvailable node.(children_initialized)")] + #[rr::observe("self.ghost": "mk_page_node self.cur.(max_node_size) self.cur.(base_address) PageTokenAvailable self.cur.(children_initialized)")] fn store_page_token_in_this_node(&mut self, page_token: Page) { - assert!(self.page_token.is_none()); - self.max_allocable_page_size = Some(*page_token.size()); + // TODO: add back the assert! + //assert!(self.page_token.is_none()); + self.max_allocable_page_size = Some(page_token.size()); self.page_token = Some(page_token); } /// Returns an ownership of a page token that has been stored in this node. /// - /// Invariant: This node owns a page token - /// Invariant: After returning the page token, this node does not own the page token and has no allocation - #[rr::params("node", "γ")] - #[rr::args("(#node, γ)")] - #[rr::requires("node.(allocation_state) = PageTokenAvailable")] - #[rr::exists("tok")] - #[rr::returns("tok")] - #[rr::observe("γ": "mk_page_node node.(max_node_size) node.(base_address) PageTokenUnavailable node.(children_initialized)")] + /// Precondition: The token in this node is fully available. + #[rr::requires("Hstate" : "self.cur.(allocation_state) = PageTokenAvailable")] + /// Postcondition: The returned token's size matches the node's size. + #[rr::ensures("ret.(page_sz) = self.cur.(max_node_size)")] + /// Postcondition: The returned token's address matches the node's address. + #[rr::ensures("ret.(page_loc).2 = self.cur.(base_address)")] + /// Postcondition: The node has been updated to the unavailable state. + #[rr::observe("self.ghost": "mk_page_node self.cur.(max_node_size) self.cur.(base_address) PageTokenUnavailable self.cur.(children_initialized)")] fn acquire_page_token_from_this_node(&mut self) -> Page { assert!(self.page_token.is_some()); self.max_allocable_page_size = None; @@ -450,86 +520,111 @@ impl PageStorageTreeNode { } /// Divides the page token into smaller page tokens. - /// - /// Invariant: This node owns a page token - /// Invariant: After returning, this node can allocate pages of lower page sizes than the original page token. - /// Invariant: After returning, every child node owns a page token of smaller size than the original page token. - /// Invariant: After returning, every child can allocate a page token of smaller size than the original page token. - #[rr::params("node", "γ", "smaller_size")] - #[rr::args("(#node, γ)", "node.(base_address)", "node.(max_node_size)")] - /// Precondition: The children should be initialized. - #[rr::requires("node.(children_initialized)")] + #[rr::params("smaller_size", "memly" : "memory_layout")] + /// Precondition: The global memory layout is initialized. 
+ #[rr::requires(#iris "once_initialized π \"MEMORY_LAYOUT\" (Some memly)")] + /// Precondition: The argument base address and size match the node's logical state. + #[rr::requires("self.cur.(base_address) = this_node_base_address")] + #[rr::requires("self.cur.(max_node_size) = this_node_page_size")] + /// Precondition: The children are initialized. + #[rr::requires("self.cur.(children_initialized)")] /// Precondition: A token is available in the current node. - #[rr::requires("node.(allocation_state) = PageTokenAvailable")] - /// Precondition: We assume there is a smaller node size. - #[rr::requires("page_size_smaller node.(max_node_size) = Some smaller_size")] + #[rr::requires("self.cur.(allocation_state) = PageTokenAvailable")] + /// Precondition: There is a smaller node size. + #[rr::requires("page_size_smaller self.cur.(max_node_size) = Some smaller_size")] /// Postcondition: The node is updated to the "partially available" state, with a smaller node size being allocable - #[rr::observe("γ": "mk_page_node node.(max_node_size) node.(base_address) (PageTokenPartiallyAvailable smaller_size) true")] + #[rr::observe("self.ghost": "mk_page_node self.cur.(max_node_size) self.cur.(base_address) (PageTokenPartiallyAvailable smaller_size) true")] fn divide_page_token(&mut self, this_node_base_address: usize, this_node_page_size: PageSize) { assert!(self.page_token.is_some()); - let mut smaller_pages = self.page_token.take().unwrap().divide(); + let smaller_pages = self.page_token.take().unwrap().divide(); assert!(smaller_pages.len() == self.children.len()); - smaller_pages.drain(..).for_each(|smaller_page_token| { - let index = self.calculate_child_index(this_node_base_address, this_node_page_size, &smaller_page_token); - self.children[index].store_page_token_in_this_node(smaller_page_token); - }); - let smaller_size = self.max_allocable_page_size.unwrap().smaller().unwrap(); + + let children = &mut self.children; + for smaller_page_token in smaller_pages { + #[rr::params("γ", "children_init")] + #[rr::inv_vars("children")] + #[rr::inv("children.ghost = γ")] + #[rr::inv( + "children.cur = ((λ node, page_node_update_state node PageTokenAvailable) <$> (take (length {Hist}) children_init)) ++ drop (length {Hist}) children_init" + )] + #[rr::ignore] + #[allow(unused)] + || {}; + let index = Self::calculate_child_index(this_node_base_address, this_node_page_size, &smaller_page_token); + vec_index_mut(children, index).store_page_token_in_this_node(smaller_page_token); + } + let smaller_size = this_node_page_size.smaller().unwrap(); self.max_allocable_page_size = Some(smaller_size); } + #[rr::only_spec] /// Merges page tokens owned by children. + /// Safety: Requires that all children have been initialized. /// - /// Invariant: after merging, there is no child that owns a page token - /// Invariant: after merging, this node owns a page token that has size larger than the ones owned before by children. - #[rr::params("node", "γ")] - #[rr::args("(#node, γ)", "node.(max_node_size)")] - #[rr::requires("node.(children_initialized)")] + /// Precondition: The children are initialized. + #[rr::requires("self.cur.(children_initialized)")] + /// Precondition: the argument page size matches the node's page size + #[rr::requires("this_node_page_size = self.cur.(max_node_size)")] + /// Postcondition: this node has been updated to a new state. 
#[rr::exists("state'")] - #[rr::observe("γ": "mk_page_node node.(max_node_size) node.(base_address) state' true")] - #[rr::ensures("state' = node.(allocation_state) ∨ state' = PageTokenAvailable")] - fn try_to_merge_page_tokens(&mut self, this_node_page_size: PageSize) { - if self.children.iter().all(|child| child.page_token.is_some()) { + #[rr::observe("self.ghost": "mk_page_node self.cur.(max_node_size) self.cur.(base_address) state' true")] + /// Postcondition: the state is either unchanged, or the whole node is available now. + #[rr::ensures("state' = self.cur.(allocation_state) ∨ state' = PageTokenAvailable")] + unsafe fn try_to_merge_page_tokens(&mut self, this_node_page_size: PageSize) { + if self.children.iter().all( + #[rr::skip] + #[rr::returns("bool_decide (child.(allocation_state) = PageTokenAvailable)")] + |child| child.page_token.is_some(), + ) { // All children have page tokens, thus we can merge them. - let pages_to_merge = self.children.iter_mut().map(|child| child.acquire_page_token_from_this_node()).collect(); + let pages_to_merge = self.children.iter_mut().map( + #[rr::skip] + #[rr::requires("child.cur.(allocation_state) = PageTokenAvailable")] + #[rr::ensures("ret.(page_sz) = child.cur.(max_node_size)")] + #[rr::ensures("ret.(page_loc).2 = child.cur.(base_address)")] + #[rr::observe("child.ghost": "mk_page_node child.cur.(max_node_size) child.cur.(base_address) PageTokenUnavailable child.cur.(children_initialized)")] + |child| child.acquire_page_token_from_this_node() + ).collect(); + // Safety: Safe, because all children are initialized and have a page token available. self.store_page_token_in_this_node(unsafe { Page::merge(pages_to_merge, this_node_page_size) }); self.max_allocable_page_size = Some(this_node_page_size); } } /// Returns the index of a child that can store the page token. - /// - /// Invariant: children have been created - #[rr::skip] - #[rr::args(#raw "#(-[ #maybe_tok; #available_sz; #children])", "base_addr", "node_sz", "#tok")] - /// Precondition: The children need to have been initialized. - #[rr::requires("length children = page_size_multiplier node_sz")] + #[rr::params("child_node_sz")] /// Precondition: There is a smaller page size. - #[rr::requires("page_size_smaller node_sz = Some child_node_size")] + #[rr::requires("Hchild": "page_size_smaller this_node_page_size = Some child_node_sz")] /// Precondition: The token's page size is smaller than this node's size. - #[rr::requires("tok.(page_sz) ≤ₚ child_node_size")] + #[rr::requires("Hsz_lt": "page_token.(page_sz) ≤ₚ child_node_sz")] /// Precondition: The token is within the range of this node. - #[rr::requires("page_within_range base_addr node_sz tok")] - #[rr::exists("i")] - #[rr::returns("i")] - #[rr::ensures("is_Some (children !! i)")] - // TODO: the token is in the range of the child node. - // TODO: does not work at this level of abstraction. Use a raw specification. - fn calculate_child_index(&self, this_node_base_address: usize, this_node_page_size: PageSize, page_token: &Page) -> usize { - assert!(this_node_page_size > *page_token.size()); + #[rr::requires("Hrange": "page_within_range this_node_base_address this_node_page_size page_token")] + /// Postcondition: such that there is a matching child + #[rr::ensures("ret < page_size_multiplier this_node_page_size")] + /// Postcondition: The page is within the range of the child node. 
+ #[rr::ensures("page_within_range (this_node_base_address + ret * page_size_in_bytes_Z child_node_sz) child_node_sz page_token")] + fn calculate_child_index(this_node_base_address: usize, this_node_page_size: PageSize, page_token: &Page) -> usize { + assert!(this_node_page_size > page_token.size()); let index = (page_token.start_address() - this_node_base_address) / this_node_page_size.smaller().unwrap().in_bytes(); - assert!(index < self.children.len()); index } /// Returns the base address and the page size of the child at the given index /// /// Invariant: children have been created - #[rr::params("node", "child_index", "child_node_size")] - #[rr::args("#node", "node.(base_address)", "node.(max_node_size)", "child_index")] - #[rr::requires("node.(children_initialized)")] - #[rr::requires("page_size_smaller node.(max_node_size) = Some child_node_size")] - #[rr::returns("-[child_base_address node.(base_address) child_node_size child_index; child_node_size] ")] + #[rr::params("child_node_size")] + /// Precondition: The base address and page size arguments match the node's logical state. + #[rr::requires("this_node_base_address = self.(base_address)")] + #[rr::requires("this_node_page_size = self.(max_node_size)")] + /// The children are initialized + #[rr::requires("H_child_init": "self.(children_initialized)")] + /// There exist children + #[rr::requires("page_size_smaller self.(max_node_size) = Some child_node_size")] + /// Precondition: the child index is in range + #[rr::requires("Hindex": "index < page_size_multiplier self.(max_node_size)")] + /// Postcondition: Returns a tuple of the child's base address and its size. + #[rr::returns("*[child_base_address self.(base_address) child_node_size index; child_node_size] ")] fn child_address_and_size(&self, this_node_base_address: usize, this_node_page_size: PageSize, index: usize) -> (usize, PageSize) { assert!(index < self.children.len()); assert!(this_node_page_size.smaller().is_some()); @@ -538,3 +633,28 @@ impl PageStorageTreeNode { (child_base_address, child_page_size) } } + +/// These wrappers serve as a temporary workaround until RefinedRust supports unsized types and in +/// particular slices: the indexing and iteration methods on `Vec` work by dereferencing to slices, +/// which currently are not supported by RefinedRust. +/// For now, we thus create wrappers for these methods to which we can attach RefinedRust +/// specifications. +mod wrappers { + use alloc::vec::Vec; + + #[rr::only_spec] + #[rr::requires("index < length x.cur")] + #[rr::exists("γi")] + #[rr::returns("(x.cur !!! Z.to_nat index, γi)")] + #[rr::observe("x.ghost": "<[Z.to_nat index := PlaceGhost γi]> (<$#> x.cur)")] + pub fn vec_index_mut(x: &mut Vec, index: usize) -> &mut T { + &mut x[index] + } + + #[rr::only_spec] + #[rr::returns("x")] + pub fn vec_iter(x: &Vec) -> core::slice::Iter<'_, T> { + x.iter() + } +} +use wrappers::*; diff --git a/security-monitor/src/core/page_allocator/page.rs b/security-monitor/src/core/page_allocator/page.rs index b3c7447..a3282c0 100644 --- a/security-monitor/src/core/page_allocator/page.rs +++ b/security-monitor/src/core/page_allocator/page.rs @@ -30,7 +30,8 @@ impl PageState for Allocated {} #[rr::invariant(#type "p.(page_loc)" : "<#> p.(page_val)" @ "array_t (page_size_in_words_nat p.(page_sz)) (int usize)")] /// Invariant: The page is well-formed. #[rr::invariant("page_wf p")] -/// We require the page to be in this bounded memory region. 
+/// We require the page to be in this bounded memory region that can be handled by the page
+/// allocator.
 #[rr::invariant("(page_end_loc p).2 ≤ MAX_PAGE_ADDR")]
 /// We require the memory layout to have been initialized.
 #[rr::context("onceG Σ memory_layout")]
@@ -53,7 +54,6 @@ pub struct Page<S: PageState> {
 }
 
 #[rr::context("onceG Σ memory_layout")]
-#[rr::context("onceG Σ gname")]
 impl Page<UnAllocated> {
     /// Creates a page token at the given address in the confidential memory.
     ///
@@ -71,7 +71,8 @@ impl Page<UnAllocated> {
     #[rr::requires(#type "l" : "<#> v" @ "array_t (page_size_in_words_nat sz) (int usize)")]
     /// Precondition: The page needs to be sufficiently aligned.
     #[rr::requires("l `aligned_to` (page_size_align sz)")]
-    /// Precondition: The page is located in a bounded memory region.
+    /// Precondition: The page is located in a bounded memory region that can be handled by the
+    /// page allocator.
     #[rr::requires("l.2 + page_size_in_bytes_Z sz ≤ MAX_PAGE_ADDR")]
     /// Precondition: The memory layout is initialized.
     #[rr::requires(#iris "once_status \"MEMORY_LAYOUT\" (Some MEMORY_CONFIG)")]
@@ -85,14 +86,10 @@ impl Page<UnAllocated> {
     }
 
     /// Specification:
-    #[rr::only_spec]
-    #[rr::params("p")]
-    #[rr::args("p")]
     /// We return a page starting at `l` with size `sz`, but with all bytes initialized to zero.
-    #[rr::returns("mk_page p.(page_loc) p.(page_sz) (zero_page p.(page_sz))")]
+    #[rr::returns("mk_page self.(page_loc) self.(page_sz) (zero_page self.(page_sz))")]
     pub fn zeroize(mut self) -> Page<UnAllocated> {
         self.clear();
-        // TODO: fix automation for consuming values
         Page { address: self.address, size: self.size, _marker: PhantomData }
     }
@@ -128,7 +125,6 @@ impl Page<UnAllocated> {
     /// Returns a collection of all smaller pages that fit within the current page and
     /// are correctly aligned. If this page is the smallest page (4KiB for RISC-V), then
     /// the same page is returned.
-    #[rr::only_spec]
     #[rr::params("x" : "memory_layout")]
     /// Precondition: The memory layout needs to have been initialized.
     #[rr::requires(#iris "once_initialized π \"MEMORY_LAYOUT\" (Some x)")]
@@ -143,7 +139,6 @@ impl Page<UnAllocated> {
         // NOTE: Currently using a wrapper around map, as we have to add an extra trait requirement
         // to the struct definition of Map to declare the invariant. Should be lifted soon.
         (0..number_of_smaller_pages).map(
-            #[rr::skip]
             #[rr::params("v")]
             // Precondition: the page bound is in confidential memory
             #[rr::requires("Hpage_end" : "{page_end}.2 < {*memory_layout}.(conf_end).2")]
@@ -186,11 +181,27 @@ impl Page<UnAllocated> {
     /// * Merged pages are contiguous and cover the entire new size of the future page
     /// * Merged pages are of the same size
     /// * Merged pages are sorted
+    #[rr::only_spec]
+    #[rr::params("base_address" : "loc")]
+    #[rr::requires("base_address = (from_pages !!! 0%nat).(page_loc)")]
+    #[rr::requires("(from_pages !!! 0%nat).(page_loc) `aligned_to` page_size_align new_size")]
+    #[rr::requires(
+        "∀ (i : nat) pg, from_pages !! i = Some pg →
+            Some pg.(page_sz) = page_size_smaller new_size ∧
+            pg.(page_loc) = base_address +ₗ (i * page_size_in_bytes_Z pg.(page_sz))"
+    )]
+    #[rr::requires("length from_pages = page_size_multiplier new_size")]
+    #[rr::returns("mk_page base_address new_size (mjoin (page_val <$> from_pages))")]
     pub unsafe fn merge(mut from_pages: Vec<Page<UnAllocated>>, new_size: PageSize) -> Self {
         assert!(from_pages.len() > 2);
         assert!(from_pages[0].address.is_aligned_to(new_size.in_bytes()));
         assert!(new_size.in_bytes() / from_pages[0].size.in_bytes() == from_pages.len());
         assert!(from_pages[0].start_address() + new_size.in_bytes() == from_pages[from_pages.len() - 1].end_address());
+
+        // NOTE: logically, this is a big step.
+        // - We need to go over the whole vector and get the ownership.
+        // - From each page, we extract the ownership
+        // - then merge the big array
         unsafe { Self::init(from_pages.swap_remove(0).address, new_size) }
     }
 }
@@ -199,7 +210,6 @@
 impl Page<Allocated> {
     /// Clears the entire memory content by writing 0s to it and then converts the Page from Allocated to UnAllocated so it can be returned
     /// to the page allocator.
-    #[rr::only_spec]
     #[rr::returns("mk_page self.(page_loc) self.(page_sz) (zero_page self.(page_sz))")]
     pub fn deallocate(mut self) -> Page<UnAllocated> {
         self.clear();
@@ -267,8 +277,8 @@
     }
 
     #[rr::returns("self.(page_sz)")]
-    pub fn size(&self) -> &PageSize {
-        &self.size
+    pub fn size(&self) -> PageSize {
+        self.size
     }
 
     /// Writes data to a page at a given offset. Error is returned if an invalid offset was passed
diff --git a/verification/RefinedRust.toml b/verification/RefinedRust.toml
index 86811f9..544ff7d 100644
--- a/verification/RefinedRust.toml
+++ b/verification/RefinedRust.toml
@@ -2,7 +2,7 @@ dump_borrowck_info=false
 log_dir = "./log"
 output_dir = "./rust_proofs"
 run_check = false
-admit_proofs = true
+admit_proofs = false
 skip_unsupported_features = false
 extra_specs = "./extra_specs.v"
 generate_dune_project = false
diff --git a/verification/refinedrust b/verification/refinedrust
index 1d5e2ae..f6a2792 160000
--- a/verification/refinedrust
+++ b/verification/refinedrust
@@ -1 +1 @@
-Subproject commit 1d5e2aedc7732fdc4a865d792a3e20d91e8a35e1
+Subproject commit f6a2792b456d28ffbdef16d61324f44615f37b77
diff --git a/verification/rust_proofs/ace/.gitignore b/verification/rust_proofs/ace/.gitignore
new file mode 100644
index 0000000..2295120
--- /dev/null
+++ b/verification/rust_proofs/ace/.gitignore
@@ -0,0 +1,5 @@
+generated
+proofs/dune
+interface.rrlib
+*/.nia.cache
+*/.lia.cache
diff --git a/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_in_bytes.v b/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_in_bytes.v
index 40e3074..6d0886c 100644
--- a/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_in_bytes.v
+++ b/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_in_bytes.v
@@ -18,5 +18,5 @@ Proof.
 Unshelve. all: sidecond_solver.
 Unshelve. all: sidecond_hammer.
 Unshelve. all: print_remaining_sidecond.
-Admitted. (* admitted due to admit_proofs config flag *)
+Qed.
 End proof.
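
All of the proof-file hunks that follow are the same mechanical change. Because `admit_proofs` is now set to `false` in `verification/RefinedRust.toml`, the side-condition automation must actually discharge every remaining goal, so each regenerated proof script closes with `Qed.` instead of `Admitted.`. The shared tail of these generated Coq scripts, exactly as visible in the hunks, is:

    Unshelve. all: sidecond_solver.
    Unshelve. all: sidecond_hammer.
    Unshelve. all: print_remaining_sidecond.
    Qed.

Only the per-function lemma statement above this tail differs from file to file.
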
diff --git a/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_larger.v b/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_larger.v
index 9e69438..257d788 100644
--- a/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_larger.v
+++ b/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_larger.v
@@ -18,5 +18,5 @@ Proof.
 Unshelve. all: sidecond_solver.
 Unshelve. all: sidecond_hammer.
 Unshelve. all: print_remaining_sidecond.
-Admitted. (* admitted due to admit_proofs config flag *)
+Qed.
 End proof.
diff --git a/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_largest.v b/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_largest.v
index 4d72082..2d5df3c 100644
--- a/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_largest.v
+++ b/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_largest.v
@@ -18,5 +18,5 @@ Proof.
 Unshelve. all: sidecond_solver.
 Unshelve. all: sidecond_hammer.
 Unshelve. all: print_remaining_sidecond.
-Admitted. (* admitted due to admit_proofs config flag *)
+Qed.
 End proof.
diff --git a/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_number_of_smaller_pages.v b/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_number_of_smaller_pages.v
index 67b4d47..142878f 100644
--- a/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_number_of_smaller_pages.v
+++ b/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_number_of_smaller_pages.v
@@ -19,5 +19,5 @@ Proof.
 Unshelve. all: sidecond_hammer.
 all: unsafe_unfold_common_caesium_defs; simpl; lia.
 Unshelve. all: print_remaining_sidecond.
-Admitted. (* admitted due to admit_proofs config flag *)
+Qed.
 End proof.
diff --git a/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_smaller.v b/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_smaller.v
index 305e456..1721b9f 100644
--- a/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_smaller.v
+++ b/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_smaller.v
@@ -18,5 +18,5 @@ Proof.
 Unshelve. all: sidecond_solver.
 Unshelve. all: sidecond_hammer.
 Unshelve. all: print_remaining_sidecond.
-Admitted. (* admitted due to admit_proofs config flag *)
+Qed.
 End proof.
diff --git a/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_smallest.v b/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_smallest.v
index 7f4d65c..790200b 100644
--- a/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_smallest.v
+++ b/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_smallest.v
@@ -18,5 +18,5 @@ Proof.
 Unshelve. all: sidecond_solver.
 Unshelve. all: sidecond_hammer.
 Unshelve. all: print_remaining_sidecond.
-Admitted. (* admitted due to admit_proofs config flag *)
+Qed.
 End proof.
diff --git a/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_table_level_PageTableLevel_lower.v b/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_table_level_PageTableLevel_lower.v index 92dc9a1..fab8867 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_table_level_PageTableLevel_lower.v +++ b/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_table_level_PageTableLevel_lower.v @@ -18,5 +18,5 @@ Proof. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. Unshelve. all: print_remaining_sidecond. -Admitted. (* admitted due to admit_proofs config flag *) +Qed. End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_paging_system_PagingSystem_levels.v b/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_paging_system_PagingSystem_levels.v index 392c736..d6ad186 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_paging_system_PagingSystem_levels.v +++ b/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_paging_system_PagingSystem_levels.v @@ -18,5 +18,5 @@ Proof. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. Unshelve. all: print_remaining_sidecond. -Admitted. (* admitted due to admit_proofs config flag *) +Qed. End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_MemoryLayout_confidential_address_at_offset_bounded.v b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_MemoryLayout_confidential_address_at_offset_bounded.v index 477e62c..fc71746 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_MemoryLayout_confidential_address_at_offset_bounded.v +++ b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_MemoryLayout_confidential_address_at_offset_bounded.v @@ -18,5 +18,5 @@ Proof. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. Unshelve. all: print_remaining_sidecond. -Admitted. (* admitted due to admit_proofs config flag *) +Qed. End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_MemoryLayout_read.v b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_MemoryLayout_read.v index 061025f..5897b40 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_MemoryLayout_read.v +++ b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_MemoryLayout_read.v @@ -22,5 +22,5 @@ Proof. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. Unshelve. all: print_remaining_sidecond. -Admitted. (* admitted due to admit_proofs config flag *) +Qed. End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_as_usize.v b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_as_usize.v index 4c6a353..11bb03a 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_as_usize.v +++ b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_as_usize.v @@ -18,5 +18,5 @@ Proof. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. Unshelve. all: print_remaining_sidecond. -Admitted. (* admitted due to admit_proofs config flag *) +Qed. End proof. 
diff --git a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_into_mut_ptr.v b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_into_mut_ptr.v index 8ad9662..4af1aad 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_into_mut_ptr.v +++ b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_into_mut_ptr.v @@ -21,5 +21,5 @@ Proof. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. Unshelve. all: print_remaining_sidecond. -Admitted. (* admitted due to admit_proofs config flag *) +Qed. End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_new.v b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_new.v index 1a44c76..182228b 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_new.v +++ b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_new.v @@ -18,5 +18,5 @@ Proof. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. Unshelve. all: print_remaining_sidecond. -Admitted. (* admitted due to admit_proofs config flag *) +Qed. End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_offset_from.v b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_offset_from.v index 8804b9a..a6df42f 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_offset_from.v +++ b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_offset_from.v @@ -18,5 +18,5 @@ Proof. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. Unshelve. all: print_remaining_sidecond. -Admitted. (* admitted due to admit_proofs config flag *) +Qed. End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_read_volatile.v b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_read_volatile.v index b10dcfc..9022894 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_read_volatile.v +++ b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_read_volatile.v @@ -18,5 +18,5 @@ Proof. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. Unshelve. all: print_remaining_sidecond. -Admitted. (* admitted due to admit_proofs config flag *) +Qed. End proof. 
diff --git a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_to_ptr.v b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_to_ptr.v index bcf0131..0a23020 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_to_ptr.v +++ b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_to_ptr.v @@ -18,5 +18,5 @@ Proof. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. Unshelve. all: print_remaining_sidecond. -Admitted. (* admitted due to admit_proofs config flag *) +Qed. End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_write_volatile.v b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_write_volatile.v index 3b0454f..ba0966e 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_write_volatile.v +++ b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_write_volatile.v @@ -18,5 +18,5 @@ Proof. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. Unshelve. all: print_remaining_sidecond. -Admitted. (* admitted due to admit_proofs config flag *) +Qed. End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_non_confidential_memory_address_NonConfidentialMemoryAddress_as_ptr.v b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_non_confidential_memory_address_NonConfidentialMemoryAddress_as_ptr.v index 99fbecd..73c99f8 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_non_confidential_memory_address_NonConfidentialMemoryAddress_as_ptr.v +++ b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_non_confidential_memory_address_NonConfidentialMemoryAddress_as_ptr.v @@ -18,5 +18,5 @@ Proof. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. Unshelve. all: print_remaining_sidecond. -Admitted. (* admitted due to admit_proofs config flag *) +Qed. End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_memory_protector_mmu_page_size_PageSize_all_from_largest_to_smallest.v b/verification/rust_proofs/ace/proofs/proof_core_memory_protector_mmu_page_size_PageSize_all_from_largest_to_smallest.v index 632c5cd..6bcc26e 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_memory_protector_mmu_page_size_PageSize_all_from_largest_to_smallest.v +++ b/verification/rust_proofs/ace/proofs/proof_core_memory_protector_mmu_page_size_PageSize_all_from_largest_to_smallest.v @@ -20,5 +20,5 @@ Proof. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. Unshelve. all: print_remaining_sidecond. -Admitted. (* admitted due to admit_proofs config flag *) +Qed. End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_memory_protector_mmu_page_size_PageSize_smallest.v b/verification/rust_proofs/ace/proofs/proof_core_memory_protector_mmu_page_size_PageSize_smallest.v index a33d90c..c740ad7 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_memory_protector_mmu_page_size_PageSize_smallest.v +++ b/verification/rust_proofs/ace/proofs/proof_core_memory_protector_mmu_page_size_PageSize_smallest.v @@ -18,5 +18,5 @@ Proof. Unshelve. 
all: sidecond_solver. Unshelve. all: sidecond_hammer. Unshelve. all: print_remaining_sidecond. -Admitted. (* admitted due to admit_proofs config flag *) +Qed. End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_add_memory_region.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_add_memory_region.v new file mode 100644 index 0000000..2208a9e --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_add_memory_region.v @@ -0,0 +1,64 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace generated_template_core_page_allocator_allocator_PageAllocator_add_memory_region. + +Set Default Proof Using "Type". + +Section proof. +Context `{RRGS : !refinedrustGS Σ}. + +Lemma core_page_allocator_allocator_PageAllocator_add_memory_region_proof (π : thread_id) : + core_page_allocator_allocator_PageAllocator_add_memory_region_lemma π. +Proof. + core_page_allocator_allocator_PageAllocator_add_memory_region_prelude. + + rep 1000 liRStep; liShow. + rep 1000 liRStep; liShow. + rep 1000 liRStep; liShow. + rep 1000 liRStep; liShow. + rep 1000 liRStep; liShow. + rep 1000 liRStep; liShow. + { + + rep 1000 liRStep; liShow. + rep 1000 liRStep; liShow. + { rep 1000 liRStep; liShow. + rep 1000 liRStep; liShow. + + rep 1000 liRStep; liShow. + rep 1000 liRStep; liShow. + { + rep 1000 liRStep; liShow. + rep 1000 liRStep; liShow. + rep 1000 liRStep; liShow. + rep 1000 liRStep; liShow. + rep 1000 liRStep; liShow. } + rep 1000 liRStep; liShow. + rep 1000 liRStep; liShow. + rep 1000 liRStep; liShow. + rep 1000 liRStep; liShow. + rep 1000 liRStep; liShow. + rep 1000 liRStep; liShow. + rep 1000 liRStep; liShow. + rep 1000 liRStep; liShow. + rep 1000 liRStep; liShow. + rep 1000 liRStep; liShow. + rep 1000 liRStep; liShow. + rep <-! liRStep; liShow. + { + Set Printing Depth 200. + (* + 1. split the array + 2. reinterpret the first part to an array of usize + *) + + + + repeat liRStep; liShow. + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. all: sidecond_hammer. + Unshelve. all: print_remaining_sidecond. +Qed. +End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_empty.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_empty.v new file mode 100644 index 0000000..7db7f90 --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_empty.v @@ -0,0 +1,30 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace generated_template_core_page_allocator_allocator_PageAllocator_empty. + +Set Default Proof Using "Type". + +Section proof. +Context `{RRGS : !refinedrustGS Σ}. + +Lemma core_page_allocator_allocator_PageAllocator_empty_proof (π : thread_id) : + core_page_allocator_allocator_PageAllocator_empty_lemma π. +Proof. + core_page_allocator_allocator_PageAllocator_empty_prelude. + + repeat liRStep; liShow. + liInst Hevar Size128TiB. + liInst Hevar0 0. + repeat liRStep; liShow. + liInst Hevar (mk_page_node Size128TiB 0 PageTokenUnavailable false). + repeat liRStep. + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. + { exists 0. lia. 
} + { apply page_size_in_bytes_nat_in_usize. } + all: sidecond_hammer. + Unshelve. all: print_remaining_sidecond. +Qed. +End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_find_largest_page_size.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_find_largest_page_size.v new file mode 100644 index 0000000..317a2ce --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_find_largest_page_size.v @@ -0,0 +1,36 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace generated_template_core_page_allocator_allocator_PageAllocator_find_largest_page_size. + +Set Default Proof Using "Type". + +Section proof. +Context `{RRGS : !refinedrustGS Σ}. + +Lemma core_page_allocator_allocator_PageAllocator_find_largest_page_size_proof (π : thread_id) : + core_page_allocator_allocator_PageAllocator_find_largest_page_size_lemma π. +Proof. + core_page_allocator_allocator_PageAllocator_find_largest_page_size_prelude. + + rep liRStep; liShow. + 1: rewrite H152. + all: rep liRStep; liShow. + 1: rewrite H149. + all: rep liRStep; liShow. + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. all: sidecond_hammer. + + all: try match goal with + | H : _ `aligned_to` page_size_in_bytes_nat ?x |- _ => + rename x into sz'; + specialize (page_size_in_bytes_nat_ge sz') as ? + end. + all: unfold page_size_in_bytes_Z in *. + all: try lia. + all: by eapply address_aligned_to_page_size_smaller. + + Unshelve. all: print_remaining_sidecond. +Qed. +End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token_from_this_node.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token_from_this_node.v new file mode 100644 index 0000000..0f8b88d --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token_from_this_node.v @@ -0,0 +1,31 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace generated_template_core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token_from_this_node. + +Set Default Proof Using "Type". + +Section proof. +Context `{RRGS : !refinedrustGS Σ}. + +Hint Unfold Z.divide : lithium_rewrite. + +Lemma core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token_from_this_node_proof (π : thread_id) : + core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token_from_this_node_lemma π. +Proof. + core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token_from_this_node_prelude. + + rep <-! liRStep; liShow. + rep liRStep. + liInst Hevar (mk_page_node self.(max_node_size) self.(base_address) PageTokenUnavailable self.(children_initialized)). + rep liRStep. + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. all: sidecond_hammer. + all: move: INV_CASE; + unfold page_storage_node_invariant_case; + rewrite Hstate/=; + sidecond_hammer. + Unshelve. all: print_remaining_sidecond. +Qed. +End proof. 
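The `liInst Hevar (mk_page_node …)` step in the proof above pins the existential for the post-state node: everything is copied over from `self` except the allocation state. A toy, self-contained model of that record-update shape (all names here are illustrative, not the codebase's):

Require Import ZArith.

Record node := mk_node { node_sz : nat; node_base : Z; node_avail : bool }.

(* Acquiring the token flips availability but keeps size and base address. *)
Definition take_token (n : node) : node :=
  mk_node n.(node_sz) n.(node_base) false.

Lemma take_token_base n : (take_token n).(node_base) = n.(node_base).
Proof. reflexivity. Qed.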
diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_calculate_child_index.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_calculate_child_index.v new file mode 100644 index 0000000..fd3b8c8 --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_calculate_child_index.v @@ -0,0 +1,49 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace generated_template_core_page_allocator_allocator_PageStorageTreeNode_calculate_child_index. + +Set Default Proof Using "Type". + +Section proof. +Context `{RRGS : !refinedrustGS Σ}. + +Lemma core_page_allocator_allocator_PageStorageTreeNode_calculate_child_index_proof (π : thread_id) : + core_page_allocator_allocator_PageStorageTreeNode_calculate_child_index_lemma π. +Proof. + core_page_allocator_allocator_PageStorageTreeNode_calculate_child_index_prelude. + + repeat liRStep; liShow. + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. all: sidecond_hammer. + { move: Hrange; unfold page_within_range. solve_goal. } + { move: Hrange; unfold page_within_range; solve_goal. } + { specialize (page_size_in_words_nat_ge child_node_sz). sidecond_hammer. } + { unfold page_size_in_bytes_Z. specialize (page_size_in_words_nat_ge child_node_sz). sidecond_hammer. } + { (* compute the index *) + etrans; last apply Z.quot_pos; first done; first lia. + unfold page_size_in_bytes_Z. specialize (page_size_in_bytes_nat_ge child_node_sz). + lia. } + { unfold page_size_in_bytes_Z. specialize (page_size_in_bytes_nat_ge child_node_sz) as ?. + apply Z.quot_le_upper_bound; first lia. + nia. } + { unfold page_size_in_bytes_Z. specialize (page_size_in_bytes_nat_ge child_node_sz) as ?. + apply Z.quot_lt_upper_bound; [lia | lia | ]. + opose proof (page_size_multiplier_size_in_bytes this_node_page_size child_node_sz _) as Ha. + { by rewrite Hchild. } + rewrite -Nat2Z.inj_mul. rewrite -Ha. + destruct Hsz_lt as [Hsz_lt | ->]. + - apply page_size_in_bytes_nat_mono in Hsz_lt. + move: Hsz_lt. + move: Hrange. + admit. + - move: Hrange. + unfold page_within_range. simpl. + admit. + } + { admit. } + + Unshelve. all: print_remaining_sidecond. +Admitted. +End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_child_address_and_size.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_child_address_and_size.v new file mode 100644 index 0000000..fb79e16 --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_child_address_and_size.v @@ -0,0 +1,55 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace generated_template_core_page_allocator_allocator_PageStorageTreeNode_child_address_and_size. + +Set Default Proof Using "Type". + +Section proof. +Context `{RRGS : !refinedrustGS Σ}. + +Lemma core_page_allocator_allocator_PageStorageTreeNode_child_address_and_size_proof (π : thread_id) : + core_page_allocator_allocator_PageStorageTreeNode_child_address_and_size_lemma π. +Proof. + core_page_allocator_allocator_PageStorageTreeNode_child_address_and_size_prelude. + + repeat liRStep; liShow. + + all: print_remaining_goal. + Unshelve. 
all: sidecond_solver.
+ Unshelve. all: sidecond_hammer.
+ all: rename select (page_size_smaller _ = Some _) into Hsz.
+ - move: INV_IN_RANGE.
+ opose proof (page_size_multiplier_size_in_bytes (max_node_size self) _ _) as ->.
+ { rewrite Hsz//. }
+ move: INV_INIT_CHILDREN. rewrite H_child_init.
+ unfold page_size_in_bytes_Z.
+ nia.
+ - (* same as the previous case *)
+ move: INV_IN_RANGE.
+ opose proof (page_size_multiplier_size_in_bytes (max_node_size self) _ _) as ->.
+ { rewrite Hsz//. }
+ move: INV_INIT_CHILDREN. rewrite H_child_init.
+ unfold page_size_in_bytes_Z. nia.
+ - move: INV_IN_RANGE.
+ opose proof (page_size_multiplier_size_in_bytes (max_node_size self) _ _) as ->.
+ { rewrite Hsz//. }
+ move: INV_INIT_CHILDREN. rewrite H_child_init.
+ unfold page_size_in_bytes_Z.
+ nia.
+ - move: INV_IN_RANGE.
+ opose proof (page_size_multiplier_size_in_bytes (max_node_size self) _ _) as ->.
+ { rewrite Hsz//. }
+ move: INV_INIT_CHILDREN. rewrite H_child_init.
+ unfold page_size_in_bytes_Z.
+ nia.
+ - unfold child_base_address.
+ f_equiv. rewrite Z.mul_comm. f_equiv.
+ congruence.
+ - congruence.
+ - move: Hsz.
+ revert select (max_node_size self = Size4KiB).
+ intros ->.
+ simpl. done.
+ Unshelve. all: print_remaining_sidecond.
+Qed.
+End proof.
diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_divide_page_token.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_divide_page_token.v
new file mode 100644
index 0000000..210f8bf
--- /dev/null
+++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_divide_page_token.v
@@ -0,0 +1,53 @@
+From caesium Require Import lang notation.
+From refinedrust Require Import typing shims.
+From sm.ace.generated Require Import generated_code_ace generated_specs_ace generated_template_core_page_allocator_allocator_PageStorageTreeNode_divide_page_token.
+
+Set Default Proof Using "Type".
+
+Section proof.
+Context `{RRGS : !refinedrustGS Σ}.
+
+Lemma core_page_allocator_allocator_PageStorageTreeNode_divide_page_token_proof (π : thread_id) :
+ core_page_allocator_allocator_PageStorageTreeNode_divide_page_token_lemma π.
+Proof.
+ core_page_allocator_allocator_PageStorageTreeNode_divide_page_token_prelude.
+
+ rep 1000 liRStep; liShow.
+ rep 1000 liRStep; liShow.
+ rep 1000 liRStep; liShow.
+ { rep 1000 liRStep; liShow.
+ rep 1000 liRStep; liShow.
+ rep 1000 liRStep; liShow.
+ rep 1000 liRStep; liShow.
+ {
+ rep 1000 liRStep; liShow.
+ rep 1000 liRStep; liShow.
+ rep 1000 liRStep; liShow.
+ rep 1000 liRStep; liShow.
+ rep 1000 liRStep; liShow.
+ { rep liRStep; liShow. }
+ { rep 1000 liRStep.
+ rep 1000 liRStep.
+ rep 100 liRStep.
+ rep 100 liRStep.
+ rep 100 liRStep.
+ rep <-! liRStep.
+
+ rep liRStep.
+ liShow.
+ liInst Hevar (mk_page_node self.(max_node_size) self.(base_address) (PageTokenPartiallyAvailable smaller_size) true).
+
+ rep liRStep. }
+ }
+ rep liRStep. }
+
+ all: print_remaining_goal.
+ Unshelve. all: sidecond_solver.
+ Unshelve. all: try lia.
+
+ (* 12: from children_wf *)
+ (*Unshelve. all: sidecond_hammer.*)
+ (*Unshelve. all: print_remaining_sidecond.*)
+(*Qed.*)
+Admitted.
+End proof.
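Most of the `nia` side conditions discharged in the two proofs above reduce to a single arithmetic fact: the i-th child of size `csz` stays inside a parent spanning `m * csz` bytes. Stated standalone (an illustrative lemma, not one of the development's):

Require Import ZArith Psatz.
Open Scope Z_scope.

(* The i-th child [base + i*csz, base + i*csz + csz) ends below the parent end. *)
Lemma child_within_parent (base csz i m : Z) :
  0 <= csz -> 0 <= i < m ->
  base + i * csz + csz <= base + m * csz.
Proof. nia. Qed.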
diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_empty.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_empty.v new file mode 100644 index 0000000..b8c747a --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_empty.v @@ -0,0 +1,22 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace generated_template_core_page_allocator_allocator_PageStorageTreeNode_empty. + +Set Default Proof Using "Type". + +Section proof. +Context `{RRGS : !refinedrustGS Σ}. + +Lemma core_page_allocator_allocator_PageStorageTreeNode_empty_proof (π : thread_id) : + core_page_allocator_allocator_PageStorageTreeNode_empty_lemma π. +Proof. + core_page_allocator_allocator_PageStorageTreeNode_empty_prelude. + + repeat liRStep; liShow. + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. all: sidecond_hammer. + Unshelve. all: print_remaining_sidecond. +Qed. +End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_store_page_token_in_this_node.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_store_page_token_in_this_node.v new file mode 100644 index 0000000..2e05adb --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_store_page_token_in_this_node.v @@ -0,0 +1,30 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace generated_template_core_page_allocator_allocator_PageStorageTreeNode_store_page_token_in_this_node. + +Set Default Proof Using "Type". + +Section proof. +Context `{RRGS : !refinedrustGS Σ}. + +Hint Unfold Z.divide : lithium_rewrite. + +Lemma core_page_allocator_allocator_PageStorageTreeNode_store_page_token_in_this_node_proof (π : thread_id) : + core_page_allocator_allocator_PageStorageTreeNode_store_page_token_in_this_node_lemma π. +Proof. + core_page_allocator_allocator_PageStorageTreeNode_store_page_token_in_this_node_prelude. + + rep liRStep; liShow. + { liInst Hevar (mk_page_node self.(max_node_size) self.(base_address) PageTokenAvailable self.(children_initialized)). + rep liRStep; liShow. } + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. all: sidecond_hammer. + { move: INV_CASE. + unfold name_hint. unfold page_storage_node_invariant_case. + solve_goal. } + + Unshelve. all: print_remaining_sidecond. +Qed. +End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_address.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_address.v index dfa896c..ceedbc8 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_address.v +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_address.v @@ -18,5 +18,5 @@ Proof. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. Unshelve. all: print_remaining_sidecond. -Admitted. (* admitted due to admit_proofs config flag *) +Qed. End proof. 
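The `Hint Unfold Z.divide : lithium_rewrite` declarations used in the tree-node proofs above work because `Z.divide` is literally an existential over a factor; a standalone sanity check of that definition (not project code):

Require Import ZArith Lia.
Open Scope Z_scope.

(* Z.divide unfolds to the existence of a factor. *)
Lemma divide_unfolds (a : Z) : (8 | a) <-> exists k, a = k * 8.
Proof. split; intros [k Hk]; exists k; lia. Qed.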
diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_end_address.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_end_address.v index 9e65737..8e5fb97 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_end_address.v +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_end_address.v @@ -18,11 +18,11 @@ Proof. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. { revert select (_ < (conf_end _).2). - specialize (conf_end_in_usize x). + specialize (conf_end_in_usize x0). rewrite /page_size_in_bytes_Z /page_size_in_bytes_nat; solve_goal. } { revert select (_ < (conf_end _).2). - specialize (conf_end_in_usize x). + specialize (conf_end_in_usize x0). rewrite /page_size_in_bytes_Z /page_size_in_bytes_nat; solve_goal. } Unshelve. all: print_remaining_sidecond. -Admitted. (* admitted due to admit_proofs config flag *) +Qed. End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_end_address_ptr.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_end_address_ptr.v index 1e059cd..69fa245 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_end_address_ptr.v +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_end_address_ptr.v @@ -18,5 +18,5 @@ Proof. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. Unshelve. all: print_remaining_sidecond. -Admitted. (* admitted due to admit_proofs config flag *) +Qed. End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_size.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_size.v index 8364b0e..620be67 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_size.v +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_size.v @@ -18,5 +18,5 @@ Proof. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. Unshelve. all: print_remaining_sidecond. -Admitted. (* admitted due to admit_proofs config flag *) +Qed. End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_start_address.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_start_address.v index 9d98b65..03a578d 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_start_address.v +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_start_address.v @@ -18,5 +18,5 @@ Proof. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. Unshelve. all: print_remaining_sidecond. -Admitted. (* admitted due to admit_proofs config flag *) +Qed. End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_write.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_write.v index 3f403b7..623f5c8 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_write.v +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_write.v @@ -7,6 +7,9 @@ Set Default Proof Using "Type". Section proof. Context `{RRGS : !refinedrustGS Σ}. +Global Instance bla_copy : Copyable (error_Error_ty ). +Proof. apply _. Qed. + Lemma core_page_allocator_page_Page_T_write_proof (π : thread_id) : core_page_allocator_page_Page_T_write_lemma π. Proof. @@ -20,14 +23,14 @@ Proof. 
apply_update (updateable_typed_array_access self off' (IntSynType usize)).
- (*rep <-! liRStep; liShow.*)
- rep liRStep; liShow.
+ repeat liRStep; liShow.
 }
 { rep liRStep; liShow. }
 { rep liRStep; liShow. }
 all: print_remaining_goal.
- Unshelve. all: sidecond_solver.
+ Unshelve.
+ all: sidecond_solver.
 Unshelve. all: sidecond_hammer.
 { revert select (_ + off' * 8 < _ + page_size_in_bytes_Z _).
@@ -46,7 +49,7 @@ Proof.
 rewrite bytes_per_int_usize bytes_per_addr_eq.
 normalize_and_simpl_goal. }
 { rewrite /name_hint.
- rename select (¬ _ < _ < _) into Hnlt.
+ rename select (¬ _ < _ ≤ _) into Hnlt.
 intros []. apply Hnlt. split.
 { enough (bytes_per_int usize > 0) by lia.
 rewrite bytes_per_int_usize bytes_per_addr_eq.
@@ -60,5 +63,5 @@ Proof.
 apply Z.rem_divide; done. }
 Unshelve. all: print_remaining_sidecond.
-Admitted. (* admitted due to admit_proofs config flag *)
+Qed.
 End proof.
diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_Allocated_deallocate.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_Allocated_deallocate.v
index 5665f31..9195c61 100644
--- a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_Allocated_deallocate.v
+++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_Allocated_deallocate.v
@@ -12,11 +12,11 @@ Lemma core_page_allocator_page_Page_core_page_allocator_page_Allocated_deallocat
 Proof.
 core_page_allocator_page_Page_core_page_allocator_page_Allocated_deallocate_prelude.
- repeat liRStep; liShow.
+ rep <-! liRStep; liShow.
 all: print_remaining_goal.
 Unshelve. all: sidecond_solver.
 Unshelve. all: sidecond_hammer.
 Unshelve. all: print_remaining_sidecond.
-Admitted. (* admitted due to admit_proofs config flag *)
-End proof.
\ No newline at end of file
+Qed.
+End proof.
diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_Allocated_read.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_Allocated_read.v
index 8472b51..d7292b8 100644
--- a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_Allocated_read.v
+++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_Allocated_read.v
@@ -45,7 +45,7 @@ Proof.
 rewrite bytes_per_int_usize bytes_per_addr_eq.
 split; last done. nia. }
 { rewrite /name_hint.
- rename select (¬ _ < _ < _) into Hnlt.
+ rename select (¬ _ < _ ≤ _) into Hnlt.
 intros []. apply Hnlt. split.
 { enough (bytes_per_int usize > 0) by lia.
 rewrite bytes_per_int_usize bytes_per_addr_eq.
@@ -59,5 +59,5 @@ Proof.
 apply Z.rem_divide; done. }
 Unshelve. all: print_remaining_sidecond.
-Admitted.
+Qed.
 End proof.
diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide.v
index d77fe58..7c37302 100644
--- a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide.v
+++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide.v
@@ -62,18 +62,6 @@ Proof.
 rep liRStep; liShow.
 liInst Hevar INV.
- Search once_status once_status_tok. - Search once_status_tok. - iRename select (once_init_tok _ memly) into "Htok1". - iRename select (once_init_tok _ x) into "Htok2". - iPoseProof (once_init_tok_agree with "Htok1 Htok2") as "<-". - iRename select (once_status _ _) into "Hstatus". - rename select (static_has_refinement _ _) into Hrfn1. - iDestruct "Hstatus" as "(%x9 & %Hrfn & Htok3)". - specialize (static_has_refinement_inj _ _ _ Hrfn1 Hrfn) as (Heq & Heq2). - rewrite (UIP_refl _ _ Heq) in Heq2. - simpl in Heq2. subst x9. - (* Prove initialization of the invariant *) unfold INV at 1. simpl. @@ -82,7 +70,7 @@ Proof. { iR. rewrite -page_size_multiplier_quot; last done. iSplitR. { rewrite page_size_multiplier_quot_Z; done. } - iR. iR. iR. iR. iSplitR. { iExists x4. iR. done. } + iR. iR. iR. iR. iSplitR. { iExists _. iR. done. } iApply big_sepL2_elim_l. iPoseProof (big_sepL_extend_r with "Harr") as "Harr". 2: iApply (big_sepL2_wand with "Harr"). { rewrite List.length_seq length_reshape length_replicate. lia. } @@ -100,7 +88,9 @@ Proof. enough (x0 = (<#> take (page_size_in_words_nat smaller_sz) (drop (k * page_size_in_words_nat smaller_sz) pageval))) as -> by done. move: Hlook1. rewrite sublist_lookup_reshape. 2: { specialize (page_size_in_words_nat_ge smaller_sz). lia. } - 2: { rewrite length_fmap -H122. + 2: { + rename select (page_size_in_words_nat _ = length pageval) into Hpage_sz. + rewrite length_fmap -Hpage_sz. erewrite (page_size_multiplier_size_in_words _ smaller_sz); last done. lia. } rewrite sublist_lookup_Some'. @@ -166,15 +156,22 @@ Proof. injection Ha as <-. iIntros "Hinv". done. } + Arguments MapInv : simpl never. + Typeclasses Opaque MapInv. rep <-! liRStep. liShow. + rep liRStep. liShow. + iApply prove_with_subtype_default. + iRename select (MapInv _ _ _ _ _) into "Hinv". + iSplitL "Hinv". { done. } + + rep <-! liRStep. liShow. + (* discard the invariant on the original self token so that RefinedRust does not try to re-establish it *) iRename select (arg_self ◁ₗ[π, _] _ @ _)%I into "Hself". iPoseProof (opened_owned_discard with "Hself") as "Hself". - clear Heq. - (* derive inductive property about the result *) rename x' into new_pages. iRename select (IteratorNextFusedTrans _ _ _ _ _) into "Hiter". @@ -258,5 +255,5 @@ Proof. specialize (page_size_multiplier_in_usize self0). solve_goal. Unshelve. all: print_remaining_sidecond. -Admitted. (* admitted due to admit_proofs config flag *) +Qed. End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide_closure0.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide_closure0.v index c7147ef..c261879 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide_closure0.v +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide_closure0.v @@ -29,7 +29,9 @@ Proof. rename select (if_Ok _ _) into Hok. destruct x' as [ | x']; first last. { exfalso. simpl in *. apply Herr. split; last lia. - move: Hinrange Hpage_end. unfold page_size_in_bytes_Z. lia. } + move: Hinrange . unfold page_size_in_bytes_Z. + specialize (page_size_in_bytes_nat_ge capture_smaller_page_size_). + nia. } simpl in *. revert Hok. rep liRStep. @@ -44,5 +46,5 @@ Proof. - move: Hinrange Hpage_end. rewrite /page_size_in_bytes_Z. nia. Unshelve. all: print_remaining_sidecond. -Admitted. 
(* admitted due to admit_proofs config flag *) +Qed. End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide_closure0_call_mut.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide_closure0_call_mut.v new file mode 100644 index 0000000..44f4e9e --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide_closure0_call_mut.v @@ -0,0 +1,22 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace generated_template_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide_closure0_call_mut. + +Set Default Proof Using "Type". + +Section proof. +Context `{RRGS : !refinedrustGS Σ}. + +Lemma core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide_closure0_call_mut_proof (π : thread_id) : + core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide_closure0_call_mut_lemma π. +Proof. + core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide_closure0_call_mut_prelude. + + repeat liRStep; liShow. + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. all: sidecond_hammer. + Unshelve. all: print_remaining_sidecond. +Qed. +End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide_closure0_call_once.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide_closure0_call_once.v new file mode 100644 index 0000000..be5605a --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide_closure0_call_once.v @@ -0,0 +1,22 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace generated_template_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide_closure0_call_once. + +Set Default Proof Using "Type". + +Section proof. +Context `{RRGS : !refinedrustGS Σ}. + +Lemma core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide_closure0_call_once_proof (π : thread_id) : + core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide_closure0_call_once_lemma π. +Proof. + core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide_closure0_call_once_prelude. + + repeat liRStep; liShow. + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. all: sidecond_hammer. + Unshelve. all: print_remaining_sidecond. +Qed. +End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_init.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_init.v index 4e43e49..9104ae3 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_init.v +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_init.v @@ -7,12 +7,13 @@ Set Default Proof Using "Type". Section proof. Context `{!refinedrustGS Σ}. 
+ Lemma core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_init_proof (π : thread_id) : core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_init_lemma π. Proof. core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_init_prelude. - repeat liRStep; liShow. + repeat liRStep; liShow. all: print_remaining_goal. Unshelve. all: sidecond_solver. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_zeroize.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_zeroize.v new file mode 100644 index 0000000..7074318 --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_zeroize.v @@ -0,0 +1,22 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace generated_template_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_zeroize. + +Set Default Proof Using "Type". + +Section proof. +Context `{RRGS : !refinedrustGS Σ}. + +Lemma core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_zeroize_proof (π : thread_id) : + core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_zeroize_lemma π. +Proof. + core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_zeroize_prelude. + + rep <-! liRStep; liShow. + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. all: sidecond_hammer. + Unshelve. all: print_remaining_sidecond. +Qed. +End proof. diff --git a/verification/rust_proofs/pointers_utility/.gitignore b/verification/rust_proofs/pointers_utility/.gitignore new file mode 100644 index 0000000..4930370 --- /dev/null +++ b/verification/rust_proofs/pointers_utility/.gitignore @@ -0,0 +1,3 @@ +generated +proofs/dune +interface.rrlib \ No newline at end of file diff --git a/verification/theories/page_allocator/dune b/verification/theories/page_allocator/dune index c618803..584b4fc 100644 --- a/verification/theories/page_allocator/dune +++ b/verification/theories/page_allocator/dune @@ -3,4 +3,4 @@ (package ace) (flags -w -notation-overridden -w -redundant-canonical-projection) (synopsis "Manual proofs for ACE Security Monitor page allocator") - (theories Stdlib stdpp iris iris_contrib Ltac2 caesium lithium lrust Equations refinedrust ace.theories.base)) + (theories Stdlib stdpp iris iris_contrib Ltac2 caesium lithium lrust Equations refinedrust rrstd.cmp.theories ace.theories.base)) diff --git a/verification/theories/page_allocator/page.v b/verification/theories/page_allocator/page.v index a68a8f1..e75f42d 100644 --- a/verification/theories/page_allocator/page.v +++ b/verification/theories/page_allocator/page.v @@ -1,4 +1,5 @@ From refinedrust Require Import typing. +From rrstd.cmp.theories Require Import ordering. From ace.theories.base Require Import base. (** * Extra theories for page sizes and pages *) @@ -17,17 +18,19 @@ Global Instance page_size_inh : Inhabited page_size. Proof. exact (populate Size4KiB). Qed. Global Instance page_size_eqdec : EqDecision page_size. Proof. solve_decision. Defined. +Definition page_size_variant (a : page_size) : Z := + match a with + | Size4KiB => 0 + | Size16KiB => 1 + | Size2MiB => 2 + | Size1GiB => 3 + | Size512GiB => 4 + | Size128TiB => 5 + end +. Global Instance page_size_countable : Countable page_size. Proof. 
- refine (inj_countable (λ sz,
- match sz with
- | Size4KiB => 0
- | Size16KiB => 1
- | Size2MiB => 2
- | Size1GiB => 3
- | Size512GiB => 4
- | Size128TiB => 5
- end)
+ refine (inj_countable (λ a, Z.to_nat (page_size_variant a))
 (λ a,
 match a with
 | 0 => Some Size4KiB
@@ -37,12 +40,30 @@ Proof.
 | 4 => Some Size512GiB
 | 5 => Some Size128TiB
 | _ => None
- end) _).
+ end%nat) _).
 intros []; naive_solver.
Qed.
+Global Instance page_size_variant_inj :
+ Inj (=) (=) (page_size_variant).
+Proof.
+ intros sz1 sz2.
+ destruct sz1, sz2; done.
+Qed.
+
+Definition page_size_cmp (a b : page_size) : ordering :=
+ Z.cmp (page_size_variant a) (page_size_variant b).
+Arguments page_size_cmp : simpl never.
+
+Notation "a '<ₚ' b" := (a <o{page_size_cmp} b) (at level 50).
+Notation "a '>ₚ' b" := (a >o{page_size_cmp} b) (at level 50).
+Notation "a '≤ₚ' b" := (a ≤o{page_size_cmp} b) (at level 60).
+Notation "a '≥ₚ' b" := (a ≥o{page_size_cmp} b) (at level 50).
+
+
 (** Number of 64-bit machine words in a page size *)
-Definition page_size_in_words_nat (sz : page_size) : nat :=
+Definition page_size_in_words_nat_def (sz : page_size) : nat :=
 match sz with
 | Size4KiB => 512
 | Size16KiB => 4 * 512
@@ -51,6 +72,9 @@ Definition page_size_in_words_nat (sz : page_size) : nat :=
 | Size512GiB => 512 * 512 * 512 * 512
 | Size128TiB => 512 * 512 * 512 * 512 * 256
 end.
+Definition page_size_in_words_nat_aux : seal (page_size_in_words_nat_def). Proof. by econstructor. Qed.
+Definition page_size_in_words_nat := page_size_in_words_nat_aux.(unseal).
+Definition page_size_in_words_nat_unfold : page_size_in_words_nat = page_size_in_words_nat_def := page_size_in_words_nat_aux.(seal_eq).
 Definition page_size_in_bytes_nat (sz : page_size) : nat :=
 bytes_per_addr * page_size_in_words_nat sz.
@@ -67,7 +91,7 @@ Qed.
 Lemma page_size_in_words_nat_ge sz :
 page_size_in_words_nat sz ≥ 512.
 Proof.
- unfold page_size_in_words_nat.
+ rewrite page_size_in_words_nat_unfold /page_size_in_words_nat_def.
 destruct sz; lia.
 Qed.
 Lemma page_size_in_bytes_nat_ge sz :
@@ -78,6 +102,48 @@ Proof.
 specialize (page_size_in_words_nat_ge sz). lia.
 Qed.
+Lemma page_size_in_bytes_nat_in_usize sz :
+ (Z.of_nat $ page_size_in_bytes_nat sz) ∈ usize.
+Proof.
+ rewrite /page_size_in_bytes_nat page_size_in_words_nat_unfold /page_size_in_words_nat_def.
+ rewrite bytes_per_addr_eq.
+ split.
+ all: unsafe_unfold_common_caesium_defs.
+ all: unfold it_signed, it_byte_size_log, bytes_per_addr_log.
+ all: destruct sz; try lia.
+Qed.
+Lemma page_size_in_bytes_Z_in_usize sz :
+ page_size_in_bytes_Z sz ∈ usize.
+Proof.
+ apply page_size_in_bytes_nat_in_usize.
+Qed.
+
+Lemma page_size_in_words_nat_mono sz1 sz2 :
+ page_size_variant sz1 < page_size_variant sz2 →
+ page_size_in_words_nat sz1 < page_size_in_words_nat sz2.
+Proof.
+ rewrite page_size_in_words_nat_unfold /page_size_in_words_nat_def.
+ unfold page_size_variant.
+ destruct sz1, sz2; lia.
+Qed.
+Lemma page_size_in_bytes_nat_mono sz1 sz2 :
+ page_size_variant sz1 < page_size_variant sz2 →
+ page_size_in_bytes_nat sz1 < page_size_in_bytes_nat sz2.
+Proof.
+ unfold page_size_in_bytes_nat.
+ intros ?%page_size_in_words_nat_mono.
+ unfold bytes_per_addr.
+ nia.
+Qed.
+Lemma page_size_in_bytes_Z_mono sz1 sz2 :
+ page_size_variant sz1 < page_size_variant sz2 →
+ page_size_in_bytes_Z sz1 < page_size_in_bytes_Z sz2.
+Proof.
+ unfold page_size_in_bytes_Z.
+ intros ?%page_size_in_bytes_nat_mono.
+ done.
+Qed.
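Sealing `page_size_in_words_nat` above stops `simpl`, `lia`, and unification from eagerly expanding the `512 * 512 * …` constants; only proofs that rewrite with the `_unfold` equation pay that cost. The same pattern in miniature (a standalone sketch; `big` and friends are illustrative names, and stdpp's `seal` record is assumed):

From stdpp Require Import base.
Require Import Lia.

Definition big_def : nat := 512 * 512 * 512 * 512.
Definition big_aux : seal big_def. Proof. by econstructor. Qed.
Definition big := big_aux.(unseal).
Definition big_unfold : big = big_def := big_aux.(seal_eq).

(* Goals mentioning [big] stay small until we explicitly unfold it. *)
Lemma big_pos : (0 < big)%nat.
Proof. rewrite big_unfold. unfold big_def. lia. Qed.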
+
 (** The next smaller page size *)
 Definition page_size_smaller (sz : page_size) : option page_size :=
@@ -93,6 +159,12 @@ Definition page_size_smaller (sz : page_size) : option page_size :=
 Lemma page_size_smaller_None sz :
 page_size_smaller sz = None ↔ sz = Size4KiB.
 Proof. destruct sz; done. Qed.
+Lemma page_size_smaller_page_size_variant sz sz' :
+ page_size_smaller sz = Some sz' →
+ page_size_variant sz' = page_size_variant sz - 1.
+Proof.
+ destruct sz, sz'; simpl; naive_solver.
+Qed.
 (** The next larger page size *)
 Definition page_size_larger (sz : page_size) : option page_size :=
@@ -107,6 +179,12 @@ Definition page_size_larger (sz : page_size) : option page_size :=
 Lemma page_size_larger_None sz :
 page_size_larger sz = None ↔ sz = Size128TiB.
 Proof. destruct sz; done. Qed.
+Lemma page_size_larger_page_size_variant sz sz' :
+ page_size_larger sz = Some sz' →
+ page_size_variant sz' = page_size_variant sz + 1.
+Proof.
+ destruct sz, sz'; simpl; naive_solver.
+Qed.
 (** Pages should be aligned to the size of the page; compute the log2 of this page's alignment *)
@@ -126,11 +204,43 @@ Lemma page_size_align_is_size sz :
 (page_size_align sz = page_size_in_bytes_nat sz)%nat.
 Proof.
 rewrite /page_size_align/page_size_in_bytes_nat/bytes_per_addr/bytes_per_addr_log.
+ rewrite page_size_in_words_nat_unfold.
 Local Arguments Nat.mul : simpl never.
 destruct sz; simpl; lia.
 Qed.
-(** The maximum address at which a page may be located (one-past-the-end address) *)
+(** Join on page sizes (maximum) *)
+Definition page_size_max (sz1 sz2 : page_size) : page_size :=
+ match sz1, sz2 with
+ | Size128TiB, _ => Size128TiB
+ | _ , Size128TiB => Size128TiB
+ | Size512GiB, _ => Size512GiB
+ | _, Size512GiB => Size512GiB
+ | Size1GiB, _ => Size1GiB
+ | _, Size1GiB => Size1GiB
+ | Size2MiB, _ => Size2MiB
+ | _, Size2MiB => Size2MiB
+ | Size16KiB, _ => Size16KiB
+ | _, Size16KiB => Size16KiB
+ | _, _ => Size4KiB
+ end.
+Global Instance page_size_join : Join page_size := page_size_max.
+
+Lemma page_size_max_ge_l sz1 sz2 :
+ sz1 ≤ₚ sz1 ⊔ sz2.
+Proof.
+ unfold ord_le.
+ destruct sz1, sz2; cbn; naive_solver.
+Qed.
+Lemma page_size_max_ge_r sz1 sz2 :
+ sz2 ≤ₚ sz1 ⊔ sz2.
+Proof.
+ unfold ord_le.
+ destruct sz1, sz2; cbn; naive_solver.
+Qed.
+
+
+(** The maximum address at which a page may be located (one-past-the-end address), limited by the page allocator implementation. *)
 (* Sealed because it is big and will slow down Coq *)
 Definition MAX_PAGE_ADDR_def : Z := page_size_in_bytes_Z Size128TiB.
@@ -173,13 +283,6 @@ Qed.
 Definition page_end_loc (p : page) : loc :=
 p.(page_loc) +ₗ (page_size_in_bytes_Z p.(page_sz)).
-(** Order on page sizes *)
-Definition page_size_le (p1 p2 : page_size) :=
- page_size_in_words_nat p1 ≤ page_size_in_words_nat p2.
-Arguments page_size_le : simpl never.
-
-Infix "≤ₚ" := page_size_le (at level 50).
 (** Well-formedness of a page *)
 Definition page_wf (p : page) : Prop :=
 (** The value has the right size *)
@@ -240,7 +343,8 @@ Proof.
 intros ->. destruct sz; simpl.
 all: unfold page_size_smaller; simplify_eq.
- all: unfold page_size_in_words_nat, page_size_multiplier, page_size_multiplier_log.
+ all: rewrite page_size_in_words_nat_unfold /page_size_in_words_nat_def.
+ all: unfold page_size_multiplier, page_size_multiplier_log.
 all: cbn [Nat.pow]; lia.
 Qed.
 Lemma page_size_multiplier_size_in_bytes sz sz' :
@@ -434,6 +538,22 @@ Proof.
 rewrite /page_size_in_bytes_nat. lia.
 Qed.
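Two quick consistency checks that the `page_size_max` table introduced in this hunk really is a join in the expected sense (illustrative lemmas in the context of page.v, not part of the development):

Lemma page_size_max_idem sz :
  page_size_max sz sz = sz.
Proof. destruct sz; reflexivity. Qed.

Lemma page_size_max_4k_l sz :
  page_size_max Size4KiB sz = sz.
Proof. destruct sz; reflexivity. Qed.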
+Lemma address_aligned_to_page_size_smaller addr sz smaller_sz : + addr `aligned_to` page_size_in_bytes_nat sz → + page_size_smaller sz = Some smaller_sz → + addr `aligned_to` page_size_in_bytes_nat smaller_sz. +Proof. + rewrite -!page_size_align_is_size. + unfold page_size_align. + intros ? Hsmall. + enough ((2^page_size_align_log sz)%nat = 2^page_size_align_log smaller_sz * (2^(page_size_align_log sz - page_size_align_log smaller_sz)))%nat as Heq. + { eapply (base.aligned_to_mul_inv _ _ (2^(page_size_align_log sz - page_size_align_log smaller_sz))). rewrite -Heq. done. } + destruct sz; simpl in Hsmall; try done. + all: injection Hsmall as <-. + all: simpl. + all: lia. +Qed. + (** Lithium automation *) Global Instance simpl_exist_page Q : @@ -449,3 +569,31 @@ Global Instance simpl_forall_page Q : Proof. intros ?. intros []. done. Qed. + +Global Instance simpl_impl_page_size_smaller sz sz' : + SimplImpl false (page_size_smaller sz = Some sz') (λ T, page_size_smaller sz = Some sz' → page_size_variant sz' = page_size_variant sz - 1 → T). +Proof. + intros T. split. + - intros Ha Hsmall. specialize (page_size_smaller_page_size_variant _ _ Hsmall) as ?. + by apply Ha. + - intros Ha. intros. by apply Ha. +Qed. +Global Instance simpl_impl_page_size_larger sz sz' : + SimplImpl false (page_size_larger sz = Some sz') (λ T, page_size_larger sz = Some sz' → page_size_variant sz' = page_size_variant sz + 1 → T). +Proof. + intros T. split. + - intros Ha Hlarge. specialize (page_size_larger_page_size_variant _ _ Hlarge) as ?. + by apply Ha. + - intros Ha. intros. by apply Ha. +Qed. + +Global Instance simpl_both_page_size_smaller_none sz : + SimplBothRel (=) (page_size_smaller sz) None (sz = Size4KiB). +Proof. + split; destruct sz; simpl; done. +Qed. +Global Instance simpl_both_page_size_larger_none sz : + SimplBothRel (=) (page_size_larger sz) None (sz = Size128TiB). +Proof. + split; destruct sz; simpl; done. +Qed. diff --git a/verification/theories/page_allocator/page_allocator.v b/verification/theories/page_allocator/page_allocator.v index 2d3e804..cc16c4e 100644 --- a/verification/theories/page_allocator/page_allocator.v +++ b/verification/theories/page_allocator/page_allocator.v @@ -50,15 +50,28 @@ End page_allocator. (** * Page allocator invariants *) Inductive node_allocation_state := - (** This page node is completely allocated *) - | PageTokenUnavailable - (** The page token in this node is available *) - | PageTokenAvailable - (** The page node is partially available, with a page of the given size being allocable *) - | PageTokenPartiallyAvailable (allocable_sz : page_size) +(** This page node is completely allocated *) +| PageTokenUnavailable +(** The page token in this node is available *) +| PageTokenAvailable +(** The page node is partially available, with a page of the given size being allocable *) +| PageTokenPartiallyAvailable (allocable_sz : page_size) . Global Instance node_allocation_state_eqdec : EqDecision node_allocation_state. Proof. solve_decision. Defined. +Global Instance node_allocation_state_inhabited : Inhabited node_allocation_state. +Proof. apply (populate PageTokenUnavailable). Qed. 
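`address_aligned_to_page_size_smaller` is, at its core, the fact that alignment to a larger power of two implies alignment to a smaller one. The arithmetic kernel, self-contained over Z (the lemma name is illustrative):

Require Import ZArith Lia.
Open Scope Z_scope.

(* Divisibility by 2^k implies divisibility by 2^j for j <= k. *)
Lemma divide_pow2_mono (addr j k : Z) :
  0 <= j <= k -> (2 ^ k | addr) -> (2 ^ j | addr).
Proof.
  intros [Hj Hjk] Hdiv.
  eapply Z.divide_trans; [|exact Hdiv].
  exists (2 ^ (k - j)).
  rewrite <- Z.pow_add_r; [f_equal; lia | lia | lia].
Qed.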
+ +Global Instance node_allocation_state_join : Join node_allocation_state := λ s1 s2, + match s1, s2 with + | PageTokenAvailable, _ => PageTokenAvailable + | _, PageTokenAvailable => PageTokenAvailable + | PageTokenPartiallyAvailable sz1, PageTokenPartiallyAvailable sz2 => + PageTokenPartiallyAvailable (sz1 ⊔ sz2) + | PageTokenPartiallyAvailable sz1, _ => PageTokenPartiallyAvailable sz1 + | _, PageTokenPartiallyAvailable sz1 => PageTokenPartiallyAvailable sz1 + | _, _ => PageTokenUnavailable + end. (** Our logical representation of storage nodes *) Record page_storage_node : Type := mk_page_node { @@ -73,6 +86,13 @@ Record page_storage_node : Type := mk_page_node { (* whether the child nodes have been initialized *) children_initialized : bool; }. +Global Canonical Structure page_storage_nodeRT := directRT page_storage_node. + +Global Instance page_storage_node_inh : Inhabited page_storage_node. +Proof. + constructor. eapply mk_page_node. + all: apply inhabitant. +Qed. (** Compute the base address of a child node *) Definition child_base_address (parent_base_address : Z) (child_size : page_size) (child_index : Z) : Z := @@ -88,7 +108,7 @@ Definition page_storage_node_children_wf (parent_base_address : Z) (parent_node_ (* Then all children are sorted *) (∀ (i : nat) child, children !! i = Some child → child.(max_node_size) = child_node_size ∧ - child.(base_address) = child_base_address parent_base_address parent_node_size i)) ∧ + child.(base_address) = child_base_address parent_base_address child_node_size i)) ∧ (* If there can't be any children, there are no children *) (page_size_smaller parent_node_size = None → children = []) @@ -103,27 +123,17 @@ Definition page_node_can_allocate (node : page_storage_node) : option page_size end. (** The logical invariant on a page node *) -Definition page_storage_node_invariant +Definition page_storage_node_invariant_case (node : page_storage_node) (max_sz : option page_size) (maybe_page_token : option page) (children : list page_storage_node) := - - (* The children, if any, are well-formed *) - page_storage_node_children_wf node.(base_address) node.(max_node_size) children ∧ - (* the base address is suitably aligned *) - (page_size_align node.(max_node_size) | node.(base_address))%Z ∧ - - (* initialization of child nodes *) - (if node.(children_initialized) then length children = page_size_multiplier node.(max_node_size) else True) ∧ - - (* invariant depending on the allocation state: *) if decide (node.(allocation_state) = PageTokenUnavailable) then (* No allocation is possible *) - maybe_page_token = None ∧ max_sz = None ∧ + maybe_page_token = None ∧ max_sz = None (* all children are unavailable *) (* TODO do we need this *) - Forall (λ child, child.(allocation_state) = PageTokenUnavailable) children + (*Forall (λ child, child.(allocation_state) = PageTokenUnavailable) children*) else if decide (node.(allocation_state) = PageTokenAvailable) then (* This node is completely available *) @@ -134,11 +144,9 @@ Definition page_storage_node_invariant max_sz = Some (node.(max_node_size)) ∧ (* the token spans the whole node *) token.(page_loc).2 = node.(base_address) ∧ - token.(page_sz) = node.(max_node_size) ∧ - + token.(page_sz) = node.(max_node_size) (* all children are unavailable *) - (* TODO: do we need this? 
*) - Forall (λ child, child.(allocation_state) = PageTokenUnavailable) children + (*Forall (λ child, child.(allocation_state) = PageTokenUnavailable) children*) else (* This node is partially available with initialized children *) @@ -157,6 +165,29 @@ Definition page_storage_node_invariant page_node_can_allocate child = Some allocable_sz . +Global Typeclasses Opaque page_storage_node_children_wf. +Global Typeclasses Opaque page_storage_node_invariant_case. + +Global Arguments page_storage_node_children_wf : simpl never. +Global Arguments page_storage_node_invariant_case : simpl never. + + +Definition page_storage_node_invariant + (node : page_storage_node) + (max_sz : option page_size) (maybe_page_token : option page) (children : list page_storage_node) := + + (* The children, if any, are well-formed *) + name_hint "INV_WF" (page_storage_node_children_wf node.(base_address) node.(max_node_size) children) ∧ + (* the base address is suitably aligned *) + (page_size_align node.(max_node_size) | node.(base_address))%Z ∧ + + (* initialization of child nodes *) + (name_hint "INV_INIT_CHILDREN" (if node.(children_initialized) then length children = page_size_multiplier node.(max_node_size) else children = [])) ∧ + + (* invariant depending on the allocation state: *) + name_hint "INV_CASE" (page_storage_node_invariant_case node max_sz maybe_page_token children) +. + Lemma page_storage_node_invariant_empty node_size base_address : (page_size_align node_size | base_address) → page_storage_node_invariant (mk_page_node node_size base_address PageTokenUnavailable false) None None []. @@ -170,3 +201,45 @@ Qed. (* TODO unify all the memory range stuff *) Definition page_within_range (base_address : Z) (sz : page_size) (p : page) : Prop := (base_address ≤ p.(page_loc).2 ∧ p.(page_loc).2 + page_size_in_bytes_Z p.(page_sz) < base_address + page_size_in_bytes_Z sz)%Z. + +Global Arguments page_within_range : simpl never. +Global Typeclasses Opaque page_within_range. + +Definition page_node_update_state (node : page_storage_node) (new_state : node_allocation_state) : page_storage_node := + mk_page_node node.(max_node_size) node.(base_address) new_state node.(children_initialized). + + +Global Instance page_storage_node_invariant_simpl_available node max_sz maybe_tok children `{!TCDone (node.(allocation_state) = PageTokenAvailable)} `{!IsVar max_sz} `{!IsVar maybe_tok} : + SimplImpl false (page_storage_node_invariant_case node max_sz maybe_tok children) (λ T, + page_storage_node_invariant_case node max_sz maybe_tok children → + is_Some max_sz → is_Some maybe_tok → + T). +Proof. + rename select (TCDone _) into Hstate. unfold TCDone in Hstate. + intros T. split. + - intros Ha Hinv. apply Ha. + + done. + + move: Hinv. unfold page_storage_node_invariant_case. rewrite Hstate. + simpl. naive_solver. + + move: Hinv. unfold page_storage_node_invariant_case. rewrite Hstate. + simpl. naive_solver. + - naive_solver. +Qed. + +Global Instance page_storage_node_invariant_simpl_partially_available node max_sz maybe_tok children sz `{!TCDone (node.(allocation_state) = PageTokenPartiallyAvailable sz)} `{!IsVar max_sz} `{!IsVar maybe_tok} : + SimplImpl false (page_storage_node_invariant_case node max_sz maybe_tok children) (λ T, + page_storage_node_invariant_case node max_sz maybe_tok children → + max_sz = Some sz → maybe_tok = None → + T). +Proof. + rename select (TCDone _) into Hstate. unfold TCDone in Hstate. + intros T. split. + - intros Ha Hinv. apply Ha. + + done. + + move: Hinv. unfold page_storage_node_invariant_case. 
rewrite Hstate. + simpl. naive_solver. + + move: Hinv. unfold page_storage_node_invariant_case. rewrite Hstate. + simpl. naive_solver. + - naive_solver. +Qed. +
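+
+(* A hedged sanity check for the [Join] instance above: joining with a fully
+   available state absorbs, matching the first two match arms. Illustrative
+   only. *)
+Example join_available_absorbs s :
+  PageTokenAvailable ⊔ s = PageTokenAvailable.
+Proof. by destruct s. Qed.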
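+
+(* A hedged sanity check for [page_node_update_state] above: updating the
+   allocation state leaves the node geometry (size and base address)
+   untouched, since only the [allocation_state] field is replaced. *)
+Example update_state_preserves_geometry node st :
+  (page_node_update_state node st).(max_node_size) = node.(max_node_size) ∧
+  (page_node_update_state node st).(base_address) = node.(base_address).
+Proof. split; done. Qed.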