// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// capDL bootstrap for CantripOs.
//
// Constructs a system of multiple components according to the capDL
// specification generated by CAmkES. Derived from the C version of
// capdl-loader-app.
#![no_std]
#![allow(non_upper_case_globals)]
#![allow(non_snake_case)]

use capdl::CDL_CapType::*;
use capdl::CDL_FrameFillType_t::*;
use capdl::CDL_FrameFill_BootInfoEnum_t::*;
use capdl::CDL_ObjectType::*;
use capdl::*;
use core::cmp;
use core::convert::TryInto;
use core::mem::size_of;
use core::mem::MaybeUninit;
use core::ptr;
use cpio::CpioNewcReader;
use cstr_core::CStr;
use log::{debug, error, trace};
use sel4_sys::*;
use smallvec::SmallVec;
use static_assertions::*;

// Setup arch- & feature-specific support. Note these must be named
// explicitly below; e.g. loader_alloc::check_untypeds.

// Target-architecture specific support (please keep sorted)
#[cfg_attr(target_arch = "aarch64", path = "arch/aarch64.rs")]
#[cfg_attr(
    all(target_arch = "arm", target_pointer_width = "32"),
    path = "arch/aarch32.rs"
)]
#[cfg_attr(target_arch = "riscv32", path = "arch/riscv32.rs")]
#[cfg_attr(target_arch = "riscv64", path = "arch/riscv64.rs")]
#[cfg_attr(target_arch = "x86", path = "arch/x86.rs")]
#[cfg_attr(target_arch = "x86_64", path = "arch/x86_64.rs")]
mod arch;

use arch::is_irq;
use arch::seL4_Page_Map_Flush;
use arch::PAGE_SIZE; // Base  page size, typically 4KB // XXX temp until moved to sel4_sys

// Allocation-specific support
#[cfg_attr(
    feature = "CONFIG_CAPDL_LOADER_STATIC_ALLOC",
    path = "feature/static_alloc.rs"
)]
#[cfg_attr(
    not(feature = "CONFIG_CAPDL_LOADER_STATIC_ALLOC"),
    path = "feature/dynamic_alloc.rs"
)]
mod loader_alloc;

// MCS feature support
#[cfg_attr(feature = "CONFIG_KERNEL_MCS", path = "feature/mcs.rs")]
#[cfg_attr(not(feature = "CONFIG_KERNEL_MCS"), path = "feature/no_mcs.rs")]
mod scheduler;

// SMP feature support
#[cfg_attr(feature = "CONFIG_SMP_SUPPORT", path = "feature/smp.rs")]
#[cfg_attr(not(feature = "CONFIG_SMP_SUPPORT"), path = "feature/no_smp.rs")]
mod smp;

#[cfg_attr(
    any(feature = "CONFIG_ARM_HYPERVISOR_SUPPORT", feature = "CONFIG_VTX"),
    path = "feature/vcpu.rs"
)]
#[cfg_attr(
    not(any(feature = "CONFIG_ARM_HYPERVISOR_SUPPORT", feature = "CONFIG_VTX")),
    path = "feature/no_vcpu.rs"
)]
mod vcpu;

// Spill TCB arguments to stack support
#[cfg_attr(
    feature = "CONFIG_CAPDL_LOADER_CC_REGISTERS",
    path = "feature/no_spill_tcb_args.rs"
)]
#[cfg_attr(
    not(feature = "CONFIG_CAPDL_LOADER_CC_REGISTERS"),
    path = "feature/spill_tcb_args.rs"
)]
mod tcb_args;

// seL4_WordBits is passed as a depth parameter for CSpace addresses
// (sigh); verify it fits in a u8 until we can change the api's
const_assert!(seL4_WordBits == 32 || seL4_WordBits == 64);

// Number of fill descriptors processed per frame (see init_frames).
const CONFIG_CAPDL_LOADER_FILLS_PER_FRAME: usize = 1;

// Returns a word with only |bit_num| set, i.e. 2^bit_num.
fn BIT(bit_num: usize) -> usize { 1_usize << bit_num }
// Rounds |a| up to the next multiple of |b|; |a| is returned unchanged
// when it is already a multiple. Panics if the intermediate a + b
// overflows (checked_add).
fn ROUND_UP(a: usize, b: usize) -> usize {
    let rem = a % b;
    match rem {
        0 => a,
        _ => usize::checked_add(a, b).unwrap() - rem,
    }
}

// How a capability is transferred into a CNode during CSpace setup.
// NOTE(review): no use sites are visible in this chunk; presumably MOVE
// relinquishes the rootserver's copy while COPY retains it — confirm
// against init_cspaces.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
enum InitCnodeCmode {
    MOVE,
    COPY,
}

// Contract with the client for storage that may not fit on the stack or
// in the heap. This also allows the client to better size data structures.
pub trait ModelState {
    /// Capacity queries used to size the cap tables below.
    fn get_max_objects(&self) -> usize;
    fn get_max_irqs(&self) -> usize;
    fn get_max_sched_ctrl(&self) -> usize;
    fn get_max_untyped_caps(&self) -> usize;

    /// Original capability slot recorded for a capDL object.
    fn get_orig_cap(&self, obj_id: CDL_ObjID) -> seL4_CPtr;
    fn set_orig_cap(&mut self, obj_id: CDL_ObjID, slot: seL4_CPtr);

    /// Duplicate capability slot (if any) for a capDL object; 0 when none
    /// (see handoff_capabilities' use of the tables).
    fn get_dup_cap(&self, obj_id: CDL_ObjID) -> seL4_CPtr;
    fn set_dup_cap(&mut self, obj_id: CDL_ObjID, slot: seL4_CPtr);

    /// IRQ handler capability slot for an interrupt number.
    fn get_irq_cap(&self, irq: CDL_IRQ) -> seL4_CPtr;
    fn set_irq_cap(&mut self, irq: CDL_IRQ, slot: seL4_CPtr);

    /// Scheduler control capability slot for a CPU core
    /// (populated by scheduler::init_sched_ctrl).
    fn get_sched_ctrl_cap(&self, id: CDL_Core) -> seL4_CPtr;
    fn set_sched_ctrl_cap(&mut self, id: CDL_Core, slot: seL4_CPtr);

    /// CPtr of the ix'th UntypedMemory slab used for object creation.
    fn get_untyped_cptr(&self, ix: usize) -> seL4_CPtr;
    fn set_untyped_cptr(&mut self, ix: usize, slot: seL4_CPtr);
}

// Region for mapping one page of data for fills and when (optionally)
// spilling arguments to a TCB's stack. init_copy_region unmaps the
// pages backing this struct so target data can be mapped in.
// NB: C code uses 64KB but we only map 1 PAGE_SIZE frame
#[repr(align(65536))]
struct CopyRegion {
    data: [seL4_Word; PAGE_SIZE / size_of::<seL4_Word>()],
}
// Backing storage lives in .bss; after init_copy_region() runs, the
// frames behind this range are unmapped and the virtual range is reused
// to map target frames (see init_frame).
static mut copy_region: CopyRegion = CopyRegion {
    data: [0; PAGE_SIZE / size_of::<seL4_Word>()],
};

// NB: keep this small as it must fit on a limited-size stack
pub struct CantripOsModel<'a> {
    state: &'a mut dyn ModelState, // Client-provided cap/slot tables
    spec: &'a CDL_Model,           // capDL spec being instantiated
    bootinfo: &'a seL4_BootInfo,   // Kernel-supplied boot information
    capdl_archive: &'a [u8],       // CPIO archive of component files
    executable: &'a [u8],          // This (rootserver) executable image

    // CSpace slot range available for allocation (from bootinfo.empty,
    // adjusted in process_bootinfo).
    free_slot_start: seL4_CPtr,
    free_slot_end: seL4_CPtr,
    // First ARM IOSpace cap (from bootinfo.ioSpaceCaps).
    first_arm_iospace: seL4_CPtr,

    #[cfg(feature = "CONFIG_ARM_SMMU")]
    sid_number: usize,

    // CNode that receives handed-off untypeds; CDL_ObjID_Invalid when
    // no handoff CNode exists (e.g. sel4test).
    untyped_cnode: CDL_ObjID,
    // Cursor & limit into the untyped cap table during object creation.
    untyped_index: usize,
    untyped_max: usize,

    // Pointers to extended bootinfo headers, indexed by header id
    // (see cache_extended_bootinfo_headers).
    extended_bootinfo_table: [*const seL4_BootInfoHeader; SEL4_BOOTINFO_HEADER_NUM],

    vspace_roots: SmallVec<[CDL_ObjID; 32]>, // NB: essentially #components

    // CPIO archive lookup cache.
    last_filename: &'a str, // NB: ref into self.spec
    last_data: &'a [u8],    // NB: ref into self.capdl_archive
}
impl<'a> CantripOsModel<'a> {
    pub fn new(
        state: &'a mut dyn ModelState,
        model: &'a CDL_Model,
        bootinfo: &'a seL4_BootInfo,
        capdl_archive: &'a [u8], // CPIO archive of components
        executable: &'a [u8],    // Executable image.
    ) -> Self {
        CantripOsModel {
            state: state,
            spec: model,
            bootinfo: bootinfo,
            capdl_archive: capdl_archive,
            executable: executable,

            free_slot_start: 0,
            free_slot_end: 0,
            first_arm_iospace: 0,

            #[cfg(feature = "CONFIG_ARM_SMMU")]
            sid_number: 0,

            untyped_cnode: CDL_ObjID_Invalid,
            untyped_index: 0,
            untyped_max: 0,

            extended_bootinfo_table: [ptr::null(); SEL4_BOOTINFO_HEADER_NUM],

            vspace_roots: SmallVec::new(),

            last_filename: "",
            last_data: capdl_archive,
        }
    }

    /// Instantiates the capDL spec: creates all kernel objects and
    /// initializes IRQs, frames, VSpaces, scheduling contexts, TCBs and
    /// CSpaces — in that order. Threads are not resumed here; see
    /// start_threads().
    pub fn init_system(&mut self) -> seL4_Result {
        // Unmap the pages backing copy_region so the range can be reused
        // to map target frames.
        self.init_copy_region();

        // Capture free slots, extended bootinfo and untypeds.
        self.process_bootinfo()?;

        trace!("Create objects...");
        self.create_objects()?; // loader_alloc::create_objects

        self.create_irq_caps()?;
        self.init_sched_ctrl()?; // scheduler::init_sched_ctrl
        self.init_irqs()?;
        self.init_pd_asids()?;

        trace!("Initialize page frames...");
        self.init_frames()?;

        trace!("Initialize VSpaces...");
        self.init_vspaces()?;
        self.init_scs()?; // scheduler::init_scs

        trace!("Initialize TCBs...");
        self.init_tcbs()?;

        trace!("Initialize CSpaces...");
        self.init_cspaces()?;

        Ok(())
    }

    // Hand-off capabilities needed to dynamically create seL4 objects.
    // The MemoryManager needs the UntypedMemory slabs to instantiate
    // objects.
    fn handoff_untypeds(&mut self) -> seL4_Result {
        assert!(is_objid_valid(self.untyped_cnode), "No handoff CNode found");

        // UntypedMemory caps are appended to the CAmkES-generated slots
        // in the CNode identified for handoff. CAmkES is assumed to have
        // sized the CNode to handle moving the untyped memory slabs + 1
        // slot for a "holding CNode".
        let cnode = self.get_object(self.untyped_cnode);
        let dest_root = self.get_orig_cap(self.untyped_cnode);
        // The +1 reserves the first free slot for the holding CNode
        // (filled in later by handoff_capabilities).
        let dest_start = cnode.next_free_slot() + 1;
        let num_untypeds = self.bootinfo.untyped.end - self.bootinfo.untyped.start;

        trace!(
            "Hand-off {} untypeds from src slot {} to dest slot {}",
            num_untypeds,
            self.bootinfo.untyped.start,
            dest_start
        );
        // NB: we let kernel tell us if the CNode is too small.
        for ut in 0..num_untypeds {
            self.handoff_cap(
                self.untyped_cnode,
                /*src_index=*/ self.bootinfo.untyped.start + ut,
                /*dest_root=*/ dest_root,
                /*dest_index=*/ dest_start + ut,
            )?;
        }
        Ok(())
    }

    // Allocates a CNode for use by handoff_holding.
    // Returns the (cptr, depth) of the new CNode, sized to hold
    // |kernel_caps| plus every cap the loader has allocated so far.
    fn create_holding_cnode(&mut self, kernel_caps: usize) -> (seL4_CPtr, usize) {
        // Number of bits needed to represent |count|,
        // i.e. floor(log2(count)) + 1.
        fn count_log2(count: usize) -> usize {
            assert!(count != 0);
            (usize::BITS - usize::leading_zeros(count)) as usize
        }

        // Calculate the cnode size. NB: get_free_slot() is initialized to the
        // first empty slot so adjust in calculating the depth.
        let cnode_slot = self.get_free_slot();
        let cnode_depth = count_log2(kernel_caps + (cnode_slot - self.bootinfo.empty.start));
        trace!(
            "Create holding cnode for {} caps, depth {}",
            kernel_caps + (cnode_slot - self.bootinfo.empty.start),
            cnode_depth
        );

        // NB: only setup the bits we know will be used.
        #[allow(unused_variables)]
        let cnode = CDL_Object {
            #[cfg(feature = "CONFIG_DEBUG_BUILD")]
            name: "HoldingCNode".as_ptr(),
            slots: unsafe { MaybeUninit::<CDL_CapMap>::zeroed().assume_init() },
            extra: unsafe { MaybeUninit::<CDL_ObjectExtra>::zeroed().assume_init() },
            type_: CDL_CNode,
            size_bits: cnode_depth as u32,
        };

        // Start from last untyped memory object used by loader_alloc::create_objects.
        // Retry the retype against successive untypeds until one has
        // enough free memory; any error other than NotEnoughMemory is fatal.
        let mut ut_index = self.untyped_index;
        while let Err(e) = self.create_object(
            &cnode,
            CDL_ObjID_Invalid,
            self.state.get_untyped_cptr(ut_index),
            cnode_slot,
        ) {
            debug!("create error {:?}", e);
            if e != seL4_Error::seL4_NotEnoughMemory {
                panic!("Untyped retype failed, error {:?}", e);
            }
            // This untyped is exhausted, go to the next entry.
            ut_index += 1;
            if ut_index >= self.untyped_max {
                panic!("Out of untyped memory.");
            }
        }
        self.next_free_slot();
        // NB: we intentionally do not record the cap in the orig table

        (cnode_slot, cnode_depth)
    }

    // Hand-off objects that need to be held for safekeeping. We create a
    // suitably-sized "holding CNode" and sweep the necessary capabilities
    // from our top-level CNode into the holding CNode. This allows the
    // designated component (e.g MemoryManager) to revoke designated untyped
    // memory slabs without reclaiming the objects the rootserver constructed
    // for CAmkES components.
    pub fn handoff_capabilities(&mut self) -> seL4_Result {
        // Deletes |cptr| from the rootserver's top-level CNode.
        fn delete_cap(cptr: seL4_CPtr) -> seL4_Result {
            unsafe {
                seL4_CNode_Delete(
                    /*src_root=*/ seL4_CapInitThreadCNode,
                    /*src_index=*/ cptr,
                    /*src_depth=*/ seL4_WordBits as u8,
                )
            }
        }
        // Well-known caps created by the kernel.
        let kernel_caps = [
            seL4_CapIRQControl,         // global IRQ controller
            seL4_CapASIDControl,        // global ASID controller
            seL4_CapInitThreadASIDPool, // initial thread's ASID pool
            seL4_CapIOPort,             // global IO port (or null)
            seL4_CapIOSpace,            // global IO space (or null)
            seL4_CapDomain,             // global domain controller
        ];

        if !is_objid_valid(self.untyped_cnode) {
            // Do not fail if there is no cnode, this is not setup when
            // running sel4test (for example).
            return Ok(());
        }

        // Create the CNode for the holding caps.
        let (holding_cnode_slot, holding_cnode_depth) =
            self.create_holding_cnode(kernel_caps.len());

        // Handoff untypeds now that we've allocated the holding CNode.
        self.handoff_untypeds()?;

        // Keep both the original and a dup of the destination CNode: the
        // original is itself swept into the holding CNode below, so the
        // dup is needed for the final handoff.
        let dest_cnode_orig = self.get_orig_cap(self.untyped_cnode);
        let dest_cnode_dup = self.dup_cap(self.untyped_cnode)?;

        let mut next_slot = 0; // Next open slot in the holding CNode

        // Moves |src| from the toplevel CNode to the next available
        // slot in the holding CNode. We filter out errors that occur
        // if the source slot is empty.
        let hold_cap = |src, dest| {
            match unsafe {
                seL4_CNode_Move(
                    /*dest_root=*/ holding_cnode_slot,
                    /*dest_index=*/ dest,
                    /*dest_depth=*/ holding_cnode_depth as u8,
                    /*src_root=*/ seL4_CapInitThreadCNode,
                    /*src_index=*/ src,
                    /*src_depth=*/ seL4_WordBits as u8,
                )
            } {
                Err(seL4_FailedLookup) | Ok(_) => Ok(()),
                e => e,
            }
        };

        // Like hold_cap but hold a dup of |src|. Used for TCB's so a
        // reference remains usable to start threads. The original caps
        // remain in the top-level CNode until the MemoryManager reclaim
        // them. NB: this should never fail so there's no error filtering.
        let hold_dup_cap = |src, dest| {
            let seL4_AllRights = seL4_CapRights::new(
                /*grant_reply=*/ 1, /*grant=*/ 1, /*read=*/ 1, /*write=*/ 1,
            );
            unsafe {
                seL4_CNode_Copy(
                    /*dest_root=*/ holding_cnode_slot,
                    /*dest_index=*/ dest,
                    /*dest_depth=*/ holding_cnode_depth as u8,
                    /*src_root=*/ seL4_CapInitThreadCNode,
                    /*src_index=*/ src,
                    /*src_depth=*/ seL4_WordBits as u8,
                    seL4_AllRights,
                )
            }
        };

        // Move well-known caps setup by the kernel.
        for cap in kernel_caps.into_iter() {
            hold_cap(cap, next_slot)?;
            next_slot += 1;
        }

        // Then sweep in caps created from the capDL spec. The holding
        // CNode is not enumerated here because it (intentionally) does
        // not appear in the orig or dup tables. See above about TCB's.
        for obj_id in 0..self.get_free_slot() {
            // Skip handoff destination, we'll deal with it below.
            if obj_id == self.untyped_cnode {
                continue;
            }

            // NB: a 0 entry in either table means "no cap recorded".
            let orig_cap = self.get_orig_cap(obj_id);
            if orig_cap != 0 {
                if self.get_object(obj_id).r#type() == CDL_TCB {
                    hold_dup_cap(orig_cap, next_slot)?;
                    next_slot += 1;
                } else {
                    hold_cap(orig_cap, next_slot)?;
                    next_slot += 1;
                }
            }
            let dup_cap = self.get_dup_cap(obj_id);
            if dup_cap != 0 && dup_cap != orig_cap {
                // TCB's are not dup'd so no need to check
                hold_cap(dup_cap, next_slot)?;
                next_slot += 1;
            }
        }

        // IRQ caps are in a separate table.
        for irq in 0..self.state.get_max_irqs() {
            let irq_cap = self.get_irq_cap(irq);
            if irq_cap != 0 {
                hold_cap(irq_cap, next_slot)?;
                next_slot += 1;
            }
        }

        // Almost done with holding handoffs. The last one to do
        // is the destination CNode. We have a dup to do the final
        // handoff of the holding CNode so move the original now.
        hold_cap(dest_cnode_orig, next_slot)?;

        // Finally handoff the holding CNode for safekeeping. We must
        // use the dup of the destination CNode since we just released
        // the original cap.
        let dest_cnode = self.get_object(self.untyped_cnode);
        assert_eq!(dest_cnode.r#type(), CDL_CNode);
        self.handoff_cap(
            self.untyped_cnode,
            /*src_index=*/ holding_cnode_slot,
            /*dest_root=*/ dest_cnode_dup,
            /*dest_index=*/ dest_cnode.next_free_slot() + 0,
        )?;

        // The dup served its purpose; remove it from our CSpace.
        delete_cap(dest_cnode_dup)?;

        Ok(())
    }

    /// Resumes every TCB in the spec that is marked resumable, using the
    /// original caps recorded at object-creation time.
    pub fn start_threads(&self) -> seL4_Result {
        trace!("Starting threads...");
        let resumable = self
            .spec
            .obj_slice()
            .iter()
            .enumerate()
            .filter(|(_, obj)| obj.r#type() == CDL_TCB && obj.tcb_resume());
        for (obj_id, _) in resumable {
            unsafe { seL4_TCB_Resume(self.get_orig_cap(obj_id)) }?;
        }
        Ok(())
    }

    /// Unmaps the frames backing copy_region so the virtual range can be
    /// reused to map target frames (see init_frame and tcb_args).
    fn init_copy_region(&mut self) {
        /* copy_region is mapped into bss. It will be used as an
         * address range to map other data for frame loading so unmap
         * the existing pages. We locate the frame caps by looking in
         * the boot info and knowing that the userImageFrames are
         * ordered by virtual address in our address space.
         */

        /* Find the number of frames in the user image according to
         * bootinfo, and compare that to the number of frames backing
         * the image computed by comparing start and end symbols. If
         * these numbers are different, assume the image was padded
         * to the left.
         */
        let num_user_image_frames_reported =
            self.bootinfo.userImageFrames.end - self.bootinfo.userImageFrames.start;

        // Number of page frames spanned by |slice|.
        fn slice_to_frames(slice: &[u8]) -> usize {
            // NB: assume start is page-aligned, otherwise could round down
            let first_page = ptr::addr_of!(slice[0]) as usize;
            let last_page = ROUND_UP(ptr::addr_of!(slice[slice.len() - 1]) as usize, PAGE_SIZE);
            (last_page - first_page) / PAGE_SIZE
        }
        let num_user_image_frames_measured = slice_to_frames(self.executable);
        assert!(
            num_user_image_frames_reported >= num_user_image_frames_measured,
            "Not enough frame caps: bootinfo {} measured {}",
            num_user_image_frames_reported,
            num_user_image_frames_measured
        );

        let executable_start = ptr::addr_of!(self.executable[0]) as usize;
        let additional_user_image_bytes =
            (num_user_image_frames_reported - num_user_image_frames_measured) * PAGE_SIZE;
        assert!(
            additional_user_image_bytes <= executable_start,
            "User image padding overlaps executable: start {} image {}",
            executable_start,
            additional_user_image_bytes
        );

        let lowest_mapped_vaddr = executable_start - additional_user_image_bytes;

        // Index into userImageFrames: frame caps are ordered by vaddr, so
        // copy_region's cap is at (copy_region_vaddr - lowest_vaddr) pages
        // past the first frame cap.
        let copy_addr_frame: seL4_CPtr = self.bootinfo.userImageFrames.start
            + (unsafe { ptr::addr_of!(copy_region.data[0]) as usize } / PAGE_SIZE)
            - (lowest_mapped_vaddr / PAGE_SIZE);
        for i in 0..(size_of::<CopyRegion>() / PAGE_SIZE) {
            unsafe { seL4_Page_Unmap(copy_addr_frame + i) }.expect("copy_region");
        }
    }

    /// Captures bootinfo-derived state: the free CSpace slot range, the
    /// extended bootinfo headers, the untyped descriptors and the first
    /// ARM IOSpace cap.
    pub fn process_bootinfo(&mut self) -> seL4_Result {
        self.free_slot_start = self.bootinfo.empty.start;
        self.free_slot_end = self.bootinfo.empty.end;

        /* When using libsel4platsupport for printing support, we end up using some
         * of our free slots during serial port initialisation. Skip over these to
         * avoid failing our own allocations. Note, this value is just hardcoded
         * for the amount of slots this initialisation currently uses up.
         * JIRA: CAMKES-204.
         */
        // XXX libsel4platsupport is not used
        self.free_slot_start += 16;

        self.cache_extended_bootinfo_headers();
        self.check_untypeds()?; // loader_alloc::check_untypeds

        debug!("Rootserver is running in domain {}", self.bootinfo.initThreadDomain);

        self.first_arm_iospace = self.bootinfo.ioSpaceCaps.start;

        Ok(())
    }

    /// Records a pointer to each extended bootinfo header, indexed by
    /// header id, for later use by fill_frame_with_bootinfo.
    fn cache_extended_bootinfo_headers(&mut self) {
        // Extended bootinfo data starts in the page following the fixed part.
        // NOTE(review): this assumes the fixed seL4_BootInfo (through
        // extraLen) fits in one page — confirm against the kernel's layout.
        let mut cur = (ptr::addr_of!(self.bootinfo.extraLen) as usize) + PAGE_SIZE;
        let end = cur + self.bootinfo.extraLen;
        while cur < end {
            let header = &unsafe { *(cur as *const seL4_BootInfoHeader) };
            self.extended_bootinfo_table[header.id] = header;
            // A zero-length header would spin this loop forever; treat
            // it as corrupt data.
            assert!(header.len != 0);
            cur += header.len;
        }
    }

    /// Creates the kernel object described by |obj| in CSpace slot
    /// |free_slot|, retyping from |untyped_slot| for memory-backed
    /// objects. |id| identifies the object in the spec (used by the
    /// arch and device paths).
    fn create_object(
        &mut self,
        obj: &CDL_Object,
        id: CDL_ObjID,
        untyped_slot: seL4_CPtr,
        free_slot: usize,
    ) -> seL4_Result {
        // Retypes one object of |obj_type|/|obj_size| out of
        // |free_untyped| into |free_slot| of the rootserver's CNode.
        fn retype_untyped(
            free_slot: seL4_CPtr,
            free_untyped: seL4_CPtr,
            obj_type: seL4_ObjectType,
            obj_size: usize,
        ) -> seL4_Result {
            unsafe {
                seL4_Untyped_Retype(
                    free_untyped,
                    obj_type.into(),
                    obj_size,
                    seL4_CapInitThreadCNode,
                    /*node_index=*/ 0,
                    /*node_depth=*/ 0,
                    /*node_offset=*/ free_slot,
                    /*num_objects=*/ 1,
                )
            }
        }

        // Map the capDL type/size onto the seL4 type/size to retype.
        let mut obj_size = obj.size_bits();
        let arch_type: seL4_ObjectType = match obj.r#type() {
            CDL_Frame => {
                // Calculate the frame-size-specific cap.
                arch::get_frame_type(obj_size)
            }
            CDL_ASIDPool => {
                // ASID pools are carved out as untypeds of fixed size.
                // NOTE(review): presumably converted to a pool later via
                // seL4_ASIDControl — not visible in this chunk.
                obj_size = seL4_ASIDPoolBits;
                seL4_UntypedObject
            }
            CDL_SchedContext => {
                // Clamp to the kernel's minimum SchedContext size.
                obj_size = core::cmp::max(obj_size, seL4_MinSchedContextBits);
                CDL_SchedContext.into()
            }
            obj_type => obj_type.into(),
        };
        // Give arch handler a first chance; we handle memory-backed objects.
        match self.create_arch_object(obj, id, free_slot) {
            None => {
                // Device objects are handled by
                // loader_alloc::find_device_object, using bootinfo.
                // Memory-backed objects are handled directly
                // (find_device_object just does a retype_untyped).
                if obj.is_device() {
                    let paddr = obj.paddr().unwrap();
                    #[cfg(feature = "CONFIG_NOISY_CREATE_OBJECT")]
                    debug!(
                        " Find device frame/untyped {}, paddr {:#x}, size {}",
                        obj.name(),
                        paddr,
                        obj_size
                    );
                    self.find_device_object(free_slot, untyped_slot, arch_type, obj_size, paddr, id)
                } else {
                    retype_untyped(free_slot, untyped_slot, arch_type, obj_size)
                }
            }
            Some(e) => e.into(),
        }
    }

    /// Creates an IRQ handler cap for every valid entry in the spec's
    /// IRQ table and records it in the irq cap table.
    pub fn create_irq_caps(&mut self) -> seL4_Result {
        for (irq, irq_num) in self.spec.irq_slice().iter().enumerate() {
            if !is_objid_valid(*irq_num) {
                continue;
            }
            // NB: could push slot management down but then create_irq_cap
            //     would need to be more intelligent.
            let free_slot = self.get_free_slot();
            // NB: original code continues even if error
            arch::create_irq_cap(irq, self.get_object(*irq_num), free_slot)?;
            self.set_irq_cap(irq, free_slot);
            self.next_free_slot();
        }
        Ok(())
    }

    /// Binds each valid IRQ in the spec to the handler cap created by
    /// create_irq_caps.
    pub fn init_irqs(&mut self) -> seL4_Result {
        for (irq, irq_num) in self.spec.irq_slice().iter().enumerate() {
            if is_objid_valid(*irq_num) {
                self.init_irq(*irq_num, self.get_irq_cap(irq))?;
            }
        }
        Ok(())
    }

    /// Attaches the (possibly badged) endpoint in the IRQ object's single
    /// slot to |irq_handler_cptr|; a slotless IRQ object needs no binding.
    fn init_irq(&mut self, irq: CDL_ObjID, irq_handler_cptr: seL4_CPtr) -> seL4_Result {
        let irq_obj = self.get_object(irq);
        assert!(is_irq(irq_obj.r#type()));
        assert!(irq_obj.size_bits() == 0);
        assert!(irq_obj.num_slots() <= 1);

        if irq_obj.num_slots() == 1 {
            // Bind IRQ
            let ep_cap = irq_obj.get_cap_at(0).unwrap();
            let ep_obj = ep_cap.obj_id;
            let ep_cptr = match ep_cap.cap_data() {
                // No badge: use the original endpoint cap as-is.
                0 => self.get_orig_cap(ep_obj),
                // Endpoint needs badging, mint a new cap.
                badge => self.mint_cap(ep_obj, badge)?,
            };
            unsafe { seL4_IRQHandler_SetNotification(irq_handler_cptr, ep_cptr) }?;
        }
        Ok(())
    }

    // Initialise ASID's for VSpaces: every VSpace root is assigned to
    // the initial thread's ASID pool.
    pub fn init_pd_asids(&mut self) -> seL4_Result {
        // Clone to release the borrow of self while we issue syscalls.
        let roots = self.vspace_roots.clone();
        for &root in roots.iter() {
            let sel4_page = self.get_orig_cap(root);
            debug!("Assign ASIDPool {} for {}", sel4_page, self.get_object(root).name());
            unsafe { seL4_ASIDPool_Assign(seL4_CapInitThreadASIDPool, sel4_page) }?;
        }
        Ok(())
    }

    // Initialise the contents of page frames: for every Frame object in
    // the spec, apply each non-empty fill descriptor (bootinfo or file
    // data) via init_frame.
    pub fn init_frames(&mut self) -> seL4_Result {
        for (obj_id, obj) in self
            .spec
            .obj_slice()
            .iter()
            .enumerate()
            .filter(|(_, obj)| obj.r#type() == CDL_Frame)
        {
            for i in 0..CONFIG_CAPDL_LOADER_FILLS_PER_FRAME {
                let frame_fill = &obj.frame_fill(i).unwrap();
                // NB: previous code did a break, continue'ing makes more
                //     sense but does not matter because
                //     CONFIG_CAPDL_LOADER_FILLS_PER_FRAME is 1.
                if frame_fill.type_ != CDL_FrameFill_None {
                    self.init_frame(self.get_orig_cap(obj_id), frame_fill)?;
                }
            }
        }
        Ok(())
    }

    /// Fills one page frame: temporarily maps |sel4_frame| into the
    /// rootserver's copy_region, writes the fill data (from bootinfo or
    /// the embedded CPIO archive), then unmaps the frame.
    fn init_frame(
        &mut self,
        sel4_frame: seL4_CPtr,
        frame_fill: &CDL_FrameFill_Element_t,
    ) -> seL4_Result {
        let seL4_ReadWrite = seL4_CapRights::new(
            /*grant_reply=*/ 0, /*grant=*/ 0, /*read=*/ 1, /*write=*/ 1,
        );

        // Map the frame to do the fill. The copy_region range was
        // vacated at startup by init_copy_region.
        let base = unsafe { ptr::addr_of!(copy_region.data[0]) as usize };
        unsafe {
            seL4_Page_Map(
                sel4_frame,
                seL4_CapInitThreadVSpace,
                base,
                seL4_ReadWrite,
                seL4_Default_VMAttributes,
            )
        }?;

        // Fill the frame contents from bootinfo or file data that's
        // embedded in our image.
        match frame_fill.type_ {
            CDL_FrameFill_BootInfo => {
                self.fill_frame_with_bootinfo(base, frame_fill);
            }
            CDL_FrameFill_FileData => {
                self.fill_frame_with_filedata(base, frame_fill);
            }
            type_ => {
                panic!("Unsupported frame fill type {:?}", type_);
            }
        }

        // NB: an error causes an unwind through the callers but might
        //     be better to just panic directly.
        unsafe { seL4_Page_Unmap(sel4_frame) } // Done, unnmap the frame
    }

    // Fill a frame's contents from bootinfo. At the moment these are all
    // arch-specific so this may be better moved to arch:
    //
    // |base| is the vaddr where the target frame is mapped; the fill's
    // dest_offset/dest_len select the window within the frame. The data
    // comes from the extended bootinfo header cached by
    // cache_extended_bootinfo_headers (or from our own bootinfo for
    // CDL_FrameFill_BootInfo_BootInfo).
    fn fill_frame_with_bootinfo(&self, base: usize, frame_fill: &CDL_FrameFill_Element_t) {
        let bi = frame_fill.get_bootinfo();
        // Reject fill types we have no copy logic for.
        match bi.type_ {
            CDL_FrameFill_BootInfo_X86_VBE
            | CDL_FrameFill_BootInfo_X86_TSC_Freq
            | CDL_FrameFill_BootInfo_FDT
            | CDL_FrameFill_BootInfo_BootInfo => {}
            _ => {
                panic!("Unsupported bootinfo frame fill type {:?}", bi.type_)
            }
        }

        let dest = (base + frame_fill.dest_offset) as *mut u8;
        let max_len = frame_fill.dest_len;
        let block_offset = bi.src_offset;
        let header: *const seL4_BootInfoHeader =
            self.extended_bootinfo_table[usize::from(bi.type_)];

        // Check if the bootinfo has been found.
        if bi.type_ == CDL_FrameFill_BootInfo_BootInfo {
            // Our (patched) bootinfo rather than an extended header.
            self.fill_with_bootinfo(dest, max_len);
        } else if header.is_null() {
            /* No bootinfo.
             * If we're at the start of a block, copy an empty header across, otherwise skip the copy */
            if block_offset == 0 {
                // id/len of MAX marks "no such bootinfo" for the consumer.
                let empty = seL4_BootInfoHeader {
                    id: seL4_Word::MAX,
                    len: seL4_Word::MAX,
                };
                let copy_len = cmp::min(size_of::<seL4_BootInfoHeader>(), max_len);
                unsafe { ptr::copy_nonoverlapping(ptr::addr_of!(empty) as _, dest, copy_len) }
            }
        } else {
            let href = unsafe { &*header };
            let copy_len = cmp::min(
                // Don't copy past the bootinfo
                if href.len < block_offset {
                    0
                } else {
                    href.len - block_offset
                },
                // Don't copy more than what the frame can hold
                max_len,
            );
            let copy_start = (header as usize) + block_offset;
            unsafe { ptr::copy_nonoverlapping(copy_start as _, dest, copy_len) }
        }
    }

    // Fill with our BootInfo patched per the destination cnode.
    // The untyped and empty slot regions are rewritten so they are valid
    // in the *receiving* component's CSpace (the CNode that untypeds are
    // handed off to), not in the rootserver's.
    fn fill_with_bootinfo(&self, dest: *mut u8, max_len: usize) {
        // NOTE(review): copy starts at extraLen, apparently the first
        // field of seL4_BootInfo — confirm against the kernel's struct
        // layout.
        unsafe {
            ptr::copy_nonoverlapping(
                ptr::addr_of!(self.bootinfo.extraLen) as *const u8,
                dest as _,
                max_len,
            )
        }

        assert!(is_objid_valid(self.untyped_cnode));
        let cnode = self.get_object(self.untyped_cnode);
        assert_eq!(cnode.r#type(), CDL_CNode);

        // NB: page-aligned so safe to deref.
        let bootinfo = unsafe { &mut *(dest as *mut seL4_BootInfo) };

        // UntypedMemory caps are appended to the designated CNode.
        // Calculate the first unoccupied slot in the CNode; immediately
        // after a "holding CNode" will be dropped and the untypeds go
        // one past that.
        let next_free_slot = cnode.next_free_slot() + 1;
        bootinfo.untyped = sel4_sys::seL4_SlotRegion {
            start: next_free_slot,
            end: next_free_slot + (self.bootinfo.untyped.end - self.bootinfo.untyped.start),
        };
        // Update the empty region to support dynamic cap allocation.
        bootinfo.empty = sel4_sys::seL4_SlotRegion {
            start: bootinfo.untyped.end + 1,
            end: BIT(cnode.size_bits()),
        };
        // NB: we could adjust the UntypedDesc's but they will not
        //   reflect rootserver resources that are released at exit
    }

    // Fill a frame's contents from a file in the cpio archive;
    // in particular this loads each CAmkES component's executable.
    // |base| is the vaddr where the target frame is mapped; the fill's
    // dest_offset/dest_len select the window to copy into and
    // file_offset the window in the source file.
    fn fill_frame_with_filedata(&mut self, base: usize, frame_fill: &CDL_FrameFill_Element_t) {
        // Linear scan of the CPIO archive for |filename|; panics if absent.
        let cpio_lookup = |filename: &str| -> &[u8] {
            for e in CpioNewcReader::new(self.capdl_archive) {
                let entry = e.unwrap();
                if entry.name == filename {
                    return entry.data;
                }
            }
            panic!("{} not found in cpio archive", filename);
        };
        let file_data = frame_fill.get_file_data();
        let filename = unsafe { CStr::from_ptr(file_data.filename) }
            .to_str()
            .unwrap();
        // Check the last lookup before scanning the cpio archive; fills
        // for a component's frames typically arrive grouped by file.
        if filename != self.last_filename {
            // Log the file we are switching *to* (the previous code
            // printed the stale last_filename here).
            trace!("switch filedata fill to {}", filename);
            self.last_data = cpio_lookup(filename);
            self.last_filename = filename;
        }
        // Copy the requested window of file data into the mapped frame.
        unsafe {
            ptr::copy_nonoverlapping(
                ptr::addr_of!(self.last_data[file_data.file_offset]),
                (base + frame_fill.dest_offset) as *mut u8,
                frame_fill.dest_len,
            )
        }
    }

    // Initialize the page tables for each VSpace.
    //
    // NB: iterates over a snapshot of the recorded VSpace roots so that
    // `self` can be borrowed mutably for each per-root initialization.
    pub fn init_vspaces(&mut self) -> seL4_Result {
        let roots = self.vspace_roots.clone();
        for &obj_id in roots.iter() {
            self.init_vspace(obj_id)?; // arch::init_vspace
        }
        Ok(())
    }

    // Maps the page table referenced by |page_cap| into the VSpace
    // rooted at |pd_id| at virtual address |vaddr|.
    fn map_page_table(
        &self,
        page_cap: &CDL_Cap,
        pd_id: CDL_ObjID,
        vaddr: seL4_Word,
    ) -> seL4_Result {
        assert_eq!(page_cap.r#type(), CDL_PTCap);

        // TODO: We should not be using the original cap here
        let pt_cptr = self.get_orig_cap(page_cap.obj_id);
        let pd_cptr = self.get_orig_cap(pd_id);
        let attribs: seL4_VMAttributes = page_cap.vm_attribs().into();

        #[cfg(feature = "CONFIG_NOISY_INIT_VSPACE")]
        trace!(
            "  Map PT {} into {} @{:#x}, vm_attribs={:#x}",
            self.get_object(page_cap.obj_id).name(),
            self.get_object(pd_id).name(),
            vaddr,
            page_cap.vm_attribs()
        );

        unsafe { seL4_PageTable_Map(pt_cptr, pd_cptr, vaddr, attribs) }
    }

    // Maps the frame referenced by |page_cap| into the VSpace rooted at
    // |pd_id| at |vaddr| with |rights_|, flushing/invalidating caches as
    // needed. Records the cap used for the mapping in the dup table so
    // init_cnode_slot can later hand it off.
    fn map_page_frame(
        &mut self,
        page_cap: &CDL_Cap,
        pd_id: CDL_ObjID,
        rights_: seL4_CapRights,
        vaddr: seL4_Word,
    ) -> seL4_Result {
        assert_eq!(page_cap.r#type(), CDL_FrameCap);

        // Logs a diagnostic for a failed mapping, including the frame's
        // physical address when it can be queried.
        fn DebugMapFrameError(sel4_page: seL4_Word, vaddr: usize, error: seL4_Error) {
            // Provide more info to help in diagnosing an error.
            let addr = unsafe { seL4_Page_GetAddress(sel4_page) };
            if addr.error != 0 {
                error!(
                    "Failed to map frame: \
                       <unknown physical address (error = {})> \
                       -> 0x{:x}, error {:?}",
                    addr.error, vaddr, error
                );
            } else {
                error!(
                    "Failed to map frame: \
                       0x{:x} -> {:x}, error {:?}",
                    addr.paddr, vaddr, error
                );
            }
        }

        let page = page_cap.obj_id;
        let vm_attribs: seL4_VMAttributes = page_cap.vm_attribs().into();

        // TODO: We should not be using the original cap here
        let sel4_page = self.get_orig_cap(page);
        let sel4_pd = self.get_orig_cap(pd_id);

        // The requested rights may be widened below; work on a copy.
        let mut rights = rights_;
        if cfg!(feature = "CONFIG_CAPDL_LOADER_WRITEABLE_PAGES") {
            // Make instruction pages writeable to support software breakpoints.
            if rights.get_capAllowGrant() != 0 {
                rights.set_capAllowWrite(1);
            }
        }

        /* XXX: Write-only mappings are silently downgraded by the kernel to
         * kernel-only. This is clearly not what the user intended if they
         * passed us a write-only mapping. Help them out by upgrading it here.
         */
        if rights.get_capAllowWrite() != 0 {
            rights.set_capAllowRead(1);
        }

        // Store the cap used for the mapped page for later use in
        // map_page_frame. We use the dup_cap table knowing that it is only
        // used otherwise for TCB & CNode caps (see duplicate_caps) so there
        // will be no collisions. The original code wrote this value back
        // into the CDL_Model but we treat that data as immutable.
        // NB: if sel4_page changes because of sharing, dup_cap will overwrite
        //     the dup_cap entry (though shared pages never need fixup).
        self.set_dup_cap(page, sel4_page);

        #[cfg(feature = "CONFIG_NOISY_INIT_VSPACE")]
        trace!(
            "  Map {} into {} @{:#x} with cap={} rights=(G: {}, R: {}, W: {}) vm_attribs={:#x}",
            self.get_object(page_cap.obj_id).name(),
            self.get_object(pd_id).name(),
            vaddr,
            sel4_page,
            rights.get_capAllowGrant(),
            rights.get_capAllowRead(),
            rights.get_capAllowWrite(),
            page_cap.vm_attribs()
        );

        // FIXME: Add support for super-pages.
        // NB: seL4_Page_Map handles marking the NX bit, we explicitly
        //   call seL4_Page_Map_Flush to invalidate and/or flush caches.
        let mut result = unsafe {
            seL4_Page_Map(sel4_page, sel4_pd, vaddr, rights, vm_attribs).and_then(|_| {
                seL4_Page_Map_Flush(
                    self.get_object(page).r#type().into(),
                    sel4_page,
                    rights,
                    vm_attribs,
                )
            })
        };
        #[cfg(feature = "CONFIG_CAPDL_SHARED_FRAMES")]
        if result == Err(seL4_InvalidCapability) {
            // Page is shared (and already mapped); clone a copy of the page
            // cap and try again. Builds with CONFIG_PRINITNG will spam the
            // console with error msgs so log a message to (hopefully) explain.
            debug!("Shared page {}@{:#x}; dup'ing", self.get_object(page).name(), vaddr);

            // NB: dup_cap also updates the dup table entry for |page|.
            let shared_sel4_page = self.dup_cap(page)?;
            result = unsafe { seL4_Page_Map(shared_sel4_page, sel4_pd, vaddr, rights, vm_attribs) };
        }
        if result.is_err() {
            DebugMapFrameError(sel4_page, vaddr, result.clone().unwrap_err());
        }
        result
    }

    // Initialize each TCB contents.
    //
    // Walks the spec and, for every TCB object, sets up its scheduling
    // state (init_tcb) and execution context (configure_tcb).
    pub fn init_tcbs(&mut self) -> seL4_Result {
        for (obj_id, obj) in self.spec.obj_slice().iter().enumerate() {
            if obj.r#type() != CDL_TCB {
                continue;
            }
            let sel4_tcb = self.get_orig_cap(obj_id);
            self.init_tcb(obj, sel4_tcb)?;
            self.configure_tcb(obj, sel4_tcb)?;
        }
        Ok(())
    }

    // Initialize a TCB: binds CSpace/VSpace/IPC buffer, scheduling
    // context & parameters, fault/timeout endpoints, affinity, and any
    // attached VCPU, per the spec object |cdl_tcb|.
    fn init_tcb(&mut self, cdl_tcb: &CDL_Object, sel4_tcb: seL4_CPtr) -> seL4_Result {
        // Maps an optional spec cap to its rootserver CPtr; 0 if absent.
        let cap_to_cptr = |opt: Option<CDL_Cap>| -> seL4_CPtr {
            match opt {
                Some(obj) => self.get_orig_cap(obj.obj_id),
                _ => 0,
            }
        };

        // XXX will panic if missing any of CSpace, VSpace, IPCBuffer
        let cdl_cspace_root = cdl_tcb.get_cap_at(CDL_TCB_CTable_Slot).unwrap();
        let cdl_vspace_root = cdl_tcb.get_cap_at(CDL_TCB_VTable_Slot).unwrap();
        let cdl_ipcbuffer = cdl_tcb.get_cap_at(CDL_TCB_IPCBuffer_Slot).unwrap();

        // Scheduling context is optional; sel4_sc == 0 means "none".
        let cdl_sc_opt = cdl_tcb.get_cap_at(CDL_TCB_SC_Slot);
        let sel4_sc = cap_to_cptr(cdl_sc_opt);

        let max_priority = cdl_tcb.tcb_max_priority();
        let priority = cdl_tcb.tcb_priority();
        let ipcbuffer_addr = cdl_tcb.tcb_ipcbuffer_addr();
        let affinity = cdl_tcb.tcb_affinity();

        // Resolve the rootserver caps and cap data (guard/badge) for the
        // CSpace & VSpace roots and the IPC buffer frame.
        let sel4_cspace_root = self.get_orig_cap(cdl_cspace_root.obj_id);
        let sel4_cspace_root_data = cdl_cspace_root.cap_data();
        let sel4_vspace_root = self.get_orig_cap(cdl_vspace_root.obj_id);
        let sel4_vspace_root_data = cdl_vspace_root.cap_data();
        let sel4_ipcbuffer = self.get_orig_cap(cdl_ipcbuffer.obj_id);

        // scheduler::init_fault_ep
        let (sel4_fault_ep, sel4_tempfault_ep) = self.init_fault_ep(cdl_tcb)?;

        /*
        // XXX original comment, code does not do this (yet)
         * seL4_TCB_Configure requires a valid CSpace, VSpace and IPC buffer cap to
         * succeed at assigning any of them. We first try and perform seL4_TCB_Configure
         * but if any of these objects are missing we fall back to only trying to assign
         * an IPC buffer if we have one using seL4_TCB_SetIPCBuffer.  We print an error
         * if a CSpace is available but a VSpace is not or if there is a VSpace but no CSpace.
         */
        if sel4_sc != 0 {
            // Configure the scheduling context before binding it to the
            // TCB below (MCS kernels).
            let cdl_sc = self.get_object(cdl_sc_opt.unwrap().obj_id);
            scheduler::SchedControl_Configure(
                self.get_sched_ctrl_cap(affinity),
                sel4_sc,
                affinity,
                cdl_sc.sc_budget(),
                cdl_sc.sc_period(),
                cdl_sc.sc_data(),
            )?;
        }
        scheduler::TCB_Configure(
            sel4_tcb,
            // NB: sel4_fault_ep is ignored here with MCS
            sel4_fault_ep,
            sel4_cspace_root,
            sel4_cspace_root_data,
            sel4_vspace_root,
            sel4_vspace_root_data,
            ipcbuffer_addr,
            sel4_ipcbuffer,
        )?;
        scheduler::TCB_SchedParams(sel4_tcb, max_priority, priority, sel4_sc, sel4_fault_ep)?;
        scheduler::TCB_SetTimeoutEndpoint(sel4_tcb, sel4_tempfault_ep)?;

        smp::TCB_SetAffinity(sel4_tcb, affinity)?;

        // vcpu::set_tcb_vcpu
        self.set_tcb_vcpu(cdl_tcb, sel4_tcb)?;

        #[cfg(feature = "CONFIG_DEBUG_BUILD")]
        // Name the thread after its TCB name if possible.
        if let Ok(cstr) = cstr_core::CString::new(cdl_tcb.name()) {
            unsafe { seL4_DebugNameThread(sel4_tcb, cstr.to_bytes_with_nul()) };
        }
        Ok(())
    }

    // Configure TCB execution context: spills any TCB arguments to the
    // stack, writes the initial register set, and assigns the domain.
    fn configure_tcb(&self, cdl_tcb: &CDL_Object, sel4_tcb: seL4_CPtr) -> seL4_Result {
        let initial_sp = cdl_tcb.tcb_sp();
        assert_eq!(initial_sp % arch::STACK_ALIGNMENT_BYTES, 0, "TCB stack pointer mis-aligned");

        // NB: tcb_args::maybe_spill_tcb_args may write arg data to the
        // stack causing the stack pointer to be adjusted.
        let sp = self.maybe_spill_tcb_args(cdl_tcb, initial_sp)?;
        assert_eq!(
            sp % arch::STACK_ALIGNMENT_BYTES,
            0,
            "Spilled TCB stack pointer mis-aligned"
        );

        // Number of registers in a full user context.
        let num_regs = size_of::<seL4_UserContext>() / size_of::<seL4_Word>();
        unsafe {
            seL4_TCB_WriteRegisters(
                sel4_tcb,
                0,
                0,
                num_regs,
                arch::get_user_context(cdl_tcb, sp),
            )?;
            seL4_DomainSet_Set(seL4_CapDomain, cdl_tcb.tcb_domain() as u8, sel4_tcb)?;
        }
        Ok(())
    }

    // Initialize each CSpace by populating the top-level CNode's.
    //
    // Two passes over the spec: first all COPY-mode slots are filled,
    // then all MOVE-mode slots. The passes cannot be merged because a
    // moved cap must not be consumed before every copy of it is made.
    pub fn init_cspaces(&mut self) -> seL4_Result {
        for (obj_id, _) in self
            .spec
            .obj_slice()
            .iter()
            .enumerate()
            .filter(|(_, obj)| obj.r#type() == CDL_CNode)
        {
            self.init_cnode(InitCnodeCmode::COPY, obj_id)?;
        }
        for (obj_id, _) in self
            .spec
            .obj_slice()
            .iter()
            .enumerate()
            .filter(|(_, obj)| obj.r#type() == CDL_CNode)
        {
            self.init_cnode(InitCnodeCmode::MOVE, obj_id)?;
        }
        Ok(())
    }

    // Populate a CNode's slots using previously created objects.
    //
    // |mode| selects whether this is the COPY or MOVE pass (see
    // init_cspaces); each occupied slot in the spec is processed by
    // init_cnode_slot.
    fn init_cnode(&self, mode: InitCnodeCmode, cnode: CDL_ObjID) -> seL4_Result {
        let cdl_cnode = self.get_object(cnode);
        #[cfg(feature = "CONFIG_NOISY_INIT_CNODE")]
        trace!(
            "Init {}: {} slots, orig {} dup {}",
            cdl_cnode.name(),
            cdl_cnode.num_slots(),
            self.get_orig_cap(cnode),
            // BUG fix: the "dup" field previously printed the orig cap;
            // report the dup-table entry as the format string promises.
            self.get_dup_cap(cnode)
        );
        for slot_index in 0..cdl_cnode.num_slots() {
            self.init_cnode_slot(mode, cnode, &cdl_cnode.get_slot(slot_index))?;
        }
        Ok(())
    }

    // Moves the cap at |src_index| in the rootserver's CSpace into slot
    // |dest_index| of the CNode described by |cnode_obj_id| (addressed
    // through |dest_root|).
    fn handoff_cap(
        &self,
        cnode_obj_id: CDL_ObjID,
        src_index: seL4_CPtr,
        dest_root: seL4_CPtr,
        dest_index: seL4_CPtr,
    ) -> seL4_Result {
        let dest_cnode = self.get_object(cnode_obj_id);
        assert_eq!(dest_cnode.r#type(), CDL_CNode);

        // The destination depth is the receiving CNode's size.
        let dest_depth: u8 = dest_cnode.size_bits.try_into().unwrap();

        unsafe {
            seL4_CNode_Move(
                dest_root,
                dest_index,
                dest_depth,
                /*src_root=*/ seL4_CapInitThreadCNode,
                src_index,
                /*src_depth=*/ seL4_WordBits as u8,
            )
        }
    }

    // Populates one slot of CNode |cnode_id| per the spec's |cnode_slot|:
    // on the COPY pass caps are minted (or the mapped frame cap is moved
    // in for frames with a mapping container); on the MOVE pass caps are
    // moved or mutated out of the rootserver's CSpace.
    fn init_cnode_slot(
        &self,
        mode: InitCnodeCmode,
        cnode_id: CDL_ObjID,
        cnode_slot: &CDL_CapSlot,
    ) -> seL4_Result {
        // Caps whose badge must survive a move (moved, not mutated).
        fn is_ep_related_cap(c: CDL_CapType) -> bool {
            c == CDL_EPCap || c == CDL_NotificationCap || c == CDL_ReplyCap
        }

        let target_cap = &cnode_slot.cap;
        let target_cap_obj = target_cap.obj_id;
        let target_cap_irq = target_cap.irq;
        let target_cap_type = target_cap.r#type();
        let target_cap_rights = target_cap.cap_rights();

        // For endpoint this is the badge, for cnodes, this is the (encoded) guard.
        #[allow(unused_mut)]
        let mut target_cap_data = target_cap.cap_data();

        /* To support moving original caps, we need a spec with a CDT (most don't).
         * This should probably become a separate configuration option for when to
         * use the CDT, and when to just copy. For now, let's just copy.
         */
        let is_ep_cap = is_ep_related_cap(target_cap_type);
        let mut is_irq_handler_cap = target_cap_type == CDL_IRQHandlerCap;
        let is_frame_cap = target_cap_type == CDL_FrameCap;

        let dest_obj = self.get_object(cnode_id);
        let dest_size = dest_obj.size_bits;

        // Destination slot, addressed in the target CNode.
        let dest_root = self.get_orig_cap(cnode_id);
        let dest_index = cnode_slot.slot;
        let dest_depth: u8 = dest_size.try_into().unwrap();

        // Use an original cap to reference the object to copy.
        let src_root = seL4_CapInitThreadCNode;
        let (move_cap, src_index) = match target_cap_type {
            #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
            CDL_IOSpaceCap => (false, seL4_CapIOSpace),

            #[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
            CDL_ARMIOSpaceCap => {
                // BUG fix: the cap data selects the IOSpace; compute the
                // source index from it BEFORE clearing the data (it was
                // previously zeroed first, so the selector was always 0).
                // The data is cleared so it is not also applied as a
                // badge/guard by the mint/mutate below.
                let iospace_index = self.first_arm_iospace + target_cap_data;
                target_cap_data = 0;
                (false, iospace_index)
            }

            CDL_IRQHandlerCap => (false, self.get_irq_cap(target_cap_irq)),
            CDL_IRQControlCap => {
                is_irq_handler_cap = true;
                // there's only one cap
                (true, seL4_CapIRQControl)
            }
            CDL_SchedControlCap => (false, self.get_sched_ctrl_cap(target_cap_obj)),
            CDL_DomainCap => {
                // there's only one cap
                (false, seL4_CapDomain)
            }
            CDL_ASIDControlCap => {
                // there's only one cap
                (false, seL4_CapASIDControl)
            }
            _ => (false, self.get_orig_cap(target_cap_obj)),
        };
        let src_depth = seL4_WordBits as u8;

        if mode == InitCnodeCmode::MOVE && move_cap {
            if is_ep_cap || is_irq_handler_cap {
                // Move: the cap keeps its badge/data unchanged.
                #[cfg(feature = "CONFIG_NOISY_INIT_CNODE")]
                debug!(
                    "  Populate {:?} slot {} by moving {:?} -> {:?}",
                    target_cap_type,
                    { cnode_slot.slot },
                    (src_root, src_index, src_depth),
                    (dest_root, dest_index, dest_depth),
                );
                unsafe {
                    seL4_CNode_Move(
                        dest_root, dest_index, dest_depth, src_root, src_index, src_depth,
                    )
                }?;
            } else {
                // Mutate: move the cap while applying new cap data.
                #[cfg(feature = "CONFIG_NOISY_INIT_CNODE")]
                debug!(
                    "  Populate {:?} slot {} by mutating {:?} -> {:?}",
                    target_cap_type,
                    { cnode_slot.slot },
                    (src_root, src_index, src_depth),
                    (dest_root, dest_index, dest_depth),
                );
                unsafe {
                    seL4_CNode_Mutate(
                        dest_root,
                        dest_index,
                        dest_depth,
                        src_root,
                        src_index,
                        src_depth,
                        target_cap_data,
                    )
                }?;
            }
        } else if mode == InitCnodeCmode::COPY && !move_cap {
            if is_frame_cap && is_objid_valid(target_cap.mapping_container_id) {
                // The spec requires the frame cap in the current slot be
                // the same one used to map the frame in some container
                // (either a page table or page directory).
                let container_id = target_cap.mapping_container_id;
                let slot_index = target_cap.mapping_slot;

                // Look up the container object that holds the mapping.
                let container = self.get_object(container_id);

                // When the frame was mapped in, a copy of the cap was first
                // created, and the copy was used for the mapping. This copy
                // is the cap that must be moved into the current slot.
                let frame_cap = container.get_cap_at(slot_index).unwrap();
                assert_eq!(frame_cap.r#type(), CDL_FrameCap);
                // NB: the mapped frame cap is stored in the dup table.
                let mapped_frame_cap = self.get_dup_cap(frame_cap.obj_id);
                #[cfg(feature = "CONFIG_NOISY_INIT_CNODE")]
                debug!(
                    "  Map {:?} slot {} by moving {:?} -> {:?}",
                    target_cap_type,
                    { cnode_slot.slot },
                    (src_root, mapped_frame_cap, src_depth),
                    (dest_root, dest_index, dest_depth),
                );
                // Move the cap to the frame used for the mapping into the
                // destination slot.
                unsafe {
                    seL4_CNode_Move(
                        dest_root,
                        dest_index,
                        dest_depth,
                        src_root,
                        mapped_frame_cap,
                        src_depth,
                    )
                }?;
            } else {
                // Mint: create a new cap with the slot's rights & data.
                #[cfg(feature = "CONFIG_NOISY_INIT_CNODE")]
                debug!(
                    "  Populate {:?} slot {} by minting {:?} -> {:?} {:?} data {:#x}",
                    target_cap_type,
                    { cnode_slot.slot },
                    (src_root, src_index, src_depth),
                    (dest_root, dest_index, dest_depth),
                    target_cap_rights,
                    target_cap_data,
                );
                unsafe {
                    seL4_CNode_Mint(
                        dest_root,
                        dest_index,
                        dest_depth,
                        src_root,
                        src_index,
                        src_depth,
                        target_cap_rights.into(),
                        target_cap_data,
                    )
                }?;
            }
        }
        Ok(())
    }

    // Returns the spec object for |object_id|; panics if out of range.
    fn get_object(&self, object_id: CDL_ObjID) -> &'a CDL_Object {
        &self.spec.obj_slice()[object_id]
    }
    // Returns the object id assigned to ASID slot |asid_num|, or None
    // if |asid_num| is out of range of the spec's ASID slots (the
    // previous unconditional Some(...) would panic on a bad index,
    // defeating the Option return type).
    fn get_asid(&self, asid_num: usize) -> Option<CDL_ObjID> {
        self.spec.asid_slot_slice().get(asid_num).copied()
    }

    // Top-level CSpace management: a one-level CSpace impl for now
    // with sequential allocation.
    // NB: pub for rootserver use

    // Returns the next unallocated slot in the rootserver's CSpace.
    // The slot is not consumed until next_free_slot() is called.
    pub fn get_free_slot(&self) -> seL4_CPtr { self.free_slot_start }
    // Consumes the current free slot; panics when the slot range
    // [free_slot_start, free_slot_end) is exhausted.
    fn next_free_slot(&mut self) {
        self.free_slot_start += 1;
        assert!(self.free_slot_start < self.free_slot_end, "Ran out of free slots!");
    }

    // Get/Set the original capability assigned to each object.
    // These are thin wrappers over the loader state table.
    fn get_orig_cap(&self, obj_id: CDL_ObjID) -> seL4_CPtr { self.state.get_orig_cap(obj_id) }
    fn set_orig_cap(&mut self, obj_id: CDL_ObjID, slot: seL4_CPtr) {
        self.state.set_orig_cap(obj_id, slot);
    }

    // Get/Set any duplicate capability assigned to an object. This is
    // mostly used to record the original mapped page frame caps (in
    // map_page_frame) for resolving container usage in init_cnode_slot.
    fn get_dup_cap(&self, obj_id: CDL_ObjID) -> seL4_CPtr { self.state.get_dup_cap(obj_id) }
    fn set_dup_cap(&mut self, obj_id: CDL_ObjID, slot: seL4_CPtr) {
        let old_cap = self.get_dup_cap(obj_id);
        if old_cap != slot && old_cap != 0 {
            // Overwriting a previous dup should only happen for shared page
            // frames & badging endpoints. We need a record of them for the
            // handoff_holding work; stash 'em in the dup table at the next
            // available slot--we know at the point this happens slots will be
            // allocated only with next_free_slot so there cannot be a collision.
            // NB: caller's must be aware another slot may be allocated
            let overflow_slot = self.get_free_slot();
            // Recursive call keyed on the fresh slot index.
            // NOTE(review): assumes the dup-table entry for a fresh slot
            // index is 0 so this recursion terminates after one step —
            // confirm against the state table's initialization.
            self.set_dup_cap(overflow_slot, old_cap);
            self.next_free_slot();
            trace!(
                "dup: obj {:?} obj_id {} old {} new {} overflow {}",
                self.get_object(obj_id).r#type(),
                obj_id,
                old_cap,
                slot,
                overflow_slot
            );
        };
        self.state.set_dup_cap(obj_id, slot);
    }

    // Get/Set IRQ capabilities: thin wrappers over the loader state.
    fn get_irq_cap(&self, irq: CDL_IRQ) -> seL4_CPtr { self.state.get_irq_cap(irq) }
    fn set_irq_cap(&mut self, irq: CDL_IRQ, slot: seL4_CPtr) { self.state.set_irq_cap(irq, slot); }

    // Get/Set SchedControl capabilities, one per core (MCS kernels).
    fn get_sched_ctrl_cap(&self, id: CDL_Core) -> seL4_CPtr { self.state.get_sched_ctrl_cap(id) }
    #[allow(dead_code)]
    fn set_sched_ctrl_cap(&mut self, id: CDL_Core, slot: seL4_CPtr) {
        self.state.set_sched_ctrl_cap(id, slot);
    }

    // Mint a cap that will not be given to the user. Used for badging
    // interrupt notifications and fault endpoints when MCS is enabled.
    // The minted cap lands in the rootserver's next free slot, which is
    // then consumed; the slot is recorded in the dup table.
    fn mint_cap(&mut self, obj_id: CDL_ObjID, badge: seL4_Word) -> Result<seL4_CPtr, seL4_Error> {
        let all_rights = seL4_CapRights::new(
            /*grant_reply=*/ 1, /*grant=*/ 1, /*read=*/ 1, /*write=*/ 1,
        );
        let dest_slot = self.get_free_slot();
        let src_slot = self.get_orig_cap(obj_id);
        unsafe {
            seL4_CNode_Mint(
                /*dest_root=*/ seL4_CapInitThreadCNode,
                /*dest_index=*/ dest_slot,
                /*dest_depth*/ seL4_WordBits as u8,
                /*src_root=*/ seL4_CapInitThreadCNode,
                /*src_index=*/ src_slot,
                /*src_depth=*/ seL4_WordBits as u8,
                all_rights,
                badge,
            )?;
        }
        self.next_free_slot();
        // NB: record in dup table for handoff_holding
        self.set_dup_cap(obj_id, dest_slot);
        Ok(dest_slot)
    }

    // Duplicates |obj_id|'s original cap (all rights, no badge) into
    // the rootserver's next free slot, consuming the slot and recording
    // it in the dup table.
    fn dup_cap(&mut self, obj_id: CDL_ObjID) -> Result<seL4_CPtr, seL4_Error> {
        let all_rights = seL4_CapRights::new(
            /*grant_reply=*/ 1, /*grant=*/ 1, /*read=*/ 1, /*write=*/ 1,
        );
        let dest_slot = self.get_free_slot();
        let src_slot = self.get_orig_cap(obj_id);
        unsafe {
            seL4_CNode_Copy(
                /*dest_root=*/ seL4_CapInitThreadCNode,
                /*dest_index=*/ dest_slot,
                /*dest_depth=*/ seL4_WordBits as u8,
                /*src_root=*/ seL4_CapInitThreadCNode,
                /*src_index=*/ src_slot,
                /*src_depth=*/ seL4_WordBits as u8,
                all_rights,
            )?;
        }
        self.next_free_slot();
        self.set_dup_cap(obj_id, dest_slot);
        Ok(dest_slot)
    }
}
