use {
    super::*, crate::object::*, alloc::sync::Arc, alloc::vec, alloc::vec::Vec, bitflags::bitflags,
    kernel_hal::PageTableTrait, spin::Mutex,
};
bitflags! {
    /// Creation flags for `VmAddressRegion`.
    pub struct VmarFlags: u32 {
        #[allow(clippy::identity_op)]
        /// When randomly allocating subregions, reduce sprawl by placing
        /// allocations near each other.
        const COMPACT               = 1 << 0;
        /// Request that the new region be at the specified offset in its parent region.
        const SPECIFIC              = 1 << 1;
        /// Like `SPECIFIC`, but permits overwriting existing mappings in the
        /// specified range.
        const SPECIFIC_OVERWRITE    = 1 << 2;
        /// Allow `SPECIFIC` mappings and subregions to be created inside the
        /// new region.
        const CAN_MAP_SPECIFIC      = 1 << 3;
        /// The new region can contain readable mappings.
        const CAN_MAP_READ          = 1 << 4;
        /// The new region can contain writable mappings.
        const CAN_MAP_WRITE         = 1 << 5;
        /// The new region can contain executable mappings.
        const CAN_MAP_EXECUTE       = 1 << 6;
        /// Require that the VMO backing the mapping is non-resizable.
        const REQUIRE_NON_RESIZABLE = 1 << 7;
        /// Allow page faults in the new region to be generated and handled on demand.
        const ALLOW_FAULTS          = 1 << 8;
        /// Shorthand for `CAN_MAP_READ | CAN_MAP_WRITE | CAN_MAP_EXECUTE`.
        const CAN_MAP_RXW           = Self::CAN_MAP_READ.bits | Self::CAN_MAP_EXECUTE.bits | Self::CAN_MAP_WRITE.bits;
        /// Flags of a root VMAR: `CAN_MAP_RXW | CAN_MAP_SPECIFIC`.
        const ROOT_FLAGS            = Self::CAN_MAP_RXW.bits | Self::CAN_MAP_SPECIFIC.bits;
    }
}
/// Virtual Memory Address Region (VMAR): a contiguous region of a virtual address space.
pub struct VmAddressRegion {
    flags: VmarFlags,
    base: KObjectBase,
    _counter: CountHelper,
    addr: VirtAddr,
    size: usize,
    parent: Option<Arc<VmAddressRegion>>,
    page_table: Arc<Mutex<dyn PageTableTrait>>,
    /// `None` if this region has been destroyed.
    inner: Mutex<Option<VmarInner>>,
}
impl_kobject!(VmAddressRegion);
define_count_helper!(VmAddressRegion);
#[derive(Default)]
struct VmarInner {
    children: Vec<Arc<VmAddressRegion>>,
    mappings: Vec<Arc<VmMapping>>,
}
impl VmAddressRegion {
    /// Create a new root VMAR for a user address space.
    pub fn new_root() -> Arc<Self> {
        #[cfg(feature = "aspace-separate")]
        let (addr, size) = {
            use core::sync::atomic::*;
            static VMAR_ID: AtomicUsize = AtomicUsize::new(0);
            let i = VMAR_ID.fetch_add(1, Ordering::SeqCst);
            (0x2_0000_0000 + 0x100_0000_0000 * i, 0x100_0000_0000)
        };
        #[cfg(not(feature = "aspace-separate"))]
        let (addr, size) = (USER_ASPACE_BASE as usize, USER_ASPACE_SIZE as usize);
        Arc::new(VmAddressRegion {
            flags: VmarFlags::ROOT_FLAGS,
            base: KObjectBase::new(),
            _counter: CountHelper::new(),
            addr,
            size,
            parent: None,
            page_table: Arc::new(Mutex::new(kernel_hal::PageTable::new())),
            inner: Mutex::new(Some(VmarInner::default())),
        })
    }
    /// Create a new root VMAR for the kernel address space.
    pub fn new_kernel() -> Arc<Self> {
        let kernel_vmar_base = KERNEL_ASPACE_BASE as usize; 
        let kernel_vmar_size = KERNEL_ASPACE_SIZE as usize;
        Arc::new(VmAddressRegion {
            flags: VmarFlags::ROOT_FLAGS,
            base: KObjectBase::new(),
            _counter: CountHelper::new(),
            addr: kernel_vmar_base,
            size: kernel_vmar_size,
            parent: None,
            page_table: Arc::new(Mutex::new(kernel_hal::PageTable::new())),
            inner: Mutex::new(Some(VmarInner::default())),
        })
    }
    /// Create a new root VMAR for the guest physical address space (hypervisor only).
    #[cfg(feature = "hypervisor")]
    pub fn new_guest() -> Arc<Self> {
        let guest_vmar_base = crate::hypervisor::GUEST_PHYSICAL_ASPACE_BASE as usize;
        let guest_vmar_size = crate::hypervisor::GUEST_PHYSICAL_ASPACE_SIZE as usize;
        Arc::new(VmAddressRegion {
            flags: VmarFlags::ROOT_FLAGS,
            base: KObjectBase::new(),
            _counter: CountHelper::new(),
            addr: guest_vmar_base,
            size: guest_vmar_size,
            parent: None,
            page_table: Arc::new(Mutex::new(crate::hypervisor::VmmPageTable::new())),
            inner: Mutex::new(Some(VmarInner::default())),
        })
    }
    /// Create a child VMAR at the given `offset` within this region.
    pub fn allocate_at(
        self: &Arc<Self>,
        offset: usize,
        len: usize,
        flags: VmarFlags,
        align: usize,
    ) -> ZxResult<Arc<Self>> {
        self.allocate(Some(offset), len, flags, align)
    }
    /// Create a child VMAR at the given `offset`, or at an automatically chosen free offset if `offset` is `None`.
    pub fn allocate(
        self: &Arc<Self>,
        offset: Option<usize>,
        len: usize,
        flags: VmarFlags,
        align: usize,
    ) -> ZxResult<Arc<Self>> {
        let mut guard = self.inner.lock();
        let inner = guard.as_mut().ok_or(ZxError::BAD_STATE)?;
        let offset = self.determine_offset(inner, offset, len, align)?;
        let child = Arc::new(VmAddressRegion {
            flags,
            base: KObjectBase::new(),
            _counter: CountHelper::new(),
            addr: self.addr + offset,
            size: len,
            parent: Some(self.clone()),
            page_table: self.page_table.clone(),
            inner: Mutex::new(Some(VmarInner::default())),
        });
        inner.children.push(child.clone());
        Ok(child)
    }
    /// Map the `vmo` into this VMAR at the given `vmar_offset`.
    pub fn map_at(
        &self,
        vmar_offset: usize,
        vmo: Arc<VmObject>,
        vmo_offset: usize,
        len: usize,
        flags: MMUFlags,
    ) -> ZxResult<VirtAddr> {
        self.map(Some(vmar_offset), vmo, vmo_offset, len, flags)
    }
    /// Map the `vmo` into this VMAR at the given `vmar_offset`, or at a free offset if `None`.
    pub fn map(
        &self,
        vmar_offset: Option<usize>,
        vmo: Arc<VmObject>,
        vmo_offset: usize,
        len: usize,
        flags: MMUFlags,
    ) -> ZxResult<VirtAddr> {
        self.map_ext(
            vmar_offset,
            vmo,
            vmo_offset,
            len,
            MMUFlags::RXW,
            flags,
            false,
            true,
        )
    }
    /// Map the `vmo` with extended options: the maximum `permissions`, the initial
    /// MMU `flags`, whether to `overwrite` existing mappings in the range, and
    /// whether to eagerly map the whole range (`map_range`).
    #[allow(clippy::too_many_arguments)]
    pub fn map_ext(
        &self,
        vmar_offset: Option<usize>,
        vmo: Arc<VmObject>,
        vmo_offset: usize,
        len: usize,
        permissions: MMUFlags,
        flags: MMUFlags,
        overwrite: bool,
        map_range: bool,
    ) -> ZxResult<VirtAddr> {
        if !page_aligned(vmo_offset) || !page_aligned(len) || vmo_offset.overflowing_add(len).1 {
            return Err(ZxError::INVALID_ARGS);
        }
        if !permissions.contains(flags & MMUFlags::RXW) {
            return Err(ZxError::ACCESS_DENIED);
        }
        // The mapped range must lie within the VMO.
        if vmo_offset > vmo.len() || len > vmo.len() - vmo_offset {
            return Err(ZxError::INVALID_ARGS);
        }
        let mut guard = self.inner.lock();
        let inner = guard.as_mut().ok_or(ZxError::BAD_STATE)?;
        let offset = self.determine_offset(inner, vmar_offset, len, PAGE_SIZE)?;
        let addr = self.addr + offset;
        let mut flags = flags;
        // Fold the VMO's cache policy into the mapping flags.
        {
            flags |= MMUFlags::from_bits_truncate(vmo.cache_policy() as u32 as usize);
        }
        // If the target range is already occupied, either overwrite it or fail.
        if !self.test_map(inner, offset, len, PAGE_SIZE) {
            if overwrite {
                self.unmap_inner(addr, len, inner)?;
            } else {
                return Err(ZxError::NO_MEMORY);
            }
        }
        // Mappings of named VMOs are always mapped eagerly.
        let map_range = map_range || vmo.name() != "";
        let mapping = VmMapping::new(
            addr,
            len,
            vmo,
            vmo_offset,
            permissions,
            flags,
            self.page_table.clone(),
        );
        if map_range {
            mapping.map()?;
        }
        inner.mappings.push(mapping);
        Ok(addr)
    }
    /// Unmap all mappings and destroy all sub-regions within the range
    /// `[addr, addr + len)`.
    ///
    /// Any sub-region in the range must be contained entirely within it
    /// (partial overlap with a sub-region is an error). Mappings that only
    /// partially overlap the range are split and the overlapping part is unmapped.
    pub fn unmap(&self, addr: VirtAddr, len: usize) -> ZxResult {
        if !page_aligned(addr) || !page_aligned(len) || len == 0 {
            return Err(ZxError::INVALID_ARGS);
        }
        let mut guard = self.inner.lock();
        let inner = guard.as_mut().ok_or(ZxError::BAD_STATE)?;
        self.unmap_inner(addr, len, inner)
    }
    /// Unmap the given range; the caller passes the already-locked `inner`.
    fn unmap_inner(&self, addr: VirtAddr, len: usize, inner: &mut VmarInner) -> ZxResult {
        if !page_aligned(addr) || !page_aligned(len) || len == 0 {
            return Err(ZxError::INVALID_ARGS);
        }
        let begin = addr;
        let end = addr + len;
        // A sub-region may only be unmapped as a whole.
        for vmar in inner.children.iter() {
            if vmar.partial_overlap(begin, end) {
                return Err(ZxError::INVALID_ARGS);
            }
        }
        let mut new_maps = Vec::new();
        inner.mappings.drain_filter(|map| {
            if let Some(new) = map.cut(begin, end) {
                new_maps.push(new);
            }
            map.size() == 0
        });
        inner.mappings.extend(new_maps);
        for vmar in inner.children.drain_filter(|vmar| vmar.within(begin, end)) {
            vmar.destroy_internal()?;
        }
        Ok(())
    }
    /// Change the memory protections of the range `[addr, addr + len)` to `flags`.
    ///
    /// The range must not intersect any sub-region and must be fully covered by mappings.
    pub fn protect(&self, addr: usize, len: usize, flags: MMUFlags) -> ZxResult {
        if !page_aligned(addr) || !page_aligned(len) {
            return Err(ZxError::INVALID_ARGS);
        }
        let mut guard = self.inner.lock();
        let inner = guard.as_mut().ok_or(ZxError::BAD_STATE)?;
        let end_addr = addr + len;
        // The range must not intersect any child VMAR.
        if inner
            .children
            .iter()
            .any(|child| child.end_addr() >= addr && child.addr() <= end_addr)
        {
            return Err(ZxError::INVALID_ARGS);
        }
        let length: usize = inner
            .mappings
            .iter()
            .filter_map(|map| {
                if map.end_addr() >= addr && map.addr() <= end_addr {
                    Some(end_addr.min(map.end_addr()) - addr.max(map.addr()))
                } else {
                    None
                }
            })
            .sum();
        if length != len {
            return Err(ZxError::NOT_FOUND);
        }
        // The new flags must be allowed by every affected mapping.
        if inner
            .mappings
            .iter()
            .filter(|map| map.end_addr() >= addr && map.addr() <= end_addr) 
            .any(|map| !map.is_valid_mapping_flags(flags))
        {
            return Err(ZxError::ACCESS_DENIED);
        }
        inner
            .mappings
            .iter()
            .filter(|map| map.end_addr() >= addr && map.addr() <= end_addr)
            .for_each(|map| {
                let start_index = pages(addr.max(map.addr()) - map.addr());
                let end_index = pages(end_addr.min(map.end_addr()) - map.addr());
                map.protect(flags, start_index, end_index);
            });
        Ok(())
    }
    /// Unmap all mappings within this VMAR and destroy all of its sub-regions, then remove it from its parent.
    pub fn destroy(self: &Arc<Self>) -> ZxResult {
        self.destroy_internal()?;
        // Remove self from the parent's children list.
        if let Some(parent) = &self.parent {
            let mut guard = parent.inner.lock();
            let inner = guard.as_mut().unwrap();
            inner.children.retain(|vmar| !Arc::ptr_eq(self, vmar));
        }
        Ok(())
    }
    /// Destroy this VMAR and all of its children, without removing it from its parent.
    fn destroy_internal(&self) -> ZxResult {
        let mut guard = self.inner.lock();
        let inner = guard.as_mut().ok_or(ZxError::BAD_STATE)?;
        for vmar in inner.children.drain(..) {
            vmar.destroy_internal()?;
        }
        for mapping in inner.mappings.drain(..) {
            drop(mapping);
        }
        *guard = None;
        Ok(())
    }
    /// Unmap all mappings and destroy all sub-regions, but keep this VMAR alive.
    pub fn clear(&self) -> ZxResult {
        let mut guard = self.inner.lock();
        let inner = guard.as_mut().ok_or(ZxError::BAD_STATE)?;
        for vmar in inner.children.drain(..) {
            vmar.destroy_internal()?;
        }
        inner.mappings.clear();
        Ok(())
    }
    /// Get the physical address of the underlying page table.
    pub fn table_phys(&self) -> PhysAddr {
        self.page_table.lock().table_phys()
    }
    /// Get the base address of this region.
    pub fn addr(&self) -> usize {
        self.addr
    }
    /// Whether this VMAR has been destroyed.
    pub fn is_dead(&self) -> bool {
        self.inner.lock().is_none()
    }
    /// Whether this VMAR is still alive.
    pub fn is_alive(&self) -> bool {
        !self.is_dead()
    }
    /// Determine the offset (relative to this region's base) for a new child region
    /// or mapping of length `len`: validate the given `offset`, or find a free area
    /// if `offset` is `None`.
    fn determine_offset(
        &self,
        inner: &VmarInner,
        offset: Option<usize>,
        len: usize,
        align: usize,
    ) -> ZxResult<VirtAddr> {
        if !check_aligned(len, align) {
            Err(ZxError::INVALID_ARGS)
        } else if let Some(offset) = offset {
            if check_aligned(offset, align) && self.test_map(&inner, offset, len, align) {
                Ok(offset)
            } else {
                Err(ZxError::INVALID_ARGS)
            }
        } else if len > self.size {
            Err(ZxError::INVALID_ARGS)
        } else {
            match self.find_free_area(&inner, 0, len, align) {
                Some(offset) => Ok(offset),
                None => Err(ZxError::NO_MEMORY),
            }
        }
    }
    /// Test whether the range `[offset, offset + len)` (relative to this region's base) is free.
    fn test_map(&self, inner: &VmarInner, offset: usize, len: usize, align: usize) -> bool {
        debug_assert!(check_aligned(offset, align));
        debug_assert!(check_aligned(len, align));
        let begin = self.addr + offset;
        let end = begin + len;
        if end > self.addr + self.size {
            return false;
        }
        // The range must not overlap any existing child VMAR or mapping.
        if inner.children.iter().any(|vmar| vmar.overlap(begin, end)) {
            return false;
        }
        if inner.mappings.iter().any(|map| map.overlap(begin, end)) {
            return false;
        }
        true
    }
    /// Find a free area of length `len` with the given alignment, starting the search from `offset_hint`.
    fn find_free_area(
        &self,
        inner: &VmarInner,
        offset_hint: usize,
        len: usize,
        align: usize,
    ) -> Option<usize> {
        
        debug_assert!(check_aligned(offset_hint, align));
        debug_assert!(check_aligned(len, align));
        // Brute force: try the hint and the end of every existing child and
        // mapping as a candidate start offset.
        core::iter::once(offset_hint)
            .chain(inner.children.iter().map(|map| map.end_addr() - self.addr))
            .chain(inner.mappings.iter().map(|map| map.end_addr() - self.addr))
            .find(|&offset| self.test_map(inner, offset, len, align))
    }
    fn end_addr(&self) -> VirtAddr {
        self.addr + self.size
    }
    fn overlap(&self, begin: VirtAddr, end: VirtAddr) -> bool {
        !(self.addr >= end || self.end_addr() <= begin)
    }
    fn within(&self, begin: VirtAddr, end: VirtAddr) -> bool {
        begin <= self.addr && self.end_addr() <= end
    }
    fn partial_overlap(&self, begin: VirtAddr, end: VirtAddr) -> bool {
        self.overlap(begin, end) && !self.within(begin, end)
    }
    fn contains(&self, vaddr: VirtAddr) -> bool {
        self.addr <= vaddr && vaddr < self.end_addr()
    }
    /// Get information (base address and length) about this VMAR.
    pub fn get_info(&self) -> VmarInfo {
        VmarInfo {
            base: self.addr(),
            len: self.size,
        }
    }
    /// Get the creation flags of this VMAR.
    pub fn get_flags(&self) -> VmarFlags {
        self.flags
    }
    /// Dump all mappings of this VMAR and its children recursively (for debugging).
    pub fn dump(&self) {
        let mut guard = self.inner.lock();
        let inner = guard.as_mut().unwrap();
        for map in inner.mappings.iter() {
            debug!("{:x?}", map);
        }
        for child in inner.children.iter() {
            child.dump();
        }
    }
    /// Get the base address of the vDSO code mapping, if the vDSO is mapped in this address space.
    pub fn vdso_base_addr(&self) -> Option<usize> {
        let guard = self.inner.lock();
        let inner = guard.as_ref().unwrap();
        for map in inner.mappings.iter() {
            if map.vmo.name().starts_with("vdso") && map.inner.lock().vmo_offset == 0x7000 {
                return Some(map.addr());
            }
        }
        for vmar in inner.children.iter() {
            if let Some(addr) = vmar.vdso_base_addr() {
                return Some(addr);
            }
        }
        None
    }
    /// Handle a page fault at `vaddr` with the given access `flags`, delegating to
    /// the child VMAR or mapping that contains the address.
    pub fn handle_page_fault(&self, vaddr: VirtAddr, flags: MMUFlags) -> ZxResult {
        let guard = self.inner.lock();
        let inner = guard.as_ref().unwrap();
        if !self.contains(vaddr) {
            return Err(ZxError::NOT_FOUND);
        }
        if let Some(child) = inner.children.iter().find(|ch| ch.contains(vaddr)) {
            return child.handle_page_fault(vaddr, flags);
        }
        if let Some(mapping) = inner.mappings.iter().find(|map| map.contains(vaddr)) {
            return mapping.handle_page_fault(vaddr, flags);
        }
        Err(ZxError::NOT_FOUND)
    }
    fn for_each_mapping(&self, f: &mut impl FnMut(&Arc<VmMapping>)) {
        let guard = self.inner.lock();
        let inner = guard.as_ref().unwrap();
        for map in inner.mappings.iter() {
            f(map);
        }
        for child in inner.children.iter() {
            child.for_each_mapping(f);
        }
    }
    /// Clone the mappings of another VMAR `src` into this one, backed by child VMOs of the originals.
    pub fn fork_from(&self, src: &Arc<Self>) -> ZxResult {
        let mut guard = self.inner.lock();
        let inner = guard.as_mut().unwrap();
        inner.fork_from(src, &self.page_table)
    }
    /// Collect memory usage statistics over all mappings in this VMAR and its children.
    pub fn get_task_stats(&self) -> TaskStatsInfo {
        let mut task_stats = TaskStatsInfo::default();
        self.for_each_mapping(&mut |map| map.fill_in_task_status(&mut task_stats));
        task_stats
    }
    /// Read memory from the address space at `vaddr` into `buf`.
    ///
    /// Returns the number of bytes actually read, limited to the end of the containing mapping.
    pub fn read_memory(&self, vaddr: usize, buf: &mut [u8]) -> ZxResult<usize> {
        // Currently only reads within a single mapping are supported.
        let map = self.find_mapping(vaddr).ok_or(ZxError::NO_MEMORY)?;
        let map_inner = map.inner.lock();
        let vmo_offset = vaddr - map_inner.addr + map_inner.vmo_offset;
        let size_limit = map_inner.addr + map_inner.size - vaddr;
        let actual_size = buf.len().min(size_limit);
        map.vmo.read(vmo_offset, &mut buf[0..actual_size])?;
        Ok(actual_size)
    }
    /// Write the bytes in `buf` to the address space at `vaddr`.
    ///
    /// Returns the number of bytes actually written, limited to the end of the containing mapping.
    pub fn write_memory(&self, vaddr: usize, buf: &[u8]) -> ZxResult<usize> {
        // Currently only writes within a single mapping are supported.
        let map = self.find_mapping(vaddr).ok_or(ZxError::NO_MEMORY)?;
        let map_inner = map.inner.lock();
        let vmo_offset = vaddr - map_inner.addr + map_inner.vmo_offset;
        let size_limit = map_inner.addr + map_inner.size - vaddr;
        let actual_size = buf.len().min(size_limit);
        map.vmo.write(vmo_offset, &buf[0..actual_size])?;
        Ok(actual_size)
    }
    /// Find the mapping that contains `vaddr`, searching this VMAR and its children recursively.
    pub fn find_mapping(&self, vaddr: usize) -> Option<Arc<VmMapping>> {
        let guard = self.inner.lock();
        let inner = guard.as_ref().unwrap();
        if let Some(mapping) = inner.mappings.iter().find(|map| map.contains(vaddr)) {
            return Some(mapping.clone());
        }
        if let Some(child) = inner.children.iter().find(|ch| ch.contains(vaddr)) {
            return child.find_mapping(vaddr);
        }
        None
    }
    #[cfg(test)]
    fn count(&self) -> usize {
        let mut guard = self.inner.lock();
        let inner = guard.as_mut().unwrap();
        inner.mappings.len() + inner.children.len()
    }
    #[cfg(test)]
    fn used_size(&self) -> usize {
        let mut guard = self.inner.lock();
        let inner = guard.as_mut().unwrap();
        let map_size: usize = inner.mappings.iter().map(|map| map.size()).sum();
        let vmar_size: usize = inner.children.iter().map(|vmar| vmar.size).sum();
        map_size + vmar_size
    }
}
impl VmarInner {
    /// Clone all mappings of `src` (including those of its sub-regions) into `self`, using `page_table`.
    fn fork_from(
        &mut self,
        src: &Arc<VmAddressRegion>,
        page_table: &Arc<Mutex<dyn PageTableTrait>>,
    ) -> ZxResult {
        let src_guard = src.inner.lock();
        let src_inner = src_guard.as_ref().unwrap();
        for child in src_inner.children.iter() {
            self.fork_from(child, page_table)?;
        }
        for map in src_inner.mappings.iter() {
            let mapping = map.clone_map(page_table.clone())?;
            mapping.map()?;
            self.mappings.push(mapping);
        }
        Ok(())
    }
}
#[repr(C)]
#[derive(Debug)]
pub struct VmarInfo {
    base: usize,
    len: usize,
}
/// A mapping of a VMO into a `VmAddressRegion`.
pub struct VmMapping {
    /// The maximum RWX permissions allowed for this mapping.
    permissions: MMUFlags,
    vmo: Arc<VmObject>,
    page_table: Arc<Mutex<dyn PageTableTrait>>,
    inner: Mutex<VmMappingInner>,
}
#[derive(Debug, Clone)]
struct VmMappingInner {
    /// The current MMU flags of each page in the mapping.
    flags: Vec<MMUFlags>,
    addr: VirtAddr,
    size: usize,
    vmo_offset: usize,
}
#[repr(C)]
#[derive(Default)]
pub struct TaskStatsInfo {
    mapped_bytes: u64,
    private_bytes: u64,
    shared_bytes: u64,
    scaled_shared_bytes: u64,
}
impl core::fmt::Debug for VmMapping {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        let inner = self.inner.lock();
        f.debug_struct("VmMapping")
            .field("addr", &inner.addr)
            .field("size", &inner.size)
            .field("permissions", &self.permissions)
            .field("flags", &inner.flags)
            .field("vmo_id", &self.vmo.id())
            .field("vmo_offset", &inner.vmo_offset)
            .finish()
    }
}
impl VmMapping {
    fn new(
        addr: VirtAddr,
        size: usize,
        vmo: Arc<VmObject>,
        vmo_offset: usize,
        permissions: MMUFlags,
        flags: MMUFlags,
        page_table: Arc<Mutex<dyn PageTableTrait>>,
    ) -> Arc<Self> {
        let mapping = Arc::new(VmMapping {
            inner: Mutex::new(VmMappingInner {
                flags: vec![flags; pages(size)],
                addr,
                size,
                vmo_offset,
            }),
            permissions,
            page_table,
            vmo: vmo.clone(),
        });
        vmo.append_mapping(Arc::downgrade(&mapping));
        mapping
    }
    /// Commit all pages of the mapped VMO range and map them into the page table.
    fn map(self: &Arc<Self>) -> ZxResult {
        self.vmo.commit_pages_with(&mut |commit| {
            let inner = self.inner.lock();
            let mut page_table = self.page_table.lock();
            let page_num = inner.size / PAGE_SIZE;
            let vmo_offset = inner.vmo_offset / PAGE_SIZE;
            for i in 0..page_num {
                let paddr = commit(vmo_offset + i, inner.flags[i])?;
                page_table
                    .map(inner.addr + i * PAGE_SIZE, paddr, inner.flags[i])
                    .expect("failed to map");
            }
            Ok(())
        })
    }
    fn unmap(&self) {
        let inner = self.inner.lock();
        let pages = inner.size / PAGE_SIZE;
        // Unmap the whole mapped range from the page table.
        self.page_table
            .lock()
            .unmap_cont(inner.addr, pages)
            .expect("failed to unmap")
    }
    fn fill_in_task_status(&self, task_stats: &mut TaskStatsInfo) {
        let (start_idx, end_idx) = {
            let inner = self.inner.lock();
            let start_idx = inner.vmo_offset / PAGE_SIZE;
            (start_idx, start_idx + inner.size / PAGE_SIZE)
        };
        task_stats.mapped_bytes += self.vmo.len() as u64;
        let committed_pages = self.vmo.committed_pages_in_range(start_idx, end_idx);
        let share_count = self.vmo.share_count();
        if share_count == 1 {
            task_stats.private_bytes += (committed_pages * PAGE_SIZE) as u64;
        } else {
            task_stats.shared_bytes += (committed_pages * PAGE_SIZE) as u64;
            task_stats.scaled_shared_bytes += (committed_pages * PAGE_SIZE / share_count) as u64;
        }
    }
    /// Remove the overlap of this mapping with `[begin, end)` from the page table.
    ///
    /// Cutting a hole in the middle returns a new mapping for the tail; a mapping
    /// cut down to zero size is removed by the caller.
    fn cut(&self, begin: VirtAddr, end: VirtAddr) -> Option<Arc<Self>> {
        if !self.overlap(begin, end) {
            return None;
        }
        let mut inner = self.inner.lock();
        let mut page_table = self.page_table.lock();
        if inner.addr >= begin && inner.end_addr() <= end {
            // The mapping is fully covered: unmap everything.
            page_table
                .unmap_cont(inner.addr, pages(inner.size))
                .expect("failed to unmap");
            inner.size = 0;
            inner.flags.clear();
            None
        } else if inner.addr >= begin && inner.addr < end {
            // Cut the head of the mapping.
            let cut_len = end - inner.addr;
            page_table
                .unmap_cont(inner.addr, pages(cut_len))
                .expect("failed to unmap");
            inner.addr = end;
            inner.size -= cut_len;
            inner.vmo_offset += cut_len;
            inner.flags.drain(0..pages(cut_len));
            None
        } else if inner.end_addr() <= end && inner.end_addr() > begin {
            // Cut the tail of the mapping.
            let cut_len = inner.end_addr() - begin;
            let new_len = begin - inner.addr;
            page_table
                .unmap_cont(begin, pages(cut_len))
                .expect("failed to unmap");
            inner.size = new_len;
            // Keep one flags entry per remaining page.
            inner.flags.truncate(pages(new_len));
            None
        } else {
            // Cut a hole in the middle: keep the head and split off a new mapping for the tail.
            let cut_len = end - begin;
            let new_len1 = begin - inner.addr;
            let new_len2 = inner.end_addr() - end;
            page_table
                .unmap_cont(begin, pages(cut_len))
                .expect("failed to unmap");
            let new_flags_range = (pages(inner.size) - pages(new_len2))..pages(inner.size);
            let new_mapping = Arc::new(VmMapping {
                permissions: self.permissions,
                vmo: self.vmo.clone(),
                page_table: self.page_table.clone(),
                inner: Mutex::new(VmMappingInner {
                    flags: inner.flags.drain(new_flags_range).collect(),
                    addr: end,
                    size: new_len2,
                    vmo_offset: inner.vmo_offset + (end - inner.addr),
                }),
            });
            inner.size = new_len1;
            // Keep one flags entry per remaining page.
            inner.flags.truncate(pages(new_len1));
            Some(new_mapping)
        }
    }
    fn overlap(&self, begin: VirtAddr, end: VirtAddr) -> bool {
        let inner = self.inner.lock();
        !(inner.addr >= end || inner.end_addr() <= begin)
    }
    fn contains(&self, vaddr: VirtAddr) -> bool {
        let inner = self.inner.lock();
        inner.addr <= vaddr && vaddr < inner.end_addr()
    }
    fn is_valid_mapping_flags(&self, flags: MMUFlags) -> bool {
        self.permissions.contains(flags & MMUFlags::RXW)
    }
    fn protect(&self, flags: MMUFlags, start_index: usize, end_index: usize) {
        let mut inner = self.inner.lock();
        let mut pg_table = self.page_table.lock();
        for i in start_index..end_index {
            let mut new_flags = inner.flags[i];
            new_flags.remove(MMUFlags::RXW);
            new_flags.insert(flags & MMUFlags::RXW);
            inner.flags[i] = new_flags;
            pg_table
                .protect(inner.addr + i * PAGE_SIZE, new_flags)
                .unwrap();
        }
    }
    fn size(&self) -> usize {
        self.inner.lock().size
    }
    fn addr(&self) -> VirtAddr {
        self.inner.lock().addr
    }
    fn end_addr(&self) -> VirtAddr {
        self.inner.lock().end_addr()
    }
    /// Get the current MMU flags of the page containing `vaddr`.
    pub fn get_flags(&self, vaddr: usize) -> ZxResult<MMUFlags> {
        if self.contains(vaddr) {
            let page_id = (vaddr - self.addr()) / PAGE_SIZE;
            Ok(self.inner.lock().flags[page_id])
        } else {
            Err(ZxError::NO_MEMORY)
        }
    }
    /// Apply a range change requested by the backing VMO (remove write permission or unmap) to the overlapping pages.
    pub(super) fn range_change(&self, offset: usize, len: usize, op: RangeChangeOp) {
        let inner = self.inner.try_lock();
        // Use `try_lock` to avoid a deadlock: a VMO operation (e.g. committing pages
        // during `map`) may trigger a range change on this mapping while its lock is
        // already held by the current thread.
        if let Some(inner) = inner {
            let start = offset.max(inner.vmo_offset);
            let end = (inner.vmo_offset + inner.size / PAGE_SIZE).min(offset + len);
            if !(start..end).is_empty() {
                let mut pg_table = self.page_table.lock();
                for i in (start - inner.vmo_offset)..(end - inner.vmo_offset) {
                    match op {
                        RangeChangeOp::RemoveWrite => {
                            let mut new_flag = inner.flags[i];
                            new_flag.remove(MMUFlags::WRITE);
                            pg_table
                                .protect(inner.addr + i * PAGE_SIZE, new_flag)
                                .unwrap()
                        }
                        RangeChangeOp::Unmap => pg_table.unmap(inner.addr + i * PAGE_SIZE).unwrap(),
                    }
                }
            }
        }
    }
    /// Handle a page fault at `vaddr`: commit the page in the VMO and update the page table entry.
    pub(crate) fn handle_page_fault(&self, vaddr: VirtAddr, access_flags: MMUFlags) -> ZxResult {
        let vaddr = round_down_pages(vaddr);
        let page_idx = (vaddr - self.addr()) / PAGE_SIZE;
        let mut flags = self.inner.lock().flags[page_idx];
        if !flags.contains(access_flags) {
            return Err(ZxError::ACCESS_DENIED);
        }
        if !access_flags.contains(MMUFlags::WRITE) {
            flags.remove(MMUFlags::WRITE)
        }
        let paddr = self.vmo.commit_page(page_idx, access_flags)?;
        let mut pg_table = self.page_table.lock();
        pg_table.unmap(vaddr).unwrap();
        pg_table
            .map(vaddr, paddr, flags)
            .map_err(|_| ZxError::ACCESS_DENIED)?;
        Ok(())
    }
    /// Create a copy of this mapping backed by a child VMO, mapped through the given `page_table`.
    fn clone_map(&self, page_table: Arc<Mutex<dyn PageTableTrait>>) -> ZxResult<Arc<Self>> {
        let new_vmo = self.vmo.create_child(false, 0, self.vmo.len())?;
        let mapping = Arc::new(VmMapping {
            inner: Mutex::new(self.inner.lock().clone()),
            permissions: self.permissions,
            page_table,
            vmo: new_vmo.clone(),
        });
        new_vmo.append_mapping(Arc::downgrade(&mapping));
        Ok(mapping)
    }
}
impl VmMappingInner {
    fn end_addr(&self) -> VirtAddr {
        self.addr + self.size
    }
}
impl Drop for VmMapping {
    fn drop(&mut self) {
        self.unmap();
    }
}
/// Base address of the kernel address space.
pub const KERNEL_ASPACE_BASE: u64 = 0xffff_ff02_0000_0000;
/// Size of the kernel address space.
pub const KERNEL_ASPACE_SIZE: u64 = 0x0000_0080_0000_0000;
/// Base address of the user address space.
pub const USER_ASPACE_BASE: u64 = 0x0000_0000_0100_0000;
/// Size of the user address space.
pub const USER_ASPACE_SIZE: u64 = (1u64 << 47) - 4096 - USER_ASPACE_BASE;
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn create_child() {
        let root_vmar = VmAddressRegion::new_root();
        let child = root_vmar
            .allocate_at(0, 0x2000, VmarFlags::CAN_MAP_RXW, PAGE_SIZE)
            .expect("failed to create child VMAR");
        // Invalid arguments: unaligned offset, unaligned length,
        // overlap with an existing child, and a range beyond the parent.
        assert_eq!(
            root_vmar
                .allocate_at(0x2001, 0x1000, VmarFlags::CAN_MAP_RXW, PAGE_SIZE)
                .err(),
            Some(ZxError::INVALID_ARGS)
        );
        assert_eq!(
            root_vmar
                .allocate_at(0x2000, 1, VmarFlags::CAN_MAP_RXW, PAGE_SIZE)
                .err(),
            Some(ZxError::INVALID_ARGS)
        );
        assert_eq!(
            root_vmar
                .allocate_at(0, 0x1000, VmarFlags::CAN_MAP_RXW, PAGE_SIZE)
                .err(),
            Some(ZxError::INVALID_ARGS)
        );
        assert_eq!(
            child
                .allocate_at(0x1000, 0x2000, VmarFlags::CAN_MAP_RXW, PAGE_SIZE)
                .err(),
            Some(ZxError::INVALID_ARGS)
        );
    }
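    // A minimal sketch (not part of the original suite) of non-specific allocation:
    // with `offset: None`, `allocate` searches for a free gap via `find_free_area`,
    // so the new child must land outside the child created at offset 0.
    #[test]
    fn create_child_non_specific() {
        let root_vmar = VmAddressRegion::new_root();
        let first = root_vmar
            .allocate_at(0, 0x2000, VmarFlags::CAN_MAP_RXW, PAGE_SIZE)
            .expect("failed to create child VMAR");
        let second = root_vmar
            .allocate(None, 0x1000, VmarFlags::CAN_MAP_RXW, PAGE_SIZE)
            .expect("failed to create child VMAR");
        // The automatically chosen offset must not overlap the first child.
        assert!(second.addr() >= first.addr() + 0x2000);
    }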
    // Magic value written through one mapping and read back through another.
    const MAGIC: usize = 0xdead_beaf;
    #[test]
    #[allow(unsafe_code)]
    fn map() {
        let vmar = VmAddressRegion::new_root();
        let vmo = VmObject::new_paged(4);
        let flags = MMUFlags::READ | MMUFlags::WRITE;
        // Invalid arguments: range beyond the end of the VMO, length too large,
        // unaligned length, and unaligned VMO offset.
        assert_eq!(
            vmar.map_at(0, vmo.clone(), 0x4000, 0x1000, flags),
            Err(ZxError::INVALID_ARGS)
        );
        assert_eq!(
            vmar.map_at(0, vmo.clone(), 0, 0x5000, flags),
            Err(ZxError::INVALID_ARGS)
        );
        assert_eq!(
            vmar.map_at(0, vmo.clone(), 0x1000, 1, flags),
            Err(ZxError::INVALID_ARGS)
        );
        assert_eq!(
            vmar.map_at(0, vmo.clone(), 1, 0x1000, flags),
            Err(ZxError::INVALID_ARGS)
        );
        vmar.map_at(0, vmo.clone(), 0, 0x4000, flags).unwrap();
        vmar.map_at(0x12000, vmo.clone(), 0x2000, 0x1000, flags)
            .unwrap();
        unsafe {
            ((vmar.addr() + 0x2000) as *mut usize).write(MAGIC);
            assert_eq!(((vmar.addr() + 0x12000) as *const usize).read(), MAGIC);
        }
    }
    /// A sample VMAR hierarchy used by the tests below:
    ///
    ///  +-----------+-----------+-----------+------
    ///  |                root                ...
    ///  +-----------+-----------+-----------+------
    ///  |        child1         |  child2   |
    ///  +-----------+-----------+-----------+
    ///  | grandson1 | grandson2 |
    ///  +-----------+-----------+
    struct Sample {
        root: Arc<VmAddressRegion>,
        child1: Arc<VmAddressRegion>,
        child2: Arc<VmAddressRegion>,
        grandson1: Arc<VmAddressRegion>,
        grandson2: Arc<VmAddressRegion>,
    }
    impl Sample {
        fn new() -> Self {
            let root = VmAddressRegion::new_root();
            let child1 = root
                .allocate_at(0, 0x2000, VmarFlags::CAN_MAP_RXW, PAGE_SIZE)
                .unwrap();
            let child2 = root
                .allocate_at(0x2000, 0x1000, VmarFlags::CAN_MAP_RXW, PAGE_SIZE)
                .unwrap();
            let grandson1 = child1
                .allocate_at(0, 0x1000, VmarFlags::CAN_MAP_RXW, PAGE_SIZE)
                .unwrap();
            let grandson2 = child1
                .allocate_at(0x1000, 0x1000, VmarFlags::CAN_MAP_RXW, PAGE_SIZE)
                .unwrap();
            Sample {
                root,
                child1,
                child2,
                grandson1,
                grandson2,
            }
        }
    }
    #[test]
    fn unmap_vmar() {
        let s = Sample::new();
        let base = s.root.addr();
        s.child1.unmap(base, 0x1000).unwrap();
        assert!(s.grandson1.is_dead());
        assert!(s.grandson2.is_alive());
        // Unmapping a range that partially overlaps a sub-region is an error.
        let s = Sample::new();
        let base = s.root.addr();
        assert_eq!(
            s.root.unmap(base + 0x1000, 0x2000),
            Err(ZxError::INVALID_ARGS)
        );
        // Unmapping a range that covers nothing is a no-op.
        let s = Sample::new();
        let base = s.root.addr();
        s.child1.unmap(base + 0x8000, 0x1000).unwrap();
    }
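    // A minimal sketch (not part of the original suite) exercising `read_memory` and
    // `write_memory`: both resolve the mapping containing `vaddr` and go through its
    // backing VMO, so a round trip should return the written bytes.
    #[test]
    fn read_write_memory() {
        let vmar = VmAddressRegion::new_root();
        let vmo = VmObject::new_paged(1);
        let flags = MMUFlags::READ | MMUFlags::WRITE;
        let addr = vmar.map_at(0, vmo, 0, 0x1000, flags).unwrap();
        // Writes are limited to the mapping that contains the start address.
        assert_eq!(vmar.write_memory(addr, &[1, 2, 3, 4]).unwrap(), 4);
        let mut buf = [0u8; 4];
        assert_eq!(vmar.read_memory(addr, &mut buf).unwrap(), 4);
        assert_eq!(buf, [1, 2, 3, 4]);
    }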
    #[test]
    fn destroy() {
        let s = Sample::new();
        s.child1.destroy().unwrap();
        assert!(s.child1.is_dead());
        assert!(s.grandson1.is_dead());
        assert!(s.grandson2.is_dead());
        assert!(s.child2.is_alive());
        // The region previously occupied by the destroyed child can be reused.
        assert!(s
            .root
            .allocate_at(0, 0x1000, VmarFlags::CAN_MAP_RXW, PAGE_SIZE)
            .is_ok());
    }
    #[test]
    fn unmap_mapping() {
        // Map a 5-page VMO, then unmap pieces of it:
        // a range beyond the mapping is a no-op, a range in the middle splits the
        // mapping in two, ranges at the head or tail shrink a piece, and
        // unmapping the last page of a piece removes it.
        let vmar = VmAddressRegion::new_root();
        let base = vmar.addr();
        let vmo = VmObject::new_paged(5);
        let flags = MMUFlags::READ | MMUFlags::WRITE;
        vmar.map_at(0, vmo, 0, 0x5000, flags).unwrap();
        assert_eq!(vmar.count(), 1);
        assert_eq!(vmar.used_size(), 0x5000);
        // Unmap a range beyond the mapping: nothing changes.
        vmar.unmap(base + 0x5000, 0x1000).unwrap();
        assert_eq!(vmar.count(), 1);
        assert_eq!(vmar.used_size(), 0x5000);
        // Unmap a page in the middle: the mapping splits in two.
        vmar.unmap(base + 0x3000, 0x1000).unwrap();
        assert_eq!(vmar.count(), 2);
        assert_eq!(vmar.used_size(), 0x4000);
        // Unmap the first page: the head piece shrinks.
        vmar.unmap(base, 0x1000).unwrap();
        assert_eq!(vmar.count(), 2);
        assert_eq!(vmar.used_size(), 0x3000);
        // Unmap the last page of the head piece: it shrinks again.
        vmar.unmap(base + 0x2000, 0x1000).unwrap();
        assert_eq!(vmar.count(), 2);
        assert_eq!(vmar.used_size(), 0x2000);
        // Unmap the remaining page of the head piece: it is removed.
        vmar.unmap(base + 0x1000, 0x1000).unwrap();
        assert_eq!(vmar.count(), 1);
        assert_eq!(vmar.used_size(), 0x1000);
    }
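    // A minimal sketch (not part of the original suite) of `protect`: changing the
    // protection of a fully mapped, page-aligned sub-range succeeds, while a range
    // that is not fully covered by mappings fails with NOT_FOUND.
    #[test]
    fn protect_mapping() {
        let vmar = VmAddressRegion::new_root();
        let base = vmar.addr();
        let vmo = VmObject::new_paged(2);
        vmar.map_at(0, vmo, 0, 0x2000, MMUFlags::READ | MMUFlags::WRITE)
            .unwrap();
        // Drop WRITE on the first page only.
        vmar.protect(base, 0x1000, MMUFlags::READ).unwrap();
        // A range not fully covered by mappings cannot be protected.
        assert_eq!(
            vmar.protect(base + 0x2000, 0x1000, MMUFlags::READ),
            Err(ZxError::NOT_FOUND)
        );
    }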
    #[test]
    #[allow(unsafe_code)]
    fn copy_on_write_update_mapping() {
        let vmar = VmAddressRegion::new_root();
        let vmo = VmObject::new_paged(1);
        vmo.test_write(0, 1);
        vmar.map_at(0, vmo.clone(), 0, PAGE_SIZE, MMUFlags::RXW)
            .unwrap();
        let child_vmo = vmo.create_child(false, 0, 1 * PAGE_SIZE).unwrap();
        // Before the parent VMO is written, parent and child share the same physical page.
        assert_eq!(
            vmo.commit_page(0, MMUFlags::READ),
            child_vmo.commit_page(0, MMUFlags::READ)
        );
        assert_eq!(vmo.test_read(0), 1);
        assert_eq!(child_vmo.test_read(0), 1);
        unsafe {
            assert_eq!((vmar.addr() as *const u8).read(), 1);
        }
        vmo.test_write(0, 2);
        // After writing the parent, parent and child no longer share the page (copy-on-write).
        assert_ne!(
            vmo.commit_page(0, MMUFlags::READ),
            child_vmo.commit_page(0, MMUFlags::READ)
        );
        assert_eq!(vmo.test_read(0), 2);
        assert_eq!(child_vmo.test_read(0), 1);
        // The hardware mapping still points to the old page; a page fault on the
        // address remaps it, after which the new value is visible through the mapping.
        vmar.handle_page_fault(vmar.addr(), MMUFlags::READ).unwrap();
        unsafe {
            assert_eq!((vmar.addr() as *const u8).read(), 2);
        }
    }
}