use {
self::{paged::*, physical::*, slice::*},
super::*,
crate::object::*,
alloc::{
sync::{Arc, Weak},
vec::Vec,
},
bitflags::bitflags,
core::ops::Deref,
kernel_hal::CachePolicy,
spin::Mutex,
};
mod paged;
mod physical;
mod slice;
// Global kernel counters tracking VMO page allocation / deallocation events.
kcounter!(VMO_PAGE_ALLOC, "vmo.page_alloc");
kcounter!(VMO_PAGE_DEALLOC, "vmo.page_dealloc");
/// Total bytes currently held in allocated (not yet deallocated) VMO pages.
pub fn vmo_page_bytes() -> usize {
    // NOTE(review): assumes deallocations never outnumber allocations;
    // otherwise this subtraction underflows (panics in debug builds).
    (VMO_PAGE_ALLOC.get() - VMO_PAGE_DEALLOC.get()) * PAGE_SIZE
}
/// Common interface implemented by every VMO backing store
/// (paged, physical, and slice — see the `paged`/`physical`/`slice` modules).
#[allow(clippy::len_without_is_empty)]
pub trait VMObjectTrait: Sync + Send {
    /// Read `buf.len()` bytes starting at `offset` into `buf`.
    fn read(&self, offset: usize, buf: &mut [u8]) -> ZxResult;
    /// Write the bytes of `buf` starting at `offset`.
    fn write(&self, offset: usize, buf: &[u8]) -> ZxResult;
    /// Fill `[offset, offset + len)` with zeros.
    fn zero(&self, offset: usize, len: usize) -> ZxResult;
    /// Current length of the object in bytes.
    fn len(&self) -> usize;
    /// Resize the object to `len` bytes.
    fn set_len(&self, len: usize) -> ZxResult;
    /// Commit the page at `page_idx` and return its physical address.
    fn commit_page(&self, page_idx: usize, flags: MMUFlags) -> ZxResult<PhysAddr>;
    /// Commit pages in bulk: `f` is invoked with a committer callback that maps
    /// a (page index, flags) pair to a physical address.
    fn commit_pages_with(
        &self,
        f: &mut dyn FnMut(&mut dyn FnMut(usize, MMUFlags) -> ZxResult<PhysAddr>) -> ZxResult,
    ) -> ZxResult;
    /// Commit backing memory for `[offset, offset + len)`.
    fn commit(&self, offset: usize, len: usize) -> ZxResult;
    /// Release committed backing memory for `[offset, offset + len)`.
    fn decommit(&self, offset: usize, len: usize) -> ZxResult;
    /// Create a child object covering `[offset, offset + len)`.
    fn create_child(&self, offset: usize, len: usize) -> ZxResult<Arc<dyn VMObjectTrait>>;
    /// Register a mapping that refers to this object (no-op by default).
    fn append_mapping(&self, _mapping: Weak<VmMapping>) {}
    /// Unregister a previously appended mapping (no-op by default).
    fn remove_mapping(&self, _mapping: Weak<VmMapping>) {}
    /// Fill backend-specific fields of `info` (e.g. type/committed bytes).
    fn complete_info(&self, info: &mut VmoInfo);
    /// Current cache policy of the object.
    fn cache_policy(&self) -> CachePolicy;
    /// Change the cache policy.
    fn set_cache_policy(&self, policy: CachePolicy) -> ZxResult;
    /// Number of committed pages whose index lies in `[start_idx, end_idx)`.
    fn committed_pages_in_range(&self, start_idx: usize, end_idx: usize) -> usize;
    /// Pin pages in `[offset, offset + len)`; unsupported by default.
    fn pin(&self, _offset: usize, _len: usize) -> ZxResult {
        Err(ZxError::NOT_SUPPORTED)
    }
    /// Unpin pages in `[offset, offset + len)`; unsupported by default.
    fn unpin(&self, _offset: usize, _len: usize) -> ZxResult {
        Err(ZxError::NOT_SUPPORTED)
    }
    /// Whether the backing pages are physically contiguous (default: no).
    fn is_contiguous(&self) -> bool {
        false
    }
    /// Whether this object is backed by the paged implementation (default: no).
    fn is_paged(&self) -> bool {
        false
    }
}
/// Kernel object wrapper around a VMO backing implementation.
///
/// Holds handle-level state (name, signals, resizability, parent/children
/// bookkeeping) and delegates data operations to `trait_`.
pub struct VmObject {
    /// Common kernel-object state (koid, name, signals).
    base: KObjectBase,
    /// Keeps the per-type object count accurate; never read directly.
    _counter: CountHelper,
    /// Whether `set_len` is permitted on this object.
    resizable: bool,
    /// The backing store implementation (paged / physical / slice).
    trait_: Arc<dyn VMObjectTrait>,
    /// Mutable handle-level state, guarded by a spinlock.
    inner: Mutex<VmObjectInner>,
}
impl_kobject!(VmObject);
define_count_helper!(VmObject);
/// Mutable state of a `VmObject`, protected by `VmObject::inner`.
#[derive(Default)]
struct VmObjectInner {
    /// Parent VMO, if this object was created as a child/slice.
    parent: Weak<VmObject>,
    /// Weak references to live children; stale entries are pruned lazily.
    children: Vec<Weak<VmObject>>,
    /// Number of `VmMapping`s currently referring to this object.
    mapping_count: usize,
    /// User-visible content size in bytes (may be smaller than the page-rounded length).
    content_size: usize,
}
impl VmObject {
    /// Create a paged VMO spanning `pages` pages (non-resizable).
    pub fn new_paged(pages: usize) -> Arc<Self> {
        Self::new_paged_with_resizable(false, pages)
    }
    /// Create a paged VMO spanning `pages` pages, optionally resizable.
    pub fn new_paged_with_resizable(resizable: bool, pages: usize) -> Arc<Self> {
        // A fresh VMO has no children, so VMO_ZERO_CHILDREN starts asserted.
        let base = KObjectBase::with_signal(Signal::VMO_ZERO_CHILDREN);
        Arc::new(VmObject {
            resizable,
            _counter: CountHelper::new(),
            trait_: VMObjectPaged::new(pages),
            inner: Mutex::new(VmObjectInner::default()),
            base,
        })
    }
    /// Create a VMO backed by the fixed physical range starting at `paddr`.
    pub fn new_physical(paddr: PhysAddr, pages: usize) -> Arc<Self> {
        Arc::new(VmObject {
            base: KObjectBase::with_signal(Signal::VMO_ZERO_CHILDREN),
            resizable: false,
            _counter: CountHelper::new(),
            trait_: VMObjectPhysical::new(paddr, pages),
            inner: Mutex::new(VmObjectInner::default()),
        })
    }
    /// Create a paged VMO whose backing pages are physically contiguous,
    /// aligned to `1 << align_log2`. Fails if the allocation cannot be made.
    pub fn new_contiguous(pages: usize, align_log2: usize) -> ZxResult<Arc<Self>> {
        let vmo = Arc::new(VmObject {
            base: KObjectBase::with_signal(Signal::VMO_ZERO_CHILDREN),
            resizable: false,
            _counter: CountHelper::new(),
            trait_: VMObjectPaged::new_contiguous(pages, align_log2)?,
            inner: Mutex::new(VmObjectInner::default()),
        });
        Ok(vmo)
    }
    /// Create a child VMO covering `[offset, offset + len)` of this one.
    ///
    /// The child inherits this object's name and records `self` as its parent;
    /// the actual clone semantics are delegated to the backing implementation.
    pub fn create_child(
        self: &Arc<Self>,
        resizable: bool,
        offset: usize,
        len: usize,
    ) -> ZxResult<Arc<Self>> {
        let base = KObjectBase::with_signal(Signal::VMO_ZERO_CHILDREN);
        base.set_name(&self.base.name());
        let trait_ = self.trait_.create_child(offset, len)?;
        let child = Arc::new(VmObject {
            base,
            resizable,
            _counter: CountHelper::new(),
            trait_,
            inner: Mutex::new(VmObjectInner {
                parent: Arc::downgrade(self),
                ..VmObjectInner::default()
            }),
        });
        self.add_child(&child);
        Ok(child)
    }
    /// Create a slice child aliasing `[offset, offset + p_size)` of this VMO
    /// without copying pages.
    pub fn create_slice(self: &Arc<Self>, offset: usize, p_size: usize) -> ZxResult<Arc<Self>> {
        let size = roundup_pages(p_size);
        // `size < p_size` detects overflow of the page round-up.
        // NOTE(review): the second clause only clears bit 17 of usize::MAX
        // (32 * PAGE_SIZE is a single bit), which looks like it was meant to
        // be an upper bound such as `usize::MAX - 32 * PAGE_SIZE` — confirm
        // against upstream zircon before relying on this limit.
        if size < p_size || size > usize::MAX & !(32 * PAGE_SIZE) {
            return Err(ZxError::OUT_OF_RANGE);
        }
        let parent_size = self.trait_.len();
        if !page_aligned(offset) {
            return Err(ZxError::INVALID_ARGS);
        }
        // The slice must lie entirely within the parent.
        if offset > parent_size || size > parent_size - offset {
            return Err(ZxError::INVALID_ARGS);
        }
        // Slicing a resizable VMO is not supported.
        if self.resizable {
            return Err(ZxError::NOT_SUPPORTED);
        }
        // Non-cached VMOs may only be sliced when physically contiguous.
        if self.trait_.cache_policy() != CachePolicy::Cached && !self.trait_.is_contiguous() {
            return Err(ZxError::BAD_STATE);
        }
        let child = Arc::new(VmObject {
            base: KObjectBase::with(&self.base.name(), Signal::VMO_ZERO_CHILDREN),
            resizable: false,
            _counter: CountHelper::new(),
            trait_: VMObjectSlice::new(self.trait_.clone(), offset, size),
            inner: Mutex::new(VmObjectInner {
                parent: Arc::downgrade(self),
                ..VmObjectInner::default()
            }),
        });
        self.add_child(&child);
        Ok(child)
    }
    /// Record `child` in the child list, clearing VMO_ZERO_CHILDREN on the
    /// transition from zero to one live child.
    fn add_child(&self, child: &Arc<VmObject>) {
        let mut inner = self.inner.lock();
        // Lazily prune children that have already been destroyed.
        inner.children.retain(|x| x.strong_count() != 0);
        inner.children.push(Arc::downgrade(child));
        if inner.children.len() == 1 {
            self.base.signal_clear(Signal::VMO_ZERO_CHILDREN);
        }
    }
    /// Resize the VMO to `len` bytes, rounded up to whole pages.
    ///
    /// Errors: `OUT_OF_RANGE` if the round-up overflows, `UNAVAILABLE` if the
    /// object was not created resizable.
    pub fn set_len(&self, len: usize) -> ZxResult {
        let size = roundup_pages(len);
        if size < len {
            return Err(ZxError::OUT_OF_RANGE);
        }
        if !self.resizable {
            return Err(ZxError::UNAVAILABLE);
        }
        self.trait_.set_len(size)
    }
    /// Grow the content size to `size`, resizing the backing store if needed,
    /// and zero `[content_size, zero_until_offset)`. Returns the resulting
    /// content size.
    ///
    /// Shrinking is a no-op: if `size` is below the current content size the
    /// current value is returned unchanged. If the backing resize fails, the
    /// content size is capped at the current backing length (best effort).
    pub fn set_content_size_and_resize(
        &self,
        size: usize,
        zero_until_offset: usize,
    ) -> ZxResult<usize> {
        let mut inner = self.inner.lock();
        let content_size = inner.content_size;
        let len = self.trait_.len();
        if size < content_size {
            return Ok(content_size);
        }
        let required_len = roundup_pages(size);
        let new_content_size = if required_len > len && self.set_len(required_len).is_err() {
            len
        } else {
            size
        };
        // Never zero past the (possibly capped) new content size.
        let zero_until_offset = zero_until_offset.min(new_content_size);
        if zero_until_offset > content_size {
            self.trait_
                .zero(content_size, zero_until_offset - content_size)?;
        }
        inner.content_size = new_content_size;
        Ok(new_content_size)
    }
    /// Current user-visible content size in bytes.
    pub fn content_size(&self) -> usize {
        let inner = self.inner.lock();
        inner.content_size
    }
    /// Set the content size without touching the backing store.
    pub fn set_content_size(&self, size: usize) -> ZxResult {
        let mut inner = self.inner.lock();
        inner.content_size = size;
        Ok(())
    }
    /// Build a `VmoInfo` snapshot; backend-specific fields are filled in by
    /// `complete_info`.
    pub fn get_info(&self) -> VmoInfo {
        let inner = self.inner.lock();
        let mut ret = VmoInfo {
            koid: self.base.id,
            name: {
                // Copy the name into a fixed 32-byte field, truncating if long.
                let mut arr = [0u8; 32];
                let name = self.base.name();
                let length = name.len().min(32);
                arr[..length].copy_from_slice(&name.as_bytes()[..length]);
                arr
            },
            size: self.trait_.len() as u64,
            parent_koid: inner.parent.upgrade().map(|p| p.id()).unwrap_or(0),
            num_children: inner.children.len() as u64,
            flags: if self.resizable {
                VmoInfoFlags::RESIZABLE
            } else {
                VmoInfoFlags::empty()
            },
            cache_policy: self.trait_.cache_policy() as u32,
            share_count: inner.mapping_count as u64,
            ..Default::default()
        };
        self.trait_.complete_info(&mut ret);
        ret
    }
    /// Change the cache policy. Only allowed while the VMO has no children
    /// and no mappings; otherwise returns `BAD_STATE`.
    pub fn set_cache_policy(&self, policy: CachePolicy) -> ZxResult {
        let inner = self.inner.lock();
        if !inner.children.is_empty() {
            return Err(ZxError::BAD_STATE);
        }
        if inner.mapping_count != 0 {
            return Err(ZxError::BAD_STATE);
        }
        self.trait_.set_cache_policy(policy)
    }
    /// Register a mapping: bump the local count and forward to the backend.
    pub fn append_mapping(&self, mapping: Weak<VmMapping>) {
        self.inner.lock().mapping_count += 1;
        self.trait_.append_mapping(mapping);
    }
    /// Unregister a mapping: decrement the local count and forward to the
    /// backend. Assumes a matching `append_mapping` happened earlier.
    pub fn remove_mapping(&self, mapping: Weak<VmMapping>) {
        self.inner.lock().mapping_count -= 1;
        self.trait_.remove_mapping(mapping);
    }
    /// Number of mappings currently referring to this VMO.
    pub fn share_count(&self) -> usize {
        let inner = self.inner.lock();
        inner.mapping_count
    }
    /// Whether this VMO was created resizable.
    pub fn is_resizable(&self) -> bool {
        self.resizable
    }
    /// Whether the backing pages are physically contiguous.
    pub fn is_contiguous(&self) -> bool {
        self.trait_.is_contiguous()
    }
}
/// Expose all `VMObjectTrait` methods (read/write/commit/…) directly on
/// `VmObject` by dereferencing to the backing implementation.
impl Deref for VmObject {
    type Target = Arc<dyn VMObjectTrait>;
    fn deref(&self) -> &Self::Target {
        &self.trait_
    }
}
/// On destruction, re-parent this VMO's children onto its own parent and
/// update VMO_ZERO_CHILDREN signals across the affected objects.
impl Drop for VmObject {
    fn drop(&mut self) {
        let mut inner = self.inner.lock();
        // Root objects (no live parent) need no re-parenting or signaling.
        let parent = match inner.parent.upgrade() {
            Some(parent) => parent,
            None => return,
        };
        // Point each surviving child at its grandparent.
        for child in inner.children.iter() {
            if let Some(child) = child.upgrade() {
                child.inner.lock().parent = Arc::downgrade(&parent);
            }
        }
        // Move our children into the parent's child list and drop dead weaks
        // (including the entry for `self`, whose strong count is now 0).
        let mut parent_inner = parent.inner.lock();
        let children = &mut parent_inner.children;
        children.append(&mut inner.children);
        children.retain(|c| c.strong_count() != 0);
        for child in children.iter() {
            // `retain` above kept only entries with a positive strong count,
            // so the upgrade is expected to succeed here.
            let child = child.upgrade().unwrap();
            let mut inner = child.inner.lock();
            inner.children.retain(|c| c.strong_count() != 0);
            if inner.children.is_empty() {
                child.base.signal_set(Signal::VMO_ZERO_CHILDREN);
            }
        }
        if children.is_empty() {
            parent.base.signal_set(Signal::VMO_ZERO_CHILDREN);
        }
    }
}
/// VMO information snapshot returned by `VmObject::get_info`.
///
/// NOTE(review): `#[repr(C)]` and the padding field suggest this mirrors
/// zircon's `zx_info_vmo_t` ABI layout — confirm before changing field order.
#[repr(C)]
#[derive(Default)]
pub struct VmoInfo {
    /// Kernel object id of the VMO.
    koid: KoID,
    /// Object name, zero-padded/truncated to 32 bytes.
    name: [u8; 32],
    /// Current size of the VMO in bytes (page-rounded length).
    size: u64,
    /// Koid of the parent VMO, or 0 if none.
    parent_koid: KoID,
    /// Number of live children.
    num_children: u64,
    /// Left at its default by `get_info` in this file.
    num_mappings: u64,
    /// Filled from the mapping count in `get_info`.
    share_count: u64,
    /// Type/resizable/contiguous/… flags.
    pub flags: VmoInfoFlags,
    padding1: [u8; 4],
    /// Bytes of committed backing memory; filled by `complete_info`.
    committed_bytes: u64,
    /// Rights of the handle used for the query (set by the caller).
    pub rights: Rights,
    /// Cache policy as a raw `CachePolicy` value.
    cache_policy: u32,
}
bitflags! {
    /// Flag bits reported in `VmoInfo::flags`.
    #[derive(Default)]
    pub struct VmoInfoFlags: u32 {
        /// Physically-backed VMO (the absence of TYPE_PAGED).
        const TYPE_PHYSICAL = 0;
        #[allow(clippy::identity_op)]
        /// Pager/heap-backed (paged) VMO.
        const TYPE_PAGED = 1 << 0;
        /// The VMO may be resized.
        const RESIZABLE = 1 << 1;
        /// The VMO is a copy-on-write clone.
        const IS_COW_CLONE = 1 << 2;
        /// Info was obtained via a handle.
        const VIA_HANDLE = 1 << 3;
        /// Info was obtained via a mapping.
        const VIA_MAPPING = 1 << 4;
        /// The VMO is backed by a user pager.
        const PAGER_BACKED = 1 << 5;
        /// The VMO's pages are physically contiguous.
        const CONTIGUOUS = 1 << 6;
    }
}
/// Operation to apply to mappings over a page range when the backing
/// store changes.
#[allow(dead_code)]
#[derive(PartialEq, Eq, Clone, Copy)]
pub(super) enum RangeChangeOp {
    /// Remove the mapping for the range entirely.
    Unmap,
    /// Strip write permission from the range.
    RemoveWrite,
}
#[cfg(test)]
mod tests {
    use super::*;
    /// Shared smoke test: write four bytes at offset 0 and verify they
    /// read back unchanged.
    pub fn read_write(vmo: &VmObject) {
        let data = [0u8, 1, 2, 3];
        vmo.write(0, &data).unwrap();
        let mut readback = [0u8; 4];
        vmo.read(0, &mut readback).unwrap();
        assert_eq!(readback, data);
    }
}