1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
use {
super::*,
crate::object::*,
crate::vm::*,
alloc::{
sync::{Arc, Weak},
vec,
vec::Vec,
},
};
/// A token representing a range of a VMO that has been pinned and mapped
/// into a BTI's IOMMU for device access.
///
/// Dropping the token unpins the underlying pages (paged VMOs only — see
/// the `Drop` impl below).
pub struct PinnedMemoryToken {
    base: KObjectBase,
    // Back-reference to the owning BTI; weak, presumably to avoid a
    // reference cycle (the BTI tracks its PMTs by id via `release_pmt`).
    bti: Weak<BusTransactionInitiator>,
    // The VMO whose range is pinned.
    vmo: Arc<VmObject>,
    // Byte offset of the pinned range within the VMO.
    offset: usize,
    // Length of the pinned range in bytes.
    size: usize,
    // Device virtual addresses of the mapping: one entry per
    // `minimum_contiguity()`-sized chunk for paged VMOs, a single base
    // address for contiguous VMOs (see `map_into_iommu`).
    mapped_addrs: Vec<DevVAddr>,
}
impl_kobject!(PinnedMemoryToken);
impl Drop for PinnedMemoryToken {
    /// Release the pin taken by `create` when the token is destroyed.
    fn drop(&mut self) {
        // Non-paged (physical/contiguous) VMOs are never pinned by
        // `create`, so there is nothing to release for them.
        if !self.vmo.is_paged() {
            return;
        }
        // Invariant: `create` pinned exactly this (offset, size) range,
        // so releasing it here must succeed.
        self.vmo.unpin(self.offset, self.size).unwrap();
    }
}
impl PinnedMemoryToken {
    /// Pin `size` bytes of `vmo` starting at `offset` and map them into
    /// the IOMMU of `bti`.
    ///
    /// For paged VMOs the range is committed and pinned first. If the
    /// IOMMU mapping then fails, the pin is released again before the
    /// error propagates, so no pages are leaked. Non-paged VMOs are not
    /// pinned (mirrors the `Drop` impl, which only unpins paged VMOs).
    pub(crate) fn create(
        bti: &Arc<BusTransactionInitiator>,
        vmo: Arc<VmObject>,
        perms: IommuPerms,
        offset: usize,
        size: usize,
    ) -> ZxResult<Arc<Self>> {
        if vmo.is_paged() {
            vmo.commit(offset, size)?;
            vmo.pin(offset, size)?;
        }
        // BUGFIX: a plain `?` here leaked the pin on mapping failure —
        // `Drop` only runs for fully constructed tokens, so the error
        // path must undo the pin itself.
        let mapped_addrs =
            match Self::map_into_iommu(&bti.iommu(), vmo.clone(), offset, size, perms) {
                Ok(addrs) => addrs,
                Err(err) => {
                    if vmo.is_paged() {
                        vmo.unpin(offset, size).unwrap();
                    }
                    return Err(err);
                }
            };
        Ok(Arc::new(PinnedMemoryToken {
            base: KObjectBase::new(),
            bti: Arc::downgrade(bti),
            vmo,
            offset,
            size,
            mapped_addrs,
        }))
    }

    /// Map `[offset, offset + size)` of `vmo` into `iommu` and return the
    /// device virtual addresses of the mapping.
    ///
    /// Contiguous VMOs are mapped in one call and yield a single base
    /// address; otherwise the range is mapped piecewise and one address
    /// is recorded per `minimum_contiguity()`-sized chunk.
    fn map_into_iommu(
        iommu: &Arc<Iommu>,
        vmo: Arc<VmObject>,
        offset: usize,
        size: usize,
        perms: IommuPerms,
    ) -> ZxResult<Vec<DevVAddr>> {
        if vmo.is_contiguous() {
            let (vaddr, _mapped_len) = iommu.map_contiguous(vmo, offset, size, perms)?;
            return Ok(vec![vaddr]);
        }
        // Callers must request whole minimum-contiguity chunks.
        assert_eq!(size % iommu.minimum_contiguity(), 0);
        let mut mapped_addrs: Vec<DevVAddr> = Vec::new();
        let mut remaining = size;
        let mut cur_offset = offset;
        while remaining > 0 {
            // `map` may cover less than `remaining`; loop until done.
            let (mut vaddr, mapped_len) =
                iommu.map(vmo.clone(), cur_offset, remaining, perms)?;
            assert_eq!(mapped_len % iommu.minimum_contiguity(), 0);
            // Record one device address per minimum-contiguity chunk.
            for _ in 0..mapped_len / iommu.minimum_contiguity() {
                mapped_addrs.push(vaddr);
                vaddr += iommu.minimum_contiguity();
            }
            remaining -= mapped_len;
            cur_offset += mapped_len;
        }
        Ok(mapped_addrs)
    }

    /// Encode the device addresses of the pinned range.
    ///
    /// * `compress_results` — one address per minimum-contiguity chunk;
    /// * `contiguous` — a single base address, or `INVALID_ARGS` when the
    ///   VMO is not physically contiguous;
    /// * neither — one address per `PAGE_SIZE` page.
    ///
    /// Returns `BAD_STATE` if the owning BTI no longer exists.
    pub fn encode_addrs(
        &self,
        compress_results: bool,
        contiguous: bool,
    ) -> ZxResult<Vec<DevVAddr>> {
        // BUGFIX: the BTI may already have been destroyed; report an
        // error instead of panicking on the dangling weak reference.
        let iommu = self.bti.upgrade().ok_or(ZxError::BAD_STATE)?.iommu();
        if compress_results {
            if self.vmo.is_contiguous() {
                // Contiguous VMOs store only a single base address;
                // expand it into per-chunk addresses.
                let num_addrs = ceil(self.size, iommu.minimum_contiguity());
                let min_contig = iommu.minimum_contiguity();
                let base = self.mapped_addrs[0];
                Ok((0..num_addrs).map(|i| base + min_contig * i).collect())
            } else {
                // Paged VMOs already store one address per chunk.
                Ok(self.mapped_addrs.clone())
            }
        } else if contiguous {
            if !self.vmo.is_contiguous() {
                Err(ZxError::INVALID_ARGS)
            } else {
                Ok(vec![self.mapped_addrs[0]])
            }
        } else {
            // Per-page encoding: walk each mapped chunk a page at a time,
            // never emitting more than `num_pages` addresses in total.
            let min_contig = if self.vmo.is_contiguous() {
                self.size
            } else {
                iommu.minimum_contiguity()
            };
            let num_pages = self.size / PAGE_SIZE;
            let mut encoded_addrs: Vec<DevVAddr> = Vec::with_capacity(num_pages);
            for base in &self.mapped_addrs {
                let mut addr = *base;
                while addr < base + min_contig && encoded_addrs.len() < num_pages {
                    encoded_addrs.push(addr);
                    addr += PAGE_SIZE;
                }
            }
            Ok(encoded_addrs)
        }
    }

    /// Ask the owning BTI to release this token (by kernel-object id).
    /// The actual page unpin happens in `Drop` once the last strong
    /// reference goes away. A no-op if the BTI is already gone.
    pub fn unpin(&self) {
        if let Some(bti) = self.bti.upgrade() {
            bti.release_pmt(self.base.id);
        }
    }
}