vm_memory/guest_memory.rs
1// Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved.
2//
3// Portions Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
4//
5// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
6// Use of this source code is governed by a BSD-style license that can be
7// found in the LICENSE-BSD-3-Clause file.
8//
9// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
10
11//! Traits to track and access the physical memory of the guest.
12//!
13//! To make the abstraction as generic as possible, all the core traits declared here only define
14//! methods to access guest's memory, and never define methods to manage (create, delete, insert,
15//! remove etc) guest's memory. This way, the guest memory consumers (virtio device drivers,
16//! vhost drivers and boot loaders etc) may be decoupled from the guest memory provider (typically
17//! a hypervisor).
18//!
19//! Traits and Structs
20//! - [`GuestAddress`](struct.GuestAddress.html): represents a guest physical address (GPA).
21//! - [`MemoryRegionAddress`](struct.MemoryRegionAddress.html): represents an offset inside a
22//! region.
//! - [`GuestMemoryRegion`](trait.GuestMemoryRegion.html): represents a continuous region of the
//! guest's physical memory.
//! - [`GuestMemory`](trait.GuestMemory.html): represents a collection of `GuestMemoryRegion`
//! objects.
//! The main responsibilities of the `GuestMemory` trait are:
//! - hide the details of accessing guest physical addresses.
//! - map a request address to a `GuestMemoryRegion` object and relay the request to it.
//! - handle cases where an access request spans two or more `GuestMemoryRegion` objects.
31//!
32//! Whenever a collection of `GuestMemoryRegion` objects is mutable,
33//! [`GuestAddressSpace`](trait.GuestAddressSpace.html) should be implemented
34//! for clients to obtain a [`GuestMemory`] reference or smart pointer.
35//!
36//! The `GuestMemoryRegion` trait has an associated `B: Bitmap` type which is used to handle
37//! dirty bitmap tracking. Backends are free to define the granularity (or whether tracking is
38//! actually performed at all). Those that do implement tracking functionality are expected to
39//! ensure the correctness of the underlying `Bytes` implementation. The user has to explicitly
40//! record (using the handle returned by `GuestRegionMmap::bitmap`) write accesses performed
//! via pointers, references, or slices returned by methods of `GuestMemory`, `GuestMemoryRegion`,
42//! `VolatileSlice`, `VolatileRef`, or `VolatileArrayRef`.
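//!
//! For instance, here is a minimal sketch of accessing guest memory through the
//! `Bytes<GuestAddress>` interface, assuming the `backend-mmap` feature is enabled so that the
//! `GuestMemoryMmap` backend used below is available:
//!
//! ```
//! # #[cfg(feature = "backend-mmap")]
//! # {
//! # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};
//! // One 0x400-byte region starting at guest physical address 0x1000.
//! let gm = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x1000), 0x400)])
//!     .expect("Could not create guest memory");
//!
//! // Write an object at a guest physical address and read it back.
//! gm.write_obj(0x1122_3344u32, GuestAddress(0x1010))
//!     .expect("Could not write object");
//! assert_eq!(
//!     gm.read_obj::<u32>(GuestAddress(0x1010))
//!         .expect("Could not read object"),
//!     0x1122_3344u32
//! );
//! # }
//! ```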
43
44use std::convert::From;
45use std::fmt::{self, Display};
46use std::fs::File;
47use std::io::{self, Read, Write};
48use std::ops::{BitAnd, BitOr, Deref};
49use std::rc::Rc;
50use std::sync::atomic::Ordering;
51use std::sync::Arc;
52
53use crate::address::{Address, AddressValue};
54use crate::bitmap::{Bitmap, BS, MS};
55use crate::bytes::{AtomicAccess, Bytes};
56use crate::volatile_memory::{self, VolatileSlice};
57
58static MAX_ACCESS_CHUNK: usize = 4096;
59
60/// Errors associated with handling guest memory accesses.
61#[allow(missing_docs)]
62#[derive(Debug)]
63pub enum Error {
64 /// Failure in finding a guest address in any memory regions mapped by this guest.
65 InvalidGuestAddress(GuestAddress),
66 /// Couldn't read/write from the given source.
67 IOError(io::Error),
68 /// Incomplete read or write.
69 PartialBuffer { expected: usize, completed: usize },
70 /// Requested backend address is out of range.
71 InvalidBackendAddress,
72 /// Host virtual address not available.
73 HostAddressNotAvailable,
74}
75
76impl From<volatile_memory::Error> for Error {
77 fn from(e: volatile_memory::Error) -> Self {
78 match e {
79 volatile_memory::Error::OutOfBounds { .. } => Error::InvalidBackendAddress,
80 volatile_memory::Error::Overflow { .. } => Error::InvalidBackendAddress,
81 volatile_memory::Error::TooBig { .. } => Error::InvalidBackendAddress,
82 volatile_memory::Error::Misaligned { .. } => Error::InvalidBackendAddress,
83 volatile_memory::Error::IOError(e) => Error::IOError(e),
84 volatile_memory::Error::PartialBuffer {
85 expected,
86 completed,
87 } => Error::PartialBuffer {
88 expected,
89 completed,
90 },
91 }
92 }
93}
94
95/// Result of guest memory operations.
96pub type Result<T> = std::result::Result<T, Error>;
97
98impl std::error::Error for Error {}
99
100impl Display for Error {
101 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
102 write!(f, "Guest memory error: ")?;
103 match self {
104 Error::InvalidGuestAddress(addr) => {
105 write!(f, "invalid guest address {}", addr.raw_value())
106 }
107 Error::IOError(error) => write!(f, "{}", error),
108 Error::PartialBuffer {
109 expected,
110 completed,
111 } => write!(
112 f,
113 "only used {} bytes in {} long buffer",
114 completed, expected,
115 ),
116 Error::InvalidBackendAddress => write!(f, "invalid backend address"),
117 Error::HostAddressNotAvailable => write!(f, "host virtual address not available"),
118 }
119 }
120}
121
122/// Represents a guest physical address (GPA).
123///
124/// # Notes:
/// On ARM64, a 32-bit hypervisor may be used to support a 64-bit guest. For simplicity,
/// `u64` is used to store the raw value no matter whether the guest is a 32-bit or 64-bit
/// virtual machine.
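///
/// # Examples
///
/// A minimal sketch of the arithmetic helpers available through the `Address` trait:
///
/// ```
/// use vm_memory::{Address, GuestAddress};
///
/// let addr = GuestAddress(0x1000);
/// assert_eq!(addr.raw_value(), 0x1000);
/// assert_eq!(addr.checked_add(0x100), Some(GuestAddress(0x1100)));
/// assert_eq!(addr.checked_offset_from(GuestAddress(0x800)), Some(0x800));
/// ```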
128#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd)]
129pub struct GuestAddress(pub u64);
130impl_address_ops!(GuestAddress, u64);
131
132/// Represents an offset inside a region.
133#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd)]
134pub struct MemoryRegionAddress(pub u64);
135impl_address_ops!(MemoryRegionAddress, u64);
136
137/// Type of the raw value stored in a `GuestAddress` object.
138pub type GuestUsize = <GuestAddress as AddressValue>::V;
139
140/// Represents the start point within a `File` that backs a `GuestMemoryRegion`.
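///
/// # Examples
///
/// A minimal sketch (it relies on `vmm_sys_util` for the temporary file, just like the unit
/// tests in this module do):
///
/// ```
/// # use std::sync::Arc;
/// # use vmm_sys_util::tempfile::TempFile;
/// use vm_memory::FileOffset;
///
/// let file = TempFile::new().unwrap().into_file();
/// let file_offset = FileOffset::new(file, 0x1000);
/// assert_eq!(file_offset.start(), 0x1000);
///
/// // The same file can back several regions at different offsets.
/// let other = FileOffset::from_arc(Arc::clone(file_offset.arc()), 0x2000);
/// assert_eq!(other.start(), 0x2000);
/// ```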
141#[derive(Clone, Debug)]
142pub struct FileOffset {
143 file: Arc<File>,
144 start: u64,
145}
146
147impl FileOffset {
148 /// Creates a new `FileOffset` object.
149 pub fn new(file: File, start: u64) -> Self {
150 FileOffset::from_arc(Arc::new(file), start)
151 }
152
    /// Creates a new `FileOffset` object based on an existing `Arc<File>`.
154 pub fn from_arc(file: Arc<File>, start: u64) -> Self {
155 FileOffset { file, start }
156 }
157
158 /// Returns a reference to the inner `File` object.
159 pub fn file(&self) -> &File {
160 self.file.as_ref()
161 }
162
    /// Returns a reference to the inner `Arc<File>` object.
164 pub fn arc(&self) -> &Arc<File> {
165 &self.file
166 }
167
168 /// Returns the start offset within the file.
169 pub fn start(&self) -> u64 {
170 self.start
171 }
172}
173
174/// Represents a continuous region of guest physical memory.
175#[allow(clippy::len_without_is_empty)]
176pub trait GuestMemoryRegion: Bytes<MemoryRegionAddress, E = Error> {
177 /// Type used for dirty memory tracking.
178 type B: Bitmap;
179
180 /// Returns the size of the region.
181 fn len(&self) -> GuestUsize;
182
183 /// Returns the minimum (inclusive) address managed by the region.
184 fn start_addr(&self) -> GuestAddress;
185
186 /// Returns the maximum (inclusive) address managed by the region.
187 fn last_addr(&self) -> GuestAddress {
188 // unchecked_add is safe as the region bounds were checked when it was created.
189 self.start_addr().unchecked_add(self.len() - 1)
190 }
191
192 /// Borrow the associated `Bitmap` object.
193 fn bitmap(&self) -> &Self::B;
194
195 /// Returns the given address if it is within this region.
196 fn check_address(&self, addr: MemoryRegionAddress) -> Option<MemoryRegionAddress> {
197 if self.address_in_range(addr) {
198 Some(addr)
199 } else {
200 None
201 }
202 }
203
204 /// Returns `true` if the given address is within this region.
205 fn address_in_range(&self, addr: MemoryRegionAddress) -> bool {
206 addr.raw_value() < self.len()
207 }
208
209 /// Returns the address plus the offset if it is in this region.
210 fn checked_offset(
211 &self,
212 base: MemoryRegionAddress,
213 offset: usize,
214 ) -> Option<MemoryRegionAddress> {
215 base.checked_add(offset as u64)
216 .and_then(|addr| self.check_address(addr))
217 }
218
219 /// Tries to convert an absolute address to a relative address within this region.
220 ///
221 /// Returns `None` if `addr` is out of the bounds of this region.
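    ///
    /// # Examples (uses the `backend-mmap` feature)
    ///
    /// A minimal sketch, looking up a region first and then translating an absolute address:
    ///
    /// ```
    /// # #[cfg(feature = "backend-mmap")]
    /// # {
    /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap, GuestMemoryRegion,
    /// #     MemoryRegionAddress};
    /// let mem = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x1000), 0x400)]).unwrap();
    /// let region = mem.find_region(GuestAddress(0x1200)).unwrap();
    ///
    /// assert_eq!(
    ///     region.to_region_addr(GuestAddress(0x1200)),
    ///     Some(MemoryRegionAddress(0x200))
    /// );
    /// // Addresses outside the region cannot be translated.
    /// assert_eq!(region.to_region_addr(GuestAddress(0x1400)), None);
    /// # }
    /// ```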
222 fn to_region_addr(&self, addr: GuestAddress) -> Option<MemoryRegionAddress> {
223 addr.checked_offset_from(self.start_addr())
224 .and_then(|offset| self.check_address(MemoryRegionAddress(offset)))
225 }
226
227 /// Returns the host virtual address corresponding to the region address.
228 ///
    /// Some [`GuestMemory`](trait.GuestMemory.html) implementations, like `GuestMemoryMmap`,
    /// have the capability to mmap the guest address range into the host virtual address space
    /// for direct access, so the corresponding host virtual address may be passed to other
    /// subsystems.
    ///
    /// # Note
    /// The underlying guest memory is not protected from memory aliasing, which breaks the
    /// Rust memory safety model. It's the caller's responsibility to ensure that there are no
    /// concurrent accesses to the underlying guest memory.
237 fn get_host_address(&self, _addr: MemoryRegionAddress) -> Result<*mut u8> {
238 Err(Error::HostAddressNotAvailable)
239 }
240
241 /// Returns information regarding the file and offset backing this memory region.
242 fn file_offset(&self) -> Option<&FileOffset> {
243 None
244 }
245
246 /// Returns a slice corresponding to the data in the region.
247 ///
248 /// Returns `None` if the region does not support slice-based access.
249 ///
250 /// # Safety
251 ///
252 /// Unsafe because of possible aliasing.
253 unsafe fn as_slice(&self) -> Option<&[u8]> {
254 None
255 }
256
257 /// Returns a mutable slice corresponding to the data in the region.
258 ///
259 /// Returns `None` if the region does not support slice-based access.
260 ///
261 /// # Safety
262 ///
263 /// Unsafe because of possible aliasing. Mutable accesses performed through the
264 /// returned slice are not visible to the dirty bitmap tracking functionality of
265 /// the region, and must be manually recorded using the associated bitmap object.
266 unsafe fn as_mut_slice(&self) -> Option<&mut [u8]> {
267 None
268 }
269
270 /// Returns a [`VolatileSlice`](struct.VolatileSlice.html) of `count` bytes starting at
271 /// `offset`.
272 #[allow(unused_variables)]
273 fn get_slice(
274 &self,
275 offset: MemoryRegionAddress,
276 count: usize,
277 ) -> Result<VolatileSlice<BS<Self::B>>> {
278 Err(Error::HostAddressNotAvailable)
279 }
280
281 /// Gets a slice of memory for the entire region that supports volatile access.
282 ///
283 /// # Examples (uses the `backend-mmap` feature)
284 ///
285 /// ```
286 /// # #[cfg(feature = "backend-mmap")]
287 /// # {
288 /// # use vm_memory::{GuestAddress, MmapRegion, GuestRegionMmap, GuestMemoryRegion};
289 /// # use vm_memory::volatile_memory::{VolatileMemory, VolatileSlice, VolatileRef};
290 /// #
291 /// let region = MmapRegion::<()>::new(0x400).expect("Could not create mmap region");
292 /// let region =
293 /// GuestRegionMmap::new(region, GuestAddress(0x0)).expect("Could not create guest memory");
294 /// let slice = region
295 /// .as_volatile_slice()
296 /// .expect("Could not get volatile slice");
297 ///
298 /// let v = 42u32;
299 /// let r = slice
300 /// .get_ref::<u32>(0x200)
301 /// .expect("Could not get reference");
302 /// r.store(v);
303 /// assert_eq!(r.load(), v);
304 /// # }
305 /// ```
306 fn as_volatile_slice(&self) -> Result<VolatileSlice<BS<Self::B>>> {
307 self.get_slice(MemoryRegionAddress(0), self.len() as usize)
308 }
309
    /// Shows whether the region is backed by hugetlbfs.
    ///
    /// Returns `Some(true)` if the region is backed by hugetlbfs; `None` means that no
    /// information is available.
313 ///
314 /// # Examples (uses the `backend-mmap` feature)
315 ///
316 /// ```
317 /// # #[cfg(feature = "backend-mmap")]
318 /// # {
319 /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap, GuestRegionMmap};
320 /// let addr = GuestAddress(0x1000);
321 /// let mem = GuestMemoryMmap::<()>::from_ranges(&[(addr, 0x1000)]).unwrap();
322 /// let r = mem.find_region(addr).unwrap();
323 /// assert_eq!(r.is_hugetlbfs(), None);
324 /// # }
325 /// ```
326 #[cfg(target_os = "linux")]
327 fn is_hugetlbfs(&self) -> Option<bool> {
328 None
329 }
330}
331
332/// `GuestAddressSpace` provides a way to retrieve a `GuestMemory` object.
/// The vm-memory crate already provides trivial implementations for
334/// references to `GuestMemory` or reference-counted `GuestMemory` objects,
335/// but the trait can also be implemented by any other struct in order
336/// to provide temporary access to a snapshot of the memory map.
337///
338/// In order to support generic mutable memory maps, devices (or other things
339/// that access memory) should store the memory as a `GuestAddressSpace<M>`.
340/// This example shows that references can also be used as the `GuestAddressSpace`
341/// implementation, providing a zero-cost abstraction whenever immutable memory
342/// maps are sufficient.
343///
344/// # Examples (uses the `backend-mmap` and `backend-atomic` features)
345///
346/// ```
347/// # #[cfg(feature = "backend-mmap")]
348/// # {
349/// # use std::sync::Arc;
350/// # use vm_memory::{GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryMmap};
351/// #
352/// pub struct VirtioDevice<AS: GuestAddressSpace> {
353/// mem: Option<AS>,
354/// }
355///
356/// impl<AS: GuestAddressSpace> VirtioDevice<AS> {
357/// fn new() -> Self {
358/// VirtioDevice { mem: None }
359/// }
360/// fn activate(&mut self, mem: AS) {
361/// self.mem = Some(mem)
362/// }
363/// }
364///
365/// fn get_mmap() -> GuestMemoryMmap<()> {
366/// let start_addr = GuestAddress(0x1000);
367/// GuestMemoryMmap::from_ranges(&vec![(start_addr, 0x400)])
368/// .expect("Could not create guest memory")
369/// }
370///
371/// // Using `VirtioDevice` with an immutable GuestMemoryMmap:
372/// let mut for_immutable_mmap = VirtioDevice::<&GuestMemoryMmap<()>>::new();
373/// let mmap = get_mmap();
374/// for_immutable_mmap.activate(&mmap);
375/// let mut another = VirtioDevice::<&GuestMemoryMmap<()>>::new();
376/// another.activate(&mmap);
377///
378/// # #[cfg(feature = "backend-atomic")]
379/// # {
380/// # use vm_memory::GuestMemoryAtomic;
381/// // Using `VirtioDevice` with a mutable GuestMemoryMmap:
382/// let mut for_mutable_mmap = VirtioDevice::<GuestMemoryAtomic<GuestMemoryMmap<()>>>::new();
383/// let atomic = GuestMemoryAtomic::new(get_mmap());
384/// for_mutable_mmap.activate(atomic.clone());
385/// let mut another = VirtioDevice::<GuestMemoryAtomic<GuestMemoryMmap<()>>>::new();
386/// another.activate(atomic.clone());
387///
388/// // atomic can be modified here...
389/// # }
390/// # }
391/// ```
392pub trait GuestAddressSpace {
393 /// The type that will be used to access guest memory.
394 type M: GuestMemory;
395
396 /// A type that provides access to the memory.
397 type T: Clone + Deref<Target = Self::M>;
398
399 /// Return an object (e.g. a reference or guard) that can be used
400 /// to access memory through this address space. The object provides
401 /// a consistent snapshot of the memory map.
402 fn memory(&self) -> Self::T;
403}
404
405impl<M: GuestMemory> GuestAddressSpace for &M {
406 type M = M;
407 type T = Self;
408
409 fn memory(&self) -> Self {
410 self
411 }
412}
413
414impl<M: GuestMemory> GuestAddressSpace for Rc<M> {
415 type M = M;
416 type T = Self;
417
418 fn memory(&self) -> Self {
419 self.clone()
420 }
421}
422
423impl<M: GuestMemory> GuestAddressSpace for Arc<M> {
424 type M = M;
425 type T = Self;
426
427 fn memory(&self) -> Self {
428 self.clone()
429 }
430}
431
/// Lifetime-generic associated iterators. The actual iterator type is defined through the
/// associated item `Iter`, for example:
434///
435/// ```
436/// # use std::marker::PhantomData;
437/// # use vm_memory::guest_memory::GuestMemoryIterator;
438/// #
439/// // Declare the relevant Region and Memory types
440/// struct MyGuestRegion {/* fields omitted */}
441/// struct MyGuestMemory {/* fields omitted */}
442///
443/// // Make an Iterator type to iterate over the Regions
444/// # /*
445/// struct MyGuestMemoryIter<'a> {/* fields omitted */}
446/// # */
447/// # struct MyGuestMemoryIter<'a> {
448/// # _marker: PhantomData<&'a MyGuestRegion>,
449/// # }
450/// impl<'a> Iterator for MyGuestMemoryIter<'a> {
451/// type Item = &'a MyGuestRegion;
452/// fn next(&mut self) -> Option<&'a MyGuestRegion> {
453/// // ...
454/// # None
455/// }
456/// }
457///
458/// // Associate the Iter type with the Memory type
459/// impl<'a> GuestMemoryIterator<'a, MyGuestRegion> for MyGuestMemory {
460/// type Iter = MyGuestMemoryIter<'a>;
461/// }
462/// ```
463pub trait GuestMemoryIterator<'a, R: 'a> {
464 /// Type of the `iter` method's return value.
465 type Iter: Iterator<Item = &'a R>;
466}
467
468/// `GuestMemory` represents a container for an *immutable* collection of
469/// `GuestMemoryRegion` objects. `GuestMemory` provides the `Bytes<GuestAddress>`
470/// trait to hide the details of accessing guest memory by physical address.
471/// Interior mutability is not allowed for implementations of `GuestMemory` so
472/// that they always provide a consistent view of the memory map.
473///
/// The main tasks of the `GuestMemory` trait are:
/// - map a request address to a `GuestMemoryRegion` object and relay the request to it.
/// - handle cases where an access request spans two or more `GuestMemoryRegion` objects.
477pub trait GuestMemory {
478 /// Type of objects hosted by the address space.
479 type R: GuestMemoryRegion;
480
481 /// Lifetime generic associated iterators. Usually this is just `Self`.
482 type I: for<'a> GuestMemoryIterator<'a, Self::R>;
483
484 /// Returns the number of regions in the collection.
485 fn num_regions(&self) -> usize;
486
487 /// Returns the region containing the specified address or `None`.
488 fn find_region(&self, addr: GuestAddress) -> Option<&Self::R>;
489
490 /// Perform the specified action on each region.
491 ///
    /// It only walks the children of the current region and does not step into sub-regions.
493 #[deprecated(since = "0.6.0", note = "Use `.iter()` instead")]
494 fn with_regions<F, E>(&self, cb: F) -> std::result::Result<(), E>
495 where
496 F: Fn(usize, &Self::R) -> std::result::Result<(), E>,
497 {
498 for (index, region) in self.iter().enumerate() {
499 cb(index, region)?;
500 }
501 Ok(())
502 }
503
504 /// Perform the specified action on each region mutably.
505 ///
    /// It only walks the children of the current region and does not step into sub-regions.
507 #[deprecated(since = "0.6.0", note = "Use `.iter()` instead")]
508 fn with_regions_mut<F, E>(&self, mut cb: F) -> std::result::Result<(), E>
509 where
510 F: FnMut(usize, &Self::R) -> std::result::Result<(), E>,
511 {
512 for (index, region) in self.iter().enumerate() {
513 cb(index, region)?;
514 }
515 Ok(())
516 }
517
518 /// Gets an iterator over the entries in the collection.
519 ///
520 /// # Examples
521 ///
522 /// * Compute the total size of all memory mappings in KB by iterating over the memory regions
    ///   and dividing their sizes by 1024, then summing up the values in an accumulator. (uses the
524 /// `backend-mmap` feature)
525 ///
526 /// ```
527 /// # #[cfg(feature = "backend-mmap")]
528 /// # {
529 /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryRegion, GuestMemoryMmap};
530 /// #
531 /// let start_addr1 = GuestAddress(0x0);
532 /// let start_addr2 = GuestAddress(0x400);
533 /// let gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr1, 1024), (start_addr2, 2048)])
534 /// .expect("Could not create guest memory");
535 ///
536 /// let total_size = gm
537 /// .iter()
538 /// .map(|region| region.len() / 1024)
539 /// .fold(0, |acc, size| acc + size);
540 /// assert_eq!(3, total_size)
541 /// # }
542 /// ```
543 fn iter(&self) -> <Self::I as GuestMemoryIterator<Self::R>>::Iter;
544
545 /// Applies two functions, specified as callbacks, on the inner memory regions.
546 ///
547 /// # Arguments
548 /// * `init` - Starting value of the accumulator for the `foldf` function.
549 /// * `mapf` - "Map" function, applied to all the inner memory regions. It returns an array of
550 /// the same size as the memory regions array, containing the function's results
551 /// for each region.
552 /// * `foldf` - "Fold" function, applied to the array returned by `mapf`. It acts as an
    ///             operator, applying itself to the `init` value and to each subsequent element
554 /// in the array returned by `mapf`.
555 ///
556 /// # Examples
557 ///
558 /// * Compute the total size of all memory mappings in KB by iterating over the memory regions
    ///   and dividing their sizes by 1024, then summing up the values in an accumulator. (uses the
560 /// `backend-mmap` feature)
561 ///
562 /// ```
563 /// # #[cfg(feature = "backend-mmap")]
564 /// # {
565 /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryRegion, GuestMemoryMmap};
566 /// #
567 /// let start_addr1 = GuestAddress(0x0);
568 /// let start_addr2 = GuestAddress(0x400);
569 /// let gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr1, 1024), (start_addr2, 2048)])
570 /// .expect("Could not create guest memory");
571 ///
572 /// let total_size = gm.map_and_fold(0, |(_, region)| region.len() / 1024, |acc, size| acc + size);
573 /// assert_eq!(3, total_size)
574 /// # }
575 /// ```
576 #[deprecated(since = "0.6.0", note = "Use `.iter()` instead")]
577 fn map_and_fold<F, G, T>(&self, init: T, mapf: F, foldf: G) -> T
578 where
579 F: Fn((usize, &Self::R)) -> T,
580 G: Fn(T, T) -> T,
581 {
582 self.iter().enumerate().map(mapf).fold(init, foldf)
583 }
584
585 /// Returns the maximum (inclusive) address managed by the
586 /// [`GuestMemory`](trait.GuestMemory.html).
587 ///
588 /// # Examples (uses the `backend-mmap` feature)
589 ///
590 /// ```
591 /// # #[cfg(feature = "backend-mmap")]
592 /// # {
593 /// # use vm_memory::{Address, GuestAddress, GuestMemory, GuestMemoryMmap};
594 /// #
595 /// let start_addr = GuestAddress(0x1000);
596 /// let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)])
597 /// .expect("Could not create guest memory");
598 ///
599 /// assert_eq!(start_addr.checked_add(0x3ff), Some(gm.last_addr()));
600 /// # }
601 /// ```
602 fn last_addr(&self) -> GuestAddress {
603 self.iter()
604 .map(GuestMemoryRegion::last_addr)
605 .fold(GuestAddress(0), std::cmp::max)
606 }
607
608 /// Tries to convert an absolute address to a relative address within the corresponding region.
609 ///
610 /// Returns `None` if `addr` isn't present within the memory of the guest.
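    ///
    /// # Examples (uses the `backend-mmap` feature)
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// # #[cfg(feature = "backend-mmap")]
    /// # {
    /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap, GuestMemoryRegion,
    /// #     MemoryRegionAddress};
    /// let mem = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x1000), 0x400)]).unwrap();
    ///
    /// let (region, region_addr) = mem.to_region_addr(GuestAddress(0x1200)).unwrap();
    /// assert_eq!(region.start_addr(), GuestAddress(0x1000));
    /// assert_eq!(region_addr, MemoryRegionAddress(0x200));
    ///
    /// assert!(mem.to_region_addr(GuestAddress(0x2000)).is_none());
    /// # }
    /// ```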
611 fn to_region_addr(&self, addr: GuestAddress) -> Option<(&Self::R, MemoryRegionAddress)> {
612 self.find_region(addr)
613 .map(|r| (r, r.to_region_addr(addr).unwrap()))
614 }
615
616 /// Returns `true` if the given address is present within the memory of the guest.
617 fn address_in_range(&self, addr: GuestAddress) -> bool {
618 self.find_region(addr).is_some()
619 }
620
621 /// Returns the given address if it is present within the memory of the guest.
622 fn check_address(&self, addr: GuestAddress) -> Option<GuestAddress> {
623 self.find_region(addr).map(|_| addr)
624 }
625
    /// Checks whether the address range `[base, base + len)` is completely covered by the
    /// regions of this `GuestMemory` object.
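    ///
    /// # Examples (uses the `backend-mmap` feature)
    ///
    /// A minimal sketch; the second check fails because the last byte of the range falls
    /// outside the single region:
    ///
    /// ```
    /// # #[cfg(feature = "backend-mmap")]
    /// # {
    /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap};
    /// let mem = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x1000), 0x400)]).unwrap();
    ///
    /// assert!(mem.check_range(GuestAddress(0x1000), 0x400));
    /// assert!(!mem.check_range(GuestAddress(0x1000), 0x401));
    /// # }
    /// ```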
627 fn check_range(&self, base: GuestAddress, len: usize) -> bool {
628 match self.try_access(len, base, |_, count, _, _| -> Result<usize> { Ok(count) }) {
629 Ok(count) => count == len,
630 _ => false,
631 }
632 }
633
634 /// Returns the address plus the offset if it is present within the memory of the guest.
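    ///
    /// # Examples (uses the `backend-mmap` feature)
    ///
    /// A minimal sketch; note that the resulting address must itself fall inside a region:
    ///
    /// ```
    /// # #[cfg(feature = "backend-mmap")]
    /// # {
    /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap};
    /// let start = GuestAddress(0x1000);
    /// let mem = GuestMemoryMmap::<()>::from_ranges(&[(start, 0x400)]).unwrap();
    ///
    /// assert_eq!(mem.checked_offset(start, 0x200), Some(GuestAddress(0x1200)));
    /// // 0x1000 + 0x400 is the first address *past* the region, so the lookup fails.
    /// assert_eq!(mem.checked_offset(start, 0x400), None);
    /// # }
    /// ```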
635 fn checked_offset(&self, base: GuestAddress, offset: usize) -> Option<GuestAddress> {
636 base.checked_add(offset as u64)
637 .and_then(|addr| self.check_address(addr))
638 }
639
640 /// Invokes callback `f` to handle data in the address range `[addr, addr + count)`.
641 ///
    /// The address range `[addr, addr + count)` may span more than one
    /// [`GuestMemoryRegion`](trait.GuestMemoryRegion.html) object, or even have holes in it.
    /// So [`try_access()`](trait.GuestMemory.html#method.try_access) invokes the callback `f`
    /// for each [`GuestMemoryRegion`](trait.GuestMemoryRegion.html) object involved and returns:
    /// - the error code returned by the callback `f`
    /// - the size of the data already handled when encountering the first hole
    /// - the size of the data already handled when the whole range has been handled
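    ///
    /// # Examples (uses the `backend-mmap` feature)
    ///
    /// A minimal sketch with two regions separated by a hole; the traversal stops as soon as the
    /// hole is reached, reporting how many bytes were handled up to that point:
    ///
    /// ```
    /// # #[cfg(feature = "backend-mmap")]
    /// # {
    /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap};
    /// let mem = GuestMemoryMmap::<()>::from_ranges(&[
    ///     (GuestAddress(0x0), 0x400),
    ///     (GuestAddress(0x800), 0x400),
    /// ])
    /// .unwrap();
    ///
    /// // Ask for 0x800 bytes starting in the middle of the first region.
    /// let handled = mem
    ///     .try_access(0x800, GuestAddress(0x200), |_, count, _, _| Ok(count))
    ///     .unwrap();
    /// // Only the 0x200 bytes up to the end of the first region were handled.
    /// assert_eq!(handled, 0x200);
    /// # }
    /// ```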
649 fn try_access<F>(&self, count: usize, addr: GuestAddress, mut f: F) -> Result<usize>
650 where
651 F: FnMut(usize, usize, MemoryRegionAddress, &Self::R) -> Result<usize>,
652 {
653 let mut cur = addr;
654 let mut total = 0;
655 while let Some(region) = self.find_region(cur) {
656 let start = region.to_region_addr(cur).unwrap();
657 let cap = region.len() - start.raw_value();
658 let len = std::cmp::min(cap, (count - total) as GuestUsize);
659 match f(total, len as usize, start, region) {
660 // no more data
661 Ok(0) => return Ok(total),
662 // made some progress
663 Ok(len) => {
664 total += len;
665 if total == count {
666 break;
667 }
668 cur = match cur.overflowing_add(len as GuestUsize) {
669 (GuestAddress(0), _) => GuestAddress(0),
670 (result, false) => result,
671 (_, true) => panic!("guest address overflow"),
672 }
673 }
674 // error happened
675 e => return e,
676 }
677 }
678 if total == 0 {
679 Err(Error::InvalidGuestAddress(addr))
680 } else {
681 Ok(total)
682 }
683 }
684
685 /// Get the host virtual address corresponding to the guest address.
686 ///
687 /// Some [`GuestMemory`](trait.GuestMemory.html) implementations, like `GuestMemoryMmap`,
688 /// have the capability to mmap the guest address range into virtual address space of the host
689 /// for direct access, so the corresponding host virtual address may be passed to other
690 /// subsystems.
691 ///
692 /// # Note
693 /// The underlying guest memory is not protected from memory aliasing, which breaks the
    /// Rust memory safety model. It's the caller's responsibility to ensure that there are no
    /// concurrent accesses to the underlying guest memory.
696 ///
697 /// # Arguments
698 /// * `addr` - Guest address to convert.
699 ///
700 /// # Examples (uses the `backend-mmap` feature)
701 ///
702 /// ```
703 /// # #[cfg(feature = "backend-mmap")]
704 /// # {
705 /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap};
706 /// #
707 /// # let start_addr = GuestAddress(0x1000);
708 /// # let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x500)])
709 /// # .expect("Could not create guest memory");
710 /// #
711 /// let addr = gm
712 /// .get_host_address(GuestAddress(0x1200))
713 /// .expect("Could not get host address");
714 /// println!("Host address is {:p}", addr);
715 /// # }
716 /// ```
717 fn get_host_address(&self, addr: GuestAddress) -> Result<*mut u8> {
718 self.to_region_addr(addr)
719 .ok_or(Error::InvalidGuestAddress(addr))
720 .and_then(|(r, addr)| r.get_host_address(addr))
721 }
722
723 /// Returns a [`VolatileSlice`](struct.VolatileSlice.html) of `count` bytes starting at
724 /// `addr`.
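    ///
    /// # Examples (uses the `backend-mmap` feature)
    ///
    /// A minimal sketch of volatile access through the returned slice:
    ///
    /// ```
    /// # #[cfg(feature = "backend-mmap")]
    /// # {
    /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap};
    /// # use vm_memory::volatile_memory::VolatileMemory;
    /// let mem = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x1000), 0x400)]).unwrap();
    /// let slice = mem
    ///     .get_slice(GuestAddress(0x1100), 0x100)
    ///     .expect("Could not get slice");
    ///
    /// let r = slice.get_ref::<u32>(0x10).expect("Could not get reference");
    /// r.store(0x1234_5678u32);
    /// assert_eq!(r.load(), 0x1234_5678u32);
    /// # }
    /// ```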
725 fn get_slice(&self, addr: GuestAddress, count: usize) -> Result<VolatileSlice<MS<Self>>> {
726 self.to_region_addr(addr)
727 .ok_or(Error::InvalidGuestAddress(addr))
728 .and_then(|(r, addr)| r.get_slice(addr, count))
729 }
730}
731
732impl<T: GuestMemory + ?Sized> Bytes<GuestAddress> for T {
733 type E = Error;
734
735 fn write(&self, buf: &[u8], addr: GuestAddress) -> Result<usize> {
736 self.try_access(
737 buf.len(),
738 addr,
739 |offset, _count, caddr, region| -> Result<usize> {
740 region.write(&buf[offset as usize..], caddr)
741 },
742 )
743 }
744
745 fn read(&self, buf: &mut [u8], addr: GuestAddress) -> Result<usize> {
746 self.try_access(
747 buf.len(),
748 addr,
749 |offset, _count, caddr, region| -> Result<usize> {
750 region.read(&mut buf[offset as usize..], caddr)
751 },
752 )
753 }
754
755 /// # Examples
756 ///
    /// * Write a slice at guest address 0x1000. (uses the `backend-mmap` feature)
758 ///
759 /// ```
760 /// # #[cfg(feature = "backend-mmap")]
761 /// # {
762 /// # use vm_memory::{Bytes, GuestAddress, mmap::GuestMemoryMmap};
763 /// #
764 /// # let start_addr = GuestAddress(0x1000);
765 /// # let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)])
766 /// # .expect("Could not create guest memory");
767 /// #
768 /// gm.write_slice(&[1, 2, 3, 4, 5], start_addr)
769 /// .expect("Could not write slice to guest memory");
770 /// # }
771 /// ```
772 fn write_slice(&self, buf: &[u8], addr: GuestAddress) -> Result<()> {
773 let res = self.write(buf, addr)?;
774 if res != buf.len() {
775 return Err(Error::PartialBuffer {
776 expected: buf.len(),
777 completed: res,
778 });
779 }
780 Ok(())
781 }
782
783 /// # Examples
784 ///
    /// * Read a slice of length 16 at guest address 0x1000. (uses the `backend-mmap` feature)
786 ///
787 /// ```
788 /// # #[cfg(feature = "backend-mmap")]
789 /// # {
790 /// # use vm_memory::{Bytes, GuestAddress, mmap::GuestMemoryMmap};
791 /// #
792 /// let start_addr = GuestAddress(0x1000);
793 /// let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)])
794 /// .expect("Could not create guest memory");
795 /// let buf = &mut [0u8; 16];
796 ///
797 /// gm.read_slice(buf, start_addr)
798 /// .expect("Could not read slice from guest memory");
799 /// # }
800 /// ```
801 fn read_slice(&self, buf: &mut [u8], addr: GuestAddress) -> Result<()> {
802 let res = self.read(buf, addr)?;
803 if res != buf.len() {
804 return Err(Error::PartialBuffer {
805 expected: buf.len(),
806 completed: res,
807 });
808 }
809 Ok(())
810 }
811
812 /// # Examples
813 ///
814 /// * Read bytes from /dev/urandom (uses the `backend-mmap` feature)
815 ///
816 /// ```
817 /// # #[cfg(feature = "backend-mmap")]
818 /// # {
819 /// # use vm_memory::{Address, Bytes, GuestAddress, GuestMemoryMmap};
820 /// # use std::fs::File;
821 /// # use std::path::Path;
822 /// #
823 /// # let start_addr = GuestAddress(0x1000);
824 /// # let gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)])
825 /// # .expect("Could not create guest memory");
826 /// # let addr = GuestAddress(0x1010);
827 /// # let mut file = if cfg!(unix) {
828 /// let mut file = File::open(Path::new("/dev/urandom")).expect("Could not open /dev/urandom");
829 /// # file
830 /// # } else {
831 /// # File::open(Path::new("c:\\Windows\\system32\\ntoskrnl.exe"))
832 /// # .expect("Could not open c:\\Windows\\system32\\ntoskrnl.exe")
833 /// # };
834 ///
835 /// gm.read_from(addr, &mut file, 128)
836 /// .expect("Could not read from /dev/urandom into guest memory");
837 ///
838 /// let read_addr = addr.checked_add(8).expect("Could not compute read address");
839 /// let rand_val: u32 = gm
840 /// .read_obj(read_addr)
841 /// .expect("Could not read u32 val from /dev/urandom");
842 /// # }
843 /// ```
844 fn read_from<F>(&self, addr: GuestAddress, src: &mut F, count: usize) -> Result<usize>
845 where
846 F: Read,
847 {
848 self.try_access(count, addr, |offset, len, caddr, region| -> Result<usize> {
849 // Check if something bad happened before doing unsafe things.
850 assert!(offset <= count);
851 // SAFETY: Safe because we are checking the offset.
852 if let Some(dst) = unsafe { region.as_mut_slice() } {
                // This is safe because `start` and `len` are within the `region`, and we manually
854 // record the dirty status of the written range below.
855 let start = caddr.raw_value() as usize;
856 let end = start + len;
857 let bytes_read = loop {
858 match src.read(&mut dst[start..end]) {
859 Ok(n) => break n,
860 Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => continue,
861 Err(e) => return Err(Error::IOError(e)),
862 }
863 };
864
865 region.bitmap().mark_dirty(start, bytes_read);
866 Ok(bytes_read)
867 } else {
868 let len = std::cmp::min(len, MAX_ACCESS_CHUNK);
869 let mut buf = vec![0u8; len].into_boxed_slice();
870 loop {
871 match src.read(&mut buf[..]) {
872 Ok(bytes_read) => {
873 // We don't need to update the dirty bitmap manually here because it's
874 // expected to be handled by the logic within the `Bytes`
875 // implementation for the region object.
876 let bytes_written = region.write(&buf[0..bytes_read], caddr)?;
877 assert_eq!(bytes_written, bytes_read);
878 break Ok(bytes_read);
879 }
880 Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => continue,
881 Err(e) => break Err(Error::IOError(e)),
882 }
883 }
884 }
885 })
886 }
887
888 fn read_exact_from<F>(&self, addr: GuestAddress, src: &mut F, count: usize) -> Result<()>
889 where
890 F: Read,
891 {
892 let res = self.read_from(addr, src, count)?;
893 if res != count {
894 return Err(Error::PartialBuffer {
895 expected: count,
896 completed: res,
897 });
898 }
899 Ok(())
900 }
901
902 /// # Examples
903 ///
904 /// * Write 128 bytes to /dev/null (uses the `backend-mmap` feature)
905 ///
906 /// ```
907 /// # #[cfg(not(unix))]
908 /// # extern crate vmm_sys_util;
909 /// # #[cfg(feature = "backend-mmap")]
910 /// # {
911 /// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};
912 /// #
913 /// # let start_addr = GuestAddress(0x1000);
914 /// # let gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 1024)])
915 /// # .expect("Could not create guest memory");
916 /// # let mut file = if cfg!(unix) {
917 /// # use std::fs::OpenOptions;
918 /// let mut file = OpenOptions::new()
919 /// .write(true)
920 /// .open("/dev/null")
921 /// .expect("Could not open /dev/null");
922 /// # file
923 /// # } else {
924 /// # use vmm_sys_util::tempfile::TempFile;
925 /// # TempFile::new().unwrap().into_file()
926 /// # };
927 ///
928 /// gm.write_to(start_addr, &mut file, 128)
929 /// .expect("Could not write 128 bytes to the provided address");
930 /// # }
931 /// ```
932 fn write_to<F>(&self, addr: GuestAddress, dst: &mut F, count: usize) -> Result<usize>
933 where
934 F: Write,
935 {
936 self.try_access(count, addr, |offset, len, caddr, region| -> Result<usize> {
937 // Check if something bad happened before doing unsafe things.
938 assert!(offset <= count);
            // SAFETY: Safe because we are checking that the offset is valid.
940 if let Some(src) = unsafe { region.as_slice() } {
                // This is safe because `start` and `len` are within the `region`.
942 let start = caddr.raw_value() as usize;
943 let end = start + len;
944 loop {
945 // It is safe to read from volatile memory. Accessing the guest
946 // memory as a slice should be OK as long as nothing assumes another
947 // thread won't change what is loaded; however, we may want to introduce
948 // VolatileRead and VolatileWrite traits in the future.
949 match dst.write(&src[start..end]) {
950 Ok(n) => break Ok(n),
951 Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => continue,
952 Err(e) => break Err(Error::IOError(e)),
953 }
954 }
955 } else {
956 let len = std::cmp::min(len, MAX_ACCESS_CHUNK);
957 let mut buf = vec![0u8; len].into_boxed_slice();
958 let bytes_read = region.read(&mut buf, caddr)?;
959 assert_eq!(bytes_read, len);
960 // For a non-RAM region, reading could have side effects, so we
961 // must use write_all().
962 dst.write_all(&buf).map_err(Error::IOError)?;
963 Ok(len)
964 }
965 })
966 }
967
968 /// # Examples
969 ///
970 /// * Write 128 bytes to /dev/null (uses the `backend-mmap` feature)
971 ///
972 /// ```
973 /// # #[cfg(not(unix))]
974 /// # extern crate vmm_sys_util;
975 /// # #[cfg(feature = "backend-mmap")]
976 /// # {
977 /// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};
978 /// #
979 /// # let start_addr = GuestAddress(0x1000);
980 /// # let gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 1024)])
981 /// # .expect("Could not create guest memory");
982 /// # let mut file = if cfg!(unix) {
983 /// # use std::fs::OpenOptions;
984 /// let mut file = OpenOptions::new()
985 /// .write(true)
986 /// .open("/dev/null")
987 /// .expect("Could not open /dev/null");
988 /// # file
989 /// # } else {
990 /// # use vmm_sys_util::tempfile::TempFile;
991 /// # TempFile::new().unwrap().into_file()
992 /// # };
993 ///
994 /// gm.write_all_to(start_addr, &mut file, 128)
995 /// .expect("Could not write 128 bytes to the provided address");
996 /// # }
997 /// ```
998 fn write_all_to<F>(&self, addr: GuestAddress, dst: &mut F, count: usize) -> Result<()>
999 where
1000 F: Write,
1001 {
1002 let res = self.write_to(addr, dst, count)?;
1003 if res != count {
1004 return Err(Error::PartialBuffer {
1005 expected: count,
1006 completed: res,
1007 });
1008 }
1009 Ok(())
1010 }
1011
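    /// # Examples (uses the `backend-mmap` feature)
    ///
    /// A minimal sketch of an atomic store followed by an atomic load at the same guest address:
    ///
    /// ```
    /// # #[cfg(feature = "backend-mmap")]
    /// # {
    /// # use std::sync::atomic::Ordering;
    /// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};
    /// let mem = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x1000), 0x400)]).unwrap();
    ///
    /// mem.store(0xdead_beefu32, GuestAddress(0x1008), Ordering::Relaxed)
    ///     .expect("Could not store value");
    /// let val: u32 = mem
    ///     .load(GuestAddress(0x1008), Ordering::Relaxed)
    ///     .expect("Could not load value");
    /// assert_eq!(val, 0xdead_beefu32);
    /// # }
    /// ```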
1012 fn store<O: AtomicAccess>(&self, val: O, addr: GuestAddress, order: Ordering) -> Result<()> {
1013 // `find_region` should really do what `to_region_addr` is doing right now, except
1014 // it should keep returning a `Result`.
1015 self.to_region_addr(addr)
1016 .ok_or(Error::InvalidGuestAddress(addr))
1017 .and_then(|(region, region_addr)| region.store(val, region_addr, order))
1018 }
1019
1020 fn load<O: AtomicAccess>(&self, addr: GuestAddress, order: Ordering) -> Result<O> {
1021 self.to_region_addr(addr)
1022 .ok_or(Error::InvalidGuestAddress(addr))
1023 .and_then(|(region, region_addr)| region.load(region_addr, order))
1024 }
1025}
1026
1027#[cfg(test)]
1028mod tests {
1029 #![allow(clippy::undocumented_unsafe_blocks)]
1030 use super::*;
1031 #[cfg(feature = "backend-mmap")]
1032 use crate::bytes::ByteValued;
1033 #[cfg(feature = "backend-mmap")]
1034 use crate::GuestAddress;
1035 #[cfg(feature = "backend-mmap")]
1036 use std::io::Cursor;
1037 #[cfg(feature = "backend-mmap")]
1038 use std::time::{Duration, Instant};
1039
1040 use vmm_sys_util::tempfile::TempFile;
1041
1042 #[cfg(feature = "backend-mmap")]
1043 type GuestMemoryMmap = crate::GuestMemoryMmap<()>;
1044
1045 #[cfg(feature = "backend-mmap")]
1046 fn make_image(size: u8) -> Vec<u8> {
1047 let mut image: Vec<u8> = Vec::with_capacity(size as usize);
1048 for i in 0..size {
1049 image.push(i);
1050 }
1051 image
1052 }
1053
1054 #[test]
1055 fn test_file_offset() {
1056 let file = TempFile::new().unwrap().into_file();
1057 let start = 1234;
1058 let file_offset = FileOffset::new(file, start);
1059 assert_eq!(file_offset.start(), start);
1060 assert_eq!(
1061 file_offset.file() as *const File,
1062 file_offset.arc().as_ref() as *const File
1063 );
1064 }
1065
1066 #[cfg(feature = "backend-mmap")]
1067 #[test]
1068 fn checked_read_from() {
1069 let start_addr1 = GuestAddress(0x0);
1070 let start_addr2 = GuestAddress(0x40);
1071 let mem = GuestMemoryMmap::from_ranges(&[(start_addr1, 64), (start_addr2, 64)]).unwrap();
1072 let image = make_image(0x80);
1073 let offset = GuestAddress(0x30);
1074 let count: usize = 0x20;
1075 assert_eq!(
1076 0x20_usize,
1077 mem.read_from(offset, &mut Cursor::new(&image), count)
1078 .unwrap()
1079 );
1080 }
1081
1082 // Runs the provided closure in a loop, until at least `duration` time units have elapsed.
1083 #[cfg(feature = "backend-mmap")]
1084 fn loop_timed<F>(duration: Duration, mut f: F)
1085 where
1086 F: FnMut(),
1087 {
1088 // We check the time every `CHECK_PERIOD` iterations.
1089 const CHECK_PERIOD: u64 = 1_000_000;
1090 let start_time = Instant::now();
1091
1092 loop {
1093 for _ in 0..CHECK_PERIOD {
1094 f();
1095 }
1096 if start_time.elapsed() >= duration {
1097 break;
1098 }
1099 }
1100 }
1101
1102 // Helper method for the following test. It spawns a writer and a reader thread, which
1103 // simultaneously try to access an object that is placed at the junction of two memory regions.
1104 // The part of the object that's continuously accessed is a member of type T. The writer
1105 // flips all the bits of the member with every write, while the reader checks that every byte
1106 // has the same value (and thus it did not do a non-atomic access). The test succeeds if
1107 // no mismatch is detected after performing accesses for a pre-determined amount of time.
1108 #[cfg(feature = "backend-mmap")]
1109 fn non_atomic_access_helper<T>()
1110 where
1111 T: ByteValued
1112 + std::fmt::Debug
1113 + From<u8>
1114 + Into<u128>
1115 + std::ops::Not<Output = T>
1116 + PartialEq,
1117 {
1118 use std::mem;
1119 use std::thread;
1120
1121 // A dummy type that's always going to have the same alignment as the first member,
1122 // and then adds some bytes at the end.
1123 #[derive(Clone, Copy, Debug, Default, PartialEq)]
1124 struct Data<T> {
1125 val: T,
1126 some_bytes: [u8; 7],
1127 }
1128
1129 // Some sanity checks.
1130 assert_eq!(mem::align_of::<T>(), mem::align_of::<Data<T>>());
1131 assert_eq!(mem::size_of::<T>(), mem::align_of::<T>());
1132
1133 unsafe impl<T: ByteValued> ByteValued for Data<T> {}
1134
1135 // Start of first guest memory region.
1136 let start = GuestAddress(0);
1137 let region_len = 1 << 12;
1138
1139 // The address where we start writing/reading a Data<T> value.
1140 let data_start = GuestAddress((region_len - mem::size_of::<T>()) as u64);
1141
1142 let mem = GuestMemoryMmap::from_ranges(&[
1143 (start, region_len),
1144 (start.unchecked_add(region_len as u64), region_len),
1145 ])
1146 .unwrap();
1147
1148 // Need to clone this and move it into the new thread we create.
1149 let mem2 = mem.clone();
1150 // Just some bytes.
1151 let some_bytes = [1u8, 2, 4, 16, 32, 64, 128];
1152
1153 let mut data = Data {
1154 val: T::from(0u8),
1155 some_bytes,
1156 };
1157
1158 // Simple check that cross-region write/read is ok.
1159 mem.write_obj(data, data_start).unwrap();
1160 let read_data = mem.read_obj::<Data<T>>(data_start).unwrap();
1161 assert_eq!(read_data, data);
1162
1163 let t = thread::spawn(move || {
1164 let mut count: u64 = 0;
1165
1166 loop_timed(Duration::from_secs(3), || {
1167 let data = mem2.read_obj::<Data<T>>(data_start).unwrap();
1168
1169 // Every time data is written to memory by the other thread, the value of
1170 // data.val alternates between 0 and T::MAX, so the inner bytes should always
1171 // have the same value. If they don't match, it means we read a partial value,
1172 // so the access was not atomic.
1173 let bytes = data.val.into().to_le_bytes();
1174 for i in 1..mem::size_of::<T>() {
1175 if bytes[0] != bytes[i] {
1176 panic!(
1177 "val bytes don't match {:?} after {} iterations",
1178 &bytes[..mem::size_of::<T>()],
1179 count
1180 );
1181 }
1182 }
1183 count += 1;
1184 });
1185 });
1186
1187 // Write the object while flipping the bits of data.val over and over again.
1188 loop_timed(Duration::from_secs(3), || {
1189 mem.write_obj(data, data_start).unwrap();
1190 data.val = !data.val;
1191 });
1192
1193 t.join().unwrap()
1194 }
1195
1196 #[cfg(feature = "backend-mmap")]
1197 #[test]
1198 fn test_non_atomic_access() {
1199 non_atomic_access_helper::<u16>()
1200 }
1201
1202 #[cfg(feature = "backend-mmap")]
1203 #[test]
1204 fn test_zero_length_accesses() {
1205 #[derive(Default, Clone, Copy)]
1206 #[repr(C)]
1207 struct ZeroSizedStruct {
1208 dummy: [u32; 0],
1209 }
1210
1211 unsafe impl ByteValued for ZeroSizedStruct {}
1212
1213 let addr = GuestAddress(0x1000);
1214 let mem = GuestMemoryMmap::from_ranges(&[(addr, 0x1000)]).unwrap();
1215 let obj = ZeroSizedStruct::default();
1216 let mut image = make_image(0x80);
1217
1218 assert_eq!(mem.write(&[], addr).unwrap(), 0);
1219 assert_eq!(mem.read(&mut [], addr).unwrap(), 0);
1220
1221 assert!(mem.write_slice(&[], addr).is_ok());
1222 assert!(mem.read_slice(&mut [], addr).is_ok());
1223
1224 assert!(mem.write_obj(obj, addr).is_ok());
1225 assert!(mem.read_obj::<ZeroSizedStruct>(addr).is_ok());
1226
1227 assert_eq!(mem.read_from(addr, &mut Cursor::new(&image), 0).unwrap(), 0);
1228
1229 assert!(mem
1230 .read_exact_from(addr, &mut Cursor::new(&image), 0)
1231 .is_ok());
1232
1233 assert_eq!(
1234 mem.write_to(addr, &mut Cursor::new(&mut image), 0).unwrap(),
1235 0
1236 );
1237
1238 assert!(mem
1239 .write_all_to(addr, &mut Cursor::new(&mut image), 0)
1240 .is_ok());
1241 }
1242
1243 #[cfg(feature = "backend-mmap")]
1244 #[test]
1245 fn test_atomic_accesses() {
1246 let addr = GuestAddress(0x1000);
1247 let mem = GuestMemoryMmap::from_ranges(&[(addr, 0x1000)]).unwrap();
1248 let bad_addr = addr.unchecked_add(0x1000);
1249
1250 crate::bytes::tests::check_atomic_accesses(mem, addr, bad_addr);
1251 }
1252
1253 #[cfg(feature = "backend-mmap")]
1254 #[cfg(target_os = "linux")]
1255 #[test]
1256 fn test_guest_memory_mmap_is_hugetlbfs() {
1257 let addr = GuestAddress(0x1000);
1258 let mem = GuestMemoryMmap::from_ranges(&[(addr, 0x1000)]).unwrap();
1259 let r = mem.find_region(addr).unwrap();
1260 assert_eq!(r.is_hugetlbfs(), None);
1261 }
1262}