vm_memory/volatile_memory.rs

1// Portions Copyright 2019 Red Hat, Inc.
2//
3// Copyright 2017 The Chromium OS Authors. All rights reserved.
4// Use of this source code is governed by a BSD-style license that can be
5// found in the THIRD-PARTY file.
6//
7// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
8
9//! Types for volatile access to memory.
10//!
11//! Two of the core rules for safe Rust are no data races and no aliased mutable references.
12//! `VolatileRef` and `VolatileSlice`, along with types that produce those which implement
13//! `VolatileMemory`, allow us to sidestep those rules by wrapping pointers that absolutely have to
14//! be accessed with volatile semantics. Some systems really do need to operate on shared memory
15//! and can't have the compiler reordering or eliding accesses, because it has no visibility into
16//! what other systems are doing with that chunk of memory.
17//!
18//! For the purposes of maintaining safety, volatile memory has some rules of its own:
19//! 1. No references or slices to volatile memory (`&` or `&mut`).
20//! 2. Access should always be done with a volatile read or write.
21//! The first rule is because having references of any kind to memory considered volatile would
22//! violate pointer aliasing. The second is because non-volatile accesses are inherently undefined
23//! if done concurrently without synchronization. With volatile access we know that the compiler
24//! has not reordered or elided the access.
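//!
//! A minimal usage sketch (nothing here is specific to a particular `VolatileMemory`
//! implementation; it relies only on the `&mut [u8]` impl provided in this module):
//!
//! ```
//! # use vm_memory::VolatileMemory;
//! // A plain buffer stands in for a chunk of shared memory.
//! let mut mem = [0u8; 16];
//! let mem_ref = &mut mem[..];
//!
//! // All accesses go through `VolatileRef`/`VolatileSlice`, never through `&`/`&mut`
//! // references to the memory itself.
//! let v_ref = mem_ref.get_ref::<u32>(4).expect("offset is in bounds");
//! v_ref.store(0xdead_beef);
//! assert_eq!(v_ref.load(), 0xdead_beef);
//! ```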
25
26use std::cmp::min;
use std::convert::TryFrom;
27use std::error;
28use std::fmt;
29use std::io::{self, Read, Write};
30use std::marker::PhantomData;
31use std::mem::{align_of, size_of};
32use std::ptr::copy;
33use std::ptr::{read_volatile, write_volatile};
34use std::result;
35use std::slice::{from_raw_parts, from_raw_parts_mut};
36use std::sync::atomic::Ordering;
37use std::usize;
38
39use crate::atomic_integer::AtomicInteger;
40use crate::bitmap::{Bitmap, BitmapSlice, BS};
41use crate::{AtomicAccess, ByteValued, Bytes};
42
43use copy_slice_impl::copy_slice;
44
45/// `VolatileMemory` related errors.
46#[allow(missing_docs)]
47#[derive(Debug)]
48pub enum Error {
49    /// `addr` is out of bounds of the volatile memory slice.
50    OutOfBounds { addr: usize },
51    /// Taking a slice at `base` with `offset` would overflow `usize`.
52    Overflow { base: usize, offset: usize },
53    /// Taking a slice whose size overflows `usize`.
54    TooBig { nelements: usize, size: usize },
55    /// Trying to obtain a misaligned reference.
56    Misaligned { addr: usize, alignment: usize },
57    /// Writing to memory failed.
58    IOError(io::Error),
59    /// Incomplete read or write.
60    PartialBuffer { expected: usize, completed: usize },
61}
62
63impl fmt::Display for Error {
64    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
65        match self {
66            Error::OutOfBounds { addr } => write!(f, "address 0x{:x} is out of bounds", addr),
67            Error::Overflow { base, offset } => write!(
68                f,
69                "address 0x{:x} offset by 0x{:x} would overflow",
70                base, offset
71            ),
72            Error::TooBig { nelements, size } => write!(
73                f,
74                "{:?} elements of size {:?} would overflow a usize",
75                nelements, size
76            ),
77            Error::Misaligned { addr, alignment } => {
78                write!(f, "address 0x{:x} is not aligned to {:?}", addr, alignment)
79            }
80            Error::IOError(error) => write!(f, "{}", error),
81            Error::PartialBuffer {
82                expected,
83                completed,
84            } => write!(
85                f,
86                "only used {} bytes in {} long buffer",
87                completed, expected
88            ),
89        }
90    }
91}
92
93impl error::Error for Error {}
94
95/// Result of volatile memory operations.
96pub type Result<T> = result::Result<T, Error>;
97
98/// Convenience function for computing `base + offset`.
99///
100/// # Errors
101///
102/// Returns [`Err(Error::Overflow)`](enum.Error.html#variant.Overflow) in case `base + offset`
103/// exceeds `usize::MAX`.
104///
105/// # Examples
106///
107/// ```
108/// # use vm_memory::volatile_memory::compute_offset;
109/// #
110/// assert_eq!(108, compute_offset(100, 8).unwrap());
111/// assert!(compute_offset(std::usize::MAX, 6).is_err());
112/// ```
113pub fn compute_offset(base: usize, offset: usize) -> Result<usize> {
114    match base.checked_add(offset) {
115        None => Err(Error::Overflow { base, offset }),
116        Some(m) => Ok(m),
117    }
118}
119
120/// Types that support raw volatile access to their data.
121pub trait VolatileMemory {
122    /// Type used for dirty memory tracking.
123    type B: Bitmap;
124
125    /// Gets the size of this slice.
126    fn len(&self) -> usize;
127
128    /// Check whether the region is empty.
129    fn is_empty(&self) -> bool {
130        self.len() == 0
131    }
132
133    /// Returns a [`VolatileSlice`](struct.VolatileSlice.html) of `count` bytes starting at
134    /// `offset`.
135    fn get_slice(&self, offset: usize, count: usize) -> Result<VolatileSlice<BS<Self::B>>>;
136
137    /// Gets a slice of memory for the entire region that supports volatile access.
138    fn as_volatile_slice(&self) -> VolatileSlice<BS<Self::B>> {
139        self.get_slice(0, self.len()).unwrap()
140    }
141
142    /// Gets a `VolatileRef` at `offset`.
143    fn get_ref<T: ByteValued>(&self, offset: usize) -> Result<VolatileRef<T, BS<Self::B>>> {
144        let slice = self.get_slice(offset, size_of::<T>())?;
145        // SAFETY: This is safe because the pointer is range-checked by get_slice, and
146        // the lifetime is the same as self.
147        unsafe { Ok(VolatileRef::with_bitmap(slice.addr, slice.bitmap)) }
148    }
149
150    /// Returns a [`VolatileArrayRef`](struct.VolatileArrayRef.html) of `n` elements starting at
151    /// `offset`.
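    ///
    /// # Examples
    ///
    /// A short sketch using the `&mut [u8]` implementation from this module; any other
    /// `VolatileMemory` implementation behaves the same way:
    ///
    /// ```
    /// # use vm_memory::VolatileMemory;
    /// let mut mem = [0u8; 32];
    /// let mem_ref = &mut mem[..];
    ///
    /// let arr = mem_ref
    ///     .get_array_ref::<u16>(0, 4)
    ///     .expect("Could not get VolatileArrayRef");
    /// arr.store(2, 0xabcd);
    /// assert_eq!(arr.load(2), 0xabcd);
    /// ```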
152    fn get_array_ref<T: ByteValued>(
153        &self,
154        offset: usize,
155        n: usize,
156    ) -> Result<VolatileArrayRef<T, BS<Self::B>>> {
157        // Use isize to avoid problems with ptr::offset and ptr::add down the line.
158        let nbytes = isize::try_from(n)
159            .ok()
160            .and_then(|n| n.checked_mul(size_of::<T>() as isize))
161            .ok_or(Error::TooBig {
162                nelements: n,
163                size: size_of::<T>(),
164            })?;
165        let slice = self.get_slice(offset, nbytes as usize)?;
166        // SAFETY: This is safe because the pointer is range-checked by get_slice, and
167        // the lifetime is the same as self.
168        unsafe { Ok(VolatileArrayRef::with_bitmap(slice.addr, n, slice.bitmap)) }
169    }
170
171    /// Returns a reference to an instance of `T` at `offset`.
172    ///
173    /// # Safety
174    /// To use this safely, the caller must guarantee that there are no other
175    /// users of the given chunk of memory for the lifetime of the result.
176    ///
177    /// # Errors
178    ///
179    /// If the resulting pointer is not aligned, this method will return an
180    /// [`Error`](enum.Error.html).
181    unsafe fn aligned_as_ref<T: ByteValued>(&self, offset: usize) -> Result<&T> {
182        let slice = self.get_slice(offset, size_of::<T>())?;
183        slice.check_alignment(align_of::<T>())?;
184        Ok(&*(slice.addr as *const T))
185    }
186
187    /// Returns a mutable reference to an instance of `T` at `offset`. Mutable accesses performed
188    /// using the resulting reference are not automatically accounted for by the dirty bitmap
189    /// tracking functionality.
190    ///
191    /// # Safety
192    ///
193    /// To use this safely, the caller must guarantee that there are no other
194    /// users of the given chunk of memory for the lifetime of the result.
195    ///
196    /// # Errors
197    ///
198    /// If the resulting pointer is not aligned, this method will return an
199    /// [`Error`](enum.Error.html).
200    unsafe fn aligned_as_mut<T: ByteValued>(&self, offset: usize) -> Result<&mut T> {
201        let slice = self.get_slice(offset, size_of::<T>())?;
202        slice.check_alignment(align_of::<T>())?;
203
204        Ok(&mut *(slice.addr as *mut T))
205    }
206
207    /// Returns a reference to an instance of `T` at `offset`. Mutable accesses performed
208    /// using the resulting reference are not automatically accounted for by the dirty bitmap
209    /// tracking functionality.
210    ///
211    /// # Errors
212    ///
213    /// If the resulting pointer is not aligned, this method will return an
214    /// [`Error`](enum.Error.html).
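    ///
    /// # Examples
    ///
    /// A small sketch; the slice is backed by a `usize` so that offset 0 is suitably aligned:
    ///
    /// ```
    /// # use std::sync::atomic::{AtomicUsize, Ordering};
    /// # use vm_memory::{VolatileMemory, VolatileSlice};
    /// let mut val = 0usize;
    /// // SAFETY: `val` lives for the duration of the slice and is only accessed through it.
    /// let vslice = unsafe {
    ///     VolatileSlice::new(&mut val as *mut usize as *mut u8, std::mem::size_of::<usize>())
    /// };
    ///
    /// let atomic = vslice
    ///     .get_atomic_ref::<AtomicUsize>(0)
    ///     .expect("Could not get atomic reference");
    /// atomic.store(7, Ordering::Relaxed);
    /// assert_eq!(atomic.load(Ordering::Relaxed), 7);
    /// ```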
215    fn get_atomic_ref<T: AtomicInteger>(&self, offset: usize) -> Result<&T> {
216        let slice = self.get_slice(offset, size_of::<T>())?;
217        slice.check_alignment(align_of::<T>())?;
218
219        // SAFETY: This is safe because the pointer is range-checked by get_slice, and
220        // the lifetime is the same as self.
221        unsafe { Ok(&*(slice.addr as *const T)) }
222    }
223
224    /// Returns the sum of `base` and `offset` if the resulting address is valid.
225    fn compute_end_offset(&self, base: usize, offset: usize) -> Result<usize> {
226        let mem_end = compute_offset(base, offset)?;
227        if mem_end > self.len() {
228            return Err(Error::OutOfBounds { addr: mem_end });
229        }
230        Ok(mem_end)
231    }
232}
233
234impl<'a> VolatileMemory for &'a mut [u8] {
235    type B = ();
236
237    fn len(&self) -> usize {
238        <[u8]>::len(self)
239    }
240
241    fn get_slice(&self, offset: usize, count: usize) -> Result<VolatileSlice<()>> {
242        let _ = self.compute_end_offset(offset, count)?;
243        // SAFETY: This is safe because the pointer is range-checked by compute_end_offset, and
244        // the lifetime is the same as the original slice.
245        unsafe {
246            Ok(VolatileSlice::new(
247                (self.as_ptr() as usize + offset) as *mut _,
248                count,
249            ))
250        }
251    }
252}
253
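// Wrapper with alignment 1 (due to `repr(packed)`), so that `read_volatile`/`write_volatile` on a
// `*mut Packed<T>` is valid even when the underlying address is not aligned for `T`.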
254#[repr(C, packed)]
255struct Packed<T>(T);
256
257/// A slice of raw memory that supports volatile access.
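///
/// # Examples
///
/// A short construction sketch; the exact safety requirements are documented on
/// `VolatileSlice::new`:
///
/// ```
/// # use vm_memory::VolatileSlice;
/// let mut mem = [0u8; 32];
/// // SAFETY: `mem` outlives the slice and is only accessed through volatile operations.
/// let vslice = unsafe { VolatileSlice::new(mem.as_mut_ptr(), mem.len()) };
/// assert_eq!(vslice.len(), 32);
/// assert!(!vslice.is_empty());
/// ```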
258#[derive(Clone, Copy, Debug)]
259pub struct VolatileSlice<'a, B = ()> {
260    addr: *mut u8,
261    size: usize,
262    bitmap: B,
263    phantom: PhantomData<&'a u8>,
264}
265
266impl<'a> VolatileSlice<'a, ()> {
267    /// Creates a slice of raw memory that must support volatile access.
268    ///
269    /// # Safety
270    ///
271    /// To use this safely, the caller must guarantee that the memory at `addr` is `size` bytes long
272    /// and is available for the duration of the lifetime of the new `VolatileSlice`. The caller
273    /// must also guarantee that all other users of the given chunk of memory are using volatile
274    /// accesses.
275    pub unsafe fn new(addr: *mut u8, size: usize) -> VolatileSlice<'a> {
276        Self::with_bitmap(addr, size, ())
277    }
278}
279
280impl<'a, B: BitmapSlice> VolatileSlice<'a, B> {
281    /// Creates a slice of raw memory that must support volatile access, and uses the provided
282    /// `bitmap` object for dirty page tracking.
283    ///
284    /// # Safety
285    ///
286    /// To use this safely, the caller must guarantee that the memory at `addr` is `size` bytes long
287    /// and is available for the duration of the lifetime of the new `VolatileSlice`. The caller
288    /// must also guarantee that all other users of the given chunk of memory are using volatile
289    /// accesses.
290    pub unsafe fn with_bitmap(addr: *mut u8, size: usize, bitmap: B) -> VolatileSlice<'a, B> {
291        VolatileSlice {
292            addr,
293            size,
294            bitmap,
295            phantom: PhantomData,
296        }
297    }
298
299    /// Returns a pointer to the beginning of the slice. Mutable accesses performed
300    /// using the resulting pointer are not automatically accounted for by the dirty bitmap
301    /// tracking functionality.
302    pub fn as_ptr(&self) -> *mut u8 {
303        self.addr
304    }
305
306    /// Gets the size of this slice.
307    pub fn len(&self) -> usize {
308        self.size
309    }
310
311    /// Checks if the slice is empty.
312    pub fn is_empty(&self) -> bool {
313        self.size == 0
314    }
315
316    /// Borrows the inner `BitmapSlice`.
317    pub fn bitmap(&self) -> &B {
318        &self.bitmap
319    }
320
321    /// Divides one slice into two at an index.
322    ///
323    /// # Example
324    ///
325    /// ```
326    /// # use vm_memory::VolatileMemory;
327    /// #
328    /// # // Create a buffer
329    /// # let mut mem = [0u8; 32];
330    /// # let mem_ref = &mut mem[..];
331    /// #
332    /// # // Get a `VolatileSlice` from the buffer
333    /// let vslice = mem_ref
334    ///     .get_slice(0, 32)
335    ///     .expect("Could not get VolatileSlice");
336    ///
337    /// let (start, end) = vslice.split_at(8).expect("Could not split VolatileSlice");
338    /// assert_eq!(8, start.len());
339    /// assert_eq!(24, end.len());
340    /// ```
341    pub fn split_at(&self, mid: usize) -> Result<(Self, Self)> {
342        let end = self.offset(mid)?;
343        // SAFETY: safe because self.offset() already checked the bounds
344        let start = unsafe { VolatileSlice::with_bitmap(self.addr, mid, self.bitmap.clone()) };
345
346        Ok((start, end))
347    }
348
349    /// Returns a subslice of this [`VolatileSlice`](struct.VolatileSlice.html) starting at
350    /// `offset` with `count` length.
351    ///
352    /// The returned subslice is a copy of this slice with the address increased by `offset` bytes
353    /// and the size set to `count` bytes.
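    ///
    /// # Examples
    ///
    /// A brief sketch of taking a subslice:
    ///
    /// ```
    /// # use vm_memory::VolatileMemory;
    /// # let mut mem = [0u8; 32];
    /// # let mem_ref = &mut mem[..];
    /// let vslice = mem_ref
    ///     .get_slice(0, 32)
    ///     .expect("Could not get VolatileSlice");
    ///
    /// let subslice = vslice.subslice(8, 16).expect("Could not get subslice");
    /// assert_eq!(16, subslice.len());
    /// assert!(vslice.subslice(24, 16).is_err());
    /// ```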
354    pub fn subslice(&self, offset: usize, count: usize) -> Result<Self> {
355        let mem_end = compute_offset(offset, count)?;
356        if mem_end > self.len() {
357            return Err(Error::OutOfBounds { addr: mem_end });
358        }
359        // SAFETY: This is safe because the pointer is range-checked by the bounds check above,
360        // and the lifetime is the same as the original slice.
361        unsafe {
362            Ok(VolatileSlice::with_bitmap(
363                (self.as_ptr() as usize + offset) as *mut u8,
364                count,
365                self.bitmap.slice_at(offset),
366            ))
367        }
368    }
369
370    /// Returns a subslice of this [`VolatileSlice`](struct.VolatileSlice.html) starting at
371    /// `offset`.
372    ///
373    /// The returned subslice is a copy of this slice with the address increased by `count` bytes
374    /// and the size reduced by `count` bytes.
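    ///
    /// # Examples
    ///
    /// A brief sketch of advancing into a slice:
    ///
    /// ```
    /// # use vm_memory::VolatileMemory;
    /// # let mut mem = [0u8; 32];
    /// # let mem_ref = &mut mem[..];
    /// let vslice = mem_ref
    ///     .get_slice(0, 32)
    ///     .expect("Could not get VolatileSlice");
    ///
    /// let rest = vslice.offset(10).expect("Could not get offset slice");
    /// assert_eq!(22, rest.len());
    /// ```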
375    pub fn offset(&self, count: usize) -> Result<VolatileSlice<'a, B>> {
376        let new_addr = (self.addr as usize)
377            .checked_add(count)
378            .ok_or(Error::Overflow {
379                base: self.addr as usize,
380                offset: count,
381            })?;
382        let new_size = self
383            .size
384            .checked_sub(count)
385            .ok_or(Error::OutOfBounds { addr: new_addr })?;
386        // SAFETY: Safe because the memory has the same lifetime and points to a subset of the
387        // memory of the original slice.
388        unsafe {
389            Ok(VolatileSlice::with_bitmap(
390                new_addr as *mut u8,
391                new_size,
392                self.bitmap.slice_at(count),
393            ))
394        }
395    }
396
397    /// Copies as many elements of type `T` as possible from this slice to `buf`.
398    ///
399    /// Copies `self.len()` or `buf.len()` times the size of `T` bytes, whichever is smaller,
400    /// to `buf`. The copy happens from smallest to largest address in `T` sized chunks
401    /// using volatile reads.
402    ///
403    /// # Examples
404    ///
405    /// ```
406    /// # use vm_memory::VolatileMemory;
407    /// #
408    /// let mut mem = [0u8; 32];
409    /// let mem_ref = &mut mem[..];
410    /// let vslice = mem_ref
411    ///     .get_slice(0, 32)
412    ///     .expect("Could not get VolatileSlice");
413    /// let mut buf = [5u8; 16];
414    /// let res = vslice.copy_to(&mut buf[..]);
415    ///
416    /// assert_eq!(16, res);
417    /// for &v in &buf[..] {
418    ///     assert_eq!(v, 0);
419    /// }
420    /// ```
421    pub fn copy_to<T>(&self, buf: &mut [T]) -> usize
422    where
423        T: ByteValued,
424    {
425        // A fast path for u8/i8
426        if size_of::<T>() == 1 {
427            // SAFETY: It is safe because the pointers are range-checked when the slices are
428            // created, and they never escape the VolatileSlices.
429            let source = unsafe { self.as_slice() };
430            // SAFETY: Safe because `T` is a one-byte data structure.
431            let dst = unsafe { from_raw_parts_mut(buf.as_mut_ptr() as *mut u8, buf.len()) };
432            copy_slice(dst, source)
433        } else {
434            let count = self.size / size_of::<T>();
435            let source = self.get_array_ref::<T>(0, count).unwrap();
436            source.copy_to(buf)
437        }
438    }
439
440    /// Copies as many bytes as possible from this slice to the provided `slice`.
441    ///
442    /// The copies happen in an undefined order.
443    ///
444    /// # Examples
445    ///
446    /// ```
447    /// # use vm_memory::VolatileMemory;
448    /// #
449    /// # // Create a buffer
450    /// # let mut mem = [0u8; 32];
451    /// # let mem_ref = &mut mem[..];
452    /// #
453    /// # // Get a `VolatileSlice` from the buffer
454    /// # let vslice = mem_ref.get_slice(0, 32)
455    /// #    .expect("Could not get VolatileSlice");
456    /// #
457    /// vslice.copy_to_volatile_slice(
458    ///     vslice
459    ///         .get_slice(16, 16)
460    ///         .expect("Could not get VolatileSlice"),
461    /// );
462    /// ```
463    pub fn copy_to_volatile_slice<S: BitmapSlice>(&self, slice: VolatileSlice<S>) {
464        // SAFETY: Safe because the pointers are range-checked when the slices
465        // are created, and they never escape the VolatileSlices.
466        // FIXME: ... however, is it really okay to mix non-volatile
467        // operations such as copy with read_volatile and write_volatile?
468        unsafe {
469            let count = min(self.size, slice.size);
470            copy(self.addr, slice.addr, count);
471            slice.bitmap.mark_dirty(0, count);
472        }
473    }
474
475    /// Copies as many elements of type `T` as possible from `buf` to this slice.
476    ///
477    /// The copy happens from smallest to largest address in `T` sized chunks using volatile writes.
478    ///
479    /// # Examples
480    ///
481    /// ```
482    /// # use vm_memory::VolatileMemory;
483    /// #
484    /// let mut mem = [0u8; 32];
485    /// let mem_ref = &mut mem[..];
486    /// let vslice = mem_ref
487    ///     .get_slice(0, 32)
488    ///     .expect("Could not get VolatileSlice");
489    ///
490    /// let buf = [5u8; 64];
491    /// vslice.copy_from(&buf[..]);
492    ///
493    /// for i in 0..4 {
494    ///     let val = vslice
495    ///         .get_ref::<u32>(i * 4)
496    ///         .expect("Could not get value")
497    ///         .load();
498    ///     assert_eq!(val, 0x05050505);
499    /// }
500    /// ```
501    pub fn copy_from<T>(&self, buf: &[T])
502    where
503        T: ByteValued,
504    {
505        // A fast path for u8/i8
506        if size_of::<T>() == 1 {
507            // SAFETY: It is safe because the pointers are range-checked when the slices are created,
508            // and they never escape the VolatileSlices.
509            let dst = unsafe { self.as_mut_slice() };
510            // SAFETY: Safe because `T` is a one-byte data structure.
511            let src = unsafe { from_raw_parts(buf.as_ptr() as *const u8, buf.len()) };
512            let count = copy_slice(dst, src);
513            self.bitmap.mark_dirty(0, count * size_of::<T>());
514        } else {
515            let count = self.size / size_of::<T>();
516            // It's ok to use unwrap here because `count` was computed based on the current
517            // length of `self`.
518            let dest = self.get_array_ref::<T>(0, count).unwrap();
519
520            // No need to explicitly call `mark_dirty` after this call because
521            // `VolatileArrayRef::copy_from` already takes care of that.
522            dest.copy_from(buf);
523        };
524    }
525
526    /// Returns a slice corresponding to the data in the underlying memory.
527    ///
528    /// # Safety
529    ///
530    /// This function is private and only used for the read/write functions. It is not valid in
531    /// general to take slices of volatile memory.
532    unsafe fn as_slice(&self) -> &[u8] {
533        from_raw_parts(self.addr, self.size)
534    }
535
536    /// Returns a mutable slice corresponding to the data in the underlying memory. Writes to the
537    /// slice have to be tracked manually using the handle returned by `VolatileSlice::bitmap`.
538    ///
539    /// # Safety
540    ///
541    /// This function is private and only used for the read/write functions. It is not valid in
542    /// general to take slices of volatile memory. Mutable accesses performed through the returned
543    /// slice are not visible to the dirty bitmap tracking functionality, and must be manually
544    /// recorded using the associated bitmap object.
545    #[allow(clippy::mut_from_ref)]
546    unsafe fn as_mut_slice(&self) -> &mut [u8] {
547        from_raw_parts_mut(self.addr, self.size)
548    }
549
550    /// Checks if the current slice is aligned at `alignment` bytes.
551    fn check_alignment(&self, alignment: usize) -> Result<()> {
552        // Check that the desired alignment is a power of two.
553        debug_assert!((alignment & (alignment - 1)) == 0);
554        if ((self.addr as usize) & (alignment - 1)) != 0 {
555            return Err(Error::Misaligned {
556                addr: self.addr as usize,
557                alignment,
558            });
559        }
560        Ok(())
561    }
562}
563
564impl<B: BitmapSlice> Bytes<usize> for VolatileSlice<'_, B> {
565    type E = Error;
566
567    /// # Examples
568    /// * Write a slice of size 5 at offset 1020 of a 1024-byte `VolatileSlice`.
569    ///
570    /// ```
571    /// # use vm_memory::{Bytes, VolatileMemory};
572    /// #
573    /// let mut mem = [0u8; 1024];
574    /// let mut mem_ref = &mut mem[..];
575    /// let vslice = mem_ref.as_volatile_slice();
576    /// let res = vslice.write(&[1, 2, 3, 4, 5], 1020);
577    ///
578    /// assert!(res.is_ok());
579    /// assert_eq!(res.unwrap(), 4);
580    /// ```
581    fn write(&self, buf: &[u8], addr: usize) -> Result<usize> {
582        if buf.is_empty() {
583            return Ok(0);
584        }
585
586        if addr >= self.size {
587            return Err(Error::OutOfBounds { addr });
588        }
589
590        // SAFETY: Guest memory can't strictly be modeled as a slice because it is
591        // volatile.  Writing to it with what is essentially a fancy memcpy
592        // won't hurt anything as long as we get the bounds checks right.
593        let slice = unsafe { self.as_mut_slice() }.split_at_mut(addr).1;
594
595        let count = copy_slice(slice, buf);
596        self.bitmap.mark_dirty(addr, count);
597        Ok(count)
598    }
599
600    /// # Examples
601    /// * Read a slice of size 16 at offset 1010 of a 1024-byte `VolatileSlice`.
602    ///
603    /// ```
604    /// # use vm_memory::{Bytes, VolatileMemory};
605    /// #
606    /// let mut mem = [0u8; 1024];
607    /// let mut mem_ref = &mut mem[..];
608    /// let vslice = mem_ref.as_volatile_slice();
609    /// let buf = &mut [0u8; 16];
610    /// let res = vslice.read(buf, 1010);
611    ///
612    /// assert!(res.is_ok());
613    /// assert_eq!(res.unwrap(), 14);
614    /// ```
615    fn read(&self, buf: &mut [u8], addr: usize) -> Result<usize> {
616        if buf.is_empty() {
617            return Ok(0);
618        }
619
620        if addr >= self.size {
621            return Err(Error::OutOfBounds { addr });
622        }
623
624        // SAFETY: Guest memory can't strictly be modeled as a slice because it is
625        // volatile.  Reading from it with what is essentially a fancy memcpy
626        // won't hurt anything as long as we get the bounds checks right.
627        let slice = unsafe { self.as_slice() }.split_at(addr).1;
628        Ok(copy_slice(buf, slice))
629    }
630
631    /// # Examples
632    /// * Write a slice at offset 256.
633    ///
634    /// ```
635    /// # use vm_memory::{Bytes, VolatileMemory};
636    /// #
637    /// # // Create a buffer
638    /// # let mut mem = [0u8; 1024];
639    /// # let mut mem_ref = &mut mem[..];
640    /// #
641    /// # // Get a `VolatileSlice` from the buffer
642    /// # let vslice = mem_ref.as_volatile_slice();
643    /// #
644    /// let res = vslice.write_slice(&[1, 2, 3, 4, 5], 256);
645    ///
646    /// assert!(res.is_ok());
647    /// assert_eq!(res.unwrap(), ());
648    /// ```
649    fn write_slice(&self, buf: &[u8], addr: usize) -> Result<()> {
650        // `mark_dirty` called within `self.write`.
651        let len = self.write(buf, addr)?;
652        if len != buf.len() {
653            return Err(Error::PartialBuffer {
654                expected: buf.len(),
655                completed: len,
656            });
657        }
658        Ok(())
659    }
660
661    /// # Examples
662    /// * Read a slice of size 16 at offset 256.
663    ///
664    /// ```
665    /// # use vm_memory::{Bytes, VolatileMemory};
666    /// #
667    /// # // Create a buffer
668    /// # let mut mem = [0u8; 1024];
669    /// # let mut mem_ref = &mut mem[..];
670    /// #
671    /// # // Get a `VolatileSlice` from the buffer
672    /// # let vslice = mem_ref.as_volatile_slice();
673    /// #
674    /// let buf = &mut [0u8; 16];
675    /// let res = vslice.read_slice(buf, 256);
676    ///
677    /// assert!(res.is_ok());
678    /// ```
679    fn read_slice(&self, buf: &mut [u8], addr: usize) -> Result<()> {
680        let len = self.read(buf, addr)?;
681        if len != buf.len() {
682            return Err(Error::PartialBuffer {
683                expected: buf.len(),
684                completed: len,
685            });
686        }
687        Ok(())
688    }
689
690    /// # Examples
691    ///
692    /// * Read bytes from /dev/urandom
693    ///
694    /// ```
695    /// # use vm_memory::{Bytes, VolatileMemory};
696    /// # use std::fs::File;
697    /// # use std::path::Path;
698    /// #
699    /// # if cfg!(unix) {
700    /// # let mut mem = [0u8; 1024];
701    /// # let mut mem_ref = &mut mem[..];
702    /// # let vslice = mem_ref.as_volatile_slice();
703    /// let mut file = File::open(Path::new("/dev/urandom")).expect("Could not open /dev/urandom");
704    ///
705    /// vslice
706    ///     .read_from(32, &mut file, 128)
707    ///     .expect("Could not read bytes from file into VolatileSlice");
708    ///
709    /// let rand_val: u32 = vslice
710    ///     .read_obj(40)
711    ///     .expect("Could not read value from VolatileSlice");
712    /// # }
713    /// ```
714    fn read_from<F>(&self, addr: usize, src: &mut F, count: usize) -> Result<usize>
715    where
716        F: Read,
717    {
718        let end = self.compute_end_offset(addr, count)?;
719        // SAFETY: It is safe to overwrite the volatile memory. Accessing the guest
720        // memory as a mutable slice is OK because nothing assumes another
721        // thread won't change what is loaded.
722        let bytes_read = unsafe {
723            let dst = &mut self.as_mut_slice()[addr..end];
724            loop {
725                match src.read(dst) {
726                    Ok(n) => break n,
727                    Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => continue,
728                    Err(e) => return Err(Error::IOError(e)),
729                }
730            }
731        };
732
733        self.bitmap.mark_dirty(addr, bytes_read);
734        Ok(bytes_read)
735    }
736
737    /// # Examples
738    ///
739    /// * Read bytes from /dev/urandom
740    ///
741    /// ```
742    /// # use vm_memory::{Bytes, VolatileMemory};
743    /// # use std::fs::File;
744    /// # use std::path::Path;
745    /// #
746    /// # if cfg!(unix) {
747    /// # let mut mem = [0u8; 1024];
748    /// # let mut mem_ref = &mut mem[..];
749    /// # let vslice = mem_ref.as_volatile_slice();
750    /// let mut file = File::open(Path::new("/dev/urandom")).expect("Could not open /dev/urandom");
751    ///
752    /// vslice
753    ///     .read_exact_from(32, &mut file, 128)
754    ///     .expect("Could not read bytes from file into VolatileSlice");
755    ///
756    /// let rand_val: u32 = vslice
757    ///     .read_obj(40)
758    ///     .expect("Could not read value from VolatileSlice");
759    /// # }
760    /// ```
761    fn read_exact_from<F>(&self, addr: usize, src: &mut F, count: usize) -> Result<()>
762    where
763        F: Read,
764    {
765        let end = self.compute_end_offset(addr, count)?;
766
767        // SAFETY: It is safe to overwrite the volatile memory. Accessing the guest memory as a
768        // mutable slice is OK because nothing assumes another thread won't change what is loaded.
769        // We also manually update the dirty bitmap below.
770        let dst = unsafe { &mut self.as_mut_slice()[addr..end] };
771
772        let result = src.read_exact(dst).map_err(Error::IOError);
773        self.bitmap.mark_dirty(addr, count);
774        result
775    }
776
777    /// # Examples
778    ///
779    /// * Write 128 bytes to /dev/null
780    ///
781    /// ```
782    /// # use vm_memory::{Bytes, VolatileMemory};
783    /// # use std::fs::OpenOptions;
784    /// # use std::path::Path;
785    /// #
786    /// # if cfg!(unix) {
787    /// # let mut mem = [0u8; 1024];
788    /// # let mut mem_ref = &mut mem[..];
789    /// # let vslice = mem_ref.as_volatile_slice();
790    /// let mut file = OpenOptions::new()
791    ///     .write(true)
792    ///     .open("/dev/null")
793    ///     .expect("Could not open /dev/null");
794    ///
795    /// vslice
796    ///     .write_to(32, &mut file, 128)
797    ///     .expect("Could not write value from VolatileSlice to /dev/null");
798    /// # }
799    /// ```
800    fn write_to<F>(&self, addr: usize, dst: &mut F, count: usize) -> Result<usize>
801    where
802        F: Write,
803    {
804        let end = self.compute_end_offset(addr, count)?;
805        // SAFETY: It is safe to read from volatile memory. Accessing the guest
806        // memory as a slice is OK because nothing assumes another thread
807        // won't change what is loaded.
808        unsafe {
809            let src = &self.as_slice()[addr..end];
810            loop {
811                match dst.write(src) {
812                    Ok(n) => break Ok(n),
813                    Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => continue,
814                    Err(e) => break Err(Error::IOError(e)),
815                }
816            }
817        }
818    }
819
820    /// # Examples
821    ///
822    /// * Write 128 bytes to /dev/null
823    ///
824    /// ```
825    /// # use vm_memory::{Bytes, VolatileMemory};
826    /// # use std::fs::OpenOptions;
827    /// # use std::path::Path;
828    /// #
829    /// # if cfg!(unix) {
830    /// # let mut mem = [0u8; 1024];
831    /// # let mut mem_ref = &mut mem[..];
832    /// # let vslice = mem_ref.as_volatile_slice();
833    /// let mut file = OpenOptions::new()
834    ///     .write(true)
835    ///     .open("/dev/null")
836    ///     .expect("Could not open /dev/null");
837    ///
838    /// vslice
839    ///     .write_all_to(32, &mut file, 128)
840    ///     .expect("Could not write value from VolatileSlice to /dev/null");
841    /// # }
842    /// ```
843    fn write_all_to<F>(&self, addr: usize, dst: &mut F, count: usize) -> Result<()>
844    where
845        F: Write,
846    {
847        let end = self.compute_end_offset(addr, count)?;
848        // SAFETY: It is safe to read from volatile memory. Accessing the guest
849        // memory as a slice is OK because nothing assumes another thread
850        // won't change what is loaded.
851        unsafe {
852            let src = &self.as_slice()[addr..end];
853            dst.write_all(src).map_err(Error::IOError)?;
854        }
855        Ok(())
856    }
857
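    /// # Examples
    /// * Store and load a `u32` at an aligned offset.
    ///
    /// A small sketch; the slice is deliberately backed by `u64`s so that offset 16 is suitably
    /// aligned for the atomic `u32` access:
    ///
    /// ```
    /// # use std::sync::atomic::Ordering;
    /// # use vm_memory::{Bytes, VolatileSlice};
    /// let mut mem = [0u64; 32];
    /// // SAFETY: `mem` outlives the slice and is only accessed through it.
    /// let vslice = unsafe { VolatileSlice::new(mem.as_mut_ptr() as *mut u8, 32 * 8) };
    ///
    /// vslice
    ///     .store(500u32, 16, Ordering::SeqCst)
    ///     .expect("Could not store value");
    /// let val: u32 = vslice.load(16, Ordering::SeqCst).expect("Could not load value");
    /// assert_eq!(val, 500);
    /// ```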
858    fn store<T: AtomicAccess>(&self, val: T, addr: usize, order: Ordering) -> Result<()> {
859        self.get_atomic_ref::<T::A>(addr).map(|r| {
860            r.store(val.into(), order);
861            self.bitmap.mark_dirty(addr, size_of::<T>())
862        })
863    }
864
865    fn load<T: AtomicAccess>(&self, addr: usize, order: Ordering) -> Result<T> {
866        self.get_atomic_ref::<T::A>(addr)
867            .map(|r| r.load(order).into())
868    }
869}
870
871impl<B: BitmapSlice> VolatileMemory for VolatileSlice<'_, B> {
872    type B = B;
873
874    fn len(&self) -> usize {
875        self.size
876    }
877
878    fn get_slice(&self, offset: usize, count: usize) -> Result<VolatileSlice<B>> {
879        let _ = self.compute_end_offset(offset, count)?;
880        Ok(
881            // SAFETY: This is safe because the pointer is range-checked by compute_end_offset, and
882            // the lifetime is the same as self.
883            unsafe {
884                VolatileSlice::with_bitmap(
885                    (self.addr as usize + offset) as *mut u8,
886                    count,
887                    self.bitmap.slice_at(offset),
888                )
889            },
890        )
891    }
892}
893
894/// A memory location that supports volatile access to an instance of `T`.
895///
896/// # Examples
897///
898/// ```
899/// # use vm_memory::VolatileRef;
900/// #
901/// let mut v = 5u32;
902/// let v_ref = unsafe { VolatileRef::new(&mut v as *mut u32 as *mut u8) };
903///
904/// assert_eq!(v, 5);
905/// assert_eq!(v_ref.load(), 5);
906/// v_ref.store(500);
907/// assert_eq!(v, 500);
908/// ```
909#[derive(Clone, Copy, Debug)]
910pub struct VolatileRef<'a, T, B = ()> {
911    addr: *mut Packed<T>,
912    bitmap: B,
913    phantom: PhantomData<&'a T>,
914}
915
916impl<'a, T> VolatileRef<'a, T, ()>
917where
918    T: ByteValued,
919{
920    /// Creates a [`VolatileRef`](struct.VolatileRef.html) to an instance of `T`.
921    ///
922    /// # Safety
923    ///
924    /// To use this safely, the caller must guarantee that the memory at `addr` is big enough for a
925    /// `T` and is available for the duration of the lifetime of the new `VolatileRef`. The caller
926    /// must also guarantee that all other users of the given chunk of memory are using volatile
927    /// accesses.
928    pub unsafe fn new(addr: *mut u8) -> Self {
929        Self::with_bitmap(addr, ())
930    }
931}
932
933#[allow(clippy::len_without_is_empty)]
934impl<'a, T, B> VolatileRef<'a, T, B>
935where
936    T: ByteValued,
937    B: BitmapSlice,
938{
939    /// Creates a [`VolatileRef`](struct.VolatileRef.html) to an instance of `T`, using the
940    /// provided `bitmap` object for dirty page tracking.
941    ///
942    /// # Safety
943    ///
944    /// To use this safely, the caller must guarantee that the memory at `addr` is big enough for a
945    /// `T` and is available for the duration of the lifetime of the new `VolatileRef`. The caller
946    /// must also guarantee that all other users of the given chunk of memory are using volatile
947    /// accesses.
948    pub unsafe fn with_bitmap(addr: *mut u8, bitmap: B) -> Self {
949        VolatileRef {
950            addr: addr as *mut Packed<T>,
951            bitmap,
952            phantom: PhantomData,
953        }
954    }
955
956    /// Returns a pointer to the underlying memory. Mutable accesses performed
957    /// using the resulting pointer are not automatically accounted for by the dirty bitmap
958    /// tracking functionality.
959    pub fn as_ptr(&self) -> *mut u8 {
960        self.addr as *mut u8
961    }
962
963    /// Gets the size of the referenced type `T`.
964    ///
965    /// # Examples
966    ///
967    /// ```
968    /// # use std::mem::size_of;
969    /// # use vm_memory::VolatileRef;
970    /// #
971    /// let v_ref = unsafe { VolatileRef::<u32>::new(0 as *mut _) };
972    /// assert_eq!(v_ref.len(), size_of::<u32>() as usize);
973    /// ```
974    pub fn len(&self) -> usize {
975        size_of::<T>()
976    }
977
978    /// Borrows the inner `BitmapSlice`.
979    pub fn bitmap(&self) -> &B {
980        &self.bitmap
981    }
982
983    /// Does a volatile write of the value `v` to the address of this ref.
984    #[inline(always)]
985    pub fn store(&self, v: T) {
986        // SAFETY: Safe because we checked the address and size when creating this VolatileRef.
987        unsafe { write_volatile(self.addr, Packed::<T>(v)) };
988        self.bitmap.mark_dirty(0, size_of::<T>())
989    }
990
991    /// Does a volatile read of the value at the address of this ref.
992    #[inline(always)]
993    pub fn load(&self) -> T {
994        // SAFETY: Safe because we checked the address and size when creating this VolatileRef.
995        // For the purposes of demonstrating why read_volatile is necessary, try replacing the code
996        // in this function with the commented code below and running `cargo test --release`.
997        // unsafe { *(self.addr as *const T) }
998        unsafe { read_volatile(self.addr).0 }
999    }
1000
1001    /// Converts this to a [`VolatileSlice`](struct.VolatileSlice.html) with the same size and
1002    /// address.
1003    pub fn to_slice(&self) -> VolatileSlice<'a, B> {
1004        // SAFETY: Safe because we checked the address and size when creating this VolatileRef.
1005        unsafe {
1006            VolatileSlice::with_bitmap(self.addr as *mut u8, size_of::<T>(), self.bitmap.clone())
1007        }
1008    }
1009}
1010
1011/// A memory location that supports volatile access to an array of elements of type `T`.
1012///
1013/// # Examples
1014///
1015/// ```
1016/// # use vm_memory::VolatileArrayRef;
1017/// #
1018/// let mut v = [5u32; 1];
1019/// let v_ref = unsafe { VolatileArrayRef::new(&mut v[0] as *mut u32 as *mut u8, v.len()) };
1020///
1021/// assert_eq!(v[0], 5);
1022/// assert_eq!(v_ref.load(0), 5);
1023/// v_ref.store(0, 500);
1024/// assert_eq!(v[0], 500);
1025/// ```
1026#[derive(Clone, Copy, Debug)]
1027pub struct VolatileArrayRef<'a, T, B = ()> {
1028    addr: *mut u8,
1029    nelem: usize,
1030    bitmap: B,
1031    phantom: PhantomData<&'a T>,
1032}
1033
1034impl<'a, T> VolatileArrayRef<'a, T>
1035where
1036    T: ByteValued,
1037{
1038    /// Creates a [`VolatileArrayRef`](struct.VolatileArrayRef.html) to an array of elements of
1039    /// type `T`.
1040    ///
1041    /// # Safety
1042    ///
1043    /// To use this safely, the caller must guarantee that the memory at `addr` is big enough for
1044    /// `nelem` values of type `T` and is available for the duration of the lifetime of the new
1045    /// `VolatileArrayRef`. The caller must also guarantee that all other users of the given chunk of
1046    /// memory are using volatile accesses.
1047    pub unsafe fn new(addr: *mut u8, nelem: usize) -> Self {
1048        Self::with_bitmap(addr, nelem, ())
1049    }
1050}
1051
1052impl<'a, T, B> VolatileArrayRef<'a, T, B>
1053where
1054    T: ByteValued,
1055    B: BitmapSlice,
1056{
1057    /// Creates a [`VolatileArrayRef`](struct.VolatileArrayRef.html) to an array of elements of
1058    /// type `T`, using the provided `bitmap` object for dirty page tracking.
1059    ///
1060    /// # Safety
1061    ///
1062    /// To use this safely, the caller must guarantee that the memory at `addr` is big enough for
1063    /// `nelem` values of type `T` and is available for the duration of the lifetime of the new
1064    /// `VolatileArrayRef`. The caller must also guarantee that all other users of the given chunk of
1065    /// memory are using volatile accesses.
1066    pub unsafe fn with_bitmap(addr: *mut u8, nelem: usize, bitmap: B) -> Self {
1067        VolatileArrayRef {
1068            addr,
1069            nelem,
1070            bitmap,
1071            phantom: PhantomData,
1072        }
1073    }
1074
1075    /// Returns `true` if this array is empty.
1076    ///
1077    /// # Examples
1078    ///
1079    /// ```
1080    /// # use vm_memory::VolatileArrayRef;
1081    /// #
1082    /// let v_array = unsafe { VolatileArrayRef::<u32>::new(0 as *mut _, 0) };
1083    /// assert!(v_array.is_empty());
1084    /// ```
1085    pub fn is_empty(&self) -> bool {
1086        self.nelem == 0
1087    }
1088
1089    /// Returns the number of elements in the array.
1090    ///
1091    /// # Examples
1092    ///
1093    /// ```
1094    /// # use vm_memory::VolatileArrayRef;
1095    /// #
1096    /// # let v_array = unsafe { VolatileArrayRef::<u32>::new(0 as *mut _, 1) };
1097    /// assert_eq!(v_array.len(), 1);
1098    /// ```
1099    pub fn len(&self) -> usize {
1100        self.nelem
1101    }
1102
1103    /// Returns the size of `T`.
1104    ///
1105    /// # Examples
1106    ///
1107    /// ```
1108    /// # use std::mem::size_of;
1109    /// # use vm_memory::VolatileArrayRef;
1110    /// #
1111    /// let v_ref = unsafe { VolatileArrayRef::<u32>::new(0 as *mut _, 0) };
1112    /// assert_eq!(v_ref.element_size(), size_of::<u32>() as usize);
1113    /// ```
1114    pub fn element_size(&self) -> usize {
1115        size_of::<T>()
1116    }
1117
1118    /// Returns a pointer to the underlying memory. Mutable accesses performed
1119    /// using the resulting pointer are not automatically accounted for by the dirty bitmap
1120    /// tracking functionality.
1121    pub fn as_ptr(&self) -> *mut u8 {
1122        self.addr
1123    }
1124
1125    /// Borrows the inner `BitmapSlice`.
1126    pub fn bitmap(&self) -> &B {
1127        &self.bitmap
1128    }
1129
1130    /// Converts this to a `VolatileSlice` with the same size and address.
1131    pub fn to_slice(&self) -> VolatileSlice<'a, B> {
1132        // SAFETY: Safe as long as the caller validated addr when creating this object.
1133        unsafe {
1134            VolatileSlice::with_bitmap(
1135                self.addr,
1136                self.nelem * self.element_size(),
1137                self.bitmap.clone(),
1138            )
1139        }
1140    }
1141
1142    /// Returns a [`VolatileRef`](struct.VolatileRef.html) to the element at `index`.
1143    ///
1144    /// # Panics
1145    ///
1146    /// Panics if `index` is not less than the number of elements of the array to which `&self` points.
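    ///
    /// # Examples
    ///
    /// A brief sketch of accessing a single element through the returned reference:
    ///
    /// ```
    /// # use vm_memory::VolatileArrayRef;
    /// let mut v = [0u32; 4];
    /// let v_ref = unsafe { VolatileArrayRef::<u32>::new(&mut v[0] as *mut u32 as *mut u8, v.len()) };
    ///
    /// v_ref.ref_at(2).store(99);
    /// assert_eq!(v_ref.load(2), 99);
    /// ```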
1147    pub fn ref_at(&self, index: usize) -> VolatileRef<'a, T, B> {
1148        assert!(index < self.nelem);
1149        // SAFETY: Safe because the memory has the same lifetime and points to a subset of the
1150        // memory of the VolatileArrayRef.
1151        unsafe {
1152            // byteofs must fit in an isize as it was checked in get_array_ref.
1153            let byteofs = (self.element_size() * index) as isize;
1154            let ptr = self.as_ptr().offset(byteofs);
1155            VolatileRef::with_bitmap(ptr, self.bitmap.slice_at(byteofs as usize))
1156        }
1157    }
1158
1159    /// Does a volatile read of the element at `index`.
1160    pub fn load(&self, index: usize) -> T {
1161        self.ref_at(index).load()
1162    }
1163
1164    /// Does a volatile write of the element at `index`.
1165    pub fn store(&self, index: usize, value: T) {
1166        // The `VolatileRef::store` call below implements the required dirty bitmap tracking logic,
1167        // so no need to do that in this method as well.
1168        self.ref_at(index).store(value)
1169    }
1170
1171    /// Copies as many elements of type `T` as possible from this array to `buf`.
1172    ///
1173    /// Copies `self.len()` or `buf.len()` times the size of `T` bytes, whichever is smaller,
1174    /// to `buf`. The copy happens from smallest to largest address in `T` sized chunks
1175    /// using volatile reads.
1176    ///
1177    /// # Examples
1178    ///
1179    /// ```
1180    /// # use vm_memory::VolatileArrayRef;
1181    /// #
1182    /// let mut v = [0u8; 32];
1183    /// let v_ref = unsafe { VolatileArrayRef::new(&mut v[0] as *mut u8, v.len()) };
1184    ///
1185    /// let mut buf = [5u8; 16];
1186    /// v_ref.copy_to(&mut buf[..]);
1187    /// for &v in &buf[..] {
1188    ///     assert_eq!(v, 0);
1189    /// }
1190    /// ```
1191    pub fn copy_to(&self, buf: &mut [T]) -> usize {
1192        // A fast path for u8/i8
1193        if size_of::<T>() == 1 {
1194            let source = self.to_slice();
1195            // SAFETY: It is safe because the pointers are range-checked when the slices are
1196            // created, and they never escape the VolatileSlices.
1197            let src = unsafe { source.as_slice() };
1198            // SAFETY: Safe because `T` is a one-byte data structure.
1199            let dst = unsafe { from_raw_parts_mut(buf.as_mut_ptr() as *mut u8, buf.len()) };
1200            return copy_slice(dst, src);
1201        }
1202
1203        let mut addr = self.addr;
1204        let mut i = 0;
1205        for v in buf.iter_mut().take(self.len()) {
1206            // SAFETY: read_volatile is safe because the pointers are range-checked when
1207            // the slices are created, and they never escape the VolatileSlices.
1208            // ptr::add is safe because get_array_ref() validated that
1209            // size_of::<T>() * self.len() fits in an isize.
1210            unsafe {
1211                *v = read_volatile(addr as *const Packed<T>).0;
1212                addr = addr.add(self.element_size());
1213            };
1214            i += 1;
1215        }
1216        i
1217    }
1218
1219    /// Copies as many bytes as possible from this array to the provided `slice`.
1220    ///
1221    /// The copies happen in an undefined order.
1222    ///
1223    /// # Examples
1224    ///
1225    /// ```
1226    /// # use vm_memory::VolatileArrayRef;
1227    /// #
1228    /// let mut v = [0u8; 32];
1229    /// let v_ref = unsafe { VolatileArrayRef::<u8>::new(&mut v[0] as *mut u8, v.len()) };
1230    /// let mut buf = [5u8; 16];
1231    /// let v_ref2 = unsafe { VolatileArrayRef::<u8>::new(&mut buf[0] as *mut u8, buf.len()) };
1232    ///
1233    /// v_ref.copy_to_volatile_slice(v_ref2.to_slice());
1234    /// for &v in &buf[..] {
1235    ///     assert_eq!(v, 0);
1236    /// }
1237    /// ```
1238    pub fn copy_to_volatile_slice<S: BitmapSlice>(&self, slice: VolatileSlice<S>) {
1239        // SAFETY: Safe because the pointers are range-checked when the slices
1240        // are created, and they never escape the VolatileSlices.
1241        // FIXME: ... however, is it really okay to mix non-volatile
1242        // operations such as copy with read_volatile and write_volatile?
1243        unsafe {
1244            let count = min(self.len() * self.element_size(), slice.size);
1245            copy(self.addr, slice.addr, count);
1246            slice.bitmap.mark_dirty(0, count);
1247        }
1248    }
1249
1250    /// Copies as many elements of type `T` as possible from `buf` to this array.
1251    ///
1252    /// Copies `self.len()` or `buf.len()` times the size of `T` bytes, whichever is smaller,
1253    /// to this array's memory. The copy happens from smallest to largest address in
1254    /// `T` sized chunks using volatile writes.
1255    ///
1256    /// # Examples
1257    ///
1258    /// ```
1259    /// # use vm_memory::VolatileArrayRef;
1260    /// #
1261    /// let mut v = [0u8; 32];
1262    /// let v_ref = unsafe { VolatileArrayRef::<u8>::new(&mut v[0] as *mut u8, v.len()) };
1263    ///
1264    /// let buf = [5u8; 64];
1265    /// v_ref.copy_from(&buf[..]);
1266    /// for &val in &v[..] {
1267    ///     assert_eq!(5u8, val);
1268    /// }
1269    /// ```
1270    pub fn copy_from(&self, buf: &[T]) {
1271        // A fast path for u8/i8
1272        if size_of::<T>() == 1 {
1273            let destination = self.to_slice();
1274            // SAFETY: It is safe because the pointers are range-checked when the slices are
1275            // created, and they never escape the VolatileSlices.
1276            let dst = unsafe { destination.as_mut_slice() };
1277            // SAFETY: Safe because `T` is a one-byte data structure.
1278            let src = unsafe { from_raw_parts(buf.as_ptr() as *const u8, buf.len()) };
1279            let count = copy_slice(dst, src);
1280            self.bitmap.mark_dirty(0, count);
1281        } else {
1282            let mut addr = self.addr;
1283            for &v in buf.iter().take(self.len()) {
1284                // SAFETY: write_volatile is safe because the pointers are range-checked when
1285                // the slices are created, and they never escape the VolatileSlices.
1286                // ptr::add is safe because get_array_ref() validated that
1287                // size_of::<T>() * self.len() fits in an isize.
1288                unsafe {
1289                    write_volatile(addr as *mut Packed<T>, Packed::<T>(v));
1290                    addr = addr.add(self.element_size());
1291                }
1292            }
1293
1294            self.bitmap
1295                .mark_dirty(0, addr as usize - self.addr as usize)
1296        }
1297    }
1298}
1299
1300impl<'a, B: BitmapSlice> From<VolatileSlice<'a, B>> for VolatileArrayRef<'a, u8, B> {
1301    fn from(slice: VolatileSlice<'a, B>) -> Self {
1302        // SAFETY: Safe because the result has the same lifetime and points to the same
1303        // memory as the incoming VolatileSlice.
1304        unsafe { VolatileArrayRef::with_bitmap(slice.as_ptr(), slice.len(), slice.bitmap) }
1305    }
1306}
1307
1308// Return the largest power of two that `addr` is aligned to. Forcing this function to return
1309// 1 will cause test_non_atomic_access to fail.
1310fn alignment(addr: usize) -> usize {
1311    // Rust does not allow `addr & -addr` on unsigned integers; `!addr + 1` is two's-complement `-addr`.
1312    addr & (!addr + 1)
1313}
1314
1315mod copy_slice_impl {
1316    use super::*;
1317
1318    // SAFETY: Has the same safety requirements as `read_volatile` + `write_volatile`, namely:
1319    // - `src_addr` and `dst_addr` must be valid for reads/writes.
1320    // - `src_addr` and `dst_addr` must be properly aligned with respect to `align`.
1321    // - `src_addr` must point to a properly initialized value, which is true here because
1322    //   we're only using integer primitives.
1323    unsafe fn copy_single(align: usize, src_addr: usize, dst_addr: usize) {
1324        match align {
1325            8 => write_volatile(dst_addr as *mut u64, read_volatile(src_addr as *const u64)),
1326            4 => write_volatile(dst_addr as *mut u32, read_volatile(src_addr as *const u32)),
1327            2 => write_volatile(dst_addr as *mut u16, read_volatile(src_addr as *const u16)),
1328            1 => write_volatile(dst_addr as *mut u8, read_volatile(src_addr as *const u8)),
1329            _ => unreachable!(),
1330        }
1331    }
1332
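    // Copies `min(src.len(), dst.len())` bytes from `src` to `dst` using only volatile accesses,
    // starting with the widest chunk size (up to 8 bytes) that both pointers are aligned to and
    // finishing the tail with smaller chunks.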
1333    fn copy_slice_volatile(dst: &mut [u8], src: &[u8]) -> usize {
1334        let total = min(src.len(), dst.len());
1335        let mut left = total;
1336
1337        let mut src_addr = src.as_ptr() as usize;
1338        let mut dst_addr = dst.as_ptr() as usize;
1339        let align = min(alignment(src_addr), alignment(dst_addr));
1340
1341        let mut copy_aligned_slice = |min_align| {
1342            while align >= min_align && left >= min_align {
1343                // SAFETY: Safe because we check alignment beforehand, the memory areas are valid
1344                // for reads/writes, and the source always contains a valid value.
1345                unsafe { copy_single(min_align, src_addr, dst_addr) };
1346                src_addr += min_align;
1347                dst_addr += min_align;
1348                left -= min_align;
1349            }
1350        };
1351
1352        if size_of::<usize>() > 4 {
1353            copy_aligned_slice(8);
1354        }
1355        copy_aligned_slice(4);
1356        copy_aligned_slice(2);
1357        copy_aligned_slice(1);
1358
1359        total
1360    }
1361
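    // Copies `min(src.len(), dst.len())` bytes from `src` to `dst` and returns the number of
    // bytes copied. Copies of at most `usize` width go through `copy_slice_volatile`, so that an
    // aligned, register-sized access stays a single volatile load/store; larger copies fall back
    // to a regular (non-volatile) memcpy.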
1362    pub(super) fn copy_slice(dst: &mut [u8], src: &[u8]) -> usize {
1363        let total = min(src.len(), dst.len());
1364        if total <= size_of::<usize>() {
1365            copy_slice_volatile(dst, src);
1366        } else {
1367            dst[..total].copy_from_slice(&src[..total]);
1368        }
1369
1370        total
1371    }
1372}
1373
1374#[cfg(test)]
1375mod tests {
1376    #![allow(clippy::undocumented_unsafe_blocks)]
1377    use super::*;
1378
1379    use std::fs::File;
1380    use std::mem::size_of_val;
1381    use std::path::Path;
1382    use std::sync::atomic::{AtomicUsize, Ordering};
1383    use std::sync::{Arc, Barrier};
1384    use std::thread::spawn;
1385
1386    use matches::assert_matches;
1387    use vmm_sys_util::tempfile::TempFile;
1388
1389    use crate::bitmap::tests::{
1390        check_range, range_is_clean, range_is_dirty, test_bytes, test_volatile_memory,
1391    };
1392    use crate::bitmap::{AtomicBitmap, RefSlice};
1393
1394    #[derive(Clone)]
1395    struct VecMem {
1396        mem: Arc<[u8]>,
1397    }
1398
1399    impl VecMem {
1400        fn new(size: usize) -> VecMem {
1401            VecMem {
1402                mem: vec![0; size].into(),
1403            }
1404        }
1405    }
1406
1407    impl VolatileMemory for VecMem {
1408        type B = ();
1409
1410        fn len(&self) -> usize {
1411            self.mem.len()
1412        }
1413
1414        fn get_slice(&self, offset: usize, count: usize) -> Result<VolatileSlice<()>> {
1415            let _ = self.compute_end_offset(offset, count)?;
1416            Ok(unsafe {
1417                VolatileSlice::new((self.mem.as_ptr() as usize + offset) as *mut _, count)
1418            })
1419        }
1420    }
1421
1422    #[test]
1423    fn test_display_error() {
1424        assert_eq!(
1425            format!("{}", Error::OutOfBounds { addr: 0x10 }),
1426            "address 0x10 is out of bounds"
1427        );
1428
1429        assert_eq!(
1430            format!(
1431                "{}",
1432                Error::Overflow {
1433                    base: 0x0,
1434                    offset: 0x10
1435                }
1436            ),
1437            "address 0x0 offset by 0x10 would overflow"
1438        );
1439
1440        assert_eq!(
1441            format!(
1442                "{}",
1443                Error::TooBig {
1444                    nelements: 100_000,
1445                    size: 1_000_000_000
1446                }
1447            ),
1448            "100000 elements of size 1000000000 would overflow a usize"
1449        );
1450
1451        assert_eq!(
1452            format!(
1453                "{}",
1454                Error::Misaligned {
1455                    addr: 0x4,
1456                    alignment: 8
1457                }
1458            ),
1459            "address 0x4 is not aligned to 8"
1460        );
1461
1462        assert_eq!(
1463            format!(
1464                "{}",
1465                Error::PartialBuffer {
1466                    expected: 100,
1467                    completed: 90
1468                }
1469            ),
1470            "only used 90 bytes in 100 long buffer"
1471        );
1472    }
1473
1474    #[test]
1475    fn misaligned_ref() {
1476        let mut a = [0u8; 3];
1477        let a_ref = &mut a[..];
1478        unsafe {
1479            assert!(
1480                a_ref.aligned_as_ref::<u16>(0).is_err() ^ a_ref.aligned_as_ref::<u16>(1).is_err()
1481            );
1482            assert!(
1483                a_ref.aligned_as_mut::<u16>(0).is_err() ^ a_ref.aligned_as_mut::<u16>(1).is_err()
1484            );
1485        }
1486    }
1487
1488    #[test]
1489    fn atomic_store() {
1490        let mut a = [0usize; 1];
1491        {
1492            let a_ref = unsafe {
1493                VolatileSlice::new(&mut a[0] as *mut usize as *mut u8, size_of::<usize>())
1494            };
1495            let atomic = a_ref.get_atomic_ref::<AtomicUsize>(0).unwrap();
1496            atomic.store(2usize, Ordering::Relaxed)
1497        }
1498        assert_eq!(a[0], 2);
1499    }
1500
1501    #[test]
1502    fn atomic_load() {
1503        let mut a = [5usize; 1];
1504        {
1505            let a_ref = unsafe {
1506                VolatileSlice::new(&mut a[0] as *mut usize as *mut u8, size_of::<usize>())
1508            };
1509            let atomic = {
1510                let atomic = a_ref.get_atomic_ref::<AtomicUsize>(0).unwrap();
1511                assert_eq!(atomic.load(Ordering::Relaxed), 5usize);
1512                atomic
1513            };
1514            // To make sure we can take the atomic out of the scope we made it in:
1515            atomic.load(Ordering::Relaxed);
1516            // but not too far:
1517            // atomicu8
1518        } //.load(std::sync::atomic::Ordering::Relaxed)
1519        ;
1520    }

    #[test]
    fn misaligned_atomic() {
        let mut a = [5usize, 5usize];
        let a_ref =
            unsafe { VolatileSlice::new(&mut a[0] as *mut usize as *mut u8, size_of::<usize>()) };
        assert!(a_ref.get_atomic_ref::<AtomicUsize>(0).is_ok());
        assert!(a_ref.get_atomic_ref::<AtomicUsize>(1).is_err());
    }

    #[test]
    fn ref_store() {
        let mut a = [0u8; 1];
        {
            let a_ref = &mut a[..];
            let v_ref = a_ref.get_ref(0).unwrap();
            v_ref.store(2u8);
        }
        assert_eq!(a[0], 2);
    }

    #[test]
    fn ref_load() {
        let mut a = [5u8; 1];
        {
            let a_ref = &mut a[..];
            let c = {
                let v_ref = a_ref.get_ref::<u8>(0).unwrap();
                assert_eq!(v_ref.load(), 5u8);
                v_ref
            };
            // The volatile reference can be moved out of the scope it was created in:
            c.load();
            // ...but not past the end of the enclosing block; the commented-out
            // call below would not compile.
        } //.load()
    }

    #[test]
    fn ref_to_slice() {
        let mut a = [1u8; 5];
        let a_ref = &mut a[..];
        let v_ref = a_ref.get_ref(1).unwrap();
        v_ref.store(0x1234_5678u32);
        let ref_slice = v_ref.to_slice();
        assert_eq!(v_ref.as_ptr() as usize, ref_slice.as_ptr() as usize);
        assert_eq!(v_ref.len(), ref_slice.len());
        assert!(!ref_slice.is_empty());
    }

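    // A store performed through a clone of the memory on another thread must be
    // observable through the original `VolatileRef`; the barrier orders the two
    // loads around the remote store.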
    #[test]
    fn observe_mutate() {
        let a = VecMem::new(1);
        let a_clone = a.clone();
        let v_ref = a.get_ref::<u8>(0).unwrap();
        let barrier = Arc::new(Barrier::new(2));
        let barrier1 = barrier.clone();

        v_ref.store(99);
        spawn(move || {
            barrier1.wait();
            let clone_v_ref = a_clone.get_ref::<u8>(0).unwrap();
            clone_v_ref.store(0);
            barrier1.wait();
        });

        assert_eq!(v_ref.load(), 99);
        barrier.wait();
        barrier.wait();
        assert_eq!(v_ref.load(), 0);
    }

    #[test]
    fn mem_is_empty() {
        let a = VecMem::new(100);
        assert!(!a.is_empty());

        let a = VecMem::new(0);
        assert!(a.is_empty());
    }

    #[test]
    fn slice_len() {
        let mem = VecMem::new(100);
        let slice = mem.get_slice(0, 27).unwrap();
        assert_eq!(slice.len(), 27);
        assert!(!slice.is_empty());

        let slice = mem.get_slice(34, 27).unwrap();
        assert_eq!(slice.len(), 27);
        assert!(!slice.is_empty());

        let slice = slice.get_slice(20, 5).unwrap();
        assert_eq!(slice.len(), 5);
        assert!(!slice.is_empty());

        let slice = mem.get_slice(34, 0).unwrap();
        assert!(slice.is_empty());
    }

    #[test]
    fn slice_subslice() {
        let mem = VecMem::new(100);
        let slice = mem.get_slice(0, 100).unwrap();
        assert!(slice.write(&[1; 80], 10).is_ok());

        assert!(slice.subslice(0, 0).is_ok());
        assert!(slice.subslice(0, 101).is_err());

        assert!(slice.subslice(99, 0).is_ok());
        assert!(slice.subslice(99, 1).is_ok());
        assert!(slice.subslice(99, 2).is_err());

        assert!(slice.subslice(100, 0).is_ok());
        assert!(slice.subslice(100, 1).is_err());

        assert!(slice.subslice(101, 0).is_err());
        assert!(slice.subslice(101, 1).is_err());

        assert!(slice.subslice(std::usize::MAX, 2).is_err());
        assert!(slice.subslice(2, std::usize::MAX).is_err());

        let maybe_offset_slice = slice.subslice(10, 80);
        assert!(maybe_offset_slice.is_ok());
        let offset_slice = maybe_offset_slice.unwrap();
        assert_eq!(offset_slice.len(), 80);

        let mut buf = [0; 80];
        assert!(offset_slice.read(&mut buf, 0).is_ok());
        assert_eq!(&buf[0..80], &[1; 80][0..80]);
    }

    #[test]
    fn slice_offset() {
        let mem = VecMem::new(100);
        let slice = mem.get_slice(0, 100).unwrap();
        assert!(slice.write(&[1; 80], 10).is_ok());

        assert!(slice.offset(101).is_err());

        let maybe_offset_slice = slice.offset(10);
        assert!(maybe_offset_slice.is_ok());
        let offset_slice = maybe_offset_slice.unwrap();
        assert_eq!(offset_slice.len(), 90);
        let mut buf = [0; 90];
        assert!(offset_slice.read(&mut buf, 0).is_ok());
        assert_eq!(&buf[0..80], &[1; 80][0..80]);
        assert_eq!(&buf[80..90], &[0; 10][0..10]);
    }

    #[test]
    fn slice_copy_to_u8() {
        let mut a = [2u8, 4, 6, 8, 10];
        let mut b = [0u8; 4];
        let mut c = [0u8; 6];
        let a_ref = &mut a[..];
        let v_ref = a_ref.get_slice(0, a_ref.len()).unwrap();
        v_ref.copy_to(&mut b[..]);
        v_ref.copy_to(&mut c[..]);
        assert_eq!(b[0..4], a_ref[0..4]);
        assert_eq!(c[0..5], a_ref[0..5]);
    }

    #[test]
    fn slice_copy_to_u16() {
        let mut a = [0x01u16, 0x2, 0x03, 0x4, 0x5];
        let mut b = [0u16; 4];
        let mut c = [0u16; 6];
        let a_ref = &mut a[..];
        let v_ref = unsafe { VolatileSlice::new(a_ref.as_mut_ptr() as *mut u8, 9) };

        v_ref.copy_to(&mut b[..]);
        v_ref.copy_to(&mut c[..]);
        assert_eq!(b[0..4], a_ref[0..4]);
        assert_eq!(c[0..4], a_ref[0..4]);
        assert_eq!(c[4], 0);
    }

    #[test]
    fn slice_copy_from_u8() {
        let a = [2u8, 4, 6, 8, 10];
        let mut b = [0u8; 4];
        let mut c = [0u8; 6];
        let b_ref = &mut b[..];
        let v_ref = b_ref.get_slice(0, b_ref.len()).unwrap();
        v_ref.copy_from(&a[..]);
        assert_eq!(b_ref[0..4], a[0..4]);

        let c_ref = &mut c[..];
        let v_ref = c_ref.get_slice(0, c_ref.len()).unwrap();
        v_ref.copy_from(&a[..]);
        assert_eq!(c_ref[0..5], a[0..5]);
    }

    #[test]
    fn slice_copy_from_u16() {
        let a = [2u16, 4, 6, 8, 10];
        let mut b = [0u16; 4];
        let mut c = [0u16; 6];
        let b_ref = &mut b[..];
        let v_ref = unsafe { VolatileSlice::new(b_ref.as_mut_ptr() as *mut u8, 8) };
        v_ref.copy_from(&a[..]);
        assert_eq!(b_ref[0..4], a[0..4]);

        let c_ref = &mut c[..];
        let v_ref = unsafe { VolatileSlice::new(c_ref.as_mut_ptr() as *mut u8, 9) };
        v_ref.copy_from(&a[..]);
        assert_eq!(c_ref[0..4], a[0..4]);
        assert_eq!(c_ref[4], 0);
    }

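    // The copy length is bounded by the shorter of the two slices, so the
    // 4-byte destination below receives only the first four source bytes.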
    #[test]
    fn slice_copy_to_volatile_slice() {
        let mut a = [2u8, 4, 6, 8, 10];
        let a_ref = &mut a[..];
        let a_slice = a_ref.get_slice(0, a_ref.len()).unwrap();

        let mut b = [0u8; 4];
        let b_ref = &mut b[..];
        let b_slice = b_ref.get_slice(0, b_ref.len()).unwrap();

        a_slice.copy_to_volatile_slice(b_slice);
        assert_eq!(b, [2, 4, 6, 8]);
    }

    #[test]
    fn slice_overflow_error() {
        use std::usize::MAX;
        let a = VecMem::new(1);
        let res = a.get_slice(MAX, 1).unwrap_err();
        assert_matches!(
            res,
            Error::Overflow {
                base: MAX,
                offset: 1,
            }
        );
    }

    #[test]
    fn slice_oob_error() {
        let a = VecMem::new(100);
        a.get_slice(50, 50).unwrap();
        let res = a.get_slice(55, 50).unwrap_err();
        assert_matches!(res, Error::OutOfBounds { addr: 105 });
    }

    #[test]
    fn ref_overflow_error() {
        use std::usize::MAX;
        let a = VecMem::new(1);
        let res = a.get_ref::<u8>(MAX).unwrap_err();
        assert_matches!(
            res,
            Error::Overflow {
                base: MAX,
                offset: 1,
            }
        );
    }

    #[test]
    fn ref_oob_error() {
        let a = VecMem::new(100);
        a.get_ref::<u8>(99).unwrap();
        let res = a.get_ref::<u16>(99).unwrap_err();
        assert_matches!(res, Error::OutOfBounds { addr: 101 });
    }

    #[test]
    fn ref_oob_too_large() {
        let a = VecMem::new(3);
        let res = a.get_ref::<u32>(0).unwrap_err();
        assert_matches!(res, Error::OutOfBounds { addr: 4 });
    }

    #[test]
    fn slice_store() {
        let a = VecMem::new(5);
        let s = a.as_volatile_slice();
        let r = a.get_ref(2).unwrap();
        r.store(9u16);
        assert_eq!(s.read_obj::<u16>(2).unwrap(), 9);
    }

    #[test]
    fn test_write_past_end() {
        let a = VecMem::new(5);
        let s = a.as_volatile_slice();
        let res = s.write(&[1, 2, 3, 4, 5, 6], 0);
        assert!(res.is_ok());
        assert_eq!(res.unwrap(), 5);
    }

    #[test]
    fn slice_read_and_write() {
        let a = VecMem::new(5);
        let s = a.as_volatile_slice();
        let sample_buf = [1, 2, 3];
        assert!(s.write(&sample_buf, 5).is_err());
        assert!(s.write(&sample_buf, 2).is_ok());
        let mut buf = [0u8; 3];
        assert!(s.read(&mut buf, 5).is_err());
        assert!(s.read_slice(&mut buf, 2).is_ok());
        assert_eq!(buf, sample_buf);

        // Writing an empty buffer succeeds even past the end of the volatile slice.
        assert_eq!(s.write(&[], 100).unwrap(), 0);
        let buf: &mut [u8] = &mut [];
        assert_eq!(s.read(buf, 4).unwrap(), 0);

        // Reading and writing an empty buffer on a zero-length slice does not yield an error.
        let empty_mem = VecMem::new(0);
        let empty = empty_mem.as_volatile_slice();
        assert_eq!(empty.write(&[], 1).unwrap(), 0);
        assert_eq!(empty.read(buf, 1).unwrap(), 0);
    }

    #[test]
    fn obj_read_and_write() {
        let a = VecMem::new(5);
        let s = a.as_volatile_slice();
        assert!(s.write_obj(55u16, 4).is_err());
        assert!(s.write_obj(55u16, core::usize::MAX).is_err());
        assert!(s.write_obj(55u16, 2).is_ok());
        assert_eq!(s.read_obj::<u16>(2).unwrap(), 55u16);
        assert!(s.read_obj::<u16>(4).is_err());
        assert!(s.read_obj::<u16>(core::usize::MAX).is_err());
    }

    #[test]
    fn mem_read_and_write() {
        let a = VecMem::new(5);
        let s = a.as_volatile_slice();
        assert!(s.write_obj(!0u32, 1).is_ok());
        let mut file = if cfg!(unix) {
            File::open(Path::new("/dev/zero")).unwrap()
        } else {
            File::open(Path::new("c:\\Windows\\system32\\ntoskrnl.exe")).unwrap()
        };
        assert!(s.read_exact_from(2, &mut file, size_of::<u32>()).is_err());
        assert!(s
            .read_exact_from(core::usize::MAX, &mut file, size_of::<u32>())
            .is_err());

        assert!(s.read_exact_from(1, &mut file, size_of::<u32>()).is_ok());

        let mut f = TempFile::new().unwrap().into_file();
        assert!(s.read_exact_from(1, &mut f, size_of::<u32>()).is_err());
        format!("{:?}", s.read_exact_from(1, &mut f, size_of::<u32>()));

        let value = s.read_obj::<u32>(1).unwrap();
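        // /dev/zero yields zeroes; on Windows the first four bytes of
        // ntoskrnl.exe are the little-endian encoding of 0x0090_5a4d
        // ("MZ\x90\0", the DOS header signature).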
        if cfg!(unix) {
            assert_eq!(value, 0);
        } else {
            assert_eq!(value, 0x0090_5a4d);
        }

        let mut sink = Vec::new();
        assert!(s.write_all_to(1, &mut sink, size_of::<u32>()).is_ok());
        assert!(s.write_all_to(2, &mut sink, size_of::<u32>()).is_err());
        assert!(s
            .write_all_to(core::usize::MAX, &mut sink, size_of::<u32>())
            .is_err());
        format!("{:?}", s.write_all_to(2, &mut sink, size_of::<u32>()));
        if cfg!(unix) {
            assert_eq!(sink, vec![0; size_of::<u32>()]);
        } else {
            assert_eq!(sink, vec![0x4d, 0x5a, 0x90, 0x00]);
        };
    }

    #[test]
    fn unaligned_read_and_write() {
        let a = VecMem::new(7);
        let s = a.as_volatile_slice();
        let sample_buf: [u8; 7] = [1, 2, 0xAA, 0xAA, 0xAA, 0xAA, 4];
        assert!(s.write_slice(&sample_buf, 0).is_ok());
        let r = a.get_ref::<u32>(2).unwrap();
        assert_eq!(r.load(), 0xAAAA_AAAA);

        r.store(0x5555_5555);
        let sample_buf: [u8; 7] = [1, 2, 0x55, 0x55, 0x55, 0x55, 4];
        let mut buf: [u8; 7] = Default::default();
        assert!(s.read_slice(&mut buf, 0).is_ok());
        assert_eq!(buf, sample_buf);
    }

    #[test]
    fn ref_array_from_slice() {
        let mut a = [2, 4, 6, 8, 10];
        let a_vec = a.to_vec();
        let a_ref = &mut a[..];
        let a_slice = a_ref.get_slice(0, a_ref.len()).unwrap();
        let a_array_ref: VolatileArrayRef<u8, ()> = a_slice.into();
        for (i, entry) in a_vec.iter().enumerate() {
            assert_eq!(&a_array_ref.load(i), entry);
        }
    }

    #[test]
    fn ref_array_store() {
        let mut a = [0u8; 5];
        {
            let a_ref = &mut a[..];
            let v_ref = a_ref.get_array_ref(1, 4).unwrap();
            v_ref.store(1, 2u8);
            v_ref.store(2, 4u8);
            v_ref.store(3, 6u8);
        }
        let expected = [2u8, 4u8, 6u8];
        assert_eq!(a[2..=4], expected);
    }

    #[test]
    fn ref_array_load() {
        let mut a = [0, 0, 2, 3, 10];
        {
            let a_ref = &mut a[..];
            let c = {
                let v_ref = a_ref.get_array_ref::<u8>(1, 4).unwrap();
                assert_eq!(v_ref.load(1), 2u8);
                assert_eq!(v_ref.load(2), 3u8);
                assert_eq!(v_ref.load(3), 10u8);
                v_ref
            };
            // The array reference can be moved out of the scope it was created in:
            c.load(0);
            // ...but not past the end of the enclosing block; the commented-out
            // call below would not compile.
        } //.load()
    }

    #[test]
    fn ref_array_overflow() {
        let mut a = [0, 0, 2, 3, 10];
        let a_ref = &mut a[..];
        let res = a_ref.get_array_ref::<u32>(4, usize::MAX).unwrap_err();
        assert_matches!(
            res,
            Error::TooBig {
                nelements: usize::MAX,
                size: 4,
            }
        );
    }

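    // `alignment(addr)` (defined in the parent module) reports the largest
    // power of two that divides the address, as the assertions below exercise;
    // the 32-byte aligned base makes the offsets' alignments predictable.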
    #[test]
    fn alignment() {
        let a = [0u8; 64];
        let a = &a[a.as_ptr().align_offset(32)] as *const u8 as usize;
        assert!(super::alignment(a) >= 32);
        assert_eq!(super::alignment(a + 9), 1);
        assert_eq!(super::alignment(a + 30), 2);
        assert_eq!(super::alignment(a + 12), 4);
        assert_eq!(super::alignment(a + 8), 8);
    }

    #[test]
    fn test_atomic_accesses() {
        let a = VecMem::new(0x1000);
        let s = a.as_volatile_slice();

        crate::bytes::tests::check_atomic_accesses(s, 0, 0x1000);
    }

    #[test]
    fn split_at() {
        let mut mem = [0u8; 32];
        let mem_ref = &mut mem[..];
        let vslice = mem_ref.get_slice(0, 32).unwrap();
        let (start, end) = vslice.split_at(8).unwrap();
        assert_eq!(start.len(), 8);
        assert_eq!(end.len(), 24);
        let (start, end) = vslice.split_at(0).unwrap();
        assert_eq!(start.len(), 0);
        assert_eq!(end.len(), 32);
        let (start, end) = vslice.split_at(31).unwrap();
        assert_eq!(start.len(), 31);
        assert_eq!(end.len(), 1);
        let (start, end) = vslice.split_at(32).unwrap();
        assert_eq!(start.len(), 32);
        assert_eq!(end.len(), 0);
        let err = vslice.split_at(33).unwrap_err();
        assert_matches!(err, Error::OutOfBounds { addr: _ })
    }

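    // The dirty-tracking tests below back a `VolatileSlice` with an
    // `AtomicBitmap` and check that every mutating access marks the touched
    // pages dirty while untouched ranges stay clean.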
    #[test]
    fn test_volatile_slice_dirty_tracking() {
        let val = 123u64;
        let dirty_offset = 0x1000;
        let dirty_len = size_of_val(&val);
        let page_size = 0x1000;

        let mut buf = vec![0u8; 0x10000];

        // Invoke the `Bytes` test helper function.
        {
            let bitmap = AtomicBitmap::new(buf.len(), page_size);
            let slice = unsafe {
                VolatileSlice::with_bitmap(buf.as_mut_ptr(), buf.len(), bitmap.slice_at(0))
            };

            test_bytes(
                &slice,
                |s: &VolatileSlice<RefSlice<AtomicBitmap>>,
                 start: usize,
                 len: usize,
                 clean: bool| { check_range(s.bitmap(), start, len, clean) },
                |offset| offset,
                0x1000,
            );
        }

        // Invoke the `VolatileMemory` test helper function.
        {
            let bitmap = AtomicBitmap::new(buf.len(), page_size);
            let slice = unsafe {
                VolatileSlice::with_bitmap(buf.as_mut_ptr(), buf.len(), bitmap.slice_at(0))
            };
            test_volatile_memory(&slice);
        }

        let bitmap = AtomicBitmap::new(buf.len(), page_size);
        let slice =
            unsafe { VolatileSlice::with_bitmap(buf.as_mut_ptr(), buf.len(), bitmap.slice_at(0)) };

        let bitmap2 = AtomicBitmap::new(buf.len(), page_size);
        let slice2 =
            unsafe { VolatileSlice::with_bitmap(buf.as_mut_ptr(), buf.len(), bitmap2.slice_at(0)) };

        let bitmap3 = AtomicBitmap::new(buf.len(), page_size);
        let slice3 =
            unsafe { VolatileSlice::with_bitmap(buf.as_mut_ptr(), buf.len(), bitmap3.slice_at(0)) };

        assert!(range_is_clean(slice.bitmap(), 0, slice.len()));
        assert!(range_is_clean(slice2.bitmap(), 0, slice2.len()));

        slice.write_obj(val, dirty_offset).unwrap();
        assert!(range_is_dirty(slice.bitmap(), dirty_offset, dirty_len));

        slice.copy_to_volatile_slice(slice2);
        assert!(range_is_dirty(slice2.bitmap(), 0, slice2.len()));

        {
            let (s1, s2) = slice.split_at(dirty_offset).unwrap();
            assert!(range_is_clean(s1.bitmap(), 0, s1.len()));
            assert!(range_is_dirty(s2.bitmap(), 0, dirty_len));
        }

        {
            let s = slice.subslice(dirty_offset, dirty_len).unwrap();
            assert!(range_is_dirty(s.bitmap(), 0, s.len()));
        }

        {
            let s = slice.offset(dirty_offset).unwrap();
            assert!(range_is_dirty(s.bitmap(), 0, dirty_len));
        }

        // Test `copy_from` for size_of::<T>() == 1.
        {
            let buf = vec![1u8; dirty_offset];

            assert!(range_is_clean(slice.bitmap(), 0, dirty_offset));
            slice.copy_from(&buf);
            assert!(range_is_dirty(slice.bitmap(), 0, dirty_offset));
        }

        // Test `copy_from` for size_of::<T>() > 1.
        {
            let val = 1u32;
            let buf = vec![val; dirty_offset / size_of_val(&val)];

            assert!(range_is_clean(slice3.bitmap(), 0, dirty_offset));
            slice3.copy_from(&buf);
            assert!(range_is_dirty(slice3.bitmap(), 0, dirty_offset));
        }
    }

    #[test]
    fn test_volatile_ref_dirty_tracking() {
        let val = 123u64;
        let mut buf = vec![val];
        let page_size = 0x1000;

        let bitmap = AtomicBitmap::new(size_of_val(&val), page_size);
        let vref =
            unsafe { VolatileRef::with_bitmap(buf.as_mut_ptr() as *mut u8, bitmap.slice_at(0)) };

        assert!(range_is_clean(vref.bitmap(), 0, vref.len()));
        vref.store(val);
        assert!(range_is_dirty(vref.bitmap(), 0, vref.len()));
    }

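    // Generic helper so `copy_from` dirty tracking is exercised both for
    // byte-sized elements and for wider `ByteValued` types (see the two calls
    // at the end of `test_volatile_array_ref_dirty_tracking`).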
    fn test_volatile_array_ref_copy_from_tracking<T>(buf: &mut [T], index: usize, page_size: usize)
    where
        T: ByteValued + From<u8>,
    {
        let bitmap = AtomicBitmap::new(buf.len() * size_of::<T>(), page_size);
        let arr = unsafe {
            VolatileArrayRef::with_bitmap(
                buf.as_mut_ptr() as *mut u8,
                index + 1,
                bitmap.slice_at(0),
            )
        };

        let val = T::from(123);
        let copy_buf = vec![val; index + 1];

        assert!(range_is_clean(arr.bitmap(), 0, arr.len() * size_of::<T>()));
        arr.copy_from(copy_buf.as_slice());
        assert!(range_is_dirty(arr.bitmap(), 0, buf.len() * size_of::<T>()));
    }

    #[test]
    fn test_volatile_array_ref_dirty_tracking() {
        let val = 123u64;
        let dirty_len = size_of_val(&val);
        let index = 0x1000;
        let dirty_offset = dirty_len * index;
        let page_size = 0x1000;

        let mut buf = vec![0u64; index + 1];
        let mut byte_buf = vec![0u8; index + 1];

        // Test `ref_at`.
        {
            let bitmap = AtomicBitmap::new(buf.len() * size_of_val(&val), page_size);
            let arr = unsafe {
                VolatileArrayRef::with_bitmap(
                    buf.as_mut_ptr() as *mut u8,
                    index + 1,
                    bitmap.slice_at(0),
                )
            };

            assert!(range_is_clean(arr.bitmap(), 0, arr.len() * dirty_len));
            arr.ref_at(index).store(val);
            assert!(range_is_dirty(arr.bitmap(), dirty_offset, dirty_len));
        }

        // Test `store`.
        {
            let bitmap = AtomicBitmap::new(buf.len() * size_of_val(&val), page_size);
            let arr = unsafe {
                VolatileArrayRef::with_bitmap(
                    buf.as_mut_ptr() as *mut u8,
                    index + 1,
                    bitmap.slice_at(0),
                )
            };

            let slice = arr.to_slice();
            assert!(range_is_clean(slice.bitmap(), 0, slice.len()));
            arr.store(index, val);
            assert!(range_is_dirty(slice.bitmap(), dirty_offset, dirty_len));
        }

        // Test `copy_from` when size_of::<T>() == 1.
        test_volatile_array_ref_copy_from_tracking(&mut byte_buf, index, page_size);
        // Test `copy_from` when size_of::<T>() > 1.
        test_volatile_array_ref_copy_from_tracking(&mut buf, index, page_size);
    }
}