vm_memory/bytes.rs

// Portions Copyright 2019 Red Hat, Inc.
//
// Portions Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
//
// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause

//! Define the `ByteValued` trait to mark types that are safe to instantiate from random data.

use std::io::{Read, Write};
use std::mem::size_of;
use std::result::Result;
use std::slice::{from_raw_parts, from_raw_parts_mut};
use std::sync::atomic::Ordering;

use crate::atomic_integer::AtomicInteger;
use crate::volatile_memory::VolatileSlice;

/// Types for which it is safe to initialize from raw data.
///
/// # Safety
///
/// A type `T` is `ByteValued` if and only if it can be initialized by reading its contents from a
/// byte array.  This is generally true for all plain-old-data structs.  It is notably not true for
/// any type that includes a reference.
///
/// Implementing this trait guarantees that it is safe to instantiate the struct with random data.
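///
/// # Example
///
/// A minimal sketch of a plain-old-data struct for which implementing the trait is sound
/// (the `Header` type here is purely illustrative):
///
/// ```
/// use vm_memory::ByteValued;
///
/// #[repr(C)]
/// #[derive(Copy, Clone, Default)]
/// struct Header {
///     tag: u32,
///     len: u32,
/// }
///
/// // SAFETY: `Header` is `#[repr(C)]` with two `u32` fields and no padding,
/// // so every byte pattern is a valid value.
/// unsafe impl ByteValued for Header {}
/// ```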
pub unsafe trait ByteValued: Copy + Default + Send + Sync {
    /// Converts a slice of raw data into a reference of `Self`.
    ///
    /// The value of `data` is not copied. Instead a reference is made from the given slice. The
    /// value of `Self` will depend on the representation of the type in memory, and may change in
    /// an unstable fashion.
    ///
    /// This will return `None` if the length of data does not match the size of `Self`, or if the
    /// data is not aligned for the type of `Self`.
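    ///
    /// # Example
    ///
    /// A small sketch of the success and failure cases (illustrative only; it uses
    /// `as_slice` to obtain a byte view that is properly aligned for `u32`):
    ///
    /// ```
    /// use vm_memory::ByteValued;
    ///
    /// let val = 0x0403_0201u32;
    /// assert_eq!(u32::from_slice(val.as_slice()), Some(&0x0403_0201));
    /// // A slice whose length does not match `size_of::<u32>()` yields `None`.
    /// assert!(u32::from_slice(&[0u8; 3]).is_none());
    /// ```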
    fn from_slice(data: &[u8]) -> Option<&Self> {
        // Early out to avoid an unneeded `align_to` call.
        if data.len() != size_of::<Self>() {
            return None;
        }

        // SAFETY: Safe because the ByteValued trait asserts any data is valid for this type, and
        // we ensured the size of the pointer's buffer is the correct size. The `align_to` method
        // ensures that we don't have any unaligned references. This aliases a pointer, but because
        // the pointer is from a const slice reference, there are no mutable aliases. Finally, the
        // reference returned can not outlive data because they have equal implicit lifetime
        // constraints.
        match unsafe { data.align_to::<Self>() } {
            ([], [mid], []) => Some(mid),
            _ => None,
        }
    }

    /// Converts a mutable slice of raw data into a mutable reference of `Self`.
    ///
    /// Because `Self` is made from a reference to the mutable slice, mutations to the returned
    /// reference are immediately reflected in `data`. The value of the returned `Self` will depend
    /// on the representation of the type in memory, and may change in an unstable fashion.
    ///
    /// This will return `None` if the length of data does not match the size of `Self`, or if the
    /// data is not aligned for the type of `Self`.
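    ///
    /// # Example
    ///
    /// A small sketch (illustrative only) showing that writes through the returned
    /// reference land in the original buffer:
    ///
    /// ```
    /// use vm_memory::ByteValued;
    ///
    /// let mut val = 0u32;
    /// // Reinterpret `val`'s own (properly aligned) bytes, then write through them.
    /// *u32::from_mut_slice(val.as_mut_slice()).unwrap() = 42;
    /// assert_eq!(val, 42);
    /// ```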
    fn from_mut_slice(data: &mut [u8]) -> Option<&mut Self> {
        // Early out to avoid an unneeded `align_to_mut` call.
        if data.len() != size_of::<Self>() {
            return None;
        }

        // SAFETY: Safe because the ByteValued trait asserts any data is valid for this type, and
        // we ensured the size of the pointer's buffer is the correct size. The `align_to` method
        // ensures that we don't have any unaligned references. This aliases a pointer, but because
        // the pointer is from a mut slice reference, we borrow the passed in mutable reference.
        // Finally, the reference returned can not outlive data because they have equal implicit
        // lifetime constraints.
        match unsafe { data.align_to_mut::<Self>() } {
            ([], [mid], []) => Some(mid),
            _ => None,
        }
    }

    /// Converts a reference to `self` into a slice of bytes.
    ///
    /// The value of `self` is not copied. Instead, the slice is made from a reference to `self`.
    /// The value of bytes in the returned slice will depend on the representation of the type in
    /// memory, and may change in an unstable fashion.
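    ///
    /// # Example
    ///
    /// A one-line sketch (illustrative only); the byte order follows the host's
    /// native endianness:
    ///
    /// ```
    /// use vm_memory::ByteValued;
    ///
    /// let x = 0x1234_5678u32;
    /// assert_eq!(x.as_slice(), &x.to_ne_bytes()[..]);
    /// ```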
    fn as_slice(&self) -> &[u8] {
        // SAFETY: Safe because the entire size of self is accessible as bytes because the trait
        // guarantees it. The lifetime of the returned slice is the same as the passed reference,
        // so that no dangling pointers will result from this pointer alias.
        unsafe { from_raw_parts(self as *const Self as *const u8, size_of::<Self>()) }
    }

    /// Converts a mutable reference to `self` into a mutable slice of bytes.
    ///
    /// Because the slice is made from a reference to `self`, mutations to the returned slice are
    /// immediately reflected in `self`. The value of bytes in the returned slice will depend on
    /// the representation of the type in memory, and may change in an unstable fashion.
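    ///
    /// # Example
    ///
    /// A small sketch (illustrative only):
    ///
    /// ```
    /// use vm_memory::ByteValued;
    ///
    /// let mut x = 0u16;
    /// x.as_mut_slice().copy_from_slice(&0x1234u16.to_ne_bytes());
    /// assert_eq!(x, 0x1234);
    /// ```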
    fn as_mut_slice(&mut self) -> &mut [u8] {
        // SAFETY: Safe because the entire size of self is accessible as bytes because the trait
        // guarantees it. The trait also guarantees that any combination of bytes is valid for this
        // type, so modifying them in the form of a byte slice is valid. The lifetime of the
        // returned slice is the same as the passed reference, so that no dangling pointers will
        // result from this pointer alias. Although this does alias a mutable pointer, we do so by
        // exclusively borrowing the given mutable reference.
        unsafe { from_raw_parts_mut(self as *mut Self as *mut u8, size_of::<Self>()) }
    }

    /// Converts a mutable reference to `self` into a `VolatileSlice`.  This is
    /// useful because `VolatileSlice` provides a `Bytes<usize>` implementation.
    ///
    /// # Safety
    ///
    /// Unlike most `VolatileMemory` implementations, this method requires an exclusive
    /// reference to `self`; this trivially fulfills `VolatileSlice::new`'s requirement
    /// that all accesses to `self` use volatile accesses (because there can
    /// be no other accesses).
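    ///
    /// # Example
    ///
    /// A small sketch (illustrative only; it mirrors the `byte_valued_slice`
    /// unit test at the bottom of this file):
    ///
    /// ```
    /// use vm_memory::ByteValued;
    ///
    /// let mut x = 0u32;
    /// x.as_bytes().copy_from(&[1u8, 1, 1, 1]);
    /// assert_eq!(x, 0x0101_0101);
    /// ```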
    fn as_bytes(&mut self) -> VolatileSlice {
        // SAFETY: This is safe because the lifetime is the same as self
        unsafe { VolatileSlice::new(self as *mut Self as usize as *mut _, size_of::<Self>()) }
    }
}

macro_rules! byte_valued_array {
    ($T:ty, $($N:expr)+) => {
        $(
            // SAFETY: All intrinsic types and arrays of intrinsic types are ByteValued.
            // They are just numbers.
            unsafe impl ByteValued for [$T; $N] {}
        )+
    }
}
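
// As an illustration, `byte_valued_array!(u8, 4)` would expand (roughly) to:
//
//     unsafe impl ByteValued for [u8; 4] {}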

macro_rules! byte_valued_type {
    ($T:ty) => {
        // SAFETY: Safe as long as `$T` is POD (plain old data).
        // We are using this macro to generate the implementations for the integer types below.
        unsafe impl ByteValued for $T {}
        byte_valued_array! {
            $T,
            0  1  2  3  4  5  6  7  8  9
            10 11 12 13 14 15 16 17 18 19
            20 21 22 23 24 25 26 27 28 29
            30 31 32
        }
    };
}

byte_valued_type!(u8);
byte_valued_type!(u16);
byte_valued_type!(u32);
byte_valued_type!(u64);
byte_valued_type!(usize);
byte_valued_type!(i8);
byte_valued_type!(i16);
byte_valued_type!(i32);
byte_valued_type!(i64);
byte_valued_type!(isize);

/// A trait used to identify types which can be accessed atomically by proxy.
pub trait AtomicAccess:
    ByteValued
    // Could not find a more succinct way of stating that `Self` can be converted
    // into `Self::A::V`, and the other way around.
    + From<<<Self as AtomicAccess>::A as AtomicInteger>::V>
    + Into<<<Self as AtomicAccess>::A as AtomicInteger>::V>
{
    /// The `AtomicInteger` that atomic operations on `Self` are based on.
    type A: AtomicInteger;
}

macro_rules! impl_atomic_access {
    ($T:ty, $A:path) => {
        impl AtomicAccess for $T {
            type A = $A;
        }
    };
}
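
// As an illustration, `impl_atomic_access!(u32, std::sync::atomic::AtomicU32)`
// below expands to:
//
//     impl AtomicAccess for u32 {
//         type A = std::sync::atomic::AtomicU32;
//     }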

impl_atomic_access!(i8, std::sync::atomic::AtomicI8);
impl_atomic_access!(i16, std::sync::atomic::AtomicI16);
impl_atomic_access!(i32, std::sync::atomic::AtomicI32);
#[cfg(any(
    target_arch = "x86_64",
    target_arch = "aarch64",
    target_arch = "powerpc64",
    target_arch = "s390x"
))]
impl_atomic_access!(i64, std::sync::atomic::AtomicI64);

impl_atomic_access!(u8, std::sync::atomic::AtomicU8);
impl_atomic_access!(u16, std::sync::atomic::AtomicU16);
impl_atomic_access!(u32, std::sync::atomic::AtomicU32);
#[cfg(any(
    target_arch = "x86_64",
    target_arch = "aarch64",
    target_arch = "powerpc64",
    target_arch = "s390x"
))]
impl_atomic_access!(u64, std::sync::atomic::AtomicU64);

impl_atomic_access!(isize, std::sync::atomic::AtomicIsize);
impl_atomic_access!(usize, std::sync::atomic::AtomicUsize);

/// A container to host a range of bytes and access its content.
///
/// Candidates which may implement this trait include:
/// - anonymous memory areas
/// - mmapped memory areas
/// - data files
/// - a proxy to access memory on a remote host
pub trait Bytes<A> {
    /// Associated error codes
    type E;

    /// Writes a slice into the container at `addr`.
    ///
    /// Returns the number of bytes written. The number of bytes written can
    /// be less than the length of the slice if there isn't enough room in the
    /// container.
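    ///
    /// # Example
    ///
    /// A sketch of the truncating behaviour, assuming `VolatileSlice`'s
    /// `Bytes<usize>` implementation (illustrative only):
    ///
    /// ```
    /// use vm_memory::{Bytes, VolatileSlice};
    ///
    /// let mut backing = [0u8; 8];
    /// // SAFETY: `backing` outlives the slice and is only accessed through it.
    /// let slice = unsafe { VolatileSlice::new(backing.as_mut_ptr(), backing.len()) };
    /// // Only 8 bytes fit, so the rest of the buffer is dropped.
    /// assert_eq!(slice.write(&[1u8; 16], 0).unwrap(), 8);
    /// ```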
    fn write(&self, buf: &[u8], addr: A) -> Result<usize, Self::E>;

    /// Reads data from the container at `addr` into a slice.
    ///
    /// Returns the number of bytes read. The number of bytes read can be less than the length
    /// of the slice if there isn't enough data within the container.
    fn read(&self, buf: &mut [u8], addr: A) -> Result<usize, Self::E>;

    /// Writes the entire content of a slice into the container at `addr`.
    ///
    /// # Errors
    ///
    /// Returns an error if there isn't enough space within the container to write the entire slice.
    /// Part of the data may have been copied nevertheless.
    fn write_slice(&self, buf: &[u8], addr: A) -> Result<(), Self::E>;

    /// Reads data from the container at `addr` to fill an entire slice.
    ///
    /// # Errors
    ///
    /// Returns an error if there isn't enough data within the container to fill the entire slice.
    /// Part of the data may have been copied nevertheless.
    fn read_slice(&self, buf: &mut [u8], addr: A) -> Result<(), Self::E>;

    /// Writes an object into the container at `addr`.
    ///
    /// # Errors
    ///
    /// Returns an error if the object doesn't fit inside the container.
    fn write_obj<T: ByteValued>(&self, val: T, addr: A) -> Result<(), Self::E> {
        self.write_slice(val.as_slice(), addr)
    }

    /// Reads an object from the container at `addr`.
    ///
    /// Reading from a volatile area isn't strictly safe as it could change mid-read.
    /// However, as long as the type T is plain old data and can handle random initialization,
    /// everything will be OK.
    ///
    /// # Errors
    ///
    /// Returns an error if there's not enough data inside the container.
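    ///
    /// # Example
    ///
    /// A minimal round-trip sketch together with `write_obj`, assuming
    /// `VolatileSlice`'s `Bytes<usize>` implementation (illustrative only):
    ///
    /// ```
    /// use vm_memory::{Bytes, VolatileSlice};
    ///
    /// let mut backing = [0u8; 8];
    /// // SAFETY: `backing` outlives the slice and is only accessed through it.
    /// let slice = unsafe { VolatileSlice::new(backing.as_mut_ptr(), backing.len()) };
    /// slice.write_obj(0x1122_3344u32, 2).unwrap();
    /// assert_eq!(slice.read_obj::<u32>(2).unwrap(), 0x1122_3344);
    /// ```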
    fn read_obj<T: ByteValued>(&self, addr: A) -> Result<T, Self::E> {
        let mut result: T = Default::default();
        self.read_slice(result.as_mut_slice(), addr).map(|_| result)
    }

    /// Reads up to `count` bytes from an object and writes them into the container at `addr`.
    ///
    /// Returns the number of bytes written into the container.
    ///
    /// # Arguments
    /// * `addr` - Begin writing at this address.
    /// * `src` - Copy from `src` into the container.
    /// * `count` - Copy `count` bytes from `src` into the container.
    fn read_from<F>(&self, addr: A, src: &mut F, count: usize) -> Result<usize, Self::E>
    where
        F: Read;

    /// Reads exactly `count` bytes from an object and writes them into the container at `addr`.
    ///
    /// # Errors
    ///
    /// Returns an error if `count` bytes couldn't be copied from `src` to the container.
    /// Part of the data may have been copied nevertheless.
    ///
    /// # Arguments
    /// * `addr` - Begin writing at this address.
    /// * `src` - Copy from `src` into the container.
    /// * `count` - Copy exactly `count` bytes from `src` into the container.
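    ///
    /// # Example
    ///
    /// A short sketch reading from an in-memory `Cursor`, assuming
    /// `VolatileSlice`'s `Bytes<usize>` implementation (illustrative only):
    ///
    /// ```
    /// use std::io::Cursor;
    /// use vm_memory::{Bytes, VolatileSlice};
    ///
    /// let mut backing = [0u8; 4];
    /// // SAFETY: `backing` outlives the slice and is only accessed through it.
    /// let slice = unsafe { VolatileSlice::new(backing.as_mut_ptr(), backing.len()) };
    /// let mut src = Cursor::new(vec![1u8, 2, 3, 4]);
    /// slice.read_exact_from(0, &mut src, 4).unwrap();
    /// assert_eq!(slice.read_obj::<u32>(0).unwrap(), u32::from_ne_bytes([1, 2, 3, 4]));
    /// ```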
    fn read_exact_from<F>(&self, addr: A, src: &mut F, count: usize) -> Result<(), Self::E>
    where
        F: Read;

    /// Reads up to `count` bytes from the container at `addr` and writes them into an object.
    ///
    /// Returns the number of bytes written into the object.
    ///
    /// # Arguments
    /// * `addr` - Begin reading from this address.
    /// * `dst` - Copy from the container to `dst`.
    /// * `count` - Copy `count` bytes from the container to `dst`.
    fn write_to<F>(&self, addr: A, dst: &mut F, count: usize) -> Result<usize, Self::E>
    where
        F: Write;

    /// Reads exactly `count` bytes from the container at `addr` and writes them into an object.
    ///
    /// # Errors
    ///
    /// Returns an error if `count` bytes couldn't be copied from the container to `dst`.
    /// Part of the data may have been copied nevertheless.
    ///
    /// # Arguments
    /// * `addr` - Begin reading from this address.
    /// * `dst` - Copy from the container to `dst`.
    /// * `count` - Copy exactly `count` bytes from the container to `dst`.
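    ///
    /// # Example
    ///
    /// A short sketch writing into a `Vec<u8>`, assuming `VolatileSlice`'s
    /// `Bytes<usize>` implementation (illustrative only):
    ///
    /// ```
    /// use vm_memory::{Bytes, VolatileSlice};
    ///
    /// let mut backing = [1u8, 2, 3, 4];
    /// // SAFETY: `backing` outlives the slice and is only accessed through it.
    /// let slice = unsafe { VolatileSlice::new(backing.as_mut_ptr(), backing.len()) };
    /// let mut dst: Vec<u8> = Vec::new();
    /// slice.write_all_to(0, &mut dst, 4).unwrap();
    /// assert_eq!(dst, [1, 2, 3, 4]);
    /// ```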
    fn write_all_to<F>(&self, addr: A, dst: &mut F, count: usize) -> Result<(), Self::E>
    where
        F: Write;

    /// Atomically store a value at the specified address.
    fn store<T: AtomicAccess>(&self, val: T, addr: A, order: Ordering) -> Result<(), Self::E>;

    /// Atomically load a value from the specified address.
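    ///
    /// # Example
    ///
    /// A minimal sketch over a `u32`-aligned buffer, assuming `VolatileSlice`'s
    /// `Bytes<usize>` implementation (illustrative only; atomic accesses require
    /// suitably aligned addresses):
    ///
    /// ```
    /// use std::sync::atomic::Ordering;
    /// use vm_memory::{Bytes, VolatileSlice};
    ///
    /// let mut backing = [0u32; 2];
    /// // SAFETY: `backing` outlives the slice and is only accessed through it.
    /// let slice = unsafe { VolatileSlice::new(backing.as_mut_ptr() as *mut u8, 8) };
    /// slice.store(123u32, 0, Ordering::Relaxed).unwrap();
    /// assert_eq!(slice.load::<u32>(0, Ordering::Relaxed).unwrap(), 123);
    /// ```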
    fn load<T: AtomicAccess>(&self, addr: A, order: Ordering) -> Result<T, Self::E>;
}

#[cfg(test)]
pub(crate) mod tests {
    #![allow(clippy::undocumented_unsafe_blocks)]
    use super::*;

    use std::fmt::Debug;
    use std::mem::align_of;
    use std::slice;

    // Helper method to test atomic accesses for a given `b: Bytes` that's supposed to be
    // zero-initialized.
    pub fn check_atomic_accesses<A, B>(b: B, addr: A, bad_addr: A)
    where
        A: Copy,
        B: Bytes<A>,
        B::E: Debug,
    {
        let val = 100u32;

        assert_eq!(b.load::<u32>(addr, Ordering::Relaxed).unwrap(), 0);
        b.store(val, addr, Ordering::Relaxed).unwrap();
        assert_eq!(b.load::<u32>(addr, Ordering::Relaxed).unwrap(), val);

        assert!(b.load::<u32>(bad_addr, Ordering::Relaxed).is_err());
        assert!(b.store(val, bad_addr, Ordering::Relaxed).is_err());
    }

    fn check_byte_valued_type<T>()
    where
        T: ByteValued + PartialEq + Debug + Default,
    {
        let mut data = [0u8; 32];
        let pre_len = {
            let (pre, _, _) = unsafe { data.align_to::<T>() };
            pre.len()
        };
        {
            let aligned_data = &mut data[pre_len..pre_len + size_of::<T>()];
            {
                let mut val: T = Default::default();
                assert_eq!(T::from_slice(aligned_data), Some(&val));
                assert_eq!(T::from_mut_slice(aligned_data), Some(&mut val));
                assert_eq!(val.as_slice(), aligned_data);
                assert_eq!(val.as_mut_slice(), aligned_data);
            }
        }
        for i in 1..size_of::<T>() {
            let begin = pre_len + i;
            let end = begin + size_of::<T>();
            let unaligned_data = &mut data[begin..end];
            {
                if align_of::<T>() != 1 {
                    assert_eq!(T::from_slice(unaligned_data), None);
                    assert_eq!(T::from_mut_slice(unaligned_data), None);
                }
            }
        }
        // Check the early out condition
        {
            assert!(T::from_slice(&data).is_none());
            assert!(T::from_mut_slice(&mut data).is_none());
        }
    }

    #[test]
    fn test_byte_valued() {
        check_byte_valued_type::<u8>();
        check_byte_valued_type::<u16>();
        check_byte_valued_type::<u32>();
        check_byte_valued_type::<u64>();
        check_byte_valued_type::<usize>();
        check_byte_valued_type::<i8>();
        check_byte_valued_type::<i16>();
        check_byte_valued_type::<i32>();
        check_byte_valued_type::<i64>();
        check_byte_valued_type::<isize>();
    }

    pub const MOCK_BYTES_CONTAINER_SIZE: usize = 10;

    pub struct MockBytesContainer {
        container: [u8; MOCK_BYTES_CONTAINER_SIZE],
    }

    impl MockBytesContainer {
        pub fn new() -> Self {
            MockBytesContainer {
                container: [0; MOCK_BYTES_CONTAINER_SIZE],
            }
        }

        pub fn validate_slice_op(&self, buf: &[u8], addr: usize) -> Result<(), ()> {
            // Reject any access that would run past the end of the container,
            // guarding against underflow when `buf` is larger than the container.
            if buf.len() > MOCK_BYTES_CONTAINER_SIZE || addr > MOCK_BYTES_CONTAINER_SIZE - buf.len()
            {
                return Err(());
            }

            Ok(())
        }
    }

    impl Bytes<usize> for MockBytesContainer {
        type E = ();

        fn write(&self, _: &[u8], _: usize) -> Result<usize, Self::E> {
            unimplemented!()
        }

        fn read(&self, _: &mut [u8], _: usize) -> Result<usize, Self::E> {
            unimplemented!()
        }

        fn write_slice(&self, buf: &[u8], addr: usize) -> Result<(), Self::E> {
            self.validate_slice_op(buf, addr)?;

            // We need to get a mut reference to `self.container`.
            let container_ptr = self.container[addr..].as_ptr() as usize as *mut u8;
            let container = unsafe { slice::from_raw_parts_mut(container_ptr, buf.len()) };
            container.copy_from_slice(buf);

            Ok(())
        }

        fn read_slice(&self, buf: &mut [u8], addr: usize) -> Result<(), Self::E> {
            self.validate_slice_op(buf, addr)?;

            // Copy `buf.len()` bytes out of the container, starting at `addr`.
            buf.copy_from_slice(&self.container[addr..addr + buf.len()]);

            Ok(())
        }

        fn read_from<F>(&self, _: usize, _: &mut F, _: usize) -> Result<usize, Self::E>
        where
            F: Read,
        {
            unimplemented!()
        }

        fn read_exact_from<F>(&self, _: usize, _: &mut F, _: usize) -> Result<(), Self::E>
        where
            F: Read,
        {
            unimplemented!()
        }

        fn write_to<F>(&self, _: usize, _: &mut F, _: usize) -> Result<usize, Self::E>
        where
            F: Write,
        {
            unimplemented!()
        }

        fn write_all_to<F>(&self, _: usize, _: &mut F, _: usize) -> Result<(), Self::E>
        where
            F: Write,
        {
            unimplemented!()
        }

        fn store<T: AtomicAccess>(
            &self,
            _val: T,
            _addr: usize,
            _order: Ordering,
        ) -> Result<(), Self::E> {
            unimplemented!()
        }

        fn load<T: AtomicAccess>(&self, _addr: usize, _order: Ordering) -> Result<T, Self::E> {
            unimplemented!()
        }
    }

    #[test]
    fn test_bytes() {
        let bytes = MockBytesContainer::new();

        assert!(bytes.write_obj(u64::MAX, 0).is_ok());
        assert_eq!(bytes.read_obj::<u64>(0).unwrap(), u64::MAX);

        assert!(bytes
            .write_obj(u64::MAX, MOCK_BYTES_CONTAINER_SIZE)
            .is_err());
        assert!(bytes.read_obj::<u64>(MOCK_BYTES_CONTAINER_SIZE).is_err());
    }

    #[repr(C)]
    #[derive(Copy, Clone, Default)]
    struct S {
        a: u32,
        b: u32,
    }

    unsafe impl ByteValued for S {}

    #[test]
    fn byte_valued_slice() {
        let a: [u8; 8] = [0, 0, 0, 0, 1, 1, 1, 1];
        let mut s: S = Default::default();
        s.as_bytes().copy_from(&a);
        assert_eq!(s.a, 0);
        assert_eq!(s.b, 0x0101_0101);
    }
}