vm_memory/bitmap/mod.rs

// Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause

//! This module holds abstractions that enable tracking the areas dirtied by writes of a specified
//! length to a given offset. In particular, this is used to track write accesses within a
//! `GuestMemoryRegion` object, and the resulting bitmaps can then be aggregated to build the
//! global view for an entire `GuestMemory` object.

#[cfg(any(test, feature = "backend-bitmap"))]
mod backend;

use std::fmt::Debug;

use crate::{GuestMemory, GuestMemoryRegion};

#[cfg(any(test, feature = "backend-bitmap"))]
pub use backend::{ArcSlice, AtomicBitmap, RefSlice};

/// Trait implemented by types that support creating `BitmapSlice` objects.
pub trait WithBitmapSlice<'a> {
    /// Type of the bitmap slice.
    type S: BitmapSlice;
}

/// Trait used to represent that a `BitmapSlice` is a `Bitmap` itself, but also satisfies the
/// restriction that slices created from it have the same type as `Self`.
pub trait BitmapSlice: Bitmap + Clone + Debug + for<'a> WithBitmapSlice<'a, S = Self> {}

/// Common bitmap operations. This trait relies on Higher-Rank Trait Bounds (HRTBs) to
/// effectively define an associated type that has a lifetime parameter, without tagging the
/// `Bitmap` trait with a lifetime as well.
///
/// Using an associated type allows implementing the `Bitmap` and `BitmapSlice` functionality
/// as a zero-cost abstraction when providing trivial implementations such as the one
/// defined for `()`.
// These methods represent the core functionality that's required by `vm-memory` abstractions
// to implement generic tracking logic, as well as tests that can be reused by different backends.
pub trait Bitmap: for<'a> WithBitmapSlice<'a> {
    /// Mark the memory range specified by the given `offset` and `len` as dirtied.
    fn mark_dirty(&self, offset: usize, len: usize);

    /// Check whether the specified `offset` is marked as dirty.
    fn dirty_at(&self, offset: usize) -> bool;

    /// Return a `<Self as WithBitmapSlice>::S` slice of the current bitmap, starting at
    /// the specified `offset`.
    fn slice_at(&self, offset: usize) -> <Self as WithBitmapSlice>::S;
}
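
// As a brief illustration (a hypothetical helper, not part of the crate), generic tracking
// logic built on this trait only needs the three methods above and stays agnostic of the
// backing representation:
//
//     fn track_write<B: Bitmap>(bitmap: &B, offset: usize, len: usize) {
//         bitmap.mark_dirty(offset, len);
//         debug_assert!(bitmap.dirty_at(offset));
//     }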

/// A no-op `Bitmap` implementation that can be provided for backends that do not actually
/// require the tracking functionality.

impl<'a> WithBitmapSlice<'a> for () {
    type S = Self;
}

impl BitmapSlice for () {}

impl Bitmap for () {
    fn mark_dirty(&self, _offset: usize, _len: usize) {}

    fn dirty_at(&self, _offset: usize) -> bool {
        false
    }

    fn slice_at(&self, _offset: usize) -> Self {}
}
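
// A minimal sketch of how the no-op implementation behaves (illustrative only): marking
// ranges has no effect and every query reports the range as clean, so backends that don't
// need dirty tracking pay no runtime cost.
//
//     ().mark_dirty(0, 0x1000);
//     assert!(!().dirty_at(0));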

/// A `Bitmap` and `BitmapSlice` implementation for `Option<B>`.

impl<'a, B> WithBitmapSlice<'a> for Option<B>
where
    B: WithBitmapSlice<'a>,
{
    type S = Option<B::S>;
}

impl<B: BitmapSlice> BitmapSlice for Option<B> {}

impl<B: Bitmap> Bitmap for Option<B> {
    fn mark_dirty(&self, offset: usize, len: usize) {
        if let Some(inner) = self {
            inner.mark_dirty(offset, len)
        }
    }

    fn dirty_at(&self, offset: usize) -> bool {
        if let Some(inner) = self {
            return inner.dirty_at(offset);
        }
        false
    }

    fn slice_at(&self, offset: usize) -> Option<<B as WithBitmapSlice>::S> {
        if let Some(inner) = self {
            return Some(inner.slice_at(offset));
        }
        None
    }
}
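
// Sketch of why the `Option` implementation is useful (assuming the `AtomicBitmap` backend
// re-exported above when the `backend-bitmap` feature is enabled): it allows dirty tracking
// to be switched on or off at runtime, with `None` behaving like the no-op bitmap.
//
//     let bitmap: Option<AtomicBitmap> = None;
//     bitmap.mark_dirty(0, 0x1000); // no-op when tracking is disabled
//     assert!(!bitmap.dirty_at(0));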

/// Helper type alias for referring to the `BitmapSlice` concrete type associated with
/// an object `B: WithBitmapSlice<'a>`.
pub type BS<'a, B> = <B as WithBitmapSlice<'a>>::S;

/// Helper type alias for referring to the `BitmapSlice` concrete type associated with
/// the memory regions of an object `M: GuestMemory`.
pub type MS<'a, M> = BS<'a, <<M as GuestMemory>::R as GuestMemoryRegion>::B>;
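
// For illustration (hypothetical signature, not a definition from this module), these aliases
// keep trait bounds readable when bitmap slice types appear in return positions, e.g. a region
// method returning a volatile slice parameterized by its bitmap slice type might look roughly
// like:
//
//     fn get_slice(&self, offset: MemoryRegionAddress, count: usize)
//         -> Result<VolatileSlice<BS<Self::B>>>;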

#[cfg(test)]
pub(crate) mod tests {
    use super::*;

    use std::io::Cursor;
    use std::marker::PhantomData;
    use std::mem::size_of_val;
    use std::result::Result;
    use std::sync::atomic::Ordering;

    use crate::{Bytes, VolatileMemory};
    #[cfg(feature = "backend-mmap")]
    use crate::{GuestAddress, MemoryRegionAddress};

    // Helper method to check whether a specified range is clean.
    pub fn range_is_clean<B: Bitmap>(b: &B, start: usize, len: usize) -> bool {
        (start..start + len).all(|offset| !b.dirty_at(offset))
    }

    // Helper method to check whether a specified range is dirty.
    pub fn range_is_dirty<B: Bitmap>(b: &B, start: usize, len: usize) -> bool {
        (start..start + len).all(|offset| b.dirty_at(offset))
    }

    pub fn check_range<B: Bitmap>(b: &B, start: usize, len: usize, clean: bool) -> bool {
        if clean {
            range_is_clean(b, start, len)
        } else {
            range_is_dirty(b, start, len)
        }
    }

    // Helper method that tests a generic `B: Bitmap` implementation. It assumes `b` covers
    // an area of length at least 0x2000.
    pub fn test_bitmap<B: Bitmap>(b: &B) {
        let len = 0x2000;
        let dirty_offset = 0x1000;
        let dirty_len = 0x100;

        // Some basic checks.
        let s = b.slice_at(dirty_offset);

        assert!(range_is_clean(b, 0, len));
        assert!(range_is_clean(&s, 0, dirty_len));

        b.mark_dirty(dirty_offset, dirty_len);
        assert!(range_is_dirty(b, dirty_offset, dirty_len));
        assert!(range_is_dirty(&s, 0, dirty_len));
    }
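
    // Example invocation (illustrative only; assumes an `AtomicBitmap::new(len, page_size)`
    // constructor from the `backend-bitmap` feature):
    //
    //     let b = AtomicBitmap::new(0x2000, 0x1000);
    //     test_bitmap(&b);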

    #[derive(Debug)]
    pub enum TestAccessError {
        RangeCleanCheck,
        RangeDirtyCheck,
    }

    // A helper object that implements auxiliary operations for testing `Bytes` implementations
    // in the context of dirty bitmap tracking.
    struct BytesHelper<F, G, M> {
        check_range_fn: F,
        address_fn: G,
        phantom: PhantomData<*const M>,
    }

    // `F` represents a closure that checks whether a specified range associated with the `Bytes`
    // object that's being tested is marked as dirty or not (depending on the value of the last
    // parameter). It has the following parameters:
    // - A reference to a `Bytes` implementation that's subject to testing.
    // - The offset of the range.
    // - The length of the range.
    // - Whether we are checking if the range is clean (when `true`) or marked as dirty.
    //
    // `G` represents a closure that translates an offset into an address value that's
    // relevant for the `Bytes` implementation being tested.
    impl<F, G, M, A> BytesHelper<F, G, M>
    where
        F: Fn(&M, usize, usize, bool) -> bool,
        G: Fn(usize) -> A,
        M: Bytes<A>,
    {
        fn check_range(&self, m: &M, start: usize, len: usize, clean: bool) -> bool {
            (self.check_range_fn)(m, start, len, clean)
        }

        fn address(&self, offset: usize) -> A {
            (self.address_fn)(offset)
        }

        fn test_access<Op>(
            &self,
            bytes: &M,
            dirty_offset: usize,
            dirty_len: usize,
            op: Op,
        ) -> Result<(), TestAccessError>
        where
            Op: Fn(&M, A),
        {
            if !self.check_range(bytes, dirty_offset, dirty_len, true) {
                return Err(TestAccessError::RangeCleanCheck);
            }

            op(bytes, self.address(dirty_offset));

            if !self.check_range(bytes, dirty_offset, dirty_len, false) {
                return Err(TestAccessError::RangeDirtyCheck);
            }

            Ok(())
        }
    }

    // `F` and `G` stand for the same closure types as described in the `BytesHelper` comment.
    // The `step` parameter represents the offset that's added to the current address after
    // performing each access. It provides finer-grained control when testing tracking
    // implementations that aggregate entire ranges for accounting purposes (for example, doing
    // tracking at the page level).
    pub fn test_bytes<F, G, M, A>(bytes: &M, check_range_fn: F, address_fn: G, step: usize)
    where
        F: Fn(&M, usize, usize, bool) -> bool,
        G: Fn(usize) -> A,
        A: Copy,
        M: Bytes<A>,
        <M as Bytes<A>>::E: Debug,
    {
        const BUF_SIZE: usize = 1024;
        let buf = vec![1u8; BUF_SIZE];

        let val = 1u64;

        let h = BytesHelper {
            check_range_fn,
            address_fn,
            phantom: PhantomData,
        };

        let mut dirty_offset = 0x1000;

        // Test `write`.
        h.test_access(bytes, dirty_offset, BUF_SIZE, |m, addr| {
            assert_eq!(m.write(buf.as_slice(), addr).unwrap(), BUF_SIZE)
        })
        .unwrap();
        dirty_offset += step;

        // Test `write_slice`.
        h.test_access(bytes, dirty_offset, BUF_SIZE, |m, addr| {
            m.write_slice(buf.as_slice(), addr).unwrap()
        })
        .unwrap();
        dirty_offset += step;

        // Test `write_obj`.
        h.test_access(bytes, dirty_offset, size_of_val(&val), |m, addr| {
            m.write_obj(val, addr).unwrap()
        })
        .unwrap();
        dirty_offset += step;

        // Test `read_from`.
        h.test_access(bytes, dirty_offset, BUF_SIZE, |m, addr| {
            assert_eq!(
                m.read_from(addr, &mut Cursor::new(&buf), BUF_SIZE).unwrap(),
                BUF_SIZE
            )
        })
        .unwrap();
        dirty_offset += step;

        // Test `read_exact_from`.
        h.test_access(bytes, dirty_offset, BUF_SIZE, |m, addr| {
            m.read_exact_from(addr, &mut Cursor::new(&buf), BUF_SIZE)
                .unwrap()
        })
        .unwrap();
        dirty_offset += step;

        // Test `store`.
        h.test_access(bytes, dirty_offset, size_of_val(&val), |m, addr| {
            m.store(val, addr, Ordering::Relaxed).unwrap()
        })
        .unwrap();
    }

    // This function and the next are conditionally compiled because, for now, we only use
    // them to test the mmap-based backend implementations. Going forward, the generic
    // test functions defined here can be placed in a separate module (e.g. `test_utilities`)
    // gated by a feature, so they can be reused by other crates for testing purposes as well.
    #[cfg(feature = "backend-mmap")]
    fn test_guest_memory_region<R: GuestMemoryRegion>(region: &R) {
        let dirty_addr = MemoryRegionAddress(0x0);
        let val = 123u64;
        let dirty_len = size_of_val(&val);

        let slice = region.get_slice(dirty_addr, dirty_len).unwrap();

        assert!(range_is_clean(region.bitmap(), 0, region.len() as usize));
        assert!(range_is_clean(slice.bitmap(), 0, dirty_len));

        region.write_obj(val, dirty_addr).unwrap();

        assert!(range_is_dirty(
            region.bitmap(),
            dirty_addr.0 as usize,
            dirty_len
        ));

        assert!(range_is_dirty(slice.bitmap(), 0, dirty_len));

        // Finally, let's invoke the generic tests for `R: Bytes`. It's ok to pass the same
        // `region` handle because `test_bytes` starts performing writes after the range that
        // has already been dirtied in the first part of this test.
        test_bytes(
            region,
            |r: &R, start: usize, len: usize, clean: bool| {
                check_range(r.bitmap(), start, len, clean)
            },
            |offset| MemoryRegionAddress(offset as u64),
            0x1000,
        );
    }

    #[cfg(feature = "backend-mmap")]
    // Assumptions about the `M` object created by `f`: it provides a region starting at
    // `GuestAddress(0)` that is large enough to cover all the accesses performed below.
    pub fn test_guest_memory_and_region<M, F>(f: F)
    where
        M: GuestMemory,
        F: Fn() -> M,
    {
        let m = f();
        let dirty_addr = GuestAddress(0x1000);
        let val = 123u64;
        let dirty_len = size_of_val(&val);

        let (region, region_addr) = m.to_region_addr(dirty_addr).unwrap();
        let slice = m.get_slice(dirty_addr, dirty_len).unwrap();

        assert!(range_is_clean(region.bitmap(), 0, region.len() as usize));
        assert!(range_is_clean(slice.bitmap(), 0, dirty_len));

        m.write_obj(val, dirty_addr).unwrap();

        assert!(range_is_dirty(
            region.bitmap(),
            region_addr.0 as usize,
            dirty_len
        ));

        assert!(range_is_dirty(slice.bitmap(), 0, dirty_len));

        // Now let's invoke the tests for the inner `GuestMemoryRegion` type.
        test_guest_memory_region(f().find_region(GuestAddress(0)).unwrap());

        // Finally, let's invoke the generic tests for `Bytes`.
        let check_range_closure = |m: &M, start: usize, len: usize, clean: bool| -> bool {
            let mut check_result = true;
            m.try_access(len, GuestAddress(start as u64), |_, size, reg_addr, reg| {
                if !check_range(reg.bitmap(), reg_addr.0 as usize, size, clean) {
                    check_result = false;
                }
                Ok(size)
            })
            .unwrap();

            check_result
        };

        test_bytes(
            &f(),
            check_range_closure,
            |offset| GuestAddress(offset as u64),
            0x1000,
        );
    }

    pub fn test_volatile_memory<M: VolatileMemory>(m: &M) {
        assert!(m.len() >= 0x8000);

        let dirty_offset = 0x1000;
        let val = 123u64;
        let dirty_len = size_of_val(&val);

        let get_ref_offset = 0x2000;
        let array_ref_offset = 0x3000;

        let s1 = m.as_volatile_slice();
        let s2 = m.get_slice(dirty_offset, dirty_len).unwrap();

        assert!(range_is_clean(s1.bitmap(), 0, s1.len()));
        assert!(range_is_clean(s2.bitmap(), 0, s2.len()));

        s1.write_obj(val, dirty_offset).unwrap();

        assert!(range_is_dirty(s1.bitmap(), dirty_offset, dirty_len));
        assert!(range_is_dirty(s2.bitmap(), 0, dirty_len));

        let v_ref = m.get_ref::<u64>(get_ref_offset).unwrap();
        assert!(range_is_clean(s1.bitmap(), get_ref_offset, dirty_len));
        v_ref.store(val);
        assert!(range_is_dirty(s1.bitmap(), get_ref_offset, dirty_len));

        let arr_ref = m.get_array_ref::<u64>(array_ref_offset, 1).unwrap();
        assert!(range_is_clean(s1.bitmap(), array_ref_offset, dirty_len));
        arr_ref.store(0, val);
        assert!(range_is_dirty(s1.bitmap(), array_ref_offset, dirty_len));
    }
}