redb/tree_store/page_store/
backends.rs

1use crate::StorageBackend;
2use std::io;
3use std::io::Error;
4use std::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard};
5
/// Wrapper that restricts an inner [`StorageBackend`] to its read-side
/// operations (`len`, `read`, `close`); the mutating operations panic.
#[derive(Debug)]
pub(crate) struct ReadOnlyBackend {
    // The wrapped backend; only its read-side methods are forwarded.
    inner: Box<dyn StorageBackend>,
}
10
impl ReadOnlyBackend {
    /// Wraps `inner`, exposing only the read-only subset of
    /// [`StorageBackend`] to callers holding this type.
    pub fn new(inner: Box<dyn StorageBackend>) -> Self {
        Self { inner }
    }
}
16
// Read operations delegate to the wrapped backend; mutating operations are
// `unreachable!()` — NOTE(review): this assumes callers elsewhere in the
// crate never route writes through a read-only backend; confirm that
// invariant holds at the call sites.
impl StorageBackend for ReadOnlyBackend {
    fn len(&self) -> Result<u64, Error> {
        self.inner.len()
    }

    fn read(&self, offset: u64, out: &mut [u8]) -> Result<(), Error> {
        self.inner.read(offset, out)
    }

    // Intentionally panics: resizing a read-only backend is a logic bug.
    fn set_len(&self, _len: u64) -> Result<(), Error> {
        unreachable!()
    }

    // Intentionally panics: nothing to sync on a read-only backend.
    fn sync_data(&self) -> Result<(), Error> {
        unreachable!()
    }

    // Intentionally panics: writing through a read-only backend is a logic bug.
    fn write(&self, _offset: u64, _data: &[u8]) -> Result<(), Error> {
        unreachable!()
    }

    fn close(&self) -> Result<(), Error> {
        self.inner.close()
    }
}
42
/// Acts as temporary in-memory database storage.
///
/// The entire database image is a single heap-allocated byte vector behind
/// an `RwLock`, so contents are lost when the backend is dropped.
#[derive(Debug, Default)]
pub struct InMemoryBackend(RwLock<Vec<u8>>);
46
47impl InMemoryBackend {
48    fn out_of_range() -> io::Error {
49        io::Error::new(io::ErrorKind::InvalidInput, "Index out-of-range.")
50    }
51}
52
53impl InMemoryBackend {
54    /// Creates a new, empty memory backend.
55    pub fn new() -> Self {
56        Self::default()
57    }
58
59    /// Gets a read guard for this backend.
60    fn read(&self) -> RwLockReadGuard<'_, Vec<u8>> {
61        self.0.read().expect("Could not acquire read lock.")
62    }
63
64    /// Gets a write guard for this backend.
65    fn write(&self) -> RwLockWriteGuard<'_, Vec<u8>> {
66        self.0.write().expect("Could not acquire write lock.")
67    }
68}
69
70impl StorageBackend for InMemoryBackend {
71    fn len(&self) -> Result<u64, io::Error> {
72        Ok(self.read().len() as u64)
73    }
74
75    fn read(&self, offset: u64, out: &mut [u8]) -> Result<(), io::Error> {
76        let guard = self.read();
77        let offset = usize::try_from(offset).map_err(|_| Self::out_of_range())?;
78        if offset + out.len() <= guard.len() {
79            out.copy_from_slice(&guard[offset..offset + out.len()]);
80            Ok(())
81        } else {
82            Err(Self::out_of_range())
83        }
84    }
85
86    fn set_len(&self, len: u64) -> Result<(), io::Error> {
87        let mut guard = self.write();
88        let len = usize::try_from(len).map_err(|_| Self::out_of_range())?;
89        if guard.len() < len {
90            let additional = len - guard.len();
91            guard.reserve(additional);
92            for _ in 0..additional {
93                guard.push(0);
94            }
95        } else {
96            guard.truncate(len);
97        }
98
99        Ok(())
100    }
101
102    fn sync_data(&self) -> Result<(), io::Error> {
103        Ok(())
104    }
105
106    fn write(&self, offset: u64, data: &[u8]) -> Result<(), io::Error> {
107        let mut guard = self.write();
108        let offset = usize::try_from(offset).map_err(|_| Self::out_of_range())?;
109        if offset + data.len() <= guard.len() {
110            guard[offset..offset + data.len()].copy_from_slice(data);
111            Ok(())
112        } else {
113            Err(Self::out_of_range())
114        }
115    }
116}