Update the documentation to use linking by name

See https://doc.rust-lang.org/stable/rustdoc/linking-to-items-by-name.html
Julien Cretin
2021-03-13 13:16:57 +01:00
parent eb0a0770dd
commit 3c7c5a4810
8 changed files with 368 additions and 377 deletions

View File

@@ -12,6 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//! Flash storage for testing.
//!
//! [`BufferStorage`] implements the flash [`Storage`] interface but doesn't interface with an
//! actual flash storage. Instead it uses a buffer in memory to represent the storage state.
use crate::{Storage, StorageError, StorageIndex, StorageResult};
use alloc::borrow::Borrow;
use alloc::boxed::Box;
@@ -63,8 +68,8 @@ pub struct BufferOptions {
///
/// When set, the following conditions would panic:
/// - A bit is written from 0 to 1.
/// - A word is written more than [`Self::max_word_writes`].
/// - A page is erased more than [`Self::max_page_erases`].
pub strict_mode: bool,
}
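For illustration, a hedged sketch of enabling strict mode when configuring a test storage; only `max_word_writes`, `max_page_erases`, and `strict_mode` are confirmed by this excerpt, the other field names are assumptions:

```rust
// Hypothetical configuration; `word_size` and `page_size` are assumed fields.
let options = BufferOptions {
    word_size: 4,
    page_size: 4096,
    max_word_writes: 2,
    max_page_erases: 10_000,
    strict_mode: true, // panic on the violations listed above
};
```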
@@ -110,15 +115,13 @@ impl BufferStorage {
///
/// Before each subsequent mutable operation (write or erase), the delay is decremented if
/// positive. If the delay has elapsed, the operation is saved and an error is returned.
/// Subsequent operations will panic until either of:
/// - The interrupted operation is [corrupted](BufferStorage::corrupt_operation).
/// - The interruption is [reset](BufferStorage::reset_interruption).
///
/// # Panics
///
/// Panics if an interruption is already armed.
pub fn arm_interruption(&mut self, delay: usize) {
self.interruption.arm(delay);
}
@@ -130,10 +133,8 @@ impl BufferStorage {
/// # Panics
///
/// Panics if any of the following conditions hold:
/// - An interruption was not [armed](BufferStorage::arm_interruption).
/// - An interruption was armed and it has triggered.
pub fn disarm_interruption(&mut self) -> usize {
self.interruption.get().err().unwrap()
}
@@ -142,16 +143,14 @@ impl BufferStorage {
///
/// # Panics
///
/// Panics if an interruption was not [armed](BufferStorage::arm_interruption).
pub fn reset_interruption(&mut self) {
let _ = self.interruption.get();
}
/// Corrupts an interrupted operation.
///
/// Applies the corruption function to the storage. Counters are updated accordingly:
/// - If a word is fully written, its counter is incremented regardless of whether other words
///   of the same operation have been fully written.
/// - If a page is fully erased, its counter is incremented (and its word counters are reset).
@@ -159,13 +158,10 @@ impl BufferStorage {
/// # Panics
///
/// Panics if any of the following conditions hold:
/// - An interruption was not [armed](BufferStorage::arm_interruption).
/// - An interruption was armed but did not trigger.
/// - The corruption function corrupts more bits than allowed.
/// - The interrupted operation itself would have panicked.
pub fn corrupt_operation(&mut self, corrupt: BufferCorruptFunction) {
let operation = self.interruption.get().unwrap();
let range = self.operation_range(&operation).unwrap();
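A hedged sketch of how this interruption machinery might drive a test; `index` and `value` are placeholders and the exact error value is not specified here:

```rust
// Interrupt the next mutable operation (delay 0), then pretend it never started.
storage.arm_interruption(0);
assert!(storage.write_slice(index, value).is_err()); // the interrupted write is saved
storage.reset_interruption(); // or corrupt_operation(..) to simulate a partial write
// Mutable operations now behave normally again.
```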
@@ -217,7 +213,8 @@ impl BufferStorage {
///
/// # Panics
///
/// Panics if the [maximum number of erase cycles per page](BufferOptions::max_page_erases) is
/// reached.
fn incr_page_erases(&mut self, page: usize) {
// Check that pages are not erased too many times.
if self.options.strict_mode {
@@ -243,7 +240,8 @@ impl BufferStorage {
///
/// # Panics
///
/// Panics if the [maximum number of writes per word](BufferOptions::max_word_writes) is
/// reached.
fn incr_word_writes(&mut self, index: usize, value: &[u8], complete: &[u8]) {
let word_size = self.word_size();
for i in 0..value.len() / word_size {

View File

@@ -12,6 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//! Store wrapper for testing.
//!
//! [`StoreDriver`] wraps a [`Store`] and compares its behavior with its associated [`StoreModel`].
use crate::format::{Format, Position};
#[cfg(test)]
use crate::StoreUpdate;

View File

@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//! Storage representation of a store.
#[macro_use]
mod bitfield;
@@ -26,13 +28,14 @@ use core::convert::TryFrom;
/// Internal representation of a word in flash.
///
/// Currently, the store only supports storages where a word is 32 bits, i.e. the [word
/// size](Storage::word_size) is 4 bytes.
type WORD = u32;
/// Abstract representation of a word in flash.
///
/// This type is kept abstract to avoid possible confusion with [`Nat`] if they happen to have the
/// same representation. This is because they have different semantics: [`Nat`] represents natural
/// numbers while `Word` represents sequences of bits (and thus has no arithmetic).
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct Word(WORD);
@@ -47,7 +50,7 @@ impl Word {
///
/// # Panics
///
/// Panics if `slice.len()` is not [`WORD_SIZE`] bytes.
pub fn from_slice(slice: &[u8]) -> Word {
Word(WORD::from_le_bytes(<WordSlice>::try_from(slice).unwrap()))
}
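For illustration, a hedged sketch of this conversion; the byte values are arbitrary:

```rust
// WORD_SIZE (4) little-endian bytes form one word; any other slice length panics.
let word = Word::from_slice(&[0x78, 0x56, 0x34, 0x12]); // represents 0x12345678
// Word::from_slice(&[0x78, 0x56]); // would panic: slice.len() != WORD_SIZE
```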
@@ -60,47 +63,49 @@ impl Word {
/// Size of a word in bytes.
///
/// Currently, the store only supports storages where the [word size](Storage::word_size) is 4
/// bytes.
const WORD_SIZE: Nat = core::mem::size_of::<WORD>() as Nat;
/// Minimum number of words per page.
///
/// Currently, the store only supports storages where pages have at least 8 [words](WORD_SIZE),
/// i.e. the [page size](Storage::page_size) is at least 32 bytes.
const MIN_PAGE_SIZE: Nat = 8;
/// Maximum size of a page in bytes.
///
/// Currently, the store only supports storages where pages have at most 1024 [words](WORD_SIZE),
/// i.e. the [page size](Storage::page_size) is at most 4096 bytes.
const MAX_PAGE_SIZE: Nat = 4096;
/// Maximum number of erase cycles.
///
/// Currently, the store only supports storages where the [maximum number of erase
/// cycles](Storage::max_page_erases) fits in 16 bits, i.e. it is at most 65535.
const MAX_ERASE_CYCLE: Nat = 65535;
/// Minimum number of pages.
///
/// Currently, the store only supports storages where the [number of pages](Storage::num_pages) is
/// at least 3.
const MIN_NUM_PAGES: Nat = 3;
/// Maximum page index.
///
/// Currently, the store only supports storages where the [number of pages](Storage::num_pages) is
/// at most 64, i.e. the maximum page index is 63.
const MAX_PAGE_INDEX: Nat = 63;
/// Maximum key index.
///
/// Currently, the store only supports 4096 keys, i.e. the maximum key index is 4095.
const MAX_KEY_INDEX: Nat = 4095;
/// Maximum length in bytes of a user payload.
///
/// Currently, the store only supports values at most 1023 bytes long. This may be further reduced
/// depending on the [page size](Storage::page_size), see [`Format::max_value_len`].
const MAX_VALUE_LEN: Nat = 1023;
/// Maximum number of updates per transaction.
@@ -109,9 +114,15 @@ const MAX_VALUE_LEN: Nat = 1023;
const MAX_UPDATES: Nat = 31;
/// Maximum number of words per virtual page.
///
/// A virtual page has [`CONTENT_WORD`] fewer [words](WORD_SIZE) than the storage [page
/// size](Storage::page_size). Those words are used to store the page header. Since a page has at
/// least [8](MIN_PAGE_SIZE) words, a virtual page has at least 6 words.
const MAX_VIRT_PAGE_SIZE: Nat = MAX_PAGE_SIZE / WORD_SIZE - CONTENT_WORD;
/// Word with all bits set to one.
///
/// After a page is erased, all words are equal to this value.
const ERASED_WORD: Word = Word(!(0 as WORD));
/// Helpers for a given storage configuration.
@@ -121,33 +132,31 @@ pub struct Format {
///
/// # Invariant
///
/// - [Words](WORD_SIZE) divide a page evenly.
/// - There are at least [`MIN_PAGE_SIZE`] words in a page.
/// - There are at most [`MAX_PAGE_SIZE`] bytes in a page.
page_size: Nat,
/// The number of pages in the storage.
///
/// # Invariant
///
/// - There are at least [`MIN_NUM_PAGES`] pages.
/// - There are at most [`MAX_PAGE_INDEX`] + 1 pages.
num_pages: Nat,
/// The maximum number of times a page can be erased.
///
/// # Invariant
///
/// - A page can be erased at most [`MAX_ERASE_CYCLE`] times.
max_page_erases: Nat,
}
impl Format {
/// Extracts the format from a storage.
///
/// Returns `None` if the storage is not [supported](Format::is_storage_supported).
pub fn new<S: Storage>(storage: &S) -> Option<Format> {
if Format::is_storage_supported(storage) {
Some(Format {
@@ -163,21 +172,12 @@ impl Format {
/// Returns whether a storage is supported.
///
/// A storage is supported if the following conditions hold:
/// - The [`Storage::word_size`] is [`WORD_SIZE`] bytes.
/// - The [`Storage::word_size`] evenly divides the [`Storage::page_size`].
/// - The [`Storage::page_size`] is between [`MIN_PAGE_SIZE`] words and [`MAX_PAGE_SIZE`] bytes.
/// - The [`Storage::num_pages`] is between [`MIN_NUM_PAGES`] and [`MAX_PAGE_INDEX`] + 1.
/// - The [`Storage::max_word_writes`] is at least 2.
/// - The [`Storage::max_page_erases`] is at most [`MAX_ERASE_CYCLE`].
fn is_storage_supported<S: Storage>(storage: &S) -> bool {
let word_size = usize_to_nat(storage.word_size());
let page_size = usize_to_nat(storage.page_size());
@@ -186,7 +186,7 @@ impl Format {
let max_page_erases = usize_to_nat(storage.max_page_erases());
word_size == WORD_SIZE
&& page_size % word_size == 0
&& (MIN_PAGE_SIZE * word_size <= page_size && page_size <= MAX_PAGE_SIZE)
&& (MIN_NUM_PAGES <= num_pages && num_pages <= MAX_PAGE_INDEX + 1)
&& max_word_writes >= 2
&& max_page_erases <= MAX_ERASE_CYCLE
@@ -199,28 +199,28 @@ impl Format {
/// The size of a page in bytes.
///
/// This is at least [`MIN_PAGE_SIZE`] [words](WORD_SIZE) and at most [`MAX_PAGE_SIZE`] bytes.
pub fn page_size(&self) -> Nat {
self.page_size
}
/// The number of pages in the storage, denoted by N.
///
/// We have [`MIN_NUM_PAGES`] ≤ N ≤ [`MAX_PAGE_INDEX`] + 1.
pub fn num_pages(&self) -> Nat {
self.num_pages
}
/// The maximum page index.
///
/// This is at least [`MIN_NUM_PAGES`] - 1 and at most [`MAX_PAGE_INDEX`].
pub fn max_page(&self) -> Nat {
self.num_pages - 1
}
/// The maximum number of times a page can be erased, denoted by E.
///
/// We have E ≤ [`MAX_ERASE_CYCLE`].
pub fn max_page_erases(&self) -> Nat {
self.max_page_erases
}
@@ -235,19 +235,18 @@ impl Format {
MAX_UPDATES
}
/// The size of a virtual page in words, denoted by Q.
///
/// A virtual page is stored in a physical page after the page header.
///
/// We have [`MIN_PAGE_SIZE`] - 2 ≤ Q ≤ [`MAX_VIRT_PAGE_SIZE`].
pub fn virt_page_size(&self) -> Nat {
self.page_size() / self.word_size() - CONTENT_WORD
}
/// The maximum length in bytes of a user payload.
///
/// This is at least [`MIN_PAGE_SIZE`] - 3 [words](WORD_SIZE) and at most [`MAX_VALUE_LEN`].
pub fn max_value_len(&self) -> Nat {
min(
(self.virt_page_size() - 1) * self.word_size(),
@@ -255,57 +254,50 @@ impl Format {
)
}
/// The maximum prefix length in words, denoted by M.
///
/// A prefix is the first words of a virtual page that belong to the last entry of the previous
/// virtual page. This happens because entries may overlap up to 2 virtual pages.
///
/// We have [`MIN_PAGE_SIZE`] - 3 ≤ M < Q.
pub fn max_prefix_len(&self) -> Nat {
self.bytes_to_words(self.max_value_len())
}
/// The total virtual capacity in words, denoted by V.
///
/// We have V = (N - 1) × (Q - 1) - M.
///
/// We can show V ≥ (N - 2) × (Q - 1) with the following steps:
/// - M ≤ Q - 1 from M < Q from [M](Format::max_prefix_len)'s definition
/// - -M ≥ -(Q - 1) from above
/// - V ≥ (N - 1) × (Q - 1) - (Q - 1) from V's definition
pub fn virt_size(&self) -> Nat {
(self.num_pages() - 1) * (self.virt_page_size() - 1) - self.max_prefix_len()
}
/// The total user capacity in words, denoted by C.
///
/// We have C = V - N = (N - 1) × (Q - 2) - M - 1.
///
/// We can show C ≥ (N - 2) × (Q - 2) - 2 with the following steps:
/// - V ≥ (N - 2) × (Q - 1) from [V](Format::virt_size)'s definition
/// - C ≥ (N - 2) × (Q - 1) - N from C's definition
/// - (N - 2) × (Q - 1) - N = (N - 2) × (Q - 2) - 2 by calculus
pub fn total_capacity(&self) -> Nat {
// From the virtual capacity, we reserve N - 1 words for `Erase` entries and 1 word for a
// `Clear` entry.
self.virt_size() - self.num_pages()
}
/// The total virtual lifetime in words, denoted by L.
///
/// We have L = (E × N + N - 1) × Q.
pub fn total_lifetime(&self) -> Position {
Position::new(self, self.max_page_erases(), self.num_pages() - 1, 0)
}
/// Returns the word position of the first entry of a page.
///
/// The init info of the page must be provided to know where the first entry of the page
/// starts.
pub fn page_head(&self, init: InitInfo, page: Nat) -> Position {
Position::new(self, init.cycle, page, init.prefix)
}
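To make these formulas concrete, here is a hedged worked example; the storage parameters are hypothetical, not taken from a real device:

```rust
// Hypothetical storage: 4-byte words, 1024-byte pages, 3 pages, 10000 erase cycles.
let (word_size, num_pages, page_size, max_page_erases) = (4u32, 3u32, 1024u32, 10_000u32);
let q = page_size / word_size - 2; // virtual page size: 254 words
let max_value = (q - 1) * word_size; // 1012 bytes, below MAX_VALUE_LEN (1023)
let m = (max_value + word_size - 1) / word_size; // maximum prefix length: 253 words
let v = (num_pages - 1) * (q - 1) - m; // virtual capacity: 253 words
let c = v - num_pages; // user capacity: 250 words
let l = (max_page_erases * num_pages + num_pages - 1) * q; // total lifetime in words
assert_eq!((q, m, v, c, l), (254, 253, 253, 250, 7_620_508));
```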
@@ -557,7 +549,7 @@ impl Format {
///
/// # Preconditions
///
/// - `bytes` + [`Self::word_size`] does not overflow.
pub fn bytes_to_words(&self, bytes: Nat) -> Nat {
div_ceil(bytes, self.word_size())
}
@@ -571,7 +563,7 @@ const COMPACT_WORD: Nat = 1;
/// The word index of the content of a page.
///
/// This is also the length in words of the page header.
const CONTENT_WORD: Nat = 2;
/// The checksum for a single word.
@@ -718,21 +710,21 @@ bitfield! {
/// The position of a word in the virtual storage.
///
/// With the notations defined in [`Format`], let:
/// - w denote a word offset in a virtual page, thus between 0 and Q - 1
/// - p denote a page offset, thus between 0 and N - 1
/// - c denote the number of times a page was erased, thus between 0 and E
///
/// The position of a word is (c × N + p) × Q + w. This position monotonically increases and
/// represents the consumed lifetime of the storage.
///
/// This type is kept abstract to avoid possible confusion with [`Nat`] and [`Word`] if they happen
/// to have the same representation. Here is an overview of their semantics:
///
/// | Name       | Semantics                   | Arithmetic operations | Bit-wise operations |
/// | ---------- | --------------------------- | --------------------- | ------------------- |
/// | [`Nat`]    | Natural numbers             | Yes (no overflow)     | No                  |
/// | [`Word`]   | Word in flash               | No                    | Yes                 |
/// | `Position` | Position in virtual storage | Yes (no overflow)     | No                  |
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct Position(Nat);
@@ -763,9 +755,9 @@ impl Position {
/// Create a word position given its coordinates.
///
/// The coordinates of a word are:
/// - Its word index in its virtual page.
/// - Its page index in the storage.
/// - The number of times its page was erased.
pub fn new(format: &Format, cycle: Nat, page: Nat, word: Nat) -> Position {
Position((cycle * format.num_pages() + page) * format.virt_page_size() + word)
}
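As a quick check of the formula, a hedged sketch using the illustrative parameters from the worked example above (N = 3 pages, Q = 254 words per virtual page):

```rust
// Position of word offset 5 in page 1 after 2 erase cycles: (c × N + p) × Q + w.
let (n, q) = (3u32, 254u32);
let (cycle, page, word) = (2u32, 1u32, 5u32);
assert_eq!((cycle * n + page) * q + word, 1_783);
```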
@@ -928,11 +920,11 @@ pub fn is_erased(slice: &[u8]) -> bool {
/// Divides then takes ceiling.
///
/// Returns ⌈x / m⌉, i.e. the lowest natural number r such that r ≥ x / m.
///
/// # Preconditions
///
/// - x + m does not overflow.
const fn div_ceil(x: Nat, m: Nat) -> Nat {
(x + m - 1) / m
}
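A brief sanity check of the identity above (illustrative values only):

```rust
// ⌈x / m⌉ computed with integer arithmetic as (x + m - 1) / m.
assert_eq!(div_ceil(1012, 4), 253); // exact division
assert_eq!(div_ceil(1013, 4), 254); // rounds up
assert_eq!(div_ceil(0, 4), 0);
```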

View File

@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//! Support for fragmented entries.
//!
//! This module permits handling entries larger than the [maximum value
//! length](Store::max_value_length) by storing ordered consecutive fragments in a sequence of keys.
@@ -36,7 +36,7 @@ pub trait Keys {
///
/// # Preconditions
///
/// The position must be within the length: `pos` < [`Self::len`].
fn key(&self, pos: usize) -> usize;
}

View File

@@ -12,191 +12,191 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// The documentation is easier to read from a browser:
// - Run: cargo doc --document-private-items --features=std
// - Open: target/doc/persistent_store/index.html
//! Store abstraction for flash storage
//!
//! # Specification
//!
//! The [store](Store) provides a partial function from keys to values on top of a
//! [storage](Storage) interface. The store total [capacity](Store::capacity) depends on the size of
//! the storage. Store [updates](StoreUpdate) may be bundled in [transactions](Store::transaction).
//! Mutable operations are atomic, including when interrupted.
//!
//! The store is flash-efficient in the sense that it uses the storage [lifetime](Store::lifetime)
//! efficiently. For each page, all words are written at least once between erase cycles and all
//! erase cycles are used. However, not all written words are user content: lifetime is also
//! consumed with metadata and compaction.
//!
//! The store is extendable with other entries than key-values. It is essentially a framework
//! providing access to the storage lifetime. The partial function is simply the most common usage
//! and can be used to encode other usages.
//!
//! ## Definitions
//!
//! An _entry_ is a pair of a key and a value. A _key_ is a number between 0 and
//! [4095](format::MAX_KEY_INDEX). A _value_ is a byte slice with a length between 0 and
//! [1023](format::Format::max_value_len) bytes (for large enough pages).
//!
//! The store provides the following _updates_:
//! - Given a key and a value, [`StoreUpdate::Insert`] updates the store such that the value is
//!   associated with the key. The values for other keys are left unchanged.
//! - Given a key, [`StoreUpdate::Remove`] updates the store such that no value is associated with
//!   the key. The values for other keys are left unchanged. Additionally, if there was a value
//!   associated with the key, the value is wiped from the storage (all its bits are set to 0).
//!
//! The store provides the following _read-only operations_:
//! - [`Store::iter`] iterates through the store returning all entries exactly once. The iteration
//!   order is not specified but stable between mutable operations.
//! - [`Store::capacity`] returns how many words can be stored before the store is full.
//! - [`Store::lifetime`] returns how many words can be written before the storage lifetime is
//!   consumed.
//!
//! The store provides the following _mutable operations_:
//! - Given a set of independent updates, [`Store::transaction`] applies the sequence of updates.
//! - Given a threshold, [`Store::clear`] removes all entries with a key greater or equal to the
//!   threshold.
//! - Given a length in words, [`Store::prepare`] makes one step of compaction unless that many
//!   words can be written without compaction. This operation has no effect on the store but may
//!   still mutate its storage. In particular, the store has the same capacity but a possibly
//!   reduced lifetime.
//!
//! A mutable operation is _atomic_ if, when power is lost during the operation, the store is either
//! updated (as if the operation succeeded) or left unchanged (as if the operation did not occur).
//! If the store is left unchanged, lifetime may still be consumed.
//!
//! The store relies on the following _storage interface_:
//! - It is possible to [read](Storage::read_slice) a byte slice. The slice won't span multiple
//!   pages.
//! - It is possible to [write](Storage::write_slice) a word slice. The slice won't span multiple
//!   pages.
//! - It is possible to [erase](Storage::erase_page) a page.
//! - The pages are sequentially indexed from 0. If the actual underlying storage is segmented,
//!   then the storage layer should translate those indices to actual page addresses.
//!
//! The store has a _total capacity_ of C = (N - 1) × (P - 4) - M - 1 words, where:
//! - P is the number of words per page
//! - [N](format::Format::num_pages) is the number of pages
//! - [M](format::Format::max_prefix_len) is the maximum length in words of a value (256 for large
//!   enough pages)
//!
//! The capacity used by each mutable operation is given below (a transient word only uses capacity
//! during the operation):
//!
//! | Operation/Update        | Used capacity    | Freed capacity    | Transient capacity |
//! | ----------------------- | ---------------- | ----------------- | ------------------ |
//! | [`StoreUpdate::Insert`] | 1 + value length | overwritten entry | 0                  |
//! | [`StoreUpdate::Remove`] | 0                | deleted entry     | see below\*        |
//! | [`Store::transaction`]  | 0 + updates      | 0 + updates       | 1                  |
//! | [`Store::clear`]        | 0                | deleted entries   | 0                  |
//! | [`Store::prepare`]      | 0                | 0                 | 0                  |
//!
//! \*0 if the update is alone in the transaction, otherwise 1.
//!
//! The _total lifetime_ of the store is below L = ((E + 1) × N - 1) × (P - 2) and above L - M
//! words, where E is the maximum number of erase cycles. The lifetime is used when capacity is
//! used, including transiently, as well as when compaction occurs. Compaction frequency and
//! lifetime consumption are positively correlated to the store load factor (the ratio of used
//! capacity to total capacity).
//!
//! It is possible to approximate the cost of transient words in terms of capacity: L transient
//! words are equivalent to C - x words of capacity where x is the average capacity (including
//! transient) of operations.
//!
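As a concrete reading of the table above, an illustrative sketch (a word size of 4 bytes matches the supported storages):

```rust
// Inserting a 10-byte value uses 1 header word plus ceil(10 / 4) = 3 value words,
// i.e. 4 words of capacity; replacing it later frees those 4 words again.
let value_len_bytes = 10u32;
let used_words = 1 + (value_len_bytes + 4 - 1) / 4;
assert_eq!(used_words, 4);
```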
//! ## Preconditions
//!
//! The following assumptions need to hold, or the store may behave in unexpected ways:
//! - A word can be written [twice](Storage::max_word_writes) between erase cycles.
//! - A page can be erased [E](Storage::max_page_erases) times after the first boot of the store.
//! - When power is lost while writing a slice or erasing a page, the next read returns a slice
//!   where a subset (possibly none or all) of the bits that should have been modified have been
//!   modified.
//! - Reading a slice is deterministic. When power is lost while writing a slice or erasing a
//!   slice (erasing a page containing that slice), reading that slice repeatedly returns the same
//!   result (until it is overwritten or its page is erased).
//! - To decide whether a page has been erased, it is enough to test if all its bits are equal
//!   to 1.
//! - When power is lost while writing a slice or erasing a page, that operation does not count
//!   towards the limits. However, completing that write or erase operation would count towards
//!   the limits, as if the number of writes per word and number of erase cycles could be
//!   fractional.
//! - The storage is only modified by the store. Note that completely erasing the storage is
//!   supported, essentially losing all content and lifetime tracking. It is preferred to use
//!   [`Store::clear`] with a threshold of 0 to keep the lifetime tracking.
//!
//! The store properties may still hold outside some of those assumptions, but with an increasing
//! chance of failure.
//!
//! # Implementation
//!
//! We define the following constants:
//! - [E](format::Format::max_page_erases) ≤ [65535](format::MAX_ERASE_CYCLE) the number of times
//!   a page can be erased.
//! - 3 ≤ [N](format::Format::num_pages) < 64 the number of pages in the storage.
//! - 8 ≤ P ≤ 1024 the number of words in a page.
//! - [Q](format::Format::virt_page_size) = P - 2 the number of words in a virtual page.
//! - [M](format::Format::max_prefix_len) = min(Q - 1, 256) the maximum length in words of a
//!   value.
//! - [V](format::Format::virt_size) = (N - 1) × (Q - 1) - M the virtual capacity.
//! - [C](format::Format::total_capacity) = V - N the user capacity.
//!
//! We build a virtual storage from the physical storage using the first 2 words of each page:
//! - The first word contains the number of times the page has been erased.
//! - The second word contains the starting word to which this page is being moved during
//!   compaction.
//!
//! The virtual storage has a length of (E + 1) × N × Q words and represents the lifetime of the
//! store. (We reserve the last Q + M words to support adding emergency lifetime.) This virtual
//! storage has a linear address space.
//!
//! We define a set of overlapping windows of N × Q words at each Q-aligned boundary. We call i the
//! window spanning from i × Q to (i + N) × Q. Only those windows actually exist in the underlying
//! storage. We use compaction to shift the current window from i to i + 1, preserving the content
//! of the store.
//!
//! For a given state of the virtual storage, we define h\_i as the position of the first entry of
//! the window i. We call it the head of the window i. Because entries are at most M + 1 words, they
//! can overlap on the next page only by M words. So we have i × Q ≤ h\_i ≤ i × Q + M. Since there
//! are no entries before the first page, we have h\_0 = 0.
//!
//! We define t\_i as one past the last entry of the window i. If there are no entries in that
//! window, we have t\_i = h\_i. We call t\_i the tail of the window i. We define the compaction
//! invariant as t\_i - h\_i ≤ V.
//!
//! We define |x| as the capacity used before position x. We have |x| ≤ x. We define the capacity
//! invariant as |t\_i| - |h\_i| ≤ C.
//!
//! Using this virtual storage, entries are appended to the tail as long as there is both virtual
//! capacity to preserve the compaction invariant and capacity to preserve the capacity invariant.
//! When virtual capacity runs out, the first page of the window is compacted and the window is
//! shifted.
//!
//! Entries are identified by a prefix of bits. The prefix has to contain at least one bit set to
//! zero to differentiate from the tail. Entries can be one of:
//! - [Padding](format::ID_PADDING): A word whose first bit is set to zero. The rest is arbitrary.
//!   This entry is used to mark words partially written after an interrupted operation as padding
//!   such that they are ignored by future operations.
//! - [Header](format::ID_HEADER): A word whose second bit is set to zero. It contains the
//!   following fields:
//!   - A [bit](format::HEADER_DELETED) indicating whether the entry is deleted.
//!   - A [bit](format::HEADER_FLIPPED) indicating whether the value is word-aligned and has all
//!     bits set to 1 in its last word. The last word of an entry is used to detect that an
//!     entry has been fully written. As such it must contain at least one bit equal to zero.
//!   - The [key](format::HEADER_KEY) of the entry.
//!   - The [length](format::HEADER_LENGTH) in bytes of the value. The value follows the header.
//!     The entry is word-aligned if the value is not.
//!   - The [checksum](format::HEADER_CHECKSUM) of the first and last word of the entry.
//! - [Erase](format::ID_ERASE): A word used during compaction. It contains the
//!   [page](format::ERASE_PAGE) to be erased and a [checksum](format::WORD_CHECKSUM).
//! - [Clear](format::ID_CLEAR): A word used during the clear operation. It contains the
//!   [threshold](format::CLEAR_MIN_KEY) and a [checksum](format::WORD_CHECKSUM).
//! - [Marker](format::ID_MARKER): A word used during a transaction. It contains the [number of
//!   updates](format::MARKER_COUNT) following the marker and a [checksum](format::WORD_CHECKSUM).
//! - [Remove](format::ID_REMOVE): A word used inside a transaction. It contains the
//!   [key](format::REMOVE_KEY) of the entry to be removed and a
//!   [checksum](format::WORD_CHECKSUM).
//!
//! Checksums are the number of bits equal to 0.
//!
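To tie the operations together, a hedged usage sketch; the constructor and the exact shape of `StoreUpdate`'s variants are assumptions, only the operation names are taken from the documentation above:

```rust
// Hypothetical driver code; error handling and field names are illustrative.
let mut store = Store::new(storage)?;
store.transaction(&[
    StoreUpdate::Insert { key: 1, value: b"hello".to_vec() },
    StoreUpdate::Remove { key: 2 },
])?;
for entry in store.iter()? {
    // Every entry is returned exactly once; the order is unspecified but
    // stable between mutable operations.
    let _ = entry;
}
let _capacity = store.capacity()?; // words that can still be stored
let _lifetime = store.lifetime()?; // words that can still be written
store.clear(0)?;                   // drop all entries, keep lifetime tracking
```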
@@ -204,107 +204,105 @@
//! //!
//! ## Compaction //! ## Compaction
//! //!
//! It should always be possible to fully compact the store, after what the //! It should always be possible to fully compact the store, after what the remaining capacity
//! remaining capacity should be available in the current window (restoring the //! should be available in the current window (restoring the compaction invariant). We consider all
//! compaction invariant). We consider all notations on the virtual storage after //! notations on the virtual storage after the full compaction. We will use the |x| notation
//! the full compaction. We will use the `|x|` notation although we update the state //! although we update the state of the virtual storage. This is fine because compaction doesn't
//! of the virtual storage. This is fine because compaction doesn't change the //! change the status of an existing word.
//! status of an existing word.
//! //!
//! We want to show that the next `N - 1` compactions won't move the tail past the //! We want to show that the next N - 1 compactions won't move the tail past the last page of their
//! last page of their window, with `I` the initial window: //! window, with I the initial window:
//! //!
//! ```text //! | | | | |
//! forall 1 <= i <= N - 1, t_{I + i} <= (I + i + N - 1) * Q //! | ----------------:| ----------:|:-:|:------------------- |
//! ``` //! | ∀(1 ≤ i ≤ N - 1) | t\_{I + i} | ≤ | (I + i + N - 1) × Q |
//! //!
//! We assume `i` between `1` and `N - 1`. //! We assume i between 1 and N - 1.
//! //!
//! One step of compaction advances the tail by how many words were used in the //! One step of compaction advances the tail by how many words were used in the first page of the
//! first page of the window with the last entry possibly overlapping on the next //! window with the last entry possibly overlapping on the next page.
//! page.
//! //!
//! ```text //! | | | | |
//! forall j, t_{j + 1} = t_j + |h_{j + 1}| - |h_j| + 1 //! | --:| ----------:|:-:|:------------------------------------ |
//! ``` //! | ∀j | t\_{j + 1} | = | t\_j + \|h\_{j + 1}\| - \|h\_j\| + 1 |
//! //!
//! By induction, we have: //! By induction, we have:
//! //!
//! ```text //! | | | |
//! t_{I + i} <= t_I + |h_{I + i}| - |h_I| + i //! | ----------:|:-:|:------------------------------------ |
//! ``` //! | t\_{I + i} | ≤ | t\_I + \|h\_{I + i}\| - \|h\_I\| + i |
//! //!
//! We have the following properties: //! We have the following properties:
//! //!
//! ```text //! | | | |
//! t_I <= h_I + V //! | -------------------------:|:-:|:----------------- |
//! |h_{I + i}| - |h_I| <= h_{I + i} - h_I //! | t\_I | | h\_I + V |
//! h_{I + i} <= (I + i) * Q + M //! | \|h\_{I + i}\| - \|h\_I\| | ≤ | h\_{I + i} - h\_I |
//! ``` //! | h\_{I + i} | ≤ | (I + i) × Q + M |
//! //!
//! Replacing into our previous equality, we can conclude: //! Replacing into our previous equality, we can conclude:
//! //!
//! ```text //! | | | |
//! t_{I + i} = t_I + |h_{I + i}| - |h_I| + i //! | ----------:|:-:| ------------------------------------------- |
//! <= h_I + V + (I + i) * Q + M - h_I + i //! | t\_{I + i} | = | t_I + \|h_{I + i}\| - \|h_I\| + i |
//! = (N - 1) * (Q - 1) - M + (I + i) * Q + M + i //! | | ≤ | h\_I + V + (I + i) * Q + M - h\_I + i |
//! = (N - 1) * (Q - 1) + (I + i) * Q + i //! | | = | (N - 1) × (Q - 1) - M + (I + i) × Q + M + i |
//! = (I + i + N - 1) * Q + i - (N - 1) //! | | = | (N - 1) × (Q - 1) + (I + i) × Q + i |
//! <= (I + i + N - 1) * Q //! | | = | (I + i + N - 1) × Q + i - (N - 1) |
//! ``` //! | | ≤ | (I + i + N - 1) × Q |
//! //!
//! We also want to show that after N - 1 compactions, the remaining capacity is available without
//! compaction.
//!
//! |   |                                               |                                    |
//! | -:| --------------------------------------------- | ---------------------------------- |
//! |   | V - (t\_{I + N - 1} - h\_{I + N - 1})         | The available words in the window  |
//! | ≥ | C - (\|t\_{I + N - 1}\| - \|h\_{I + N - 1}\|) | The remaining capacity             |
//! | + | 1                                             | Reserved for clear                 |
//!
//! We can replace the definition of C and simplify:
//!
//! |     |                                        |   |                                                    |
//! | ---:| --------------------------------------:|:-:|:-------------------------------------------------- |
//! |     | V - (t\_{I + N - 1} - h\_{I + N - 1})  | ≥ | V - N - (\|t\_{I + N - 1}\| - \|h\_{I + N - 1}\|) + 1 |
//! | iff | t\_{I + N - 1} - h\_{I + N - 1}        | ≤ | \|t\_{I + N - 1}\| - \|h\_{I + N - 1}\| + N - 1    |
//!
//! We have the following properties:
//!
//! |                                         |   |                                              |                               |
//! | ---------------------------------------:|:-:|:-------------------------------------------- |:----------------------------- |
//! | t\_{I + N - 1}                          | = | t\_I + \|h\_{I + N - 1}\| - \|h\_I\| + N - 1 |                               |
//! | \|t\_{I + N - 1}\| - \|h\_{I + N - 1}\| | = | \|t\_I\| - \|h\_I\|                          | Compaction preserves capacity |
//! | \|h\_{I + N - 1}\| - \|t\_I\|           | ≤ | h\_{I + N - 1} - t\_I                        |                               |
//!
//! From which we conclude:
//!
//! |     |                                                               |   |                                                 |
//! | ---:| -------------------------------------------------------------:|:-:|:------------------------------------------------ |
//! |     | t\_{I + N - 1} - h\_{I + N - 1}                               | ≤ | \|t\_{I + N - 1}\| - \|h\_{I + N - 1}\| + N - 1 |
//! | iff | t\_I + \|h\_{I + N - 1}\| - \|h\_I\| + N - 1 - h\_{I + N - 1} | ≤ | \|t\_I\| - \|h\_I\| + N - 1                     |
//! | iff | t\_I + \|h\_{I + N - 1}\| - h\_{I + N - 1}                    | ≤ | \|t\_I\|                                        |
//! | iff | \|h\_{I + N - 1}\| - \|t\_I\|                                 | ≤ | h\_{I + N - 1} - t\_I                           |
//!
//!
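Written out, the chain of equivalences in the last table reduces the claim to the third property listed above, which completes the argument:

```latex
\begin{aligned}
& t_{I+N-1} - h_{I+N-1} \le |t_{I+N-1}| - |h_{I+N-1}| + N - 1 \\
\iff{} & t_I + |h_{I+N-1}| - |h_I| + N - 1 - h_{I+N-1} \le |t_I| - |h_I| + N - 1 \\
\iff{} & t_I + |h_{I+N-1}| - h_{I+N-1} \le |t_I| \\
\iff{} & |h_{I+N-1}| - |t_I| \le h_{I+N-1} - t_I
\end{aligned}
```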
//! ## Checksum
//!
//! The main property we want is that all partially written/erased words are either the initial
//! word, the final word, or invalid.
//!
//! We say that a bit sequence `TARGET` is reachable from a bit sequence `SOURCE` if both have the
//! same length and `SOURCE & TARGET == TARGET` where `&` is the bitwise AND operation on bit
//! sequences of that length. In other words, when `SOURCE` has a bit equal to 0 then `TARGET` also
//! has that bit equal to 0.
//!
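On fixed-width words this definition is a one-line predicate; the sketch below uses `u32` purely for illustration (the store's actual word width is defined elsewhere):

```rust
/// Returns whether `target` is reachable from `source`, i.e. whether `target` can be obtained
/// from `source` by only flipping bits from 1 to 0.
fn is_reachable(source: u32, target: u32) -> bool {
    source & target == target
}

fn main() {
    assert!(is_reachable(0b1111, 0b1010)); // only 1 -> 0 flips
    assert!(!is_reachable(0b1010, 0b1110)); // would need a 0 -> 1 flip
}
```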
//! The only written entries start with `101` or `110` and are written from an erased word. Marking
//! an entry as padding or deleted is a single bit operation, so the property trivially holds. For
//! those cases, the proof relies on the fact that there is exactly one bit equal to 0 in the 3
//! first bits. Either the 3 first bits are still `111` in which case we expect the remaining bits
//! to be equal to 1. Otherwise we can use the checksum of the given type of entry because those 2
//! types of entries are not reachable from each other. Here is a visualization of the partitioning
//! based on the first 3 bits:
//!
//! | First 3 bits | Description        | How to check                 |
//! | ------------:| ------------------ | ---------------------------- |
@@ -314,34 +312,27 @@
//! | `100`        | Deleted user entry | No check, atomically written |
//! | `0??`        | Padding entry      | No check, atomically written |
//!
//! To show that valid entries of a given type are not reachable from each other, we show 3 lemmas:
//!
//! 1. A bit sequence is not reachable from another if its number of bits equal to 0 is smaller.
//! 2. A bit sequence is not reachable from another if they have the same number of bits equal to
//!    0 and are different.
//! 3. A bit sequence is not reachable from another if it is bigger when they are interpreted as
//!    numbers in binary representation.
//!
//! From those lemmas we consider the 2 cases. If both entries have the same number of bits equal to
//! 0, they are either equal or not reachable from each other because of the second lemma. If they
//! don't have the same number of bits equal to 0, then the one with fewer bits equal to 0 is not
//! reachable from the other because of the first lemma, and the one with more bits equal to 0 is not
//! reachable from the other because of the third lemma and the definition of the checksum.
//!
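As a sketch of how the partitioning above can be used when parsing a word, assuming (only for illustration) a 32-bit word whose "first 3 bits" are the most-significant bits; the real format defines its own word size, bit order, and per-type checksums:

```rust
/// Classifies a word according to the first-3-bits partitioning described above.
fn classify(word: u32) -> &'static str {
    match word >> 29 {
        // Still looks erased: every remaining bit must also be 1, otherwise the write was
        // interrupted and the word is invalid.
        0b111 if word == u32::MAX => "erased word",
        0b111 => "invalid (partially written)",
        0b100 => "deleted user entry (atomically written, no check)",
        0b000..=0b011 => "padding entry (atomically written, no check)",
        // `101` and `110` are written entries; they are validated with the checksum of their
        // entry type, which is not reproduced in this excerpt.
        _ => "written entry (validate with its type's checksum)",
    }
}

fn main() {
    assert_eq!(classify(u32::MAX), "erased word");
}
```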
//! # Fuzzing
//!
//! For any sequence of operations and interruptions starting from an erased storage, the store is
//! checked against its model and some internal invariant at each step.
//!
//! For any sequence of operations and interruptions starting from an arbitrary storage, the store
//! is checked not to crash.
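The first property amounts to a differential check: decode the fuzz input into operations, apply them to both the store and its model, and compare after every step. The sketch below only shows a toy model side with a made-up one-byte-key encoding; the real harness drives the actual store (with interruptions) and its `StoreModel`.

```rust
use std::collections::HashMap;

/// Replays a fuzz input against a toy model: a chunk `[key, v]` inserts `v` when `v` is even
/// and removes `key` otherwise. The encoding is arbitrary and only illustrates "any sequence
/// of operations" from the text above.
fn replay_on_model(data: &[u8]) -> HashMap<usize, Vec<u8>> {
    let mut model = HashMap::new();
    for chunk in data.chunks(2) {
        let key = chunk[0] as usize;
        match chunk.get(1) {
            Some(&value) if value % 2 == 0 => {
                model.insert(key, vec![value]);
            }
            _ => {
                model.remove(&key);
            }
        }
    }
    model
}

fn main() {
    // In the real harness, the same decoded operations would also be applied to the store and
    // the store content compared against this model at each step.
    let model = replay_on_model(&[1, 2, 1, 7]);
    assert!(model.is_empty()); // key 1 was inserted then removed
}
```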
#![cfg_attr(not(feature = "std"), no_std)]
#![feature(try_trait)]
@@ -12,13 +12,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.

//! Store specification.

use crate::format::Format;
use crate::{usize_to_nat, StoreError, StoreRatio, StoreResult, StoreUpdate};
use std::collections::HashMap;

/// Models the mutable operations of a store.
///
/// The model doesn't model the storage and read-only operations. This is done by the
/// [driver](crate::StoreDriver).
#[derive(Clone, Debug)]
pub struct StoreModel {
    /// Represents the content of the store.
@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.

//! Flash storage abstraction.

/// Represents a byte position in a storage.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct StorageIndex {
@@ -65,12 +67,14 @@ pub trait Storage {
    /// The following pre-conditions must hold:
    /// - The `index` must designate `value.len()` bytes in the storage.
    /// - Both `index` and `value.len()` must be word-aligned.
    /// - The written words should not have been written [too many](Self::max_word_writes) times
    ///   since the last page erasure.
    fn write_slice(&mut self, index: StorageIndex, value: &[u8]) -> StorageResult<()>;

    /// Erases a page of the storage.
    ///
    /// The `page` must be in the storage, i.e. less than [`Storage::num_pages`]. And the page
    /// should not have been erased [too many](Self::max_page_erases) times.
    fn erase_page(&mut self, page: usize) -> StorageResult<()>;
}
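The word-alignment pre-condition can be phrased as a small predicate; `word_size` here stands for whatever the `Storage` implementation reports (an accessor for it is assumed but not shown in this excerpt):

```rust
/// Returns whether a write of `len` bytes at `byte_offset` satisfies the alignment
/// pre-conditions of `write_slice` described above.
fn is_word_aligned_write(byte_offset: usize, len: usize, word_size: usize) -> bool {
    word_size > 0 && byte_offset % word_size == 0 && len % word_size == 0
}

fn main() {
    assert!(is_word_aligned_write(8, 16, 4));
    assert!(!is_word_aligned_write(6, 16, 4)); // offset not word-aligned
}
```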
@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.

//! Store implementation.

use crate::format::{
    is_erased, CompactInfo, Format, Header, InitInfo, InternalEntry, Padding, ParsedWord, Position,
    Word, WordState,
@@ -55,17 +57,14 @@ pub enum StoreError {
    ///
    /// The consequences depend on the storage failure. In particular, the operation may or may not
    /// have succeeded, and the storage may have become invalid. Before doing any other operation,
    /// the store should be [recovered](Store::recover). The operation may then be retried if
    /// idempotent.
    StorageError,

    /// Storage is invalid.
    ///
    /// The storage should be erased and the store [recovered](Store::recover). The store would be
    /// empty and have lost track of lifetime.
    InvalidStorage,
}
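A possible way to act on these two variants, following the recovery guidance above; this assumes the crate's `StoreError` is in scope, and since the exact shape of `Store::recover` is not shown in this excerpt, the sketch only returns a hint:

```rust
/// Maps a store error to the recovery action suggested by its documentation.
fn recovery_hint(error: &StoreError) -> &'static str {
    match error {
        // The operation may or may not have happened; recover first, then retry if idempotent.
        StoreError::StorageError => "recover the store, then retry the operation if idempotent",
        // Lifetime tracking is lost; the storage must be erased before recovering.
        StoreError::InvalidStorage => "erase the storage, then recover the store",
        // Other errors (e.g. InvalidArgument) do not indicate a storage failure.
        _ => "no storage recovery needed",
    }
}
```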
@@ -92,14 +91,12 @@ pub type StoreResult<T> = Result<T, StoreError>;
/// Progression ratio for store metrics.
///
/// This is used for the [`Store::capacity`] and [`Store::lifetime`] metrics. Those metrics are
/// measured in words.
///
/// # Invariant
///
/// - The used value does not exceed the total: `used` ≤ `total`.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct StoreRatio {
    /// How much of the metric is used.
@@ -148,11 +145,11 @@ impl StoreHandle {
        self.key as usize
    }

    /// Returns the value length of the entry.
    ///
    /// # Errors
    ///
    /// Returns [`StoreError::InvalidArgument`] if the entry has been deleted or compacted.
    pub fn get_length<S: Storage>(&self, store: &Store<S>) -> StoreResult<usize> {
        store.get_length(self)
    }
@@ -161,7 +158,7 @@ impl StoreHandle {
    ///
    /// # Errors
    ///
    /// Returns [`StoreError::InvalidArgument`] if the entry has been deleted or compacted.
    pub fn get_value<S: Storage>(&self, store: &Store<S>) -> StoreResult<Vec<u8>> {
        store.get_value(self)
    }
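A usage sketch of these two accessors, assuming the crate's `Store`, `StoreHandle`, `Storage`, and `StoreResult` are in scope and that `handle` was obtained from an entry-listing API not shown in this excerpt:

```rust
/// Reads the value behind a handle, checking it against the reported length.
fn read_entry<S: Storage>(store: &Store<S>, handle: &StoreHandle) -> StoreResult<Vec<u8>> {
    // Both calls fail with `StoreError::InvalidArgument` if the entry was deleted or compacted.
    let length = handle.get_length(store)?;
    let value = handle.get_value(store)?;
    debug_assert_eq!(value.len(), length);
    Ok(value)
}
```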
@@ -211,7 +208,7 @@ pub struct Store<S: Storage> {
    /// The list of the position of the user entries.
    ///
    /// The position is encoded as the word offset from the [head](Store::head).
    entries: Option<Vec<u16>>,
}
@@ -224,7 +221,8 @@ impl<S: Storage> Store<S> {
    ///
    /// # Errors
    ///
    /// Returns [`StoreError::InvalidArgument`] if the storage is not
    /// [supported](Format::is_storage_supported).
    pub fn new(storage: S) -> Result<Store<S>, (StoreError, S)> {
        let format = match Format::new(&storage) {
            None => return Err((StoreError::InvalidArgument, storage)),
@@ -258,7 +256,7 @@ impl<S: Storage> Store<S> {
        )))
    }

    /// Returns the current and total capacity in words.
    ///
    /// The capacity represents the size of what is stored.
    pub fn capacity(&self) -> StoreResult<StoreRatio> {
@@ -271,7 +269,7 @@ impl<S: Storage> Store<S> {
        Ok(StoreRatio { used, total })
    }
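A possible consumer of this metric, assuming the crate items above are in scope and that `used` and `total` are the public fields of `StoreRatio` shown earlier in this diff:

```rust
/// Returns how many words of capacity are still unused.
fn words_left<S: Storage>(store: &Store<S>) -> StoreResult<usize> {
    let capacity = store.capacity()?;
    // Documented invariant: `used` never exceeds `total`.
    Ok(capacity.total - capacity.used)
}
```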
    /// Returns the current and total lifetime in words.
    ///
    /// The lifetime represents the age of the storage. The limit is an over-approximation by at
    /// most the maximum length of a value (the actual limit depends on the length of the prefix of
@@ -286,10 +284,11 @@ impl<S: Storage> Store<S> {
    ///
    /// # Errors
    ///
    /// Returns [`StoreError::InvalidArgument`] in the following circumstances:
    /// - There are [too many](Format::max_updates) updates.
    /// - The updates overlap, i.e. their keys are not disjoint.
    /// - The updates are invalid, e.g. key [out of bound](Format::max_key) or value [too
    ///   long](Format::max_value_len).
    pub fn transaction<ByteSlice: Borrow<[u8]>>(
        &mut self,
        updates: &[StoreUpdate<ByteSlice>],