Merge branch 'develop' into usize_32_or_std

Julien Cretin
2021-04-26 13:12:55 +02:00
92 changed files with 13886 additions and 6934 deletions

View File

@@ -24,5 +24,5 @@ pub mod values;
pub mod writer;
pub use self::reader::read;
pub use self::values::{KeyType, SimpleValue, Value};
pub use self::values::{SimpleValue, Value};
pub use self::writer::write;

View File

@@ -12,15 +12,15 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::values::{KeyType, Value};
use alloc::collections::btree_map;
use crate::values::Value;
use alloc::vec;
use core::cmp::Ordering;
use core::iter::Peekable;
/// This macro generates code to extract multiple values from a `BTreeMap<KeyType, Value>` at once
/// in an optimized manner, consuming the input map.
/// This macro generates code to extract multiple values from a `Vec<(Value, Value)>` at once
/// in an optimized manner, consuming the input vector.
///
/// It takes as input a `BTreeMap` as well as a list of identifiers and keys, and generates code
/// It takes as input a `Vec` as well as a list of identifiers and keys, and generates code
/// that assigns the corresponding values to new variables using the given identifiers. Each of
/// these variables has type `Option<Value>`, to account for the case where keys aren't found.
///
@@ -32,16 +32,14 @@ use core::iter::Peekable;
/// the keys are indeed sorted. This macro is therefore **not suitable for dynamic keys** that can
/// change at runtime.
///
/// Semantically, provided that the keys are sorted as specified above, the following two snippets
/// of code are equivalent, but the `destructure_cbor_map!` version is more optimized, as it doesn't
/// re-balance the `BTreeMap` for each key, contrary to the `BTreeMap::remove` operations.
/// Example usage:
///
/// ```rust
/// # extern crate alloc;
/// # use cbor::destructure_cbor_map;
/// #
/// # fn main() {
/// # let map = alloc::collections::BTreeMap::new();
/// # let map = alloc::vec::Vec::new();
/// destructure_cbor_map! {
/// let {
/// 1 => x,
@@ -50,17 +48,6 @@ use core::iter::Peekable;
/// }
/// # }
/// ```
///
/// ```rust
/// # extern crate alloc;
/// #
/// # fn main() {
/// # let mut map = alloc::collections::BTreeMap::<cbor::KeyType, _>::new();
/// use cbor::values::IntoCborKey;
/// let x: Option<cbor::Value> = map.remove(&1.into_cbor_key());
/// let y: Option<cbor::Value> = map.remove(&"key".into_cbor_key());
/// # }
/// ```
#[macro_export]
macro_rules! destructure_cbor_map {
( let { $( $key:expr => $variable:ident, )+ } = $map:expr; ) => {
@@ -70,7 +57,7 @@ macro_rules! destructure_cbor_map {
#[cfg(test)]
$crate::assert_sorted_keys!($( $key, )+);
use $crate::values::{IntoCborKey, Value};
use $crate::values::{IntoCborValue, Value};
use $crate::macros::destructure_cbor_map_peek_value;
// This algorithm first converts the map into a peekable iterator - whose items are sorted
@@ -83,7 +70,7 @@ macro_rules! destructure_cbor_map {
// to come in the same order (i.e. sorted).
let mut it = $map.into_iter().peekable();
$(
let $variable: Option<Value> = destructure_cbor_map_peek_value(&mut it, $key.into_cbor_key());
let $variable: Option<Value> = destructure_cbor_map_peek_value(&mut it, $key.into_cbor_value());
)+
};
}
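For intuition, here is a minimal sketch of what an expansion of `destructure_cbor_map!` boils down to under the new `Vec<(Value, Value)>` representation. The identifier names are illustrative (the real expansion is hygiene-protected), and it assumes `cbor::macros::destructure_cbor_map_peek_value` is publicly reachable, as the macro itself requires:

```rust
extern crate alloc;
use cbor::macros::destructure_cbor_map_peek_value;
use cbor::values::{IntoCborValue, Value};

fn extract(map: Vec<(Value, Value)>) -> (Option<Value>, Option<Value>) {
    // A single peekable pass over the sorted pairs: each lookup only
    // advances the iterator, so nothing is re-balanced or re-scanned.
    let mut it = map.into_iter().peekable();
    let x = destructure_cbor_map_peek_value(&mut it, 1.into_cbor_value());
    let y = destructure_cbor_map_peek_value(&mut it, "key".into_cbor_value());
    (x, y)
}
```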
@@ -100,14 +87,14 @@ macro_rules! destructure_cbor_map {
/// would be inlined for every use case. As of June 2020, this saves ~40KB of binary size for the
/// CTAP2 application of OpenSK.
pub fn destructure_cbor_map_peek_value(
it: &mut Peekable<btree_map::IntoIter<KeyType, Value>>,
needle: KeyType,
it: &mut Peekable<vec::IntoIter<(Value, Value)>>,
needle: Value,
) -> Option<Value> {
loop {
match it.peek() {
None => return None,
Some(item) => {
let key: &KeyType = &item.0;
let key: &Value = &item.0;
match key.cmp(&needle) {
Ordering::Less => {
it.next();
@@ -131,9 +118,9 @@ macro_rules! assert_sorted_keys {
( $key1:expr, $key2:expr, $( $keys:expr, )* ) => {
{
use $crate::values::{IntoCborKey, KeyType};
let k1: KeyType = $key1.into_cbor_key();
let k2: KeyType = $key2.into_cbor_key();
use $crate::values::{IntoCborValue, Value};
let k1: Value = $key1.into_cbor_value();
let k2: Value = $key2.into_cbor_value();
assert!(
k1 < k2,
"{:?} < {:?} failed. The destructure_cbor_map! macro requires keys in sorted order.",
@@ -145,6 +132,23 @@ macro_rules! assert_sorted_keys {
};
}
/// Creates a CBOR Value of type Map with the specified key-value pairs.
///
/// Keys and values are expressions that are converted into CBOR `Value`s.
/// The syntax for these pairs is `key_expression => value_expression,`.
/// Duplicate keys will lead to invalid CBOR, i.e. writing these values fails.
/// Keys do not have to be sorted.
///
/// Example usage:
///
/// ```rust
/// # extern crate alloc;
/// # use cbor::cbor_map;
/// let map = cbor_map! {
/// 0x01 => false,
/// "02" => -3,
/// };
/// ```
#[macro_export]
macro_rules! cbor_map {
// trailing comma case
@@ -156,16 +160,36 @@ macro_rules! cbor_map {
{
// The import is unused if the list is empty.
#[allow(unused_imports)]
use $crate::values::{IntoCborKey, IntoCborValue};
let mut _map = ::alloc::collections::BTreeMap::new();
use $crate::values::IntoCborValue;
let mut _map = ::alloc::vec::Vec::new();
$(
_map.insert($key.into_cbor_key(), $value.into_cbor_value());
_map.push(($key.into_cbor_value(), $value.into_cbor_value()));
)*
$crate::values::Value::Map(_map)
}
};
}
/// Creates a CBOR Value of type Map with key-value pairs where values can be Options.
///
/// Keys and values are expressions that are converted into CBOR `Value`s and `Option<Value>`s.
/// A map entry is included iff its value either is not an `Option` or is an `Option` holding
/// `Some` value.
/// The syntax for these pairs is `key_expression => value_expression,`.
/// Duplicate keys will lead to invalid CBOR, i.e. writing these values fails.
/// Keys do not have to be sorted.
///
/// Example usage:
///
/// ```rust
/// # extern crate alloc;
/// # use cbor::cbor_map_options;
/// let missing_value: Option<bool> = None;
/// let map = cbor_map_options! {
/// 0x01 => Some(false),
/// "02" => -3,
/// "not in map" => missing_value,
/// };
/// ```
#[macro_export]
macro_rules! cbor_map_options {
// trailing comma case
@@ -177,13 +201,13 @@ macro_rules! cbor_map_options {
{
// The import is unused if the list is empty.
#[allow(unused_imports)]
use $crate::values::{IntoCborKey, IntoCborValueOption};
let mut _map = ::alloc::collections::BTreeMap::<_, $crate::values::Value>::new();
use $crate::values::{IntoCborValue, IntoCborValueOption};
let mut _map = ::alloc::vec::Vec::<(_, $crate::values::Value)>::new();
$(
{
let opt: Option<$crate::values::Value> = $value.into_cbor_value_option();
if let Some(val) = opt {
_map.insert($key.into_cbor_key(), val);
_map.push(($key.into_cbor_value(), val));
}
}
)*
@@ -192,13 +216,25 @@ macro_rules! cbor_map_options {
};
}
/// Creates a CBOR Value of type Map from a Vec<(Value, Value)>.
#[macro_export]
macro_rules! cbor_map_btree {
( $tree:expr ) => {
$crate::values::Value::Map($tree)
};
macro_rules! cbor_map_collection {
( $tree:expr ) => {{
$crate::values::Value::from($tree)
}};
}
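The sibling macros above carry doc examples; for symmetry, a hedged usage sketch of `cbor_map_collection!`, mirroring `test_cbor_map_collection_foo` further down:

```rust
extern crate alloc;
use cbor::{cbor_map_collection, values::Value};

// Wraps an existing vector of pairs into a Value::Map without copying.
let map = cbor_map_collection!(vec![(Value::Unsigned(2), Value::Unsigned(3))]);
assert_eq!(map, Value::Map(vec![(Value::Unsigned(2), Value::Unsigned(3))]));
```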
/// Creates a CBOR Value of type Array with the given elements.
///
/// Elements are expressions and converted into CBOR Values. Elements are comma-separated.
///
/// Example usage:
///
/// ```rust
/// # extern crate alloc;
/// # use cbor::cbor_array;
/// let array = cbor_array![1, "2"];
/// ```
#[macro_export]
macro_rules! cbor_array {
// trailing comma case
@@ -216,6 +252,7 @@ macro_rules! cbor_array {
};
}
/// Creates a CBOR Value of type Array from a Vec<Value>.
#[macro_export]
macro_rules! cbor_array_vec {
( $vec:expr ) => {{
@@ -224,6 +261,7 @@ macro_rules! cbor_array_vec {
}};
}
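As with the other constructors, a short usage sketch matching `test_cbor_array_vec_int` further down:

```rust
extern crate alloc;
use cbor::cbor_array_vec;

// Each element is converted with IntoCborValue, here i32 -> Value::Unsigned.
let array = cbor_array_vec!(vec![1, 2, 3, 4]);
```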
/// Creates a CBOR Value of type Simple with value true.
#[macro_export]
macro_rules! cbor_true {
( ) => {
@@ -231,6 +269,7 @@ macro_rules! cbor_true {
};
}
/// Creates a CBOR Value of type Simple with value false.
#[macro_export]
macro_rules! cbor_false {
( ) => {
@@ -238,6 +277,7 @@ macro_rules! cbor_false {
};
}
/// Creates a CBOR Value of type Simple with value null.
#[macro_export]
macro_rules! cbor_null {
( ) => {
@@ -245,6 +285,7 @@ macro_rules! cbor_null {
};
}
/// Creates a CBOR Value of type Simple with the undefined value.
#[macro_export]
macro_rules! cbor_undefined {
( ) => {
@@ -252,6 +293,7 @@ macro_rules! cbor_undefined {
};
}
/// Creates a CBOR Value of type Simple with the given bool value.
#[macro_export]
macro_rules! cbor_bool {
( $x:expr ) => {
@@ -259,37 +301,47 @@ macro_rules! cbor_bool {
};
}
// For key types, we construct a KeyType and call .into(), which will automatically convert it to a
// KeyType or a Value depending on the context.
/// Creates a CBOR Value of type Unsigned with the given numeric value.
#[macro_export]
macro_rules! cbor_unsigned {
( $x:expr ) => {
$crate::cbor_key_unsigned!($x).into()
$crate::values::Value::Unsigned($x)
};
}
/// Creates a CBOR Value of type Unsigned or Negative with the given numeric value.
#[macro_export]
macro_rules! cbor_int {
( $x:expr ) => {
$crate::cbor_key_int!($x).into()
$crate::values::Value::integer($x)
};
}
/// Creates a CBOR Value of type Text String with the given string.
#[macro_export]
macro_rules! cbor_text {
( $x:expr ) => {
$crate::cbor_key_text!($x).into()
$crate::values::Value::TextString($x.into())
};
}
/// Creates a CBOR Value of type Byte String with the given slice or vector.
#[macro_export]
macro_rules! cbor_bytes {
( $x:expr ) => {
$crate::cbor_key_bytes!($x).into()
$crate::values::Value::ByteString($x)
};
}
// Macro to use with a literal, e.g. cbor_bytes_lit!(b"foo")
/// Creates a CBOR Value of type Byte String with the given byte string literal.
///
/// Example usage:
///
/// ```rust
/// # extern crate alloc;
/// # use cbor::cbor_bytes_lit;
/// let byte_array = cbor_bytes_lit!(b"foo");
/// ```
#[macro_export]
macro_rules! cbor_bytes_lit {
( $x:expr ) => {
@@ -297,39 +349,9 @@ macro_rules! cbor_bytes_lit {
};
}
// Some explicit macros are also available for contexts where the type is not explicit.
#[macro_export]
macro_rules! cbor_key_unsigned {
( $x:expr ) => {
$crate::values::KeyType::Unsigned($x)
};
}
#[macro_export]
macro_rules! cbor_key_int {
( $x:expr ) => {
$crate::values::KeyType::integer($x)
};
}
#[macro_export]
macro_rules! cbor_key_text {
( $x:expr ) => {
$crate::values::KeyType::TextString($x.into())
};
}
#[macro_export]
macro_rules! cbor_key_bytes {
( $x:expr ) => {
$crate::values::KeyType::ByteString($x)
};
}
#[cfg(test)]
mod test {
use super::super::values::{KeyType, SimpleValue, Value};
use alloc::collections::BTreeMap;
use super::super::values::{SimpleValue, Value};
#[test]
fn test_cbor_simple_values() {
@@ -347,23 +369,20 @@ mod test {
#[test]
fn test_cbor_int_unsigned() {
assert_eq!(cbor_key_int!(0), KeyType::Unsigned(0));
assert_eq!(cbor_key_int!(1), KeyType::Unsigned(1));
assert_eq!(cbor_key_int!(123456), KeyType::Unsigned(123456));
assert_eq!(cbor_int!(0), Value::Unsigned(0));
assert_eq!(cbor_int!(1), Value::Unsigned(1));
assert_eq!(cbor_int!(123456), Value::Unsigned(123456));
assert_eq!(
cbor_key_int!(std::i64::MAX),
KeyType::Unsigned(std::i64::MAX as u64)
cbor_int!(std::i64::MAX),
Value::Unsigned(std::i64::MAX as u64)
);
}
#[test]
fn test_cbor_int_negative() {
assert_eq!(cbor_key_int!(-1), KeyType::Negative(-1));
assert_eq!(cbor_key_int!(-123456), KeyType::Negative(-123456));
assert_eq!(
cbor_key_int!(std::i64::MIN),
KeyType::Negative(std::i64::MIN)
);
assert_eq!(cbor_int!(-1), Value::Negative(-1));
assert_eq!(cbor_int!(-123456), Value::Negative(-123456));
assert_eq!(cbor_int!(std::i64::MIN), Value::Negative(std::i64::MIN));
}
#[test]
@@ -381,16 +400,16 @@ mod test {
std::u64::MAX,
];
let b = Value::Array(vec![
Value::KeyValue(KeyType::Negative(std::i64::MIN)),
Value::KeyValue(KeyType::Negative(std::i32::MIN as i64)),
Value::KeyValue(KeyType::Negative(-123456)),
Value::KeyValue(KeyType::Negative(-1)),
Value::KeyValue(KeyType::Unsigned(0)),
Value::KeyValue(KeyType::Unsigned(1)),
Value::KeyValue(KeyType::Unsigned(123456)),
Value::KeyValue(KeyType::Unsigned(std::i32::MAX as u64)),
Value::KeyValue(KeyType::Unsigned(std::i64::MAX as u64)),
Value::KeyValue(KeyType::Unsigned(std::u64::MAX)),
Value::Negative(std::i64::MIN),
Value::Negative(std::i32::MIN as i64),
Value::Negative(-123456),
Value::Negative(-1),
Value::Unsigned(0),
Value::Unsigned(1),
Value::Unsigned(123456),
Value::Unsigned(std::i32::MAX as u64),
Value::Unsigned(std::i64::MAX as u64),
Value::Unsigned(std::u64::MAX),
]);
assert_eq!(a, b);
}
@@ -410,20 +429,17 @@ mod test {
cbor_map! {2 => 3},
];
let b = Value::Array(vec![
Value::KeyValue(KeyType::Negative(-123)),
Value::KeyValue(KeyType::Unsigned(456)),
Value::Negative(-123),
Value::Unsigned(456),
Value::Simple(SimpleValue::TrueValue),
Value::Simple(SimpleValue::NullValue),
Value::KeyValue(KeyType::TextString(String::from("foo"))),
Value::KeyValue(KeyType::ByteString(b"bar".to_vec())),
Value::TextString(String::from("foo")),
Value::ByteString(b"bar".to_vec()),
Value::Array(Vec::new()),
Value::Array(vec![
Value::KeyValue(KeyType::Unsigned(0)),
Value::KeyValue(KeyType::Unsigned(1)),
]),
Value::Map(BTreeMap::new()),
Value::Array(vec![Value::Unsigned(0), Value::Unsigned(1)]),
Value::Map(Vec::new()),
Value::Map(
[(KeyType::Unsigned(2), Value::KeyValue(KeyType::Unsigned(3)))]
[(Value::Unsigned(2), Value::Unsigned(3))]
.iter()
.cloned()
.collect(),
@@ -443,10 +459,10 @@ mod test {
fn test_cbor_array_vec_int() {
let a = cbor_array_vec!(vec![1, 2, 3, 4]);
let b = Value::Array(vec![
Value::KeyValue(KeyType::Unsigned(1)),
Value::KeyValue(KeyType::Unsigned(2)),
Value::KeyValue(KeyType::Unsigned(3)),
Value::KeyValue(KeyType::Unsigned(4)),
Value::Unsigned(1),
Value::Unsigned(2),
Value::Unsigned(3),
Value::Unsigned(4),
]);
assert_eq!(a, b);
}
@@ -455,9 +471,9 @@ mod test {
fn test_cbor_array_vec_text() {
let a = cbor_array_vec!(vec!["a", "b", "c"]);
let b = Value::Array(vec![
Value::KeyValue(KeyType::TextString(String::from("a"))),
Value::KeyValue(KeyType::TextString(String::from("b"))),
Value::KeyValue(KeyType::TextString(String::from("c"))),
Value::TextString(String::from("a")),
Value::TextString(String::from("b")),
Value::TextString(String::from("c")),
]);
assert_eq!(a, b);
}
@@ -466,9 +482,9 @@ mod test {
fn test_cbor_array_vec_bytes() {
let a = cbor_array_vec!(vec![b"a", b"b", b"c"]);
let b = Value::Array(vec![
Value::KeyValue(KeyType::ByteString(b"a".to_vec())),
Value::KeyValue(KeyType::ByteString(b"b".to_vec())),
Value::KeyValue(KeyType::ByteString(b"c".to_vec())),
Value::ByteString(b"a".to_vec()),
Value::ByteString(b"b".to_vec()),
Value::ByteString(b"c".to_vec()),
]);
assert_eq!(a, b);
}
@@ -489,40 +505,28 @@ mod test {
};
let b = Value::Map(
[
(Value::Negative(-1), Value::Negative(-23)),
(Value::Unsigned(4), Value::Unsigned(56)),
(
KeyType::Negative(-1),
Value::KeyValue(KeyType::Negative(-23)),
),
(KeyType::Unsigned(4), Value::KeyValue(KeyType::Unsigned(56))),
(
KeyType::TextString(String::from("foo")),
Value::TextString(String::from("foo")),
Value::Simple(SimpleValue::TrueValue),
),
(
KeyType::ByteString(b"bar".to_vec()),
Value::ByteString(b"bar".to_vec()),
Value::Simple(SimpleValue::NullValue),
),
(Value::Unsigned(5), Value::TextString(String::from("foo"))),
(Value::Unsigned(6), Value::ByteString(b"bar".to_vec())),
(Value::Unsigned(7), Value::Array(Vec::new())),
(
KeyType::Unsigned(5),
Value::KeyValue(KeyType::TextString(String::from("foo"))),
Value::Unsigned(8),
Value::Array(vec![Value::Unsigned(0), Value::Unsigned(1)]),
),
(Value::Unsigned(9), Value::Map(Vec::new())),
(
KeyType::Unsigned(6),
Value::KeyValue(KeyType::ByteString(b"bar".to_vec())),
),
(KeyType::Unsigned(7), Value::Array(Vec::new())),
(
KeyType::Unsigned(8),
Value::Array(vec![
Value::KeyValue(KeyType::Unsigned(0)),
Value::KeyValue(KeyType::Unsigned(1)),
]),
),
(KeyType::Unsigned(9), Value::Map(BTreeMap::new())),
(
KeyType::Unsigned(10),
Value::Unsigned(10),
Value::Map(
[(KeyType::Unsigned(2), Value::KeyValue(KeyType::Unsigned(3)))]
[(Value::Unsigned(2), Value::Unsigned(3))]
.iter()
.cloned()
.collect(),
@@ -560,40 +564,28 @@ mod test {
};
let b = Value::Map(
[
(Value::Negative(-1), Value::Negative(-23)),
(Value::Unsigned(4), Value::Unsigned(56)),
(
KeyType::Negative(-1),
Value::KeyValue(KeyType::Negative(-23)),
),
(KeyType::Unsigned(4), Value::KeyValue(KeyType::Unsigned(56))),
(
KeyType::TextString(String::from("foo")),
Value::TextString(String::from("foo")),
Value::Simple(SimpleValue::TrueValue),
),
(
KeyType::ByteString(b"bar".to_vec()),
Value::ByteString(b"bar".to_vec()),
Value::Simple(SimpleValue::NullValue),
),
(Value::Unsigned(5), Value::TextString(String::from("foo"))),
(Value::Unsigned(6), Value::ByteString(b"bar".to_vec())),
(Value::Unsigned(7), Value::Array(Vec::new())),
(
KeyType::Unsigned(5),
Value::KeyValue(KeyType::TextString(String::from("foo"))),
Value::Unsigned(8),
Value::Array(vec![Value::Unsigned(0), Value::Unsigned(1)]),
),
(Value::Unsigned(9), Value::Map(Vec::new())),
(
KeyType::Unsigned(6),
Value::KeyValue(KeyType::ByteString(b"bar".to_vec())),
),
(KeyType::Unsigned(7), Value::Array(Vec::new())),
(
KeyType::Unsigned(8),
Value::Array(vec![
Value::KeyValue(KeyType::Unsigned(0)),
Value::KeyValue(KeyType::Unsigned(1)),
]),
),
(KeyType::Unsigned(9), Value::Map(BTreeMap::new())),
(
KeyType::Unsigned(10),
Value::Unsigned(10),
Value::Map(
[(KeyType::Unsigned(2), Value::KeyValue(KeyType::Unsigned(3)))]
[(Value::Unsigned(2), Value::Unsigned(3))]
.iter()
.cloned()
.collect(),
@@ -608,30 +600,20 @@ mod test {
}
#[test]
fn test_cbor_map_btree_empty() {
let a = cbor_map_btree!(BTreeMap::new());
let b = Value::Map(BTreeMap::new());
fn test_cbor_map_collection_empty() {
let a = cbor_map_collection!(Vec::<(_, _)>::new());
let b = Value::Map(Vec::new());
assert_eq!(a, b);
}
#[test]
fn test_cbor_map_btree_foo() {
let a = cbor_map_btree!(
[(KeyType::Unsigned(2), Value::KeyValue(KeyType::Unsigned(3)))]
.iter()
.cloned()
.collect()
);
let b = Value::Map(
[(KeyType::Unsigned(2), Value::KeyValue(KeyType::Unsigned(3)))]
.iter()
.cloned()
.collect(),
);
fn test_cbor_map_collection_foo() {
let a = cbor_map_collection!(vec![(Value::Unsigned(2), Value::Unsigned(3))]);
let b = Value::Map(vec![(Value::Unsigned(2), Value::Unsigned(3))]);
assert_eq!(a, b);
}
fn extract_map(cbor_value: Value) -> BTreeMap<KeyType, Value> {
fn extract_map(cbor_value: Value) -> Vec<(Value, Value)> {
match cbor_value {
Value::Map(map) => map,
_ => panic!("Expected CBOR map."),

View File

@@ -12,9 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use super::values::{Constants, KeyType, SimpleValue, Value};
use crate::{cbor_array_vec, cbor_bytes_lit, cbor_map_btree, cbor_text, cbor_unsigned};
use alloc::collections::BTreeMap;
use super::values::{Constants, SimpleValue, Value};
use crate::{cbor_array_vec, cbor_bytes_lit, cbor_map_collection, cbor_text, cbor_unsigned};
use alloc::str;
use alloc::vec::Vec;
@@ -23,7 +22,6 @@ pub enum DecoderError {
UnsupportedMajorType,
UnknownAdditionalInfo,
IncompleteCborData,
IncorrectMapKeyType,
TooMuchNesting,
InvalidUtf8,
ExtranousData,
@@ -135,7 +133,7 @@ impl<'a> Reader<'a> {
if signed_size < 0 {
Err(DecoderError::OutOfRangeIntegerValue)
} else {
Ok(Value::KeyValue(KeyType::Negative(-(size_value as i64) - 1)))
Ok(Value::Negative(-(size_value as i64) - 1))
}
}
@@ -174,23 +172,19 @@ impl<'a> Reader<'a> {
size_value: u64,
remaining_depth: i8,
) -> Result<Value, DecoderError> {
let mut value_map = BTreeMap::new();
let mut value_map = Vec::new();
let mut last_key_option = None;
for _ in 0..size_value {
let key_value = self.decode_complete_data_item(remaining_depth - 1)?;
if let Value::KeyValue(key) = key_value {
if let Some(last_key) = last_key_option {
if last_key >= key {
return Err(DecoderError::OutOfOrderKey);
}
let key = self.decode_complete_data_item(remaining_depth - 1)?;
if let Some(last_key) = last_key_option {
if last_key >= key {
return Err(DecoderError::OutOfOrderKey);
}
last_key_option = Some(key.clone());
value_map.insert(key, self.decode_complete_data_item(remaining_depth - 1)?);
} else {
return Err(DecoderError::IncorrectMapKeyType);
}
last_key_option = Some(key.clone());
value_map.push((key, self.decode_complete_data_item(remaining_depth - 1)?));
}
Ok(cbor_map_btree!(value_map))
Ok(cbor_map_collection!(value_map))
}
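To illustrate the ordering check above: decoding a map whose keys are not in the canonical sorted order should now fail with `OutOfOrderKey` rather than the removed `IncorrectMapKeyType`. A hedged sketch, with hand-assembled bytes in the style of the tests below:

```rust
// Encodes the map {2: 2, 1: 3}; the keys 2, 1 are out of order.
let out_of_order_map_cbor = vec![
    0xa2, // map of 2 pairs
    0x02, // key : 2
    0x02, // value : 2
    0x01, // key : 1, smaller than the previous key
    0x03, // value : 3
];
assert_eq!(read(&out_of_order_map_cbor), Err(DecoderError::OutOfOrderKey));
```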
fn decode_to_simple_value(
@@ -615,19 +609,6 @@ mod test {
}
}
#[test]
fn test_read_unsupported_map_key_format_error() {
// While CBOR can handle all types as map keys, we only support a subset.
let bad_map_cbor = vec![
0xa2, // map of 2 pairs
0x82, 0x01, 0x02, // invalid key : [1, 2]
0x02, // value : 2
0x61, 0x64, // key : "d"
0x03, // value : 3
];
assert_eq!(read(&bad_map_cbor), Err(DecoderError::IncorrectMapKeyType));
}
#[test]
fn test_read_unknown_additional_info_error() {
let cases = vec![

View File

@@ -12,32 +12,25 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use alloc::collections::BTreeMap;
use super::writer::write;
use alloc::string::{String, ToString};
use alloc::vec::Vec;
use core::cmp::Ordering;
#[derive(Clone, Debug, PartialEq)]
#[derive(Clone, Debug)]
pub enum Value {
KeyValue(KeyType),
Array(Vec<Value>),
Map(BTreeMap<KeyType, Value>),
// TAG is omitted
Simple(SimpleValue),
}
// The specification recommends to limit the available keys.
// Currently supported are both integer and string types.
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum KeyType {
Unsigned(u64),
// We only use 63 bits of information here.
Negative(i64),
ByteString(Vec<u8>),
TextString(String),
Array(Vec<Value>),
Map(Vec<(Value, Value)>),
// TAG is omitted
Simple(SimpleValue),
}
#[derive(Clone, Debug, PartialEq)]
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum SimpleValue {
FalseValue = 20,
TrueValue = 21,
@@ -58,6 +51,15 @@ impl Constants {
}
impl Value {
// For simplicity, this only takes i64. For unsigned values above i64::MAX (needing the last
// bit), construct Value::Unsigned directly.
pub fn integer(int: i64) -> Value {
if int >= 0 {
Value::Unsigned(int as u64)
} else {
Value::Negative(int)
}
}
pub fn bool_value(b: bool) -> Value {
if b {
Value::Simple(SimpleValue::TrueValue)
@@ -67,8 +69,13 @@ impl Value {
}
pub fn type_label(&self) -> u8 {
// TODO use enum discriminant instead when stable
// https://github.com/rust-lang/rust/issues/60553
match self {
Value::KeyValue(key) => key.type_label(),
Value::Unsigned(_) => 0,
Value::Negative(_) => 1,
Value::ByteString(_) => 2,
Value::TextString(_) => 3,
Value::Array(_) => 4,
Value::Map(_) => 5,
Value::Simple(_) => 7,
@@ -76,29 +83,11 @@ impl Value {
}
}
impl KeyType {
// For simplicity, this only takes i64. Construct directly for the last bit.
pub fn integer(int: i64) -> KeyType {
if int >= 0 {
KeyType::Unsigned(int as u64)
} else {
KeyType::Negative(int)
}
}
pub fn type_label(&self) -> u8 {
match self {
KeyType::Unsigned(_) => 0,
KeyType::Negative(_) => 1,
KeyType::ByteString(_) => 2,
KeyType::TextString(_) => 3,
}
}
}
impl Ord for KeyType {
fn cmp(&self, other: &KeyType) -> Ordering {
use super::values::KeyType::{ByteString, Negative, TextString, Unsigned};
impl Ord for Value {
fn cmp(&self, other: &Value) -> Ordering {
use super::values::Value::{
Array, ByteString, Map, Negative, Simple, TextString, Unsigned,
};
let self_type_value = self.type_label();
let other_type_value = other.type_label();
if self_type_value != other_type_value {
@@ -109,17 +98,35 @@ impl Ord for KeyType {
(Negative(n1), Negative(n2)) => n1.cmp(n2).reverse(),
(ByteString(b1), ByteString(b2)) => b1.len().cmp(&b2.len()).then(b1.cmp(b2)),
(TextString(t1), TextString(t2)) => t1.len().cmp(&t2.len()).then(t1.cmp(t2)),
_ => unreachable!(),
(Array(a1), Array(a2)) if a1.len() != a2.len() => a1.len().cmp(&a2.len()),
(Map(m1), Map(m2)) if m1.len() != m2.len() => m1.len().cmp(&m2.len()),
(Simple(s1), Simple(s2)) => s1.cmp(s2),
(v1, v2) => {
// This case could handle all of the above as well. Checking individually is faster.
let mut encoding1 = Vec::new();
write(v1.clone(), &mut encoding1);
let mut encoding2 = Vec::new();
write(v2.clone(), &mut encoding2);
encoding1.cmp(&encoding2)
}
}
}
}
impl PartialOrd for KeyType {
fn partial_cmp(&self, other: &KeyType) -> Option<Ordering> {
impl PartialOrd for Value {
fn partial_cmp(&self, other: &Value) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Eq for Value {}
impl PartialEq for Value {
fn eq(&self, other: &Value) -> bool {
self.cmp(other) == Ordering::Equal
}
}
impl SimpleValue {
pub fn from_integer(int: u64) -> Option<SimpleValue> {
match int {
@@ -132,54 +139,51 @@ impl SimpleValue {
}
}
impl From<u64> for KeyType {
impl From<u64> for Value {
fn from(unsigned: u64) -> Self {
KeyType::Unsigned(unsigned)
Value::Unsigned(unsigned)
}
}
impl From<i64> for KeyType {
impl From<i64> for Value {
fn from(i: i64) -> Self {
KeyType::integer(i)
Value::integer(i)
}
}
impl From<i32> for KeyType {
impl From<i32> for Value {
fn from(i: i32) -> Self {
KeyType::integer(i as i64)
Value::integer(i as i64)
}
}
impl From<Vec<u8>> for KeyType {
impl From<Vec<u8>> for Value {
fn from(bytes: Vec<u8>) -> Self {
KeyType::ByteString(bytes)
Value::ByteString(bytes)
}
}
impl From<&[u8]> for KeyType {
impl From<&[u8]> for Value {
fn from(bytes: &[u8]) -> Self {
KeyType::ByteString(bytes.to_vec())
Value::ByteString(bytes.to_vec())
}
}
impl From<String> for KeyType {
impl From<String> for Value {
fn from(text: String) -> Self {
KeyType::TextString(text)
Value::TextString(text)
}
}
impl From<&str> for KeyType {
impl From<&str> for Value {
fn from(text: &str) -> Self {
KeyType::TextString(text.to_string())
Value::TextString(text.to_string())
}
}
impl<T> From<T> for Value
where
KeyType: From<T>,
{
fn from(t: T) -> Self {
Value::KeyValue(KeyType::from(t))
impl From<Vec<(Value, Value)>> for Value {
fn from(map: Vec<(Value, Value)>) -> Self {
Value::Map(map)
}
}
@@ -189,19 +193,6 @@ impl From<bool> for Value {
}
}
pub trait IntoCborKey {
fn into_cbor_key(self) -> KeyType;
}
impl<T> IntoCborKey for T
where
KeyType: From<T>,
{
fn into_cbor_key(self) -> KeyType {
KeyType::from(self)
}
}
pub trait IntoCborValue {
fn into_cbor_value(self) -> Value;
}
@@ -239,32 +230,69 @@ where
#[cfg(test)]
mod test {
use crate::{cbor_key_bytes, cbor_key_int, cbor_key_text};
use super::*;
use crate::{cbor_array, cbor_bool, cbor_bytes, cbor_int, cbor_map, cbor_text};
#[test]
fn test_key_type_ordering() {
assert!(cbor_key_int!(0) < cbor_key_int!(23));
assert!(cbor_key_int!(23) < cbor_key_int!(24));
assert!(cbor_key_int!(24) < cbor_key_int!(1000));
assert!(cbor_key_int!(1000) < cbor_key_int!(1000000));
assert!(cbor_key_int!(1000000) < cbor_key_int!(std::i64::MAX));
assert!(cbor_key_int!(std::i64::MAX) < cbor_key_int!(-1));
assert!(cbor_key_int!(-1) < cbor_key_int!(-23));
assert!(cbor_key_int!(-23) < cbor_key_int!(-24));
assert!(cbor_key_int!(-24) < cbor_key_int!(-1000));
assert!(cbor_key_int!(-1000) < cbor_key_int!(-1000000));
assert!(cbor_key_int!(-1000000) < cbor_key_int!(std::i64::MIN));
assert!(cbor_key_int!(std::i64::MIN) < cbor_key_bytes!(vec![]));
assert!(cbor_key_bytes!(vec![]) < cbor_key_bytes!(vec![0x00]));
assert!(cbor_key_bytes!(vec![0x00]) < cbor_key_bytes!(vec![0x01]));
assert!(cbor_key_bytes!(vec![0x01]) < cbor_key_bytes!(vec![0xFF]));
assert!(cbor_key_bytes!(vec![0xFF]) < cbor_key_bytes!(vec![0x00, 0x00]));
assert!(cbor_key_bytes!(vec![0x00, 0x00]) < cbor_key_text!(""));
assert!(cbor_key_text!("") < cbor_key_text!("a"));
assert!(cbor_key_text!("a") < cbor_key_text!("b"));
assert!(cbor_key_text!("b") < cbor_key_text!("aa"));
assert!(cbor_key_int!(1) < cbor_key_bytes!(vec![0x00]));
assert!(cbor_key_int!(1) < cbor_key_text!("s"));
assert!(cbor_key_int!(-1) < cbor_key_text!("s"));
fn test_value_ordering() {
assert!(cbor_int!(0) < cbor_int!(23));
assert!(cbor_int!(23) < cbor_int!(24));
assert!(cbor_int!(24) < cbor_int!(1000));
assert!(cbor_int!(1000) < cbor_int!(1000000));
assert!(cbor_int!(1000000) < cbor_int!(std::i64::MAX));
assert!(cbor_int!(std::i64::MAX) < cbor_int!(-1));
assert!(cbor_int!(-1) < cbor_int!(-23));
assert!(cbor_int!(-23) < cbor_int!(-24));
assert!(cbor_int!(-24) < cbor_int!(-1000));
assert!(cbor_int!(-1000) < cbor_int!(-1000000));
assert!(cbor_int!(-1000000) < cbor_int!(std::i64::MIN));
assert!(cbor_int!(std::i64::MIN) < cbor_bytes!(vec![]));
assert!(cbor_bytes!(vec![]) < cbor_bytes!(vec![0x00]));
assert!(cbor_bytes!(vec![0x00]) < cbor_bytes!(vec![0x01]));
assert!(cbor_bytes!(vec![0x01]) < cbor_bytes!(vec![0xFF]));
assert!(cbor_bytes!(vec![0xFF]) < cbor_bytes!(vec![0x00, 0x00]));
assert!(cbor_bytes!(vec![0x00, 0x00]) < cbor_text!(""));
assert!(cbor_text!("") < cbor_text!("a"));
assert!(cbor_text!("a") < cbor_text!("b"));
assert!(cbor_text!("b") < cbor_text!("aa"));
assert!(cbor_text!("aa") < cbor_array![]);
assert!(cbor_array![] < cbor_array![0]);
assert!(cbor_array![0] < cbor_array![-1]);
assert!(cbor_array![1] < cbor_array![b""]);
assert!(cbor_array![b""] < cbor_array![""]);
assert!(cbor_array![""] < cbor_array![cbor_array![]]);
assert!(cbor_array![cbor_array![]] < cbor_array![cbor_map! {}]);
assert!(cbor_array![cbor_map! {}] < cbor_array![false]);
assert!(cbor_array![false] < cbor_array![0, 0]);
assert!(cbor_array![0, 0] < cbor_map! {});
assert!(cbor_map! {} < cbor_map! {0 => 0});
assert!(cbor_map! {0 => 0} < cbor_map! {0 => 1});
assert!(cbor_map! {0 => 1} < cbor_map! {1 => 0});
assert!(cbor_map! {1 => 0} < cbor_map! {-1 => 0});
assert!(cbor_map! {-1 => 0} < cbor_map! {b"" => 0});
assert!(cbor_map! {b"" => 0} < cbor_map! {"" => 0});
assert!(cbor_map! {"" => 0} < cbor_map! {cbor_array![] => 0});
assert!(cbor_map! {cbor_array![] => 0} < cbor_map! {cbor_map!{} => 0});
assert!(cbor_map! {cbor_map!{} => 0} < cbor_map! {false => 0});
assert!(cbor_map! {false => 0} < cbor_map! {0 => 0, 0 => 0});
assert!(cbor_map! {0 => 0, 0 => 0} < cbor_bool!(false));
assert!(cbor_bool!(false) < cbor_bool!(true));
assert!(cbor_bool!(true) < Value::Simple(SimpleValue::NullValue));
assert!(Value::Simple(SimpleValue::NullValue) < Value::Simple(SimpleValue::Undefined));
assert!(cbor_int!(1) < cbor_bytes!(vec![0x00]));
assert!(cbor_int!(1) < cbor_text!("s"));
assert!(cbor_int!(1) < cbor_array![]);
assert!(cbor_int!(1) < cbor_map! {});
assert!(cbor_int!(1) < cbor_bool!(false));
assert!(cbor_int!(-1) < cbor_text!("s"));
assert!(cbor_int!(-1) < cbor_array![]);
assert!(cbor_int!(-1) < cbor_map! {});
assert!(cbor_int!(-1) < cbor_bool!(false));
assert!(cbor_bytes!(vec![0x00]) < cbor_array![]);
assert!(cbor_bytes!(vec![0x00]) < cbor_map! {});
assert!(cbor_bytes!(vec![0x00]) < cbor_bool!(false));
assert!(cbor_text!("s") < cbor_map! {});
assert!(cbor_text!("s") < cbor_bool!(false));
assert!(cbor_array![] < cbor_bool!(false));
}
}

View File

@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use super::values::{Constants, KeyType, Value};
use super::values::{Constants, Value};
use alloc::vec::Vec;
pub fn write(value: Value, encoded_cbor: &mut Vec<u8>) -> bool {
@@ -35,31 +35,36 @@ impl<'a> Writer<'a> {
if remaining_depth < 0 {
return false;
}
let type_label = value.type_label();
match value {
Value::KeyValue(KeyType::Unsigned(unsigned)) => self.start_item(0, unsigned),
Value::KeyValue(KeyType::Negative(negative)) => {
self.start_item(1, -(negative + 1) as u64)
}
Value::KeyValue(KeyType::ByteString(byte_string)) => {
self.start_item(2, byte_string.len() as u64);
Value::Unsigned(unsigned) => self.start_item(type_label, unsigned),
Value::Negative(negative) => self.start_item(type_label, -(negative + 1) as u64),
Value::ByteString(byte_string) => {
self.start_item(type_label, byte_string.len() as u64);
self.encoded_cbor.extend(byte_string);
}
Value::KeyValue(KeyType::TextString(text_string)) => {
self.start_item(3, text_string.len() as u64);
Value::TextString(text_string) => {
self.start_item(type_label, text_string.len() as u64);
self.encoded_cbor.extend(text_string.into_bytes());
}
Value::Array(array) => {
self.start_item(4, array.len() as u64);
self.start_item(type_label, array.len() as u64);
for el in array {
if !self.encode_cbor(el, remaining_depth - 1) {
return false;
}
}
}
Value::Map(map) => {
self.start_item(5, map.len() as u64);
Value::Map(mut map) => {
map.sort_by(|a, b| a.0.cmp(&b.0));
let map_len = map.len();
map.dedup_by(|a, b| a.0.eq(&b.0));
if map_len != map.len() {
return false;
}
self.start_item(type_label, map_len as u64);
for (k, v) in map {
if !self.encode_cbor(Value::KeyValue(k), remaining_depth - 1) {
if !self.encode_cbor(k, remaining_depth - 1) {
return false;
}
if !self.encode_cbor(v, remaining_depth - 1) {
@@ -67,7 +72,7 @@ impl<'a> Writer<'a> {
}
}
}
Value::Simple(simple_value) => self.start_item(7, simple_value as u64),
Value::Simple(simple_value) => self.start_item(type_label, simple_value as u64),
}
true
}
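The sort-and-dedup above is what keeps encodings canonical now that maps are plain vectors. A hedged sketch of the observable behavior, using the crate's re-exported `write` (the dedicated tests further down pin this down):

```rust
use cbor::{cbor_map, write};

// Unsorted input is fine: the writer sorts the keys before encoding.
assert!(write(cbor_map! {1 => "b", 0 => "a"}, &mut Vec::new()));
// Duplicate keys make encoding fail: write returns false.
assert!(!write(cbor_map! {0 => "a", 0 => "b"}, &mut Vec::new()));
```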
@@ -209,9 +214,16 @@ mod test {
#[test]
fn test_write_map() {
let value_map = cbor_map! {
"aa" => "AA",
"e" => "E",
"" => ".",
0 => "a",
23 => "b",
24 => "c",
std::u8::MAX as i64 => "d",
256 => "e",
std::u16::MAX as i64 => "f",
65536 => "g",
std::u32::MAX as i64 => "h",
4294967296_i64 => "i",
std::i64::MAX => "j",
-1 => "k",
-24 => "l",
-25 => "m",
@@ -224,16 +236,9 @@ mod test {
b"a" => 2,
b"bar" => 3,
b"foo" => 4,
0 => "a",
23 => "b",
24 => "c",
std::u8::MAX as i64 => "d",
256 => "e",
std::u16::MAX as i64 => "f",
65536 => "g",
std::u32::MAX as i64 => "h",
4294967296_i64 => "i",
std::i64::MAX => "j",
"" => ".",
"e" => "E",
"aa" => "AA",
};
let expected_cbor = vec![
0xb8, 0x19, // map of 25 pairs:
@@ -288,6 +293,67 @@ mod test {
assert_eq!(write_return(value_map), Some(expected_cbor));
}
#[test]
fn test_write_map_sorted() {
let sorted_map = cbor_map! {
0 => "a",
1 => "b",
-1 => "c",
-2 => "d",
b"a" => "e",
b"b" => "f",
"" => "g",
"c" => "h",
};
let unsorted_map = cbor_map! {
1 => "b",
-2 => "d",
b"b" => "f",
"c" => "h",
"" => "g",
b"a" => "e",
-1 => "c",
0 => "a",
};
assert_eq!(write_return(sorted_map), write_return(unsorted_map));
}
#[test]
fn test_write_map_duplicates() {
let duplicate0 = cbor_map! {
0 => "a",
-1 => "c",
b"a" => "e",
"c" => "g",
0 => "b",
};
assert_eq!(write_return(duplicate0), None);
let duplicate1 = cbor_map! {
0 => "a",
-1 => "c",
b"a" => "e",
"c" => "g",
-1 => "d",
};
assert_eq!(write_return(duplicate1), None);
let duplicate2 = cbor_map! {
0 => "a",
-1 => "c",
b"a" => "e",
"c" => "g",
b"a" => "f",
};
assert_eq!(write_return(duplicate2), None);
let duplicate3 = cbor_map! {
0 => "a",
-1 => "c",
b"a" => "e",
"c" => "g",
"c" => "h",
};
assert_eq!(write_return(duplicate3), None);
}
#[test]
fn test_write_map_with_array() {
let value_map = cbor_map! {

View File

@@ -25,5 +25,4 @@ regex = { version = "1", optional = true }
[features]
std = ["cbor/std", "hex", "rand", "ring", "untrusted", "serde", "serde_json", "regex"]
derive_debug = []
with_ctap1 = []

View File

@@ -18,11 +18,10 @@ use core::ops::Mul;
use subtle::{self, Choice, ConditionallySelectable, CtOption};
// An exponent on the elliptic curve, that is, an element modulo the curve order N.
#[derive(Clone, Copy, PartialEq, Eq)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
// TODO: remove this Default once https://github.com/dalek-cryptography/subtle/issues/63 is
// resolved.
#[derive(Default)]
#[cfg_attr(feature = "derive_debug", derive(Debug))]
pub struct ExponentP256 {
int: Int256,
}
@@ -92,11 +91,10 @@ impl Mul for &ExponentP256 {
}
// A non-zero exponent on the elliptic curve.
#[derive(Clone, Copy, PartialEq, Eq)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
// TODO: remove this Default once https://github.com/dalek-cryptography/subtle/issues/63 is
// resolved.
#[derive(Default)]
#[cfg_attr(feature = "derive_debug", derive(Debug))]
pub struct NonZeroExponentP256 {
e: ExponentP256,
}

View File

@@ -111,7 +111,6 @@ impl Mul for &GFP256 {
}
}
#[cfg(feature = "derive_debug")]
impl core::fmt::Debug for GFP256 {
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
write!(f, "GFP256::{:?}", self.int)

View File

@@ -636,7 +636,6 @@ impl SubAssign<&Int256> for Int256 {
}
}
#[cfg(feature = "derive_debug")]
impl core::fmt::Debug for Int256 {
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
write!(f, "Int256 {{ digits: {:08x?} }}", self.digits)

View File

@@ -542,7 +542,6 @@ impl Add for &PointProjective {
}
}
#[cfg(feature = "derive_debug")]
impl core::fmt::Debug for PointP256 {
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
f.debug_struct("PointP256")
@@ -552,7 +551,6 @@ impl core::fmt::Debug for PointP256 {
}
}
#[cfg(feature = "derive_debug")]
impl PartialEq for PointP256 {
fn eq(&self, other: &PointP256) -> bool {
self.x == other.x && self.y == other.y

View File

@@ -17,8 +17,6 @@ use super::ec::int256;
use super::ec::int256::Int256;
use super::ec::point::PointP256;
use super::rng256::Rng256;
use super::sha256::Sha256;
use super::Hash256;
pub const NBYTES: usize = int256::NBYTES;
@@ -26,7 +24,7 @@ pub struct SecKey {
a: NonZeroExponentP256,
}
#[cfg_attr(feature = "derive_debug", derive(Clone, PartialEq, Debug))]
#[derive(Clone, Debug, PartialEq)]
pub struct PubKey {
p: PointP256,
}
@@ -62,13 +60,15 @@ impl SecKey {
// - https://www.secg.org/sec1-v2.pdf
}
// DH key agreement method defined in the FIDO2 specification, Section 5.5.4. "Getting
// sharedSecret from Authenticator"
pub fn exchange_x_sha256(&self, other: &PubKey) -> [u8; 32] {
/// Performs the handshake using the Diffie Hellman key agreement.
///
/// This function generates the Z in the PIN protocol v1 specification.
/// https://drafts.fidoalliance.org/fido-2/stable-links-to-latest/fido-client-to-authenticator-protocol.html#pinProto1
pub fn exchange_x(&self, other: &PubKey) -> [u8; 32] {
let p = self.exchange_raw(other);
let mut x: [u8; 32] = [Default::default(); 32];
p.getx().to_int().to_bin(&mut x);
Sha256::hash(&x)
x
}
}
@@ -83,11 +83,13 @@ impl PubKey {
self.p.to_bytes_uncompressed(bytes);
}
/// Creates a new PubKey from its coordinates on the elliptic curve.
pub fn from_coordinates(x: &[u8; NBYTES], y: &[u8; NBYTES]) -> Option<PubKey> {
PointP256::new_checked_vartime(Int256::from_bin(x), Int256::from_bin(y))
.map(|p| PubKey { p })
}
/// Writes the coordinates into the passed in arrays.
pub fn to_coordinates(&self, x: &mut [u8; NBYTES], y: &mut [u8; NBYTES]) {
self.p.getx().to_int().to_bin(x);
self.p.gety().to_int().to_bin(y);
@@ -119,7 +121,7 @@ mod test {
/** Test that the exchanged key is the same on both sides **/
#[test]
fn test_exchange_x_sha256_is_symmetric() {
fn test_exchange_x_is_symmetric() {
let mut rng = ThreadRng256 {};
for _ in 0..ITERATIONS {
@@ -127,12 +129,12 @@ mod test {
let pk_a = sk_a.genpk();
let sk_b = SecKey::gensk(&mut rng);
let pk_b = sk_b.genpk();
assert_eq!(sk_a.exchange_x_sha256(&pk_b), sk_b.exchange_x_sha256(&pk_a));
assert_eq!(sk_a.exchange_x(&pk_b), sk_b.exchange_x(&pk_a));
}
}
#[test]
fn test_exchange_x_sha256_bytes_is_symmetric() {
fn test_exchange_x_bytes_is_symmetric() {
let mut rng = ThreadRng256 {};
for _ in 0..ITERATIONS {
@@ -146,7 +148,7 @@ mod test {
let pk_a = PubKey::from_bytes_uncompressed(&pk_bytes_a).unwrap();
let pk_b = PubKey::from_bytes_uncompressed(&pk_bytes_b).unwrap();
assert_eq!(sk_a.exchange_x_sha256(&pk_b), sk_b.exchange_x_sha256(&pk_a));
assert_eq!(sk_a.exchange_x(&pk_b), sk_b.exchange_x(&pk_a));
}
}

View File

@@ -21,14 +21,16 @@ use super::rng256::Rng256;
use super::{Hash256, HashBlockSize64Bytes};
use alloc::vec;
use alloc::vec::Vec;
#[cfg(test)]
use arrayref::array_mut_ref;
#[cfg(feature = "std")]
use arrayref::array_ref;
use arrayref::{array_mut_ref, mut_array_refs};
use cbor::{cbor_bytes, cbor_map_options};
use arrayref::mut_array_refs;
use core::marker::PhantomData;
#[derive(Clone, PartialEq)]
#[cfg_attr(feature = "derive_debug", derive(Debug))]
pub const NBYTES: usize = int256::NBYTES;
#[derive(Clone, Debug, PartialEq)]
pub struct SecKey {
k: NonZeroExponentP256,
}
@@ -38,6 +40,7 @@ pub struct Signature {
s: NonZeroExponentP256,
}
#[derive(Clone)]
pub struct PubKey {
p: PointP256,
}
@@ -58,10 +61,11 @@ impl SecKey {
}
}
// ECDSA signature based on a RNG to generate a suitable randomization parameter.
// Under the hood, rejection sampling is used to make sure that the randomization parameter is
// uniformly distributed.
// The provided RNG must be cryptographically secure; otherwise this method is insecure.
/// Creates an ECDSA signature based on a RNG.
///
/// Under the hood, rejection sampling is used to make sure that the
/// randomization parameter is uniformly distributed. The provided RNG must
/// be cryptographically secure; otherwise this method is insecure.
pub fn sign_rng<H, R>(&self, msg: &[u8], rng: &mut R) -> Signature
where
H: Hash256,
@@ -77,8 +81,7 @@ impl SecKey {
}
}
// Deterministic ECDSA signature based on RFC 6979 to generate a suitable randomization
// parameter.
/// Creates a deterministic ECDSA signature based on RFC 6979.
pub fn sign_rfc6979<H>(&self, msg: &[u8]) -> Signature
where
H: Hash256 + HashBlockSize64Bytes,
@@ -101,8 +104,10 @@ impl SecKey {
}
}
// Try signing a curve element given a randomization parameter k. If no signature can be
// obtained from this k, None is returned and the caller should try again with another value.
/// Try signing a curve element given a randomization parameter k.
///
/// If no signature can be obtained from this k, None is returned and the
/// caller should try again with another value.
fn try_sign(&self, k: &NonZeroExponentP256, msg: &ExponentP256) -> Option<Signature> {
let r = ExponentP256::modn(PointP256::base_point_mul(k.as_exponent()).getx().to_int());
// The branching here is fine because all this reveals is that k generated an unsuitable r.
@@ -214,7 +219,6 @@ impl Signature {
}
impl PubKey {
pub const ES256_ALGORITHM: i64 = -7;
#[cfg(feature = "with_ctap1")]
const UNCOMPRESSED_LENGTH: usize = 1 + 2 * int256::NBYTES;
@@ -242,35 +246,10 @@ impl PubKey {
representation
}
// Encodes the key according to CBOR Object Signing and Encryption, defined in RFC 8152.
pub fn to_cose_key(&self) -> Option<Vec<u8>> {
const EC2_KEY_TYPE: i64 = 2;
const P_256_CURVE: i64 = 1;
let mut x_bytes = vec![0; int256::NBYTES];
self.p
.getx()
.to_int()
.to_bin(array_mut_ref![x_bytes.as_mut_slice(), 0, int256::NBYTES]);
let x_byte_cbor: cbor::Value = cbor_bytes!(x_bytes);
let mut y_bytes = vec![0; int256::NBYTES];
self.p
.gety()
.to_int()
.to_bin(array_mut_ref![y_bytes.as_mut_slice(), 0, int256::NBYTES]);
let y_byte_cbor: cbor::Value = cbor_bytes!(y_bytes);
let cbor_value = cbor_map_options! {
1 => EC2_KEY_TYPE,
3 => PubKey::ES256_ALGORITHM,
-1 => P_256_CURVE,
-2 => x_byte_cbor,
-3 => y_byte_cbor,
};
let mut encoded_key = Vec::new();
if cbor::write(cbor_value, &mut encoded_key) {
Some(encoded_key)
} else {
None
}
/// Writes the coordinates into the passed in arrays.
pub fn to_coordinates(&self, x: &mut [u8; NBYTES], y: &mut [u8; NBYTES]) {
self.p.getx().to_int().to_bin(x);
self.p.gety().to_int().to_bin(y);
}
#[cfg(feature = "std")]

View File

@@ -0,0 +1,226 @@
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::hmac::hmac_256;
use super::{Hash256, HashBlockSize64Bytes};
const HASH_SIZE: usize = 32;
/// Computes the HKDF with empty salt and 256-bit (one block) output.
///
/// # Arguments
///
/// * `ikm` - Input keying material
/// * `info` - Optional context and application specific information
///
/// This implementation is equivalent to the `hkdf` function below, with `salt` set to the
/// default block of zeros and the output length `l` set to 32.
pub fn hkdf_empty_salt_256<H>(ikm: &[u8], info: &[u8]) -> [u8; HASH_SIZE]
where
H: Hash256 + HashBlockSize64Bytes,
{
// Salt is a zero block here.
let prk = hmac_256::<H>(&[0; HASH_SIZE], ikm);
// l is implicitly the block size, so we iterate exactly once.
let mut t = info.to_vec();
t.push(1);
hmac_256::<H>(&prk, t.as_slice())
}
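A minimal usage sketch, assuming the crate-local `Sha256` (as used by the tests below) satisfies the `Hash256 + HashBlockSize64Bytes` bound; the module path is an assumption:

```rust
use super::sha256::Sha256; // assumed path, matching the test imports below

// Derives a 32-byte key from the input keying material and context info.
let okm: [u8; 32] = hkdf_empty_salt_256::<Sha256>(b"input keying material", b"context info");
```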
/// Computes the HKDF.
///
/// # Arguments
///
/// * `salt` - Optional salt value (a non-secret random value)
/// * `ikm` - Input keying material
/// * `l` - Length of output keying material in octets
/// * `info` - Optional context and application specific information
///
/// Defined in RFC: https://tools.ietf.org/html/rfc5869
///
/// `salt` and `info` can be empty. `salt` then defaults to one block of
/// zeros of size `HASH_SIZE`. Argument order is taken from:
/// https://fidoalliance.org/specs/fido-v2.1-rd-20201208/fido-client-to-authenticator-protocol-v2.1-rd-20201208.html#pinProto2
#[cfg(test)]
pub fn hkdf<H>(salt: &[u8], ikm: &[u8], l: u8, info: &[u8]) -> Vec<u8>
where
H: Hash256 + HashBlockSize64Bytes,
{
let prk = if salt.is_empty() {
hmac_256::<H>(&[0; HASH_SIZE], ikm)
} else {
hmac_256::<H>(salt, ikm)
};
let mut t = vec![];
let mut okm = vec![];
for i in 0..(l as usize + HASH_SIZE - 1) / HASH_SIZE {
t.extend_from_slice(info);
t.push((i + 1) as u8);
t = hmac_256::<H>(&prk, t.as_slice()).to_vec();
okm.extend_from_slice(t.as_slice());
}
okm.truncate(l as usize);
okm
}
#[cfg(test)]
mod test {
use super::super::sha256::Sha256;
use super::*;
use arrayref::array_ref;
#[test]
fn test_hkdf_sha256_vectors() {
// Test vectors taken from https://tools.ietf.org/html/rfc5869.
let ikm = hex::decode("0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b").unwrap();
let salt = hex::decode("000102030405060708090a0b0c").unwrap();
let info = hex::decode("f0f1f2f3f4f5f6f7f8f9").unwrap();
let l = 42;
let okm = hex::decode(
"3cb25f25faacd57a90434f64d0362f2a2d2d0a90cf1a5a4c5db02d56ecc4c5bf34007208d5b887185865",
)
.unwrap();
assert_eq!(
hkdf::<Sha256>(salt.as_slice(), ikm.as_slice(), l, info.as_slice()),
okm
);
let ikm = hex::decode(
"000102030405060708090a0b0c0d0e0f\
101112131415161718191a1b1c1d1e1f\
202122232425262728292a2b2c2d2e2f\
303132333435363738393a3b3c3d3e3f\
404142434445464748494a4b4c4d4e4f",
)
.unwrap();
let salt = hex::decode(
"606162636465666768696a6b6c6d6e6f\
707172737475767778797a7b7c7d7e7f\
808182838485868788898a8b8c8d8e8f\
909192939495969798999a9b9c9d9e9f\
a0a1a2a3a4a5a6a7a8a9aaabacadaeaf",
)
.unwrap();
let info = hex::decode(
"b0b1b2b3b4b5b6b7b8b9babbbcbdbebf\
c0c1c2c3c4c5c6c7c8c9cacbcccdcecf\
d0d1d2d3d4d5d6d7d8d9dadbdcdddedf\
e0e1e2e3e4e5e6e7e8e9eaebecedeeef\
f0f1f2f3f4f5f6f7f8f9fafbfcfdfeff",
)
.unwrap();
let l = 82;
let okm = hex::decode(
"b11e398dc80327a1c8e7f78c596a4934\
4f012eda2d4efad8a050cc4c19afa97c\
59045a99cac7827271cb41c65e590e09\
da3275600c2f09b8367793a9aca3db71\
cc30c58179ec3e87c14c01d5c1f3434f\
1d87",
)
.unwrap();
assert_eq!(
hkdf::<Sha256>(salt.as_slice(), ikm.as_slice(), l, info.as_slice()),
okm
);
let ikm = hex::decode("0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b").unwrap();
let salt = hex::decode("").unwrap();
let info = hex::decode("").unwrap();
let l = 42;
let okm = hex::decode(
"8da4e775a563c18f715f802a063c5a31b8a11f5c5ee1879ec3454e5f3c738d2d9d201395faa4b61a96c8",
)
.unwrap();
assert_eq!(
hkdf::<Sha256>(salt.as_slice(), ikm.as_slice(), l, info.as_slice()),
okm
);
}
#[test]
fn test_hkdf_empty_salt_256_sha256_vectors() {
// Test vectors generated by pycryptodome using:
// HKDF(b'0', 32, b'', SHA256, context=b'\x00').hex()
let test_okms = [
hex::decode("f9be72116cb97f41828210289caafeabde1f3dfb9723bf43538ab18f3666783a")
.unwrap(),
hex::decode("f50f964f5b94d62fd1da9356ab8662b0a0f5b8e36e277178b69b6ffecf50cf44")
.unwrap(),
hex::decode("fc8772ceb5592d67442dcb4353cdd28519e82d6e55b4cf664b5685252c2d2998")
.unwrap(),
hex::decode("62831b924839a180f53be5461eeea1b89dc21779f50142b5a54df0f0cc86d61a")
.unwrap(),
hex::decode("6991f00a12946a4e3b8315cdcf0132c2ca508fd17b769f08d1454d92d33733e0")
.unwrap(),
hex::decode("0f9bb7dddd1ec61f91d8c4f5369b5870f9d44c4ceabccca1b83f06fec115e4e3")
.unwrap(),
hex::decode("235367e2ab6cca2aba1a666825458dba6b272a215a2537c05feebe4b80dab709")
.unwrap(),
hex::decode("96e8edad661da48d1a133b38c255d33e05555bc9aa442579dea1cd8d8b8d2aef")
.unwrap(),
];
for (i, okm) in test_okms.iter().enumerate() {
// String of number i.
let ikm = i.to_string();
// Byte i.
let info = [i as u8];
assert_eq!(
&hkdf_empty_salt_256::<Sha256>(&ikm.as_bytes(), &info[..]),
array_ref!(okm, 0, 32)
);
}
}
#[test]
fn test_hkdf_length() {
let salt = [];
let mut input = Vec::new();
for l in 0..128 {
assert_eq!(
hkdf::<Sha256>(&salt, input.as_slice(), l, input.as_slice()).len(),
l as usize
);
input.push(b'A');
}
}
#[test]
fn test_hkdf_empty_salt() {
let salt = [];
let mut input = Vec::new();
for l in 0..128 {
assert_eq!(
hkdf::<Sha256>(&salt, input.as_slice(), l, input.as_slice()),
hkdf::<Sha256>(&[0; 32], input.as_slice(), l, input.as_slice())
);
input.push(b'A');
}
}
#[test]
fn test_hkdf_compare_implementations() {
let salt = [];
let l = 32;
let mut input = Vec::new();
for _ in 0..128 {
assert_eq!(
hkdf::<Sha256>(&salt, input.as_slice(), l, input.as_slice()),
hkdf_empty_salt_256::<Sha256>(input.as_slice(), input.as_slice())
);
input.push(b'A');
}
}
}

View File

@@ -22,6 +22,7 @@ pub mod cbc;
mod ec;
pub mod ecdh;
pub mod ecdsa;
pub mod hkdf;
pub mod hmac;
pub mod rng256;
pub mod sha256;

View File

@@ -11,6 +11,8 @@ cargo-fuzz = true
[dependencies]
libfuzzer-sys = "0.3"
persistent_store = { path = "..", features = ["std"] }
rand_core = "0.5"
rand_pcg = "0.2"
strum = { version = "0.19", features = ["derive"] }
# Prevent this from interfering with workspaces

View File

@@ -0,0 +1,116 @@
// Copyright 2019-2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use fuzz_store::{fuzz, StatKey, Stats};
use std::io::Write;
use std::io::{stdout, Read};
use std::path::Path;
fn usage(program: &str) {
println!(
r#"Usage: {} {{ [<artifact_file>] | <corpus_directory> <bucket_predicate>.. }}
If <artifact_file> is not provided, it is read from standard input.
When <bucket_predicate>.. are provided, only runs matching all predicates are shown. The format of
each <bucket_predicate> is <bucket_key>=<bucket_value>."#,
program
);
}
fn debug(data: &[u8]) {
println!("{:02x?}", data);
fuzz(data, true, None);
}
/// Bucket predicate.
struct Predicate {
/// Bucket key.
key: StatKey,
/// Bucket value.
value: usize,
}
impl std::str::FromStr for Predicate {
type Err = String;
fn from_str(input: &str) -> Result<Self, Self::Err> {
let predicate: Vec<&str> = input.split('=').collect();
if predicate.len() != 2 {
return Err("Predicate should have exactly one equal sign.".to_string());
}
let key = predicate[0]
.parse()
.map_err(|_| format!("Predicate key `{}` is not recognized.", predicate[0]))?;
let value: usize = predicate[1]
.parse()
.map_err(|_| format!("Predicate value `{}` is not a number.", predicate[1]))?;
if value != 0 && !value.is_power_of_two() {
return Err(format!(
"Predicate value `{}` is not a bucket.",
predicate[1]
));
}
Ok(Predicate { key, value })
}
}
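A hedged sketch of how a predicate parses, assuming `StatKey` gets its `FromStr` from `strum`'s derive (the `strum` dependency with the `derive` feature appears in the fuzz `Cargo.toml` above), so variant names such as `NumPages` parse directly:

```rust
// Accepted: the key is a known StatKey and 8 is a power of two.
let p: Predicate = "NumPages=8".parse().expect("valid predicate");
assert_eq!(p.value, 8);

// Rejected: 7 is neither 0 nor a power of two, so it is not a bucket.
assert!("NumPages=7".parse::<Predicate>().is_err());
```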
fn analyze(corpus: &Path, predicates: Vec<Predicate>) {
let mut stats = Stats::default();
let mut count = 0;
let total = std::fs::read_dir(corpus).unwrap().count();
for entry in std::fs::read_dir(corpus).unwrap() {
let data = std::fs::read(entry.unwrap().path()).unwrap();
let mut stat = Stats::default();
fuzz(&data, false, Some(&mut stat));
if predicates
.iter()
.all(|p| stat.get_count(p.key, p.value).is_some())
{
stats.merge(&stat);
}
count += 1;
print!("\u{1b}[K{} / {}\r", count, total);
stdout().flush().unwrap();
}
// NOTE: To avoid reloading the corpus each time we want to check a different filter, we can
// start an interactive loop here taking filters as input and printing the filtered stats. We
// would keep all individual stats for each run in a vector.
print!("{}", stats);
}
fn main() {
let args: Vec<String> = std::env::args().collect();
// No arguments reads from stdin.
if args.len() <= 1 {
let stdin = std::io::stdin();
let mut data = Vec::new();
stdin.lock().read_to_end(&mut data).unwrap();
return debug(&data);
}
let path = Path::new(&args[1]);
// File argument assumes artifact.
if path.is_file() && args.len() == 2 {
return debug(&std::fs::read(path).unwrap());
}
// Directory argument assumes corpus.
if path.is_dir() {
match args[2..].iter().map(|x| x.parse()).collect() {
Ok(predicates) => return analyze(path, predicates),
Err(error) => eprintln!("Error: {}", error),
}
}
usage(&args[0]);
}

View File

@@ -17,5 +17,5 @@
use libfuzzer_sys::fuzz_target;
fuzz_target!(|data: &[u8]| {
// TODO(ia0): Call fuzzing when implemented.
fuzz_store::fuzz(data, false, None);
});

View File

@@ -25,13 +25,12 @@
//! situation where coverage takes precedence over surjectivity is for the value of insert updates
//! where a pseudo-random generator is used to avoid wasting entropy.
// TODO(ia0): Remove when used.
#![allow(dead_code)]
mod histogram;
mod stats;
mod store;
pub use stats::{StatKey, Stats};
pub use store::fuzz;
/// Bit-level entropy source based on a byte slice shared reference.
///

View File

@@ -0,0 +1,426 @@
// Copyright 2019-2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::stats::{StatKey, Stats};
use crate::Entropy;
use persistent_store::{
BufferOptions, BufferStorage, Store, StoreDriver, StoreDriverOff, StoreDriverOn,
StoreInterruption, StoreInvariant, StoreOperation, StoreUpdate,
};
use rand_core::{RngCore, SeedableRng};
use rand_pcg::Pcg32;
use std::collections::HashMap;
use std::convert::TryInto;
// NOTE: We should be able to improve coverage by only checking the last operation, because
// operations before the last could be checked with shorter entropy.
// NOTE: Maybe we should split the fuzz target into smaller parts (like one per init). We should
// also name the fuzz targets with action names.
/// Checks the store against a sequence of manipulations.
///
/// The entropy to generate the sequence of manipulations should be provided in `data`. Debugging
/// information is printed if `debug` is set. Statistics are gathered if `stats` is set.
pub fn fuzz(data: &[u8], debug: bool, stats: Option<&mut Stats>) {
let mut fuzzer = Fuzzer::new(data, debug, stats);
let mut driver = fuzzer.init();
let store = loop {
if fuzzer.debug {
print!("{}", driver.storage());
}
if let StoreDriver::On(driver) = &driver {
if !fuzzer.init.is_dirty() {
driver.check().unwrap();
}
if fuzzer.debug {
println!("----------------------------------------------------------------------");
}
}
if fuzzer.entropy.is_empty() {
if fuzzer.debug {
println!("No more entropy.");
}
if fuzzer.init.is_dirty() {
return;
}
fuzzer.record(StatKey::FinishedLifetime, 0);
break driver.power_on().unwrap().extract_store();
}
driver = match driver {
StoreDriver::On(driver) => match fuzzer.apply(driver) {
Ok(x) => x,
Err(store) => {
if fuzzer.debug {
println!("No more lifetime.");
}
if fuzzer.init.is_dirty() {
return;
}
fuzzer.record(StatKey::FinishedLifetime, 1);
break store;
}
},
StoreDriver::Off(driver) => fuzzer.power_on(driver),
}
};
let virt_window = (store.format().num_pages() * store.format().virt_page_size()) as usize;
let init_lifetime = fuzzer.init.used_cycles() * virt_window;
let lifetime = store.lifetime().unwrap().used() - init_lifetime;
fuzzer.record(StatKey::UsedLifetime, lifetime);
fuzzer.record(StatKey::NumCompactions, lifetime / virt_window);
fuzzer.record_counters();
}
/// Fuzzing state.
struct Fuzzer<'a> {
/// Remaining fuzzing entropy.
entropy: Entropy<'a>,
/// Unlimited pseudo entropy.
///
/// This source is only used to generate the values of entries. This is a compromise to avoid
/// consuming fuzzing entropy for low additional coverage.
values: Pcg32,
/// The fuzzing mode.
init: Init,
/// Whether debugging is enabled.
debug: bool,
/// Whether statistics should be gathered.
stats: Option<&'a mut Stats>,
/// Statistics counters (only used when gathering statistics).
///
/// The counters are written to the statistics at the end of the fuzzing run, when their value
/// is final.
counters: HashMap<StatKey, usize>,
}
impl<'a> Fuzzer<'a> {
/// Creates an initial fuzzing state.
fn new(data: &'a [u8], debug: bool, stats: Option<&'a mut Stats>) -> Fuzzer<'a> {
let mut entropy = Entropy::new(data);
let seed = entropy.read_slice(16);
let values = Pcg32::from_seed(seed[..].try_into().unwrap());
let mut fuzzer = Fuzzer {
entropy,
values,
init: Init::Clean,
debug,
stats,
counters: HashMap::new(),
};
fuzzer.init_counters();
fuzzer.record(StatKey::Entropy, data.len());
fuzzer
}
/// Initializes the fuzzing state and returns the store driver.
fn init(&mut self) -> StoreDriver {
let mut options = BufferOptions {
word_size: 4,
page_size: 1 << self.entropy.read_range(5, 12),
max_word_writes: 2,
max_page_erases: self.entropy.read_range(0, 50000),
strict_mode: true,
};
let num_pages = self.entropy.read_range(3, 64);
self.record(StatKey::PageSize, options.page_size);
self.record(StatKey::MaxPageErases, options.max_page_erases);
self.record(StatKey::NumPages, num_pages);
if self.debug {
println!("page_size: {}", options.page_size);
println!("num_pages: {}", num_pages);
println!("max_cycle: {}", options.max_page_erases);
}
let storage_size = num_pages * options.page_size;
if self.entropy.read_bit() {
self.init = Init::Dirty;
let mut storage = vec![0xff; storage_size].into_boxed_slice();
let length = self.entropy.read_range(0, storage_size);
self.record(StatKey::DirtyLength, length);
for byte in &mut storage[0..length] {
*byte = self.entropy.read_byte();
}
if self.debug {
println!("Start with dirty storage.");
}
options.strict_mode = false;
let storage = BufferStorage::new(storage, options);
StoreDriver::Off(StoreDriverOff::new_dirty(storage))
} else if self.entropy.read_bit() {
let cycle = self.entropy.read_range(0, options.max_page_erases);
self.init = Init::Used { cycle };
if self.debug {
println!("Start with {} consumed erase cycles.", cycle);
}
self.record(StatKey::InitCycles, cycle);
let storage = vec![0xff; storage_size].into_boxed_slice();
let mut storage = BufferStorage::new(storage, options);
Store::init_with_cycle(&mut storage, cycle);
StoreDriver::Off(StoreDriverOff::new_dirty(storage))
} else {
StoreDriver::Off(StoreDriverOff::new(options, num_pages))
}
}
/// Powers on a driver with a possible interruption.
fn power_on(&mut self, driver: StoreDriverOff) -> StoreDriver {
if self.debug {
println!("Power on the store.");
}
self.increment(StatKey::PowerOnCount);
let interruption = self.interruption(driver.count_operations());
match driver.partial_power_on(interruption) {
Err((storage, _)) if self.init.is_dirty() => {
self.entropy.consume_all();
StoreDriver::Off(StoreDriverOff::new_dirty(storage))
}
Err(error) => self.crash(error),
Ok(driver) => driver,
}
}
/// Generates and applies an operation with a possible interruption.
fn apply(&mut self, driver: StoreDriverOn) -> Result<StoreDriver, Store<BufferStorage>> {
let operation = self.operation(&driver);
if self.debug {
println!("{:?}", operation);
}
let interruption = self.interruption(driver.count_operations(&operation));
match driver.partial_apply(operation, interruption) {
Err((store, _)) if self.init.is_dirty() => {
self.entropy.consume_all();
Err(store)
}
Err((store, StoreInvariant::NoLifetime)) => Err(store),
Err((store, error)) => self.crash((store.extract_storage(), error)),
Ok((error, driver)) => {
if self.debug {
if let Some(error) = error {
println!("{:?}", error);
}
}
Ok(driver)
}
}
}
/// Reports a broken invariant and terminates fuzzing.
fn crash(&self, error: (BufferStorage, StoreInvariant)) -> ! {
let (storage, invariant) = error;
if self.debug {
print!("{}", storage);
}
panic!("{:?}", invariant);
}
/// Records a statistic if statistics are enabled.
fn record(&mut self, key: StatKey, value: usize) {
if let Some(stats) = &mut self.stats {
stats.add(key, value);
}
}
/// Increments a counter if statistics are enabled.
fn increment(&mut self, key: StatKey) {
if self.stats.is_some() {
*self.counters.get_mut(&key).unwrap() += 1;
}
}
/// Initializes all counters if statistics are enabled.
fn init_counters(&mut self) {
if self.stats.is_some() {
use StatKey::*;
self.counters.insert(PowerOnCount, 0);
self.counters.insert(TransactionCount, 0);
self.counters.insert(ClearCount, 0);
self.counters.insert(PrepareCount, 0);
self.counters.insert(InsertCount, 0);
self.counters.insert(RemoveCount, 0);
self.counters.insert(InterruptionCount, 0);
}
}
/// Records all counters if statistics are enabled.
fn record_counters(&mut self) {
if let Some(stats) = &mut self.stats {
for (&key, &value) in self.counters.iter() {
stats.add(key, value);
}
}
}
/// Generates a possibly invalid operation.
fn operation(&mut self, driver: &StoreDriverOn) -> StoreOperation {
let format = driver.model().format();
match self.entropy.read_range(0, 2) {
0 => {
// We also generate an invalid count (one past the maximum value) to test the error
// scenario. Since the test for the error scenario is monotonic, this is a good
// compromise to keep entropy bounded.
let count = self
.entropy
.read_range(0, format.max_updates() as usize + 1);
let mut updates = Vec::with_capacity(count);
for _ in 0..count {
updates.push(self.update());
}
self.increment(StatKey::TransactionCount);
StoreOperation::Transaction { updates }
}
1 => {
let min_key = self.key();
self.increment(StatKey::ClearCount);
StoreOperation::Clear { min_key }
}
2 => {
// We also generate an invalid length (one past the total capacity) to test the
// error scenario. See the explanation for transactions above for why it's enough.
let length = self
.entropy
.read_range(0, format.total_capacity() as usize + 1);
self.increment(StatKey::PrepareCount);
StoreOperation::Prepare { length }
}
_ => unreachable!(),
}
}
/// Generates a possibly invalid update.
fn update(&mut self) -> StoreUpdate<Vec<u8>> {
match self.entropy.read_range(0, 1) {
0 => {
let key = self.key();
let value = self.value();
self.increment(StatKey::InsertCount);
StoreUpdate::Insert { key, value }
}
1 => {
let key = self.key();
self.increment(StatKey::RemoveCount);
StoreUpdate::Remove { key }
}
_ => unreachable!(),
}
}
/// Generates a possibly invalid key.
fn key(&mut self) -> usize {
// Use 4096 as the canonical invalid key.
self.entropy.read_range(0, 4096)
}
/// Generates a possibly invalid value.
fn value(&mut self) -> Vec<u8> {
// Use 1024 as the canonical invalid length.
let length = self.entropy.read_range(0, 1024);
let mut value = vec![0; length];
self.values.fill_bytes(&mut value);
value
}
/// Generates an interruption.
///
/// `max_delay` is the number of storage operations needed by the interrupted operation, if known.
fn interruption(&mut self, max_delay: Option<usize>) -> StoreInterruption {
if self.init.is_dirty() {
// We only test that the store can power on without crashing. If it were
// interrupted, it would be like powering on with a different initial state, which is
// covered by other fuzzing inputs.
return StoreInterruption::none();
}
let max_delay = match max_delay {
Some(x) => x,
None => return StoreInterruption::none(),
};
let delay = self.entropy.read_range(0, max_delay);
if self.debug {
if delay == max_delay {
println!("Do not interrupt.");
} else {
println!("Interrupt after {} operations.", delay);
}
}
if delay < max_delay {
self.increment(StatKey::InterruptionCount);
}
let corrupt = Box::new(move |old: &mut [u8], new: &[u8]| {
let mut count = 0;
let mut total = 0;
for (old, new) in old.iter_mut().zip(new.iter()) {
for bit in 0..8 {
let mask = 1 << bit;
if *old & mask == *new & mask {
continue;
}
total += 1;
if self.entropy.read_bit() {
count += 1;
*old ^= mask;
}
}
}
if self.debug {
println!("Flip {} bits out of {}.", count, total);
}
});
StoreInterruption { delay, corrupt }
}
}
/// The initial fuzzing mode.
enum Init {
/// Fuzzing starts from a clean storage.
///
/// All invariants are checked.
Clean,
/// Fuzzing starts from a dirty storage.
///
/// Only crashing is checked.
Dirty,
/// Fuzzing starts from a simulated old storage.
///
/// All invariants are checked.
Used {
/// Number of simulated used cycles.
cycle: usize,
},
}
impl Init {
/// Returns whether fuzzing is in dirty mode.
fn is_dirty(&self) -> bool {
match self {
Init::Dirty => true,
_ => false,
}
}
/// Returns the number of used cycles.
///
/// This is zero if the storage was not artificially aged.
fn used_cycles(&self) -> usize {
match self {
Init::Used { cycle } => *cycle,
_ => 0,
}
}
}

View File

@@ -12,6 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//! Flash storage for testing.
//!
//! [`BufferStorage`] implements the flash [`Storage`] interface but doesn't interface with an
//! actual flash storage. Instead it uses a buffer in memory to represent the storage state.
use crate::{Storage, StorageError, StorageIndex, StorageResult};
use alloc::borrow::Borrow;
use alloc::boxed::Box;
@@ -23,9 +28,9 @@ use alloc::vec;
/// for tests and fuzzing, for which it has dedicated functionalities.
///
/// This storage tracks how many times words are written between page erase cycles, how many times
/// pages are erased, and whether an operation flips bits in the wrong direction (optional).
/// Operations panic if those conditions are broken. This storage also permits to interrupt
/// operations for inspection or to corrupt the operation.
/// pages are erased, and whether an operation flips bits in the wrong direction. Operations panic
/// if those conditions are broken (optional). This storage also makes it possible to interrupt
/// operations for inspection or to corrupt the operation.
#[derive(Clone)]
pub struct BufferStorage {
/// Content of the storage.
@@ -59,8 +64,13 @@ pub struct BufferOptions {
/// How many times a page can be erased.
pub max_page_erases: usize,
/// Whether bits cannot be written from 0 to 1.
pub strict_write: bool,
/// Whether the storage should check the flash invariant.
///
/// If set, operations panic when any of the following occurs:
/// - A bit is written from 0 to 1.
/// - A word is written more than [`Self::max_word_writes`] times.
/// - A page is erased more than [`Self::max_page_erases`] times.
pub strict_mode: bool,
}
/// Corrupts a slice given actual and expected value.
@@ -105,15 +115,13 @@ impl BufferStorage {
///
/// Before each subsequent mutable operation (write or erase), the delay is decremented if
/// positive. Once the delay has elapsed, the operation is saved and an error is returned.
/// Subsequent operations will panic until the interrupted operation is [corrupted] or the
/// interruption is [reset].
/// Subsequent operations will panic until one of the following happens:
/// - The interrupted operation is [corrupted](BufferStorage::corrupt_operation).
/// - The interruption is [reset](BufferStorage::reset_interruption).
///
/// # Panics
///
/// Panics if an interruption is already armed.
///
/// [corrupted]: struct.BufferStorage.html#method.corrupt_operation
/// [reset]: struct.BufferStorage.html#method.reset_interruption
pub fn arm_interruption(&mut self, delay: usize) {
self.interruption.arm(delay);
}
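A minimal sketch of the intended arm/corrupt flow (the storage setup and the interrupted operation are elided; the corruption shown assumes a write operation):

```rust
// Interrupt the third mutable operation (the delay is decremented twice
// before it elapses).
storage.arm_interruption(2);
// ... apply writes or erases until one returns an error ...
storage.corrupt_operation(Box::new(|old: &mut [u8], new: &[u8]| {
    // Partially apply the operation: only the first byte reaches the storage.
    old[0] = new[0];
}));
```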
@@ -125,10 +133,8 @@ impl BufferStorage {
/// # Panics
///
/// Panics if any of the following conditions hold:
/// - An interruption was not [armed].
/// - An interruption was not [armed](BufferStorage::arm_interruption).
/// - An interruption was armed and it has triggered.
///
/// [armed]: struct.BufferStorage.html#method.arm_interruption
pub fn disarm_interruption(&mut self) -> usize {
self.interruption.get().err().unwrap()
}
@@ -137,16 +143,14 @@ impl BufferStorage {
///
/// # Panics
///
/// Panics if an interruption was not [armed].
///
/// [armed]: struct.BufferStorage.html#method.arm_interruption
/// Panics if an interruption was not [armed](BufferStorage::arm_interruption).
pub fn reset_interruption(&mut self) {
let _ = self.interruption.get();
}
/// Corrupts an interrupted operation.
///
/// Applies the [corruption function] to the storage. Counters are updated accordingly:
/// Applies the corruption function to the storage. Counters are updated accordingly:
/// - If a word is fully written, its counter is incremented regardless of whether other words
/// of the same operation have been fully written.
/// - If a page is fully erased, its counter is incremented (and its word counters are reset).
@@ -154,13 +158,10 @@ impl BufferStorage {
/// # Panics
///
/// Panics if any of the following conditions hold:
/// - An interruption was not [armed].
/// - An interruption was not [armed](BufferStorage::arm_interruption).
/// - An interruption was armed but did not trigger.
/// - The corruption function corrupts more bits than allowed.
/// - The interrupted operation itself would have panicked.
///
/// [armed]: struct.BufferStorage.html#method.arm_interruption
/// [corruption function]: type.BufferCorruptFunction.html
pub fn corrupt_operation(&mut self, corrupt: BufferCorruptFunction) {
let operation = self.interruption.get().unwrap();
let range = self.operation_range(&operation).unwrap();
@@ -212,9 +213,13 @@ impl BufferStorage {
///
/// # Panics
///
/// Panics if the maximum number of erase cycles per page is reached.
/// Panics if the [maximum number of erase cycles per page](BufferOptions::max_page_erases) is
/// reached.
fn incr_page_erases(&mut self, page: usize) {
assert!(self.page_erases[page] < self.max_page_erases());
// Check that pages are not erased too many times.
if self.options.strict_mode {
assert!(self.page_erases[page] < self.max_page_erases());
}
self.page_erases[page] += 1;
let num_words = self.page_size() / self.word_size();
for word in 0..num_words {
@@ -235,7 +240,8 @@ impl BufferStorage {
///
/// # Panics
///
/// Panics if the maximum number of writes per word is reached.
/// Panics if the [maximum number of writes per word](BufferOptions::max_word_writes) is
/// reached.
fn incr_word_writes(&mut self, index: usize, value: &[u8], complete: &[u8]) {
let word_size = self.word_size();
for i in 0..value.len() / word_size {
@@ -252,7 +258,10 @@ impl BufferStorage {
continue;
}
let word = index / word_size + i;
assert!(self.word_writes[word] < self.max_word_writes());
// Check that words are not written too many times.
if self.options.strict_mode {
assert!(self.word_writes[word] < self.max_word_writes());
}
self.word_writes[word] += 1;
}
}
@@ -306,8 +315,8 @@ impl Storage for BufferStorage {
self.interruption.tick(&operation)?;
// Check and update counters.
self.incr_word_writes(range.start, value, value);
// Check strict write.
if self.options.strict_write {
// Check that bits are correctly flipped.
if self.options.strict_mode {
for (byte, &val) in range.clone().zip(value.iter()) {
assert_eq!(self.storage[byte] & val, val);
}
@@ -472,7 +481,7 @@ mod tests {
page_size: 16,
max_word_writes: 2,
max_page_erases: 3,
strict_write: true,
strict_mode: true,
};
// Those words are decreasing bit patterns. Bits are only changed from 1 to 0 and at least one
// bit is changed.

View File

@@ -12,6 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//! Store wrapper for testing.
//!
//! [`StoreDriver`] wraps a [`Store`] and compares its behavior with its associated [`StoreModel`].
use crate::format::{Format, Position};
#[cfg(test)]
use crate::StoreUpdate;
@@ -181,6 +185,12 @@ pub enum StoreInvariant {
},
}
impl From<StoreError> for StoreInvariant {
fn from(error: StoreError) -> StoreInvariant {
StoreInvariant::StoreError(error)
}
}
impl StoreDriver {
/// Provides read-only access to the storage.
pub fn storage(&self) -> &BufferStorage {
@@ -249,6 +259,10 @@ impl StoreDriverOff {
}
/// Powers on the store without interruption.
///
/// # Panics
///
/// Panics if the store cannot be powered on.
pub fn power_on(self) -> Result<StoreDriverOn, StoreInvariant> {
Ok(self
.partial_power_on(StoreInterruption::none())
@@ -301,31 +315,15 @@ impl StoreDriverOff {
})
}
/// Returns a mapping from delay time to number of modified bits.
/// Returns the number of storage operations needed to power on the store.
///
/// For example if the `i`-th value is `n`, it means that the `i`-th operation modifies `n` bits
/// in the storage. For convenience, the vector always ends with `0` for one past the last
/// operation. This permits to choose a random index in the vector and then a random set of bit
/// positions among the number of modified bits to simulate any possible corruption (including
/// no corruption with the last index).
pub fn delay_map(&self) -> Result<Vec<usize>, (usize, BufferStorage)> {
let mut result = Vec::new();
loop {
let delay = result.len();
let mut storage = self.storage.clone();
storage.arm_interruption(delay);
match Store::new(storage) {
Err((StoreError::StorageError, x)) => storage = x,
Err((StoreError::InvalidStorage, mut storage)) => {
storage.reset_interruption();
return Err((delay, storage));
}
Ok(_) | Err(_) => break,
}
result.push(count_modified_bits(&mut storage));
}
result.push(0);
Ok(result)
/// Returns `None` if the store cannot power on successfully.
pub fn count_operations(&self) -> Option<usize> {
let initial_delay = usize::MAX;
let mut storage = self.storage.clone();
storage.arm_interruption(initial_delay);
let mut store = Store::new(storage).ok()?;
Some(initial_delay - store.storage_mut().disarm_interruption())
}
}
@@ -412,29 +410,15 @@ impl StoreDriverOn {
})
}
/// Returns a mapping from delay time to number of modified bits.
/// Returns the number of storage operations needed to apply a store operation.
///
/// See the documentation of [`StoreDriverOff::delay_map`] for details.
///
/// [`StoreDriverOff::delay_map`]: struct.StoreDriverOff.html#method.delay_map
pub fn delay_map(
&self,
operation: &StoreOperation,
) -> Result<Vec<usize>, (usize, BufferStorage)> {
let mut result = Vec::new();
loop {
let delay = result.len();
let mut store = self.store.clone();
store.storage_mut().arm_interruption(delay);
match store.apply(operation).1 {
Err(StoreError::StorageError) => (),
Err(StoreError::InvalidStorage) => return Err((delay, store.extract_storage())),
Ok(()) | Err(_) => break,
}
result.push(count_modified_bits(store.storage_mut()));
}
result.push(0);
Ok(result)
/// Returns `None` if the store cannot apply the operation successfully.
pub fn count_operations(&self, operation: &StoreOperation) -> Option<usize> {
let initial_delay = usize::MAX;
let mut store = self.store.clone();
store.storage_mut().arm_interruption(initial_delay);
store.apply(operation).1.ok()?;
Some(initial_delay - store.storage_mut().disarm_interruption())
}
/// Powers off the store.
@@ -506,8 +490,8 @@ impl StoreDriverOn {
/// Checks that the store and model are in sync.
fn check_model(&self) -> Result<(), StoreInvariant> {
let mut model_content = self.model.content().clone();
for handle in self.store.iter().unwrap() {
let handle = handle.unwrap();
for handle in self.store.iter()? {
let handle = handle?;
let model_value = match model_content.remove(&handle.get_key()) {
None => {
return Err(StoreInvariant::OnlyInStore {
@@ -516,7 +500,7 @@ impl StoreDriverOn {
}
Some(x) => x,
};
let store_value = handle.get_value(&self.store).unwrap().into_boxed_slice();
let store_value = handle.get_value(&self.store)?.into_boxed_slice();
if store_value != model_value {
return Err(StoreInvariant::DifferentValue {
key: handle.get_key(),
@@ -528,7 +512,7 @@ impl StoreDriverOn {
if let Some(&key) = model_content.keys().next() {
return Err(StoreInvariant::OnlyInModel { key });
}
let store_capacity = self.store.capacity().unwrap().remaining();
let store_capacity = self.store.capacity()?.remaining();
let model_capacity = self.model.capacity().remaining();
if store_capacity != model_capacity {
return Err(StoreInvariant::DifferentCapacity {
@@ -544,8 +528,8 @@ impl StoreDriverOn {
let format = self.model.format();
let storage = self.store.storage();
let num_words = format.page_size() / format.word_size();
let head = self.store.head().unwrap();
let tail = self.store.tail().unwrap();
let head = self.store.head()?;
let tail = self.store.tail()?;
for page in 0..format.num_pages() {
// Check the erase cycle of the page.
let store_erase = head.cycle(format) + (page < head.page(format)) as Nat;
@@ -619,22 +603,3 @@ impl<'a> StoreInterruption<'a> {
}
}
}
/// Counts the number of bits modified by an interrupted operation.
///
/// # Panics
///
/// Panics if an interruption did not trigger.
fn count_modified_bits(storage: &mut BufferStorage) -> usize {
let mut modified_bits = 0;
storage.corrupt_operation(Box::new(|before, after| {
modified_bits = before
.iter()
.zip(after.iter())
.map(|(x, y)| (x ^ y).count_ones() as usize)
.sum();
}));
// We should never write the same slice or erase an erased page.
assert!(modified_bits > 0);
modified_bits
}

View File

@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//! Storage representation of a store.
#[macro_use]
mod bitfield;
@@ -20,18 +22,20 @@ use self::bitfield::Length;
use self::bitfield::{count_zeros, num_bits, Bit, Checksum, ConstField, Field};
use crate::{usize_to_nat, Nat, Storage, StorageIndex, StoreError, StoreResult, StoreUpdate};
use alloc::vec::Vec;
use core::borrow::Borrow;
use core::cmp::min;
use core::convert::TryFrom;
/// Internal representation of a word in flash.
///
/// Currently, the store only supports storages where a word is 32 bits.
/// Currently, the store only supports storages where a word is 32 bits, i.e. the [word
/// size](Storage::word_size) is 4 bytes.
type WORD = u32;
/// Abstract representation of a word in flash.
///
/// This type is kept abstract to avoid possible confusion with `Nat` if they happen to have the
/// same representation. This is because they have different semantics, `Nat` represents natural
/// This type is kept abstract to avoid possible confusion with [`Nat`] if they happen to have the
/// same representation. This is because they have different semantics, [`Nat`] represents natural
/// numbers while `Word` represents sequences of bits (and thus has no arithmetic).
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct Word(WORD);
@@ -46,7 +50,7 @@ impl Word {
///
/// # Panics
///
/// Panics if `slice.len() != WORD_SIZE`.
/// Panics if `slice.len()` is not [`WORD_SIZE`] bytes.
pub fn from_slice(slice: &[u8]) -> Word {
Word(WORD::from_le_bytes(<WordSlice>::try_from(slice).unwrap()))
}
@@ -59,47 +63,49 @@ impl Word {
/// Size of a word in bytes.
///
/// Currently, the store only supports storages where a word is 4 bytes.
/// Currently, the store only supports storages where the [word size](Storage::word_size) is 4
/// bytes.
const WORD_SIZE: Nat = core::mem::size_of::<WORD>() as Nat;
/// Minimum number of words per page.
///
/// Currently, the store only supports storages where pages have at least 8 words.
const MIN_NUM_WORDS_PER_PAGE: Nat = 8;
/// Currently, the store only supports storages where pages have at least 8 [words](WORD_SIZE), i.e.
/// the [page size](Storage::page_size) is at least 32 bytes.
const MIN_PAGE_SIZE: Nat = 8;
/// Maximum size of a page in bytes.
///
/// Currently, the store only supports storages where pages are between 8 and 1024 [words].
///
/// [words]: constant.WORD_SIZE.html
/// Currently, the store only supports storages where pages have at most 1024 [words](WORD_SIZE),
/// i.e. the [page size](Storage::page_size) is at most 4096 bytes.
const MAX_PAGE_SIZE: Nat = 4096;
/// Maximum number of erase cycles.
///
/// Currently, the store only supports storages where the maximum number of erase cycles fits on 16
/// bits.
/// Currently, the store only supports storages where the [maximum number of erase
/// cycles](Storage::max_page_erases) fits in 16 bits, i.e. it is at most 65535.
const MAX_ERASE_CYCLE: Nat = 65535;
/// Minimum number of pages.
///
/// Currently, the store only supports storages with at least 3 pages.
/// Currently, the store only supports storages where the [number of pages](Storage::num_pages) is
/// at least 3.
const MIN_NUM_PAGES: Nat = 3;
/// Maximum page index.
///
/// Thus the maximum number of pages is one more than this number. Currently, the store only
/// supports storages where the number of pages is between 3 and 64.
/// Currently, the store only supports storages where the [number of pages](Storage::num_pages) is
/// at most 64, i.e. the maximum page index is 63.
const MAX_PAGE_INDEX: Nat = 63;
/// Maximum key index.
///
/// Thus the number of keys is one more than this number. Currently, the store only supports 4096
/// keys.
/// Currently, the store only supports 4096 keys, i.e. the maximum key index is 4095.
const MAX_KEY_INDEX: Nat = 4095;
/// Maximum length in bytes of a user payload.
///
/// Currently, the store only supports values smaller than 1024 bytes.
/// Currently, the store only supports values at most 1023 bytes long. This may be further reduced
/// depending on the [page size](Storage::page_size), see [`Format::max_value_len`].
const MAX_VALUE_LEN: Nat = 1023;
/// Maximum number of updates per transaction.
@@ -108,9 +114,15 @@ const MAX_VALUE_LEN: Nat = 1023;
const MAX_UPDATES: Nat = 31;
/// Maximum number of words per virtual page.
const MAX_VIRT_PAGE_SIZE: Nat = div_ceil(MAX_PAGE_SIZE, WORD_SIZE) - CONTENT_WORD;
///
/// A virtual page has [`CONTENT_WORD`] fewer [words](WORD_SIZE) than the storage [page
/// size](Storage::page_size). Those words are used to store the page header. Since a page has at
/// least [8](MIN_PAGE_SIZE) words, a virtual page has at least 6 words.
const MAX_VIRT_PAGE_SIZE: Nat = MAX_PAGE_SIZE / WORD_SIZE - CONTENT_WORD;
/// Word with all bits set to one.
///
/// After a page is erased, all words are equal to this value.
const ERASED_WORD: Word = Word(!(0 as WORD));
/// Helpers for a given storage configuration.
@@ -120,33 +132,31 @@ pub struct Format {
///
/// # Invariant
///
/// - Words divide a page evenly.
/// - There are at least 8 words in a page.
/// - There are at most `MAX_PAGE_SIZE` bytes in a page.
/// - [Words](WORD_SIZE) divide a page evenly.
/// - There are at least [`MIN_PAGE_SIZE`] words in a page.
/// - There are at most [`MAX_PAGE_SIZE`] bytes in a page.
page_size: Nat,
/// The number of pages in the storage.
///
/// # Invariant
///
/// - There are at least 3 pages.
/// - There are at most `MAX_PAGE_INDEX + 1` pages.
/// - There are at least [`MIN_NUM_PAGES`] pages.
/// - There are at most [`MAX_PAGE_INDEX`] + 1 pages.
num_pages: Nat,
/// The maximum number of times a page can be erased.
///
/// # Invariant
///
/// - A page can be erased at most `MAX_ERASE_CYCLE` times.
/// - A page can be erased at most [`MAX_ERASE_CYCLE`] times.
max_page_erases: Nat,
}
impl Format {
/// Extracts the format from a storage.
///
/// Returns `None` if the storage is not [supported].
///
/// [supported]: struct.Format.html#method.is_storage_supported
/// Returns `None` if the storage is not [supported](Format::is_storage_supported).
pub fn new<S: Storage>(storage: &S) -> Option<Format> {
if Format::is_storage_supported(storage) {
Some(Format {
@@ -162,21 +172,12 @@ impl Format {
/// Returns whether a storage is supported.
///
/// A storage is supported if the following conditions hold:
/// - The size of a word is [`WORD_SIZE`] bytes.
/// - The size of a word evenly divides the size of a page.
/// - A page contains at least [`MIN_NUM_WORDS_PER_PAGE`] words.
/// - A page contains at most [`MAX_PAGE_SIZE`] bytes.
/// - There are at least [`MIN_NUM_PAGES`] pages.
/// - There are at most [`MAX_PAGE_INDEX`]` + 1` pages.
/// - A word can be written at least twice between erase cycles.
/// - The maximum number of erase cycles is at most [`MAX_ERASE_CYCLE`].
///
/// [`WORD_SIZE`]: constant.WORD_SIZE.html
/// [`MIN_NUM_WORDS_PER_PAGE`]: constant.MIN_NUM_WORDS_PER_PAGE.html
/// [`MAX_PAGE_SIZE`]: constant.MAX_PAGE_SIZE.html
/// [`MIN_NUM_PAGES`]: constant.MIN_NUM_PAGES.html
/// [`MAX_PAGE_INDEX`]: constant.MAX_PAGE_INDEX.html
/// [`MAX_ERASE_CYCLE`]: constant.MAX_ERASE_CYCLE.html
/// - The [`Storage::word_size`] is [`WORD_SIZE`] bytes.
/// - The [`Storage::word_size`] evenly divides the [`Storage::page_size`].
/// - The [`Storage::page_size`] is between [`MIN_PAGE_SIZE`] words and [`MAX_PAGE_SIZE`] bytes.
/// - The [`Storage::num_pages`] is between [`MIN_NUM_PAGES`] and [`MAX_PAGE_INDEX`] + 1.
/// - The [`Storage::max_word_writes`] is at least 2.
/// - The [`Storage::max_page_erases`] is at most [`MAX_ERASE_CYCLE`].
fn is_storage_supported<S: Storage>(storage: &S) -> bool {
let word_size = usize_to_nat(storage.word_size());
let page_size = usize_to_nat(storage.page_size());
@@ -185,7 +186,7 @@ impl Format {
let max_page_erases = usize_to_nat(storage.max_page_erases());
word_size == WORD_SIZE
&& page_size % word_size == 0
&& (MIN_NUM_WORDS_PER_PAGE * word_size <= page_size && page_size <= MAX_PAGE_SIZE)
&& (MIN_PAGE_SIZE * word_size <= page_size && page_size <= MAX_PAGE_SIZE)
&& (MIN_NUM_PAGES <= num_pages && num_pages <= MAX_PAGE_INDEX + 1)
&& max_word_writes >= 2
&& max_page_erases <= MAX_ERASE_CYCLE
@@ -198,28 +199,28 @@ impl Format {
/// The size of a page in bytes.
///
/// We have `MIN_NUM_WORDS_PER_PAGE * self.word_size() <= self.page_size() <= MAX_PAGE_SIZE`.
/// This is at least [`MIN_PAGE_SIZE`] [words](WORD_SIZE) and at most [`MAX_PAGE_SIZE`] bytes.
pub fn page_size(&self) -> Nat {
self.page_size
}
/// The number of pages in the storage, denoted by `N`.
/// The number of pages in the storage, denoted by N.
///
/// We have `MIN_NUM_PAGES <= N <= MAX_PAGE_INDEX + 1`.
/// We have [`MIN_NUM_PAGES`] ≤ N ≤ [`MAX_PAGE_INDEX`] + 1.
pub fn num_pages(&self) -> Nat {
self.num_pages
}
/// The maximum page index.
///
/// We have `2 <= self.max_page() <= MAX_PAGE_INDEX`.
/// This is at least [`MIN_NUM_PAGES`] - 1 and at most [`MAX_PAGE_INDEX`].
pub fn max_page(&self) -> Nat {
self.num_pages - 1
}
/// The maximum number of times a page can be erased, denoted by `E`.
/// The maximum number of times a page can be erased, denoted by E.
///
/// We have `E <= MAX_ERASE_CYCLE`.
/// We have E ≤ [`MAX_ERASE_CYCLE`].
pub fn max_page_erases(&self) -> Nat {
self.max_page_erases
}
@@ -234,19 +235,18 @@ impl Format {
MAX_UPDATES
}
/// The size of a virtual page in words, denoted by `Q`.
/// The size of a virtual page in words, denoted by Q.
///
/// A virtual page is stored in a physical page after the page header.
///
/// We have `MIN_NUM_WORDS_PER_PAGE - 2 <= Q <= MAX_VIRT_PAGE_SIZE`.
/// We have [`MIN_PAGE_SIZE`] - 2 ≤ Q ≤ [`MAX_VIRT_PAGE_SIZE`].
pub fn virt_page_size(&self) -> Nat {
self.page_size() / self.word_size() - CONTENT_WORD
}
/// The maximum length in bytes of a user payload.
///
/// We have `(MIN_NUM_WORDS_PER_PAGE - 3) * self.word_size() <= self.max_value_len() <=
/// MAX_VALUE_LEN`.
/// This is at least [`MIN_PAGE_SIZE`] - 3 [words](WORD_SIZE) and at most [`MAX_VALUE_LEN`].
pub fn max_value_len(&self) -> Nat {
min(
(self.virt_page_size() - 1) * self.word_size(),
@@ -254,57 +254,50 @@ impl Format {
)
}
/// The maximum prefix length in words, denoted by `M`.
/// The maximum prefix length in words, denoted by M.
///
/// A prefix is the first words of a virtual page that belong to the last entry of the previous
/// virtual page. This happens because entries may overlap up to 2 virtual pages.
///
/// We have `MIN_NUM_WORDS_PER_PAGE - 3 <= M < Q`.
/// We have [`MIN_PAGE_SIZE`] - 3 ≤ M < Q.
pub fn max_prefix_len(&self) -> Nat {
self.bytes_to_words(self.max_value_len())
}
/// The total virtual capacity in words, denoted by `V`.
/// The total virtual capacity in words, denoted by V.
///
/// We have `V = (N - 1) * (Q - 1) - M`.
/// We have V = (N - 1) × (Q - 1) - M.
///
/// We can show `V >= (N - 2) * (Q - 1)` with the following steps:
/// - `M <= Q - 1` from `M < Q` from [`M`] definition
/// - `-M >= -(Q - 1)` from above
/// - `V >= (N - 1) * (Q - 1) - (Q - 1)` from `V` definition
///
/// [`M`]: struct.Format.html#method.max_prefix_len
/// We can show V ≥ (N - 2) × (Q - 1) with the following steps:
/// - M ≤ Q - 1 from M < Q from [M](Format::max_prefix_len)'s definition
/// - -M ≥ -(Q - 1) from above
/// - V ≥ (N - 1) × (Q - 1) - (Q - 1) from V's definition
pub fn virt_size(&self) -> Nat {
(self.num_pages() - 1) * (self.virt_page_size() - 1) - self.max_prefix_len()
}
/// The total user capacity in words, denoted by `C`.
/// The total user capacity in words, denoted by C.
///
/// We have `C = V - N = (N - 1) * (Q - 2) - M - 1`.
/// We have C = V - N = (N - 1) × (Q - 2) - M - 1.
///
/// We can show `C >= (N - 2) * (Q - 2) - 2` with the following steps:
/// - `V >= (N - 2) * (Q - 1)` from [`V`] definition
/// - `C >= (N - 2) * (Q - 1) - N` from `C` definition
/// - `(N - 2) * (Q - 1) - N = (N - 2) * (Q - 2) - 2` by calculus
///
/// [`V`]: struct.Format.html#method.virt_size
/// We can show C ≥ (N - 2) × (Q - 2) - 2 with the following steps:
/// - V ≥ (N - 2) × (Q - 1) from [V](Format::virt_size)'s definition
/// - C ≥ (N - 2) × (Q - 1) - N from C's definition
/// - (N - 2) × (Q - 1) - N = (N - 2) × (Q - 2) - 2 by calculus
pub fn total_capacity(&self) -> Nat {
// From the virtual capacity, we reserve N - 1 words for `Erase` entries and 1 word for a
// `Clear` entry.
self.virt_size() - self.num_pages()
}
/// The total virtual lifetime in words, denoted by `L`.
/// The total virtual lifetime in words, denoted by L.
///
/// We have `L = (E * N + N - 1) * Q`.
/// We have L = (E × N + N - 1) × Q.
pub fn total_lifetime(&self) -> Position {
Position::new(self, self.max_page_erases(), self.num_pages() - 1, 0)
}
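A worked instance of the three formulas above, with illustrative parameters (not derived from a real storage):

```rust
// Assume N = 3 pages, Q = 6 words per virtual page, M = 5 words of maximum
// prefix, and E = 2 erase cycles.
let (n, q, m, e) = (3u32, 6u32, 5u32, 2u32);
let v = (n - 1) * (q - 1) - m; // total virtual capacity V
let c = v - n; // total user capacity C
let l = (e * n + n - 1) * q; // total virtual lifetime L
assert_eq!((v, c, l), (5, 2, 48));
```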
/// Returns the word position of the first entry of a page.
///
/// The init info of the page must be provided to know where the first entry of the page
/// starts.
pub fn page_head(&self, init: InitInfo, page: Nat) -> Position {
Position::new(self, init.cycle, page, init.prefix)
}
@@ -335,12 +328,12 @@ impl Format {
}
/// Builds the storage representation of an init info.
pub fn build_init(&self, init: InitInfo) -> WordSlice {
pub fn build_init(&self, init: InitInfo) -> StoreResult<WordSlice> {
let mut word = ERASED_WORD;
INIT_CYCLE.set(&mut word, init.cycle);
INIT_PREFIX.set(&mut word, init.prefix);
WORD_CHECKSUM.set(&mut word, 0);
word.as_slice()
INIT_CYCLE.set(&mut word, init.cycle)?;
INIT_PREFIX.set(&mut word, init.prefix)?;
WORD_CHECKSUM.set(&mut word, 0)?;
Ok(word.as_slice())
}
/// Returns the storage index of the compact info of a page.
@@ -368,36 +361,36 @@ impl Format {
}
/// Builds the storage representation of a compact info.
pub fn build_compact(&self, compact: CompactInfo) -> WordSlice {
pub fn build_compact(&self, compact: CompactInfo) -> StoreResult<WordSlice> {
let mut word = ERASED_WORD;
COMPACT_TAIL.set(&mut word, compact.tail);
WORD_CHECKSUM.set(&mut word, 0);
word.as_slice()
COMPACT_TAIL.set(&mut word, compact.tail)?;
WORD_CHECKSUM.set(&mut word, 0)?;
Ok(word.as_slice())
}
/// Builds the storage representation of an internal entry.
pub fn build_internal(&self, internal: InternalEntry) -> WordSlice {
pub fn build_internal(&self, internal: InternalEntry) -> StoreResult<WordSlice> {
let mut word = ERASED_WORD;
match internal {
InternalEntry::Erase { page } => {
ID_ERASE.set(&mut word);
ERASE_PAGE.set(&mut word, page);
ID_ERASE.set(&mut word)?;
ERASE_PAGE.set(&mut word, page)?;
}
InternalEntry::Clear { min_key } => {
ID_CLEAR.set(&mut word);
CLEAR_MIN_KEY.set(&mut word, min_key);
ID_CLEAR.set(&mut word)?;
CLEAR_MIN_KEY.set(&mut word, min_key)?;
}
InternalEntry::Marker { count } => {
ID_MARKER.set(&mut word);
MARKER_COUNT.set(&mut word, count);
ID_MARKER.set(&mut word)?;
MARKER_COUNT.set(&mut word, count)?;
}
InternalEntry::Remove { key } => {
ID_REMOVE.set(&mut word);
REMOVE_KEY.set(&mut word, key);
ID_REMOVE.set(&mut word)?;
REMOVE_KEY.set(&mut word, key)?;
}
}
WORD_CHECKSUM.set(&mut word, 0);
word.as_slice()
WORD_CHECKSUM.set(&mut word, 0)?;
Ok(word.as_slice())
}
/// Parses the first word of an entry from its storage representation.
@@ -459,31 +452,31 @@ impl Format {
}
/// Builds the storage representation of a user entry.
pub fn build_user(&self, key: Nat, value: &[u8]) -> Vec<u8> {
pub fn build_user(&self, key: Nat, value: &[u8]) -> StoreResult<Vec<u8>> {
let length = usize_to_nat(value.len());
let word_size = self.word_size();
let footer = self.bytes_to_words(length);
let mut result = vec![0xff; ((1 + footer) * word_size) as usize];
result[word_size as usize..][..length as usize].copy_from_slice(value);
let mut word = ERASED_WORD;
ID_HEADER.set(&mut word);
ID_HEADER.set(&mut word)?;
if footer > 0 && is_erased(&result[(footer * word_size) as usize..]) {
HEADER_FLIPPED.set(&mut word);
*result.last_mut().unwrap() = 0x7f;
}
HEADER_LENGTH.set(&mut word, length);
HEADER_KEY.set(&mut word, key);
HEADER_LENGTH.set(&mut word, length)?;
HEADER_KEY.set(&mut word, key)?;
HEADER_CHECKSUM.set(
&mut word,
count_zeros(&result[(footer * word_size) as usize..]),
);
)?;
result[..word_size as usize].copy_from_slice(&word.as_slice());
result
Ok(result)
}
/// Sets the padding bit in the first word of a user entry.
pub fn set_padding(&self, word: &mut Word) {
ID_PADDING.set(word);
pub fn set_padding(&self, word: &mut Word) -> StoreResult<()> {
ID_PADDING.set(word)
}
/// Sets the deleted bit in the first word of a user entry.
@@ -492,13 +485,16 @@ impl Format {
}
/// Returns the capacity required by a transaction.
pub fn transaction_capacity(&self, updates: &[StoreUpdate]) -> Nat {
pub fn transaction_capacity<ByteSlice: Borrow<[u8]>>(
&self,
updates: &[StoreUpdate<ByteSlice>],
) -> Nat {
match updates.len() {
// An empty transaction doesn't consume anything.
0 => 0,
// Transactions with a single update are optimized by avoiding a marker entry.
1 => match &updates[0] {
StoreUpdate::Insert { value, .. } => self.entry_size(value),
StoreUpdate::Insert { value, .. } => self.entry_size(value.borrow()),
// Transactions with a single update which is a removal don't consume anything.
StoreUpdate::Remove { .. } => 0,
},
@@ -508,9 +504,9 @@ impl Format {
}
/// Returns the capacity of an update.
fn update_capacity(&self, update: &StoreUpdate) -> Nat {
fn update_capacity<ByteSlice: Borrow<[u8]>>(&self, update: &StoreUpdate<ByteSlice>) -> Nat {
match update {
StoreUpdate::Insert { value, .. } => self.entry_size(value),
StoreUpdate::Insert { value, .. } => self.entry_size(value.borrow()),
StoreUpdate::Remove { .. } => 1,
}
}
@@ -523,7 +519,10 @@ impl Format {
/// Checks if a transaction is valid and returns its sorted keys.
///
/// Returns `None` if the transaction is invalid.
pub fn transaction_valid(&self, updates: &[StoreUpdate]) -> Option<Vec<Nat>> {
pub fn transaction_valid<ByteSlice: Borrow<[u8]>>(
&self,
updates: &[StoreUpdate<ByteSlice>],
) -> Option<Vec<Nat>> {
if usize_to_nat(updates.len()) > self.max_updates() {
return None;
}
@@ -550,7 +549,7 @@ impl Format {
///
/// # Preconditions
///
/// - `bytes + self.word_size()` does not overflow.
/// - `bytes` + [`Self::word_size`] does not overflow.
pub fn bytes_to_words(&self, bytes: Nat) -> Nat {
div_ceil(bytes, self.word_size())
}
@@ -564,7 +563,7 @@ const COMPACT_WORD: Nat = 1;
/// The word index of the content of a page.
///
/// Since a page is at least 8 words, there is always at least 6 words of content.
/// This is also the length in words of the page header.
const CONTENT_WORD: Nat = 2;
/// The checksum for a single word.
@@ -711,21 +710,21 @@ bitfield! {
/// The position of a word in the virtual storage.
///
/// With the notations defined in `Format`, let:
/// - `w` a virtual word offset in a page which is between `0` and `Q - 1`
/// - `p` a page offset which is between `0` and `N - 1`
/// - `c` the number of erase cycles of a page which is between `0` and `E`
/// With the notations defined in [`Format`], let:
/// - w denote a word offset in a virtual page, thus between 0 and Q - 1
/// - p denote a page offset, thus between 0 and N - 1
/// - c denote the number of times a page was erased, thus between 0 and E
///
/// Then the position of a word is `(c*N + p)*Q + w`. This position monotonically increases and
/// The position of a word is (c × N + p) × Q + w. This position monotonically increases and
/// represents the consumed lifetime of the storage.
///
/// This type is kept abstract to avoid possible confusion with `Nat` and `Word` if they happen to
/// have the same representation. Here is an overview of their semantics:
/// This type is kept abstract to avoid possible confusion with [`Nat`] and [`Word`] if they happen
/// to have the same representation. Here is an overview of their semantics:
///
/// | Name | Semantics | Arithmetic operations | Bit-wise operations |
/// | ---------- | --------------------------- | --------------------- | ------------------- |
/// | `Nat` | Natural numbers | Yes (no overflow) | No |
/// | `Word` | Word in flash | No | Yes |
/// | [`Nat`] | Natural numbers | Yes (no overflow) | No |
/// | [`Word`] | Word in flash | No | Yes |
/// | `Position` | Position in virtual storage | Yes (no overflow) | No |
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct Position(Nat);
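A small worked instance of this formula (the parameters are illustrative):

```rust
// With N = 3 pages and Q = 6 words per virtual page, the word w = 2 of page
// p = 1 during erase cycle c = 4 has position (c * N + p) * Q + w.
assert_eq!((4 * 3 + 1) * 6 + 2, 80);
```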
@@ -756,9 +755,9 @@ impl Position {
/// Create a word position given its coordinates.
///
/// The coordinates of a word are:
/// - Its word index in its page.
/// - Its word index in its virtual page.
/// - Its page index in the storage.
/// - The number of times that page was erased.
/// - The number of times its page was erased.
pub fn new(format: &Format, cycle: Nat, page: Nat, word: Nat) -> Position {
Position((cycle * format.num_pages() + page) * format.virt_page_size() + word)
}
@@ -921,11 +920,11 @@ pub fn is_erased(slice: &[u8]) -> bool {
/// Divides then takes ceiling.
///
/// Returns `ceil(x / m)` in mathematical notations (not Rust code).
/// Returns ⌈x / m⌉, i.e. the lowest natural number r such that r ≥ x / m.
///
/// # Preconditions
///
/// - `x + m` does not overflow.
/// - x + m does not overflow.
const fn div_ceil(x: Nat, m: Nat) -> Nat {
(x + m - 1) / m
}
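For instance:

```rust
// div_ceil rounds the quotient up, while integer division rounds it down.
assert_eq!(div_ceil(7, 4), 2);
assert_eq!(div_ceil(8, 4), 2);
assert_eq!(div_ceil(9, 4), 3);
```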
@@ -1077,4 +1076,15 @@ mod tests {
0xff800000
);
}
#[test]
fn position_offsets_fit_in_a_halfword() {
// The store keeps in RAM the entry positions as their offsets from the head. Those offsets
// are represented as u16. The bound below is a large over-approximation of the maximal
// offset. We first make sure it fits in a u16.
const MAX_POS: Nat = (MAX_PAGE_INDEX + 1) * MAX_VIRT_PAGE_SIZE;
assert!(MAX_POS <= u16::MAX as Nat);
// We also check the actual value for up-to-date documentation, since it's a constant.
assert_eq!(MAX_POS, 0xff80);
}
}

View File

@@ -42,15 +42,20 @@ impl Field {
/// Sets the value of a bit field.
///
/// # Preconditions
/// # Errors
///
/// - The value must fit in the bit field: `num_bits(value) < self.len`.
/// - The value must only change bits from 1 to 0: `self.get(*word) & value == value`.
pub fn set(&self, word: &mut Word, value: Nat) {
debug_assert_eq!(value & self.mask(), value);
pub fn set(&self, word: &mut Word, value: Nat) -> StoreResult<()> {
if value & self.mask() != value {
return Err(StoreError::InvalidStorage);
}
let mask = !(self.mask() << self.pos);
word.0 &= mask | (value << self.pos);
debug_assert_eq!(self.get(*word), value);
if self.get(*word) != value {
return Err(StoreError::InvalidStorage);
}
Ok(())
}
/// Returns a bit mask the length of the bit field.
@@ -82,8 +87,8 @@ impl ConstField {
}
/// Sets the bit field to its value.
pub fn set(&self, word: &mut Word) {
self.field.set(word, self.value);
pub fn set(&self, word: &mut Word) -> StoreResult<()> {
self.field.set(word, self.value)
}
}
@@ -135,15 +140,15 @@ impl Checksum {
/// Sets the checksum to the external increment value.
///
/// # Preconditions
/// # Errors
///
/// - The bits of the checksum bit field should be set to one: `self.field.get(*word) ==
/// self.field.mask()`.
/// - The checksum value should fit in the checksum bit field: `num_bits(word.count_zeros() +
/// value) < self.field.len`.
pub fn set(&self, word: &mut Word, value: Nat) {
pub fn set(&self, word: &mut Word, value: Nat) -> StoreResult<()> {
debug_assert_eq!(self.field.get(*word), self.field.mask());
self.field.set(word, word.0.count_zeros() + value);
self.field.set(word, word.0.count_zeros() + value)
}
}
@@ -290,7 +295,7 @@ mod tests {
assert_eq!(field.get(Word(0x000000f8)), 0x1f);
assert_eq!(field.get(Word(0x0000ff37)), 6);
let mut word = Word(0xffffffff);
field.set(&mut word, 3);
field.set(&mut word, 3).unwrap();
assert_eq!(word, Word(0xffffff1f));
}
@@ -305,7 +310,7 @@ mod tests {
assert!(field.check(Word(0x00000048)));
assert!(field.check(Word(0x0000ff4f)));
let mut word = Word(0xffffffff);
field.set(&mut word);
field.set(&mut word).unwrap();
assert_eq!(word, Word(0xffffff4f));
}
@@ -333,7 +338,7 @@ mod tests {
assert_eq!(field.get(Word(0x00ffff67)), Ok(4));
assert_eq!(field.get(Word(0x7fffff07)), Err(StoreError::InvalidStorage));
let mut word = Word(0x0fffffff);
field.set(&mut word, 4);
field.set(&mut word, 4).unwrap();
assert_eq!(word, Word(0x0fffff47));
}

View File

@@ -0,0 +1,345 @@
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Support for fragmented entries.
//!
//! This module makes it possible to handle entries larger than the [maximum value
//! length](Store::max_value_length) by storing ordered consecutive fragments in a sequence of keys.
//! The first keys hold fragments of maximal length, followed by a possibly partial fragment. The
//! remaining keys are not used.
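//!
//! A minimal usage sketch (the public module path and the storage parameters are illustrative
//! assumptions; `BufferStorage` requires the `std` feature):
//!
//! ```rust,ignore
//! use persistent_store::{fragment, BufferOptions, BufferStorage, Store};
//!
//! let options = BufferOptions {
//!     word_size: 4,
//!     page_size: 128,
//!     max_word_writes: 2,
//!     max_page_erases: 9,
//!     strict_mode: true,
//! };
//! let storage = BufferStorage::new(vec![0xff; 5 * 128].into_boxed_slice(), options);
//! let mut store = Store::new(storage).ok().unwrap();
//!
//! // A 200-byte value does not fit in a single entry, so it is split over the
//! // keys 0..4 and transparently reassembled when read.
//! let value: Vec<u8> = (0..200).map(|i| i as u8).collect();
//! fragment::write(&mut store, &(0..4), &value).unwrap();
//! assert_eq!(fragment::read(&store, &(0..4)).unwrap(), Some(value));
//! ```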
use crate::{Storage, Store, StoreError, StoreHandle, StoreResult, StoreUpdate};
use alloc::vec::Vec;
use core::ops::Range;
/// Represents a sequence of keys.
#[allow(clippy::len_without_is_empty)]
pub trait Keys {
/// Returns the number of keys.
fn len(&self) -> usize;
/// Returns the position of a key in the sequence.
fn pos(&self, key: usize) -> Option<usize>;
/// Returns the key of a position in the sequence.
///
/// # Preconditions
///
/// The position must be within the length: `pos` < [`Self::len`].
fn key(&self, pos: usize) -> usize;
}
impl Keys for Range<usize> {
fn len(&self) -> usize {
self.end - self.start
}
fn pos(&self, key: usize) -> Option<usize> {
if self.start <= key && key < self.end {
Some(key - self.start)
} else {
None
}
}
fn key(&self, pos: usize) -> usize {
debug_assert!(pos < Keys::len(self));
self.start + pos
}
}
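Other layouts can implement [`Keys`] as well. A hypothetical sketch over an explicit, duplicate-free key list (not part of this module):

```rust
// Hypothetical `Keys` implementation over an explicit list of keys.
struct KeyList<'a>(&'a [usize]);

impl Keys for KeyList<'_> {
    fn len(&self) -> usize {
        self.0.len()
    }
    fn pos(&self, key: usize) -> Option<usize> {
        self.0.iter().position(|&k| k == key)
    }
    fn key(&self, pos: usize) -> usize {
        debug_assert!(pos < Keys::len(self));
        self.0[pos]
    }
}
```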
/// Reads the concatenated value of a sequence of keys.
pub fn read(store: &Store<impl Storage>, keys: &impl Keys) -> StoreResult<Option<Vec<u8>>> {
let handles = get_handles(store, keys)?;
if handles.is_empty() {
return Ok(None);
}
let mut result = Vec::with_capacity(handles.len() * store.max_value_length());
for handle in handles {
result.extend(handle.get_value(store)?);
}
Ok(Some(result))
}
/// Reads a range from the concatenated value of a sequence of keys.
///
/// This is equivalent to calling [`read`] and then taking the range, except that:
/// - Only the needed chunks are read.
/// - The range is truncated to fit in the value.
pub fn read_range(
store: &Store<impl Storage>,
keys: &impl Keys,
range: Range<usize>,
) -> StoreResult<Option<Vec<u8>>> {
let range_len = match range.end.checked_sub(range.start) {
None => return Err(StoreError::InvalidArgument),
Some(x) => x,
};
let handles = get_handles(store, keys)?;
if handles.is_empty() {
return Ok(None);
}
let mut result = Vec::with_capacity(range_len);
let mut offset = 0;
for handle in handles {
let start = range.start.saturating_sub(offset);
let length = handle.get_length(store)?;
let end = core::cmp::min(range.end.saturating_sub(offset), length);
offset += length;
if start < end {
result.extend(&handle.get_value(store)?[start..end]);
}
}
Ok(Some(result))
}
/// Writes a value to a sequence of keys as chunks.
pub fn write(store: &mut Store<impl Storage>, keys: &impl Keys, value: &[u8]) -> StoreResult<()> {
let handles = get_handles(store, keys)?;
let keys_len = keys.len();
let mut updates = Vec::with_capacity(keys_len);
let mut chunks = value.chunks(store.max_value_length());
for pos in 0..keys_len {
let key = keys.key(pos);
match (handles.get(pos), chunks.next()) {
// No existing handle and no new chunk: nothing to do.
(None, None) => (),
// Existing handle and no new chunk: remove old handle.
(Some(_), None) => updates.push(StoreUpdate::Remove { key }),
// Existing handle with same value as new chunk: nothing to do.
(Some(handle), Some(value)) if handle.get_value(store)? == value => (),
// New chunk: Write (or overwrite) the new value.
(_, Some(value)) => updates.push(StoreUpdate::Insert { key, value }),
}
}
if chunks.next().is_some() {
// The value is too long.
return Err(StoreError::InvalidArgument);
}
store.transaction(&updates)
}
/// Deletes the value of a sequence of keys.
pub fn delete(store: &mut Store<impl Storage>, keys: &impl Keys) -> StoreResult<()> {
let updates: Vec<StoreUpdate<Vec<u8>>> = get_handles(store, keys)?
.iter()
.map(|handle| StoreUpdate::Remove {
key: handle.get_key(),
})
.collect();
store.transaction(&updates)
}
/// Returns the handles of a sequence of keys.
///
/// The handles are truncated to the keys that are present.
fn get_handles(store: &Store<impl Storage>, keys: &impl Keys) -> StoreResult<Vec<StoreHandle>> {
let keys_len = keys.len();
let mut handles: Vec<Option<StoreHandle>> = vec![None; keys_len as usize];
for handle in store.iter()? {
let handle = handle?;
let pos = match keys.pos(handle.get_key()) {
Some(pos) => pos,
None => continue,
};
if pos >= keys_len {
return Err(StoreError::InvalidArgument);
}
if let Some(old_handle) = &handles[pos] {
if old_handle.get_key() != handle.get_key() {
// The user provided a non-injective `pos` function.
return Err(StoreError::InvalidArgument);
} else {
return Err(StoreError::InvalidStorage);
}
}
handles[pos] = Some(handle);
}
let num_handles = handles.iter().filter(|x| x.is_some()).count();
let mut result = Vec::with_capacity(num_handles);
for (i, handle) in handles.into_iter().enumerate() {
match (i < num_handles, handle) {
(true, Some(handle)) => result.push(handle),
(false, None) => (),
// We should have `num_handles` Somes followed by Nones.
_ => return Err(StoreError::InvalidStorage),
}
}
Ok(result)
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test::MINIMAL;
#[test]
fn read_empty_entry() {
let store = MINIMAL.new_store();
assert_eq!(read(&store, &(0..4)), Ok(None));
}
#[test]
fn read_single_chunk() {
let mut store = MINIMAL.new_store();
let value = b"hello".to_vec();
assert_eq!(store.insert(0, &value), Ok(()));
assert_eq!(read(&store, &(0..4)), Ok(Some(value)));
}
#[test]
fn read_multiple_chunks() {
let mut store = MINIMAL.new_store();
let value: Vec<_> = (0..60).collect();
assert_eq!(store.insert(0, &value[..52]), Ok(()));
assert_eq!(store.insert(1, &value[52..]), Ok(()));
assert_eq!(read(&store, &(0..4)), Ok(Some(value)));
}
#[test]
fn read_range_first_chunk() {
let mut store = MINIMAL.new_store();
let value: Vec<_> = (0..60).collect();
assert_eq!(store.insert(0, &value[..52]), Ok(()));
assert_eq!(store.insert(1, &value[52..]), Ok(()));
assert_eq!(
read_range(&store, &(0..4), 0..10),
Ok(Some((0..10).collect()))
);
assert_eq!(
read_range(&store, &(0..4), 10..20),
Ok(Some((10..20).collect()))
);
assert_eq!(
read_range(&store, &(0..4), 40..52),
Ok(Some((40..52).collect()))
);
}
#[test]
fn read_range_second_chunk() {
let mut store = MINIMAL.new_store();
let value: Vec<_> = (0..60).collect();
assert_eq!(store.insert(0, &value[..52]), Ok(()));
assert_eq!(store.insert(1, &value[52..]), Ok(()));
assert_eq!(read_range(&store, &(0..4), 52..53), Ok(Some(vec![52])));
assert_eq!(read_range(&store, &(0..4), 53..54), Ok(Some(vec![53])));
assert_eq!(read_range(&store, &(0..4), 59..60), Ok(Some(vec![59])));
}
#[test]
fn read_range_both_chunks() {
let mut store = MINIMAL.new_store();
let value: Vec<_> = (0..60).collect();
assert_eq!(store.insert(0, &value[..52]), Ok(()));
assert_eq!(store.insert(1, &value[52..]), Ok(()));
assert_eq!(
read_range(&store, &(0..4), 40..60),
Ok(Some((40..60).collect()))
);
assert_eq!(
read_range(&store, &(0..4), 0..60),
Ok(Some((0..60).collect()))
);
}
#[test]
fn read_range_outside() {
let mut store = MINIMAL.new_store();
let value: Vec<_> = (0..60).collect();
assert_eq!(store.insert(0, &value[..52]), Ok(()));
assert_eq!(store.insert(1, &value[52..]), Ok(()));
assert_eq!(
read_range(&store, &(0..4), 40..100),
Ok(Some((40..60).collect()))
);
assert_eq!(read_range(&store, &(0..4), 60..100), Ok(Some(vec![])));
}
#[test]
fn write_single_chunk() {
let mut store = MINIMAL.new_store();
let value = b"hello".to_vec();
assert_eq!(write(&mut store, &(0..4), &value), Ok(()));
assert_eq!(store.find(0), Ok(Some(value)));
assert_eq!(store.find(1), Ok(None));
assert_eq!(store.find(2), Ok(None));
assert_eq!(store.find(3), Ok(None));
}
#[test]
fn write_multiple_chunks() {
let mut store = MINIMAL.new_store();
let value: Vec<_> = (0..60).collect();
assert_eq!(write(&mut store, &(0..4), &value), Ok(()));
assert_eq!(store.find(0), Ok(Some((0..52).collect())));
assert_eq!(store.find(1), Ok(Some((52..60).collect())));
assert_eq!(store.find(2), Ok(None));
assert_eq!(store.find(3), Ok(None));
}
#[test]
fn overwrite_less_chunks() {
let mut store = MINIMAL.new_store();
let value: Vec<_> = (0..60).collect();
assert_eq!(store.insert(0, &value[..52]), Ok(()));
assert_eq!(store.insert(1, &value[52..]), Ok(()));
let value: Vec<_> = (42..69).collect();
assert_eq!(write(&mut store, &(0..4), &value), Ok(()));
assert_eq!(store.find(0), Ok(Some((42..69).collect())));
assert_eq!(store.find(1), Ok(None));
assert_eq!(store.find(2), Ok(None));
assert_eq!(store.find(3), Ok(None));
}
#[test]
fn overwrite_needed_chunks() {
let mut store = MINIMAL.new_store();
let mut value: Vec<_> = (0..60).collect();
assert_eq!(store.insert(0, &value[..52]), Ok(()));
assert_eq!(store.insert(1, &value[52..]), Ok(()));
// Current lifetime is 2 words of overhead (2 inserts) and 60 bytes of data.
let mut lifetime = 2 + 60 / 4;
assert_eq!(store.lifetime().unwrap().used(), lifetime);
// Update the value.
value.extend(60..80);
assert_eq!(write(&mut store, &(0..4), &value), Ok(()));
// Added lifetime is 1 word of overhead (1 insert) and (80 - 52) bytes of data.
lifetime += 1 + (80 - 52) / 4;
assert_eq!(store.lifetime().unwrap().used(), lifetime);
}
#[test]
fn delete_empty() {
let mut store = MINIMAL.new_store();
assert_eq!(delete(&mut store, &(0..4)), Ok(()));
assert_eq!(store.find(0), Ok(None));
assert_eq!(store.find(1), Ok(None));
assert_eq!(store.find(2), Ok(None));
assert_eq!(store.find(3), Ok(None));
}
#[test]
fn delete_chunks() {
let mut store = MINIMAL.new_store();
let value: Vec<_> = (0..60).collect();
assert_eq!(store.insert(0, &value[..52]), Ok(()));
assert_eq!(store.insert(1, &value[52..]), Ok(()));
assert_eq!(delete(&mut store, &(0..4)), Ok(()));
assert_eq!(store.find(0), Ok(None));
assert_eq!(store.find(1), Ok(None));
assert_eq!(store.find(2), Ok(None));
assert_eq!(store.find(3), Ok(None));
}
}

View File

@@ -1,4 +1,4 @@
// Copyright 2019-2020 Google LLC
// Copyright 2019-2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -12,191 +12,191 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// TODO(ia0): Add links once the code is complete.
// The documentation is easier to read from a browser:
// - Run: cargo doc --document-private-items --features=std
// - Open: target/doc/persistent_store/index.html
//! Store abstraction for flash storage
//!
//! # Specification
//!
//! The store provides a partial function from keys to values on top of a storage
//! interface. The store total capacity depends on the size of the storage. Store
//! updates may be bundled in transactions. Mutable operations are atomic, including
//! when interrupted.
//! The [store](Store) provides a partial function from keys to values on top of a
//! [storage](Storage) interface. The store total [capacity](Store::capacity) depends on the size of
//! the storage. Store [updates](StoreUpdate) may be bundled in [transactions](Store::transaction).
//! Mutable operations are atomic, including when interrupted.
//!
//! The store is flash-efficient in the sense that it uses the storage lifetime
//! efficiently. For each page, all words are written at least once between erase
//! cycles and all erase cycles are used. However, not all written words are user
//! content: lifetime is also consumed with metadata and compaction.
//! The store is flash-efficient in the sense that it uses the storage [lifetime](Store::lifetime)
//! efficiently. For each page, all words are written at least once between erase cycles and all
//! erase cycles are used. However, not all written words are user content: Lifetime is also
//! consumed with metadata and compaction.
//!
//! The store is extendable with other entries than key-values. It is essentially a
//! framework providing access to the storage lifetime. The partial function is
//! simply the most common usage and can be used to encode other usages.
//! The store is extendable with other entries than key-values. It is essentially a framework
//! providing access to the storage lifetime. The partial function is simply the most common usage
//! and can be used to encode other usages.
//!
//! ## Definitions
//!
//! An _entry_ is a pair of a key and a value. A _key_ is a number between 0
//! and 4095. A _value_ is a byte slice with a length between 0 and 1023 bytes (for
//! large enough pages).
//! An _entry_ is a pair of a key and a value. A _key_ is a number between 0 and
//! [4095](format::MAX_KEY_INDEX). A _value_ is a byte slice with a length between 0 and
//! [1023](format::Format::max_value_len) bytes (for large enough pages).
//!
//! The store provides the following _updates_:
//! - Given a key and a value, `Insert` updates the store such that the value is
//! - Given a key and a value, [`StoreUpdate::Insert`] updates the store such that the value is
//! associated with the key. The values for other keys are left unchanged.
//! - Given a key, `Remove` updates the store such that no value is associated with
//! the key. The values for other keys are left unchanged. Additionally, if there
//! was a value associated with the key, the value is wiped from the storage
//! (all its bits are set to 0).
//! - Given a key, [`StoreUpdate::Remove`] updates the store such that no value is associated with
//! the key. The values for other keys are left unchanged. Additionally, if there was a value
//! associated with the key, the value is wiped from the storage (all its bits are set to 0).
//!
//! The store provides the following _read-only operations_:
//! - `Iter` iterates through the store returning all entries exactly once. The
//! iteration order is not specified but stable between mutable operations.
//! - `Capacity` returns how many words can be stored before the store is full.
//! - `Lifetime` returns how many words can be written before the storage lifetime
//! is consumed.
//! - [`Store::iter`] iterates through the store returning all entries exactly once. The iteration
//! order is not specified but stable between mutable operations.
//! - [`Store::capacity`] returns how many words can be stored before the store is full.
//! - [`Store::lifetime`] returns how many words can be written before the storage lifetime is
//! consumed.
//!
//! The store provides the following _mutable operations_:
//! - Given a set of independent updates, `Transaction` applies the sequence of
//! updates.
//! - Given a threshold, `Clear` removes all entries with a key greater or equal
//! to the threshold.
//! - Given a length in words, `Prepare` makes one step of compaction unless that
//! many words can be written without compaction. This operation has no effect
//! on the store but may still mutate its storage. In particular, the store has
//! the same capacity but a possibly reduced lifetime.
//! - Given a set of independent updates, [`Store::transaction`] applies the sequence of updates.
//! - Given a threshold, [`Store::clear`] removes all entries with a key greater or equal to the
//! threshold.
//! - Given a length in words, [`Store::prepare`] makes one step of compaction unless that many
//! words can be written without compaction. This operation has no effect on the store but may
//! still mutate its storage. In particular, the store has the same capacity but a possibly
//! reduced lifetime.
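//!
//! For illustration, a sketch of a transaction bundling an insert and a remove (assuming a
//! variable `store` of type `Store<S>`; either both updates are applied or neither is):
//!
//! ```rust,ignore
//! store.transaction(&[
//!     StoreUpdate::Insert { key: 0, value: &[1, 2, 3, 4][..] },
//!     StoreUpdate::Remove { key: 1 },
//! ])?;
//! ```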
//!
//! A mutable operation is _atomic_ if, when power is lost during the operation, the
//! store is either updated (as if the operation succeeded) or left unchanged (as if
//! the operation did not occur). If the store is left unchanged, lifetime may still
//! be consumed.
//! A mutable operation is _atomic_ if, when power is lost during the operation, the store is either
//! updated (as if the operation succeeded) or left unchanged (as if the operation did not occur).
//! If the store is left unchanged, lifetime may still be consumed.
//!
//! The store relies on the following _storage interface_:
//! - It is possible to read a byte slice. The slice won't span multiple pages.
//! - It is possible to write a word slice. The slice won't span multiple pages.
//! - It is possible to erase a page.
//! - The pages are sequentially indexed from 0. If the actual underlying storage
//! is segmented, then the storage layer should translate those indices to
//! actual page addresses.
//! - It is possible to [read](Storage::read_slice) a byte slice. The slice won't span multiple
//! pages.
//! - It is possible to [write](Storage::write_slice) a word slice. The slice won't span multiple
//! pages.
//! - It is possible to [erase](Storage::erase_page) a page.
//! - The pages are sequentially indexed from 0. If the actual underlying storage is segmented,
//! then the storage layer should translate those indices to actual page addresses.
//!
//! The store has a _total capacity_ of `C = (N - 1) * (P - 4) - M - 1` words, where
//! `P` is the number of words per page, `N` is the number of pages, and `M` is the
//! maximum length in words of a value (256 for large enough pages). The capacity
//! used by each mutable operation is given below (a transient word only uses
//! capacity during the operation):
//! - `Insert` uses `1 + ceil(len / 4)` words where `len` is the length of the
//! value in bytes. If an entry was replaced, the words used by its insertion
//! are freed.
//! - `Remove` doesn't use capacity if alone in the transaction and 1 transient
//! word otherwise. If an entry was deleted, the words used by its insertion are
//! freed.
//! - `Transaction` uses 1 transient word. In addition, the updates of the
//! transaction use and free words as described above.
//! - `Clear` doesn't use capacity and frees the words used by the insertion of
//! the deleted entries.
//! - `Prepare` doesn't use capacity.
//! The store has a _total capacity_ of C = (N - 1) × (P - 4) - M - 1 words, where:
//! - P is the number of words per page
//! - [N](format::Format::num_pages) is the number of pages
//! - [M](format::Format::max_prefix_len) is the maximum length in words of a value (256 for large
//! enough pages)
//!
//! The _total lifetime_ of the store is below `L = ((E + 1) * N - 1) * (P - 2)` and
//! above `L - M` words, where `E` is the maximum number of erase cycles. The
//! lifetime is used when capacity is used, including transiently, as well as when
//! compaction occurs. Compaction frequency and lifetime consumption are positively
//! correlated to the store load factor (the ratio of used capacity to total capacity).
//! The capacity used by each mutable operation is given below (a transient word only uses capacity
//! during the operation):
//!
//! It is possible to approximate the cost of transient words in terms of capacity:
//! `L` transient words are equivalent to `C - x` words of capacity where `x` is the
//! average capacity (including transient) of operations.
//! | Operation/Update | Used capacity | Freed capacity | Transient capacity |
//! | ----------------------- | ---------------- | ----------------- | ------------------ |
//! | [`StoreUpdate::Insert`] | 1 + value length | overwritten entry | 0 |
//! | [`StoreUpdate::Remove`] | 0 | deleted entry | see below\* |
//! | [`Store::transaction`] | 0 + updates | 0 + updates | 1 |
//! | [`Store::clear`] | 0 | deleted entries | 0 |
//! | [`Store::prepare`] | 0 | 0 | 0 |
//!
//! \*0 if the update is alone in the transaction, otherwise 1.
//!
//! The _total lifetime_ of the store is below L = ((E + 1) × N - 1) × (P - 2) and above L - M
//! words, where E is the maximum number of erase cycles. The lifetime is used when capacity is
//! used, including transiently, as well as when compaction occurs. Compaction frequency and
//! lifetime consumption are positively correlated to the store load factor (the ratio of used
//! capacity to total capacity).
//!
//! It is possible to approximate the cost of transient words in terms of capacity: L transient
//! words are equivalent to C - x words of capacity where x is the average capacity (including
//! transient) of operations.
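//!
//! As a worked example (a sketch using the minimal test configuration of this crate: 4-byte
//! words and 64-byte pages, so P = 16; N = 5 pages; E = 9 erase cycles; hence Q = 14 and
//! M = min(Q - 1, 256) = 13): the total capacity is C = (5 - 1) × (16 - 4) - 13 - 1 = 34 words,
//! and the total lifetime is below L = ((9 + 1) × 5 - 1) × (16 - 2) = 686 words.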
//!
//! ## Preconditions
//!
//! The following assumptions need to hold, or the store may behave in unexpected ways:
//! - A word can be written twice between erase cycles.
//! - A page can be erased `E` times after the first boot of the store.
//! - When power is lost while writing a slice or erasing a page, the next read
//! returns a slice where a subset (possibly none or all) of the bits that
//! should have been modified have been modified.
//! - Reading a slice is deterministic. When power is lost while writing a slice
//! or erasing a slice (erasing a page containing that slice), reading that
//! slice repeatedly returns the same result (until it is overwritten or its
//! page is erased).
//! - To decide whether a page has been erased, it is enough to test if all its
//! bits are equal to 1.
//! - When power is lost while writing a slice or erasing a page, that operation
//! does not count towards the limits. However, completing that write or erase
//! operation would count towards the limits, as if the number of writes per
//! word and number of erase cycles could be fractional.
//! - The storage is only modified by the store. Note that completely erasing the
//! storage is supported, essentially losing all content and lifetime tracking.
//! It is preferred to use `Clear` with a threshold of 0 to keep the lifetime
//! tracking.
//! - A word can be written [twice](Storage::max_word_writes) between erase cycles.
//! - A page can be erased [E](Storage::max_page_erases) times after the first boot of the store.
//! - When power is lost while writing a slice or erasing a page, the next read returns a slice
//! where a subset (possibly none or all) of the bits that should have been modified have been
//! modified.
//! - Reading a slice is deterministic. When power is lost while writing a slice or erasing a
//! slice (erasing a page containing that slice), reading that slice repeatedly returns the same
//! result (until it is overwritten or its page is erased).
//! - To decide whether a page has been erased, it is enough to test if all its bits are equal
//! to 1.
//! - When power is lost while writing a slice or erasing a page, that operation does not count
//! towards the limits. However, completing that write or erase operation would count towards
//! the limits, as if the number of writes per word and number of erase cycles could be
//! fractional.
//! - The storage is only modified by the store. Note that completely erasing the storage is
//! supported, essentially losing all content and lifetime tracking. It is preferred to use
//! [`Store::clear`] with a threshold of 0 to keep the lifetime tracking.
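//!
//! For instance, to reset the store while keeping lifetime tracking (a sketch, assuming a
//! variable `store` of type `Store<S>`):
//!
//! ```rust,ignore
//! store.clear(0)?; // Removes all entries; lifetime accounting is preserved.
//! ```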
//!
//! The store properties may still hold outside some of those assumptions, but with
//! an increasing chance of failure.
//! The store properties may still hold outside some of those assumptions, but with an increasing
//! chance of failure.
//!
//! # Implementation
//!
//! We define the following constants:
//! - `E < 65536` the number of times a page can be erased.
//! - `3 <= N < 64` the number of pages in the storage.
//! - `8 <= P <= 1024` the number of words in a page.
//! - `Q = P - 2` the number of words in a virtual page.
//! - `K = 4096` the maximum number of keys.
//! - `M = min(Q - 1, 256)` the maximum length in words of a value.
//! - `V = (N - 1) * (Q - 1) - M` the virtual capacity.
//! - `C = V - N` the user capacity.
//! - [E](format::Format::max_page_erases) ≤ [65535](format::MAX_ERASE_CYCLE) the number of times
//! a page can be erased.
//! - 3 ≤ [N](format::Format::num_pages) < 64 the number of pages in the storage.
//! - 8 ≤ P ≤ 1024 the number of words in a page.
//! - [Q](format::Format::virt_page_size) = P - 2 the number of words in a virtual page.
//! - [M](format::Format::max_prefix_len) = min(Q - 1, 256) the maximum length in words of a
//! value.
//! - [V](format::Format::virt_size) = (N - 1) × (Q - 1) - M the virtual capacity.
//! - [C](format::Format::total_capacity) = V - N the user capacity.
//!
//! We build a virtual storage from the physical storage using the first 2 words of
//! each page:
//! We build a virtual storage from the physical storage using the first 2 words of each page:
//! - The first word contains the number of times the page has been erased.
//! - The second word contains the starting word to which this page is being moved
//! during compaction.
//! - The second word contains the starting word to which this page is being moved during
//! compaction.
//!
//! The virtual storage has a length of `(E + 1) * N * Q` words and represents the
//! lifetime of the store. (We reserve the last `Q + M` words to support adding
//! emergency lifetime.) This virtual storage has a linear address space.
//! The virtual storage has a length of (E + 1) × N × Q words and represents the lifetime of the
//! store. (We reserve the last Q + M words to support adding emergency lifetime.) This virtual
//! storage has a linear address space.
//!
//! We define a set of overlapping windows of `N * Q` words at each `Q`-aligned
//! boundary. We call `i` the window spanning from `i * Q` to `(i + N) * Q`. Only
//! those windows actually exist in the underlying storage. We use compaction to
//! shift the current window from `i` to `i + 1`, preserving the content of the
//! store.
//! We define a set of overlapping windows of N × Q words at each Q-aligned boundary. We call i the
//! window spanning from i × Q to (i + N) × Q. Only those windows actually exist in the underlying
//! storage. We use compaction to shift the current window from i to i + 1, preserving the content
//! of the store.
//!
//! For a given state of the virtual storage, we define `h_i` as the position of the
//! first entry of the window `i`. We call it the head of the window `i`. Because
//! entries are at most `M + 1` words, they can overlap on the next page only by `M`
//! words. So we have `i * Q <= h_i <= i * Q + M` . Since there are no entries
//! before the first page, we have `h_0 = 0`.
//! For a given state of the virtual storage, we define h\_i as the position of the first entry of
//! the window i. We call it the head of the window i. Because entries are at most M + 1 words, they
//! can overlap on the next page only by M words. So we have i × Q ≤ h\_i ≤ i × Q + M. Since there
//! are no entries before the first page, we have h\_0 = 0.
//!
//! We define `t_i` as one past the last entry of the window `i`. If there are no
//! entries in that window, we have `t_i = h_i`. We call `t_i` the tail of the
//! window `i`. We define the compaction invariant as `t_i - h_i <= V`.
//! We define t\_i as one past the last entry of the window i. If there are no entries in that
//! window, we have t\_i = h\_i. We call t\_i the tail of the window i. We define the compaction
//! invariant as t\_i - h\_i ≤ V.
//!
//! We define `|x|` as the capacity used before position `x`. We have `|x| <= x`. We
//! define the capacity invariant as `|t_i| - |h_i| <= C`.
//! We define |x| as the capacity used before position x. We have |x| ≤ x. We define the capacity
//! invariant as |t\_i| - |h\_i| ≤ C.
//!
//! Using this virtual storage, entries are appended to the tail as long as there is
//! both virtual capacity to preserve the compaction invariant and capacity to
//! preserve the capacity invariant. When virtual capacity runs out, the first page
//! of the window is compacted and the window is shifted.
//! Using this virtual storage, entries are appended to the tail as long as there is both virtual
//! capacity to preserve the compaction invariant and capacity to preserve the capacity invariant.
//! When virtual capacity runs out, the first page of the window is compacted and the window is
//! shifted.
//!
//! Entries are identified by a prefix of bits. The prefix has to contain at least
//! one bit set to zero to differentiate from the tail. Entries can be one of:
//! - Padding: A word whose first bit is set to zero. The rest is arbitrary. This
//! entry is used to mark words partially written after an interrupted operation
//! as padding such that they are ignored by future operations.
//! - Header: A word whose second bit is set to zero. It contains the following fields:
//! - A bit indicating whether the entry is deleted.
//! - A bit indicating whether the value is word-aligned and has all bits set
//! to 1 in its last word. The last word of an entry is used to detect that
//! an entry has been fully written. As such it must contain at least one
//! bit equal to zero.
//! - The key of the entry.
//! - The length in bytes of the value. The value follows the header. The
//! entry is word-aligned if the value is not.
//! - The checksum of the first and last word of the entry.
//! - Erase: A word used during compaction. It contains the page to be erased and
//! a checksum.
//! - Clear: A word used during the `Clear` operation. It contains the threshold
//! and a checksum.
//! - Marker: A word used during the `Transaction` operation. It contains the
//! number of updates following the marker and a checksum.
//! - Remove: A word used during the `Transaction` operation. It contains the key
//! of the entry to be removed and a checksum.
//! Entries are identified by a prefix of bits. The prefix has to contain at least one bit set to
//! zero to differentiate from the tail. Entries can be one of:
//! - [Padding](format::ID_PADDING): A word whose first bit is set to zero. The rest is arbitrary.
//! This entry is used to mark words partially written after an interrupted operation as padding
//! such that they are ignored by future operations.
//! - [Header](format::ID_HEADER): A word whose second bit is set to zero. It contains the
//! following fields:
//! - A [bit](format::HEADER_DELETED) indicating whether the entry is deleted.
//! - A [bit](format::HEADER_FLIPPED) indicating whether the value is word-aligned and has all
//! bits set to 1 in its last word. The last word of an entry is used to detect that an
//! entry has been fully written. As such it must contain at least one bit equal to zero.
//! - The [key](format::HEADER_KEY) of the entry.
//! - The [length](format::HEADER_LENGTH) in bytes of the value. The value follows the header.
//! The entry is word-aligned if the value is not.
//! - The [checksum](format::HEADER_CHECKSUM) of the first and last word of the entry.
//! - [Erase](format::ID_ERASE): A word used during compaction. It contains the
//! [page](format::ERASE_PAGE) to be erased and a [checksum](format::WORD_CHECKSUM).
//! - [Clear](format::ID_CLEAR): A word used during the clear operation. It contains the
//! [threshold](format::CLEAR_MIN_KEY) and a [checksum](format::WORD_CHECKSUM).
//! - [Marker](format::ID_MARKER): A word used during a transaction. It contains the [number of
//! updates](format::MARKER_COUNT) following the marker and a [checksum](format::WORD_CHECKSUM).
//! - [Remove](format::ID_REMOVE): A word used inside a transaction. It contains the
//! [key](format::REMOVE_KEY) of the entry to be removed and a
//! [checksum](format::WORD_CHECKSUM).
//!
//! Checksums are the number of bits equal to 0.
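//!
//! For a 32-bit word this is simply the following (a sketch; the actual field layouts live in
//! the `format` module):
//!
//! ```rust
//! fn checksum(word: u32) -> u32 {
//!     // The checksum of a word is its number of bits equal to 0.
//!     word.count_zeros()
//! }
//! assert_eq!(checksum(0xffff_fffe), 1);
//! ```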
//!
@@ -204,107 +204,105 @@
//!
//! ## Compaction
//!
//! It should always be possible to fully compact the store, after what the
//! remaining capacity should be available in the current window (restoring the
//! compaction invariant). We consider all notations on the virtual storage after
//! the full compaction. We will use the `|x|` notation although we update the state
//! of the virtual storage. This is fine because compaction doesn't change the
//! status of an existing word.
//! It should always be possible to fully compact the store, after which the remaining capacity
//! should be available in the current window (restoring the compaction invariant). We consider all
//! notations on the virtual storage after the full compaction. We will use the |x| notation
//! although we update the state of the virtual storage. This is fine because compaction doesn't
//! change the status of an existing word.
//!
//! We want to show that the next `N - 1` compactions won't move the tail past the
//! last page of their window, with `I` the initial window:
//! We want to show that the next N - 1 compactions won't move the tail past the last page of their
//! window, with I the initial window:
//!
//! ```text
//! forall 1 <= i <= N - 1, t_{I + i} <= (I + i + N - 1) * Q
//! ```
//! | | | | |
//! | ----------------:| ----------:|:-:|:------------------- |
//! | ∀(1 ≤ i ≤ N - 1) | t\_{I + i} | ≤ | (I + i + N - 1) × Q |
//!
//! We assume `i` between `1` and `N - 1`.
//! We assume i between 1 and N - 1.
//!
//! One step of compaction advances the tail by how many words were used in the
//! first page of the window with the last entry possibly overlapping on the next
//! page.
//! One step of compaction advances the tail by how many words were used in the first page of the
//! window with the last entry possibly overlapping on the next page.
//!
//! ```text
//! forall j, t_{j + 1} = t_j + |h_{j + 1}| - |h_j| + 1
//! ```
//! | | | | |
//! | --:| ----------:|:-:|:------------------------------------ |
//! | ∀j | t\_{j + 1} | = | t\_j + \|h\_{j + 1}\| - \|h\_j\| + 1 |
//!
//! By induction, we have:
//!
//! ```text
//! t_{I + i} <= t_I + |h_{I + i}| - |h_I| + i
//! ```
//! | | | |
//! | ----------:|:-:|:------------------------------------ |
//! | t\_{I + i} | ≤ | t\_I + \|h\_{I + i}\| - \|h\_I\| + i |
//!
//! We have the following properties:
//!
//! ```text
//! t_I <= h_I + V
//! |h_{I + i}| - |h_I| <= h_{I + i} - h_I
//! h_{I + i} <= (I + i) * Q + M
//! ```
//! | | | |
//! | -------------------------:|:-:|:----------------- |
//! | t\_I                       | ≤ | h\_I + V          |
//! | \|h\_{I + i}\| - \|h\_I\| | ≤ | h\_{I + i} - h\_I |
//! | h\_{I + i} | ≤ | (I + i) × Q + M |
//!
//! Replacing into our previous equality, we can conclude:
//!
//! ```text
//! t_{I + i} = t_I + |h_{I + i}| - |h_I| + i
//! <= h_I + V + (I + i) * Q + M - h_I + i
//! = (N - 1) * (Q - 1) - M + (I + i) * Q + M + i
//! = (N - 1) * (Q - 1) + (I + i) * Q + i
//! = (I + i + N - 1) * Q + i - (N - 1)
//! <= (I + i + N - 1) * Q
//! ```
//! | | | |
//! | ----------:|:-:| ------------------------------------------- |
//! | t\_{I + i} | = | t\_I + \|h\_{I + i}\| - \|h\_I\| + i          |
//! |            | ≤ | h\_I + V + (I + i) × Q + M - h\_I + i        |
//! | | = | (N - 1) × (Q - 1) - M + (I + i) × Q + M + i |
//! | | = | (N - 1) × (Q - 1) + (I + i) × Q + i |
//! | | = | (I + i + N - 1) × Q + i - (N - 1) |
//! | | ≤ | (I + i + N - 1) × Q |
//!
//! We also want to show that after `N - 1` compactions, the remaining capacity is
//! available without compaction.
//! We also want to show that after N - 1 compactions, the remaining capacity is available without
//! compaction.
//!
//! ```text
//! V - (t_{I + N - 1} - h_{I + N - 1}) >= // The available words in the window.
//! C - (|t_{I + N - 1}| - |h_{I + N - 1}|) // The remaining capacity.
//! + 1 // Reserved for Clear.
//! ```
//! | | | |
//! | -:| --------------------------------------------- | --------------------------------- |
//! | | V - (t\_{I + N - 1} - h\_{I + N - 1}) | The available words in the window |
//! | ≥ | C - (\|t\_{I + N - 1}\| - \|h\_{I + N - 1}\|) | The remaining capacity |
//! | + | 1 | Reserved for clear |
//!
//! We can replace the definition of `C` and simplify:
//! We can replace the definition of C and simplify:
//!
//! ```text
//! V - (t_{I + N - 1} - h_{I + N - 1}) >= V - N - (|t_{I + N - 1}| - |h_{I + N - 1}|) + 1
//! iff t_{I + N - 1} - h_{I + N - 1} <= |t_{I + N - 1}| - |h_{I + N - 1}| + N - 1
//! ```
//! | | | | |
//! | ---:| -------------------------------------:|:-:|:----------------------------------------------------- |
//! | | V - (t\_{I + N - 1} - h\_{I + N - 1}) | ≥ | V - N - (\|t\_{I + N - 1}\| - \|h\_{I + N - 1}\|) + 1 |
//! | iff | t\_{I + N - 1} - h\_{I + N - 1} | ≤ | \|t\_{I + N - 1}\| - \|h\_{I + N - 1}\| + N - 1 |
//!
//! We have the following properties:
//!
//! ```text
//! t_{I + N - 1} = t_I + |h_{I + N - 1}| - |h_I| + N - 1
//! |t_{I + N - 1}| - |h_{I + N - 1}| = |t_I| - |h_I| // Compaction preserves capacity.
//! |h_{I + N - 1}| - |t_I| <= h_{I + N - 1} - t_I
//! ```
//!
//! | | | | |
//! | ---------------------------------------:|:-:|:-------------------------------------------- |:------ |
//! | t\_{I + N - 1} | = | t\_I + \|h\_{I + N - 1}\| - \|h\_I\| + N - 1 | |
//! | \|t\_{I + N - 1}\| - \|h\_{I + N - 1}\| | = | \|t\_I\| - \|h\_I\| | Compaction preserves capacity |
//! | \|h\_{I + N - 1}\| - \|t\_I\| | ≤ | h\_{I + N - 1} - t\_I | |
//!
//! From which we conclude:
//!
//! ```text
//! t_{I + N - 1} - h_{I + N - 1} <= |t_{I + N - 1}| - |h_{I + N - 1}| + N - 1
//! iff t_I + |h_{I + N - 1}| - |h_I| + N - 1 - h_{I + N - 1} <= |t_I| - |h_I| + N - 1
//! iff t_I + |h_{I + N - 1}| - h_{I + N - 1} <= |t_I|
//! iff |h_{I + N - 1}| - |t_I| <= h_{I + N - 1} - t_I
//! ```
//! | | | | |
//! | ---:| -------------------------------:|:-:|:----------------------------------------------- |
//! | | t\_{I + N - 1} - h\_{I + N - 1} | ≤ | \|t\_{I + N - 1}\| - \|h\_{I + N - 1}\| + N - 1 |
//! | iff | t\_I + \|h\_{I + N - 1}\| - \|h\_I\| + N - 1 - h\_{I + N - 1} | ≤ | \|t\_I\| - \|h\_I\| + N - 1 |
//! | iff | t\_I + \|h\_{I + N - 1}\| - h\_{I + N - 1} | ≤ | \|t\_I\| |
//! | iff | \|h\_{I + N - 1}\| - \|t\_I\| | ≤ | h\_{I + N - 1} - t\_I |
//!
//! ## Checksum
//!
//! The main property we want is that all partially written/erased words are either
//! the initial word, the final word, or invalid.
//! The main property we want is that all partially written/erased words are either the initial
//! word, the final word, or invalid.
//!
//! We say that a bit sequence `TARGET` is reachable from a bit sequence `SOURCE` if
//! both have the same length and `SOURCE & TARGET == TARGET` where `&` is the
//! bitwise AND operation on bit sequences of that length. In other words, when
//! `SOURCE` has a bit equal to 0 then `TARGET` also has that bit equal to 0.
//! We say that a bit sequence `TARGET` is reachable from a bit sequence `SOURCE` if both have the
//! same length and `SOURCE & TARGET == TARGET` where `&` is the bitwise AND operation on bit
//! sequences of that length. In other words, when `SOURCE` has a bit equal to 0 then `TARGET` also
//! has that bit equal to 0.
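//!
//! A direct transcription of this definition for 32-bit words (a sketch; the store applies it
//! bitwise to byte slices):
//!
//! ```rust
//! fn is_reachable(source: u32, target: u32) -> bool {
//!     // Flash writes only flip bits from 1 to 0, so `target` is reachable from
//!     // `source` exactly when every 0 bit of `source` is also 0 in `target`.
//!     source & target == target
//! }
//! assert!(is_reachable(0b111, 0b101));
//! assert!(!is_reachable(0b101, 0b110));
//! ```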
//!
//! The only written entries start with `101` or `110` and are written from an
//! erased word. Marking an entry as padding or deleted is a single bit operation,
//! so the property trivially holds. For those cases, the proof relies on the fact
//! that there is exactly one bit equal to 0 in the 3 first bits. Either the 3 first
//! bits are still `111` in which case we expect the remaining bits to be equal
//! to 1. Otherwise we can use the checksum of the given type of entry because those
//! 2 types of entries are not reachable from each other. Here is a visualization of
//! the partitioning based on the first 3 bits:
//! The only written entries start with `101` or `110` and are written from an erased word. Marking
//! an entry as padding or deleted is a single bit operation, so the property trivially holds. For
//! those cases, the proof relies on the fact that there is exactly one bit equal to 0 in the 3
//! first bits. Either the 3 first bits are still `111` in which case we expect the remaining bits
//! to be equal to 1. Otherwise we can use the checksum of the given type of entry because those 2
//! types of entries are not reachable from each other. Here is a visualization of the partitioning
//! based on the first 3 bits:
//!
//! | First 3 bits | Description | How to check |
//! | ------------:| ------------------ | ---------------------------- |
@@ -314,49 +312,48 @@
//! | `100` | Deleted user entry | No check, atomically written |
//! | `0??` | Padding entry | No check, atomically written |
//!
//! To show that valid entries of a given type are not reachable from each other, we
//! show 3 lemmas:
//! To show that valid entries of a given type are not reachable from each other, we show 3 lemmas:
//!
//! 1. A bit sequence is not reachable from another if its number of bits equal to
//! 0 is smaller.
//! 1. A bit sequence is not reachable from another if its number of bits equal to 0 is smaller.
//! 2. A bit sequence is not reachable from another if they have the same number of bits equal to
//! 0 and are different.
//! 3. A bit sequence is not reachable from another if it is bigger when they are interpreted as
//! numbers in binary representation.
//!
//! 2. A bit sequence is not reachable from another if they have the same number of
//! bits equals to 0 and are different.
//!
//! 3. A bit sequence is not reachable from another if it is bigger when they are
//! interpreted as numbers in binary representation.
//!
//! From those lemmas we consider the 2 cases. If both entries have the same number
//! of bits equal to 0, they are either equal or not reachable from each other
//! because of the second lemma. If they don't have the same number of bits equal to
//! 0, then the one with less bits equal to 0 is not reachable from the other
//! because of the first lemma and the one with more bits equal to 0 is not
//! reachable from the other because of the third lemma and the definition of the
//! checksum.
//! From those lemmas we consider the 2 cases. If both entries have the same number of bits equal to
//! 0, they are either equal or not reachable from each other because of the second lemma. If they
//! don't have the same number of bits equal to 0, then the one with fewer bits equal to 0 is not
//! reachable from the other because of the first lemma and the one with more bits equal to 0 is not
//! reachable from the other because of the third lemma and the definition of the checksum.
//!
//! # Fuzzing
//!
//! For any sequence of operations and interruptions starting from an erased
//! storage, the store is checked against its model and some internal invariant at
//! each step.
//! For any sequence of operations and interruptions starting from an erased storage, the store is
//! checked against its model and some internal invariant at each step.
//!
//! For any sequence of operations and interruptions starting from an arbitrary
//! storage, the store is checked not to crash.
//! For any sequence of operations and interruptions starting from an arbitrary storage, the store
//! is checked not to crash.
#![cfg_attr(not(feature = "std"), no_std)]
#![feature(try_trait)]
#[macro_use]
extern crate alloc;
#[cfg(feature = "std")]
mod buffer;
#[cfg(feature = "std")]
mod driver;
mod format;
pub mod fragment;
#[cfg(feature = "std")]
mod model;
mod storage;
mod store;
#[cfg(test)]
mod test;
#[cfg(feature = "std")]
pub use self::buffer::{BufferCorruptFunction, BufferOptions, BufferStorage};
#[cfg(feature = "std")]
pub use self::driver::{

View File

@@ -12,13 +12,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//! Store specification.
use crate::format::Format;
use crate::{usize_to_nat, StoreError, StoreRatio, StoreResult, StoreUpdate};
use std::collections::HashMap;
/// Models the mutable operations of a store.
///
/// The model doesn't model the storage and read-only operations. This is done by the driver.
/// The model doesn't model the storage and read-only operations. This is done by the
/// [driver](crate::StoreDriver).
#[derive(Clone, Debug)]
pub struct StoreModel {
/// Represents the content of the store.
@@ -34,7 +37,7 @@ pub enum StoreOperation {
/// Applies a transaction.
Transaction {
/// The list of updates to be applied.
updates: Vec<StoreUpdate>,
updates: Vec<StoreUpdate<Vec<u8>>>,
},
/// Deletes all keys above a threshold.
@@ -89,7 +92,7 @@ impl StoreModel {
}
/// Applies a transaction.
fn transaction(&mut self, updates: Vec<StoreUpdate>) -> StoreResult<()> {
fn transaction(&mut self, updates: Vec<StoreUpdate<Vec<u8>>>) -> StoreResult<()> {
// Fail if the transaction is invalid.
if self.format.transaction_valid(&updates).is_none() {
return Err(StoreError::InvalidArgument);

View File

@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//! Flash storage abstraction.
/// Represents a byte position in a storage.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct StorageIndex {
@@ -65,12 +67,14 @@ pub trait Storage {
/// The following pre-conditions must hold:
/// - The `index` must designate `value.len()` bytes in the storage.
/// - Both `index` and `value.len()` must be word-aligned.
/// - The written words should not have been written too many times since last page erasure.
/// - The written words should not have been written [too many](Self::max_word_writes) times
/// since the last page erasure.
fn write_slice(&mut self, index: StorageIndex, value: &[u8]) -> StorageResult<()>;
/// Erases a page of the storage.
///
/// The `page` must be in the storage.
    /// The `page` must be in the storage, i.e. less than [`Storage::num_pages`], and the page
    /// should not have been erased [too many](Self::max_page_erases) times.
fn erase_page(&mut self, page: usize) -> StorageResult<()>;
}
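
As a sketch of the word-alignment precondition on `write_slice` (a hypothetical helper; the actual word size comes from the `Storage` implementation):

```rust
/// Returns whether a byte offset or length is word-aligned, assuming 4-byte words.
fn is_word_aligned(bytes: usize) -> bool {
    bytes % 4 == 0
}

fn main() {
    assert!(is_word_aligned(8)); // A 2-word slice may be written.
    assert!(!is_word_aligned(6)); // A 6-byte slice must be rejected.
}
```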

View File

@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//! Store implementation.
use crate::format::{
is_erased, CompactInfo, Format, Header, InitInfo, InternalEntry, Padding, ParsedWord, Position,
Word, WordState,
@@ -23,8 +25,12 @@ use crate::{usize_to_nat, Nat, Storage, StorageError, StorageIndex};
pub use crate::{
BufferStorage, StoreDriver, StoreDriverOff, StoreDriverOn, StoreInterruption, StoreInvariant,
};
use alloc::boxed::Box;
use alloc::vec::Vec;
use core::borrow::Borrow;
use core::cmp::{max, min, Ordering};
use core::convert::TryFrom;
use core::option::NoneError;
#[cfg(feature = "std")]
use std::collections::HashSet;
@@ -51,17 +57,14 @@ pub enum StoreError {
///
/// The consequences depend on the storage failure. In particular, the operation may or may not
/// have succeeded, and the storage may have become invalid. Before doing any other operation,
/// the store should be [recovered]. The operation may then be retried if idempotent.
///
/// [recovered]: struct.Store.html#method.recover
/// the store should be [recovered](Store::recover). The operation may then be retried if
/// idempotent.
StorageError,
/// Storage is invalid.
///
/// The storage should be erased and the store [recovered]. The store would be empty and have
/// lost track of lifetime.
///
/// [recovered]: struct.Store.html#method.recover
/// The storage should be erased and the store [recovered](Store::recover). The store would be
/// empty and have lost track of lifetime.
InvalidStorage,
}
@@ -75,20 +78,26 @@ impl From<StorageError> for StoreError {
}
}
impl From<NoneError> for StoreError {
fn from(error: NoneError) -> StoreError {
match error {
NoneError => StoreError::InvalidStorage,
}
}
}
/// Result of store operations.
pub type StoreResult<T> = Result<T, StoreError>;
/// Progression ratio for store metrics.
///
/// This is used for the [capacity] and [lifetime] metrics. Those metrics are measured in words.
/// This is used for the [`Store::capacity`] and [`Store::lifetime`] metrics. Those metrics are
/// measured in words.
///
/// # Invariant
///
/// - The used value does not exceed the total: `used <= total`.
///
/// [capacity]: struct.Store.html#method.capacity
/// [lifetime]: struct.Store.html#method.lifetime
#[derive(Copy, Clone, PartialEq, Eq)]
/// - The used value does not exceed the total: `used` ≤ `total`.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct StoreRatio {
/// How much of the metric is used.
pub(crate) used: Nat,
@@ -136,11 +145,20 @@ impl StoreHandle {
self.key as usize
}
/// Returns the value length of the entry.
///
/// # Errors
///
/// Returns [`StoreError::InvalidArgument`] if the entry has been deleted or compacted.
pub fn get_length<S: Storage>(&self, store: &Store<S>) -> StoreResult<usize> {
store.get_length(self)
}
/// Returns the value of the entry.
///
/// # Errors
///
/// Returns `InvalidArgument` if the entry has been deleted or compacted.
/// Returns [`StoreError::InvalidArgument`] if the entry has been deleted or compacted.
pub fn get_value<S: Storage>(&self, store: &Store<S>) -> StoreResult<Vec<u8>> {
store.get_value(self)
}
@@ -148,15 +166,15 @@ impl StoreHandle {
/// Represents an update to the store as part of a transaction.
#[derive(Clone, Debug)]
pub enum StoreUpdate {
pub enum StoreUpdate<ByteSlice: Borrow<[u8]>> {
/// Inserts or replaces an entry in the store.
Insert { key: usize, value: Vec<u8> },
Insert { key: usize, value: ByteSlice },
/// Removes an entry from the store.
Remove { key: usize },
}
impl StoreUpdate {
impl<ByteSlice: Borrow<[u8]>> StoreUpdate<ByteSlice> {
/// Returns the key affected by the update.
pub fn key(&self) -> usize {
match *self {
@@ -168,12 +186,14 @@ impl StoreUpdate {
/// Returns the value written by the update.
pub fn value(&self) -> Option<&[u8]> {
match self {
StoreUpdate::Insert { value, .. } => Some(value),
StoreUpdate::Insert { value, .. } => Some(value.borrow()),
StoreUpdate::Remove { .. } => None,
}
}
}
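// For illustration (a sketch, not part of the API): the `Borrow<[u8]>` bound lets the same
// update type carry either borrowed or owned bytes.
//
//     let borrowed: StoreUpdate<&[u8]> = StoreUpdate::Insert { key: 1, value: &[1, 2] };
//     let owned: StoreUpdate<Vec<u8>> = StoreUpdate::Insert { key: 1, value: vec![1, 2] };
//     assert_eq!(borrowed.value(), owned.value());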
pub type StoreIter<'a> = Box<dyn Iterator<Item = StoreResult<StoreHandle>> + 'a>;
/// Implements a store with a map interface over a storage.
#[derive(Clone)]
pub struct Store<S: Storage> {
@@ -182,6 +202,14 @@ pub struct Store<S: Storage> {
/// The storage configuration.
format: Format,
/// The position of the first word in the store.
head: Option<Position>,
/// The list of the position of the user entries.
///
/// The position is encoded as the word offset from the [head](Store::head).
entries: Option<Vec<u16>>,
}
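// For illustration (hypothetical numbers): with `head` at position 28 and user entries at
// positions 28 and 34, `entries` holds the offsets [0, 6]. Compaction moves the head forward,
// so every offset is rebased when a page is erased (see `compact_erase`).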
impl<S: Storage> Store<S> {
@@ -193,13 +221,19 @@ impl<S: Storage> Store<S> {
///
/// # Errors
///
/// Returns `InvalidArgument` if the storage is not supported.
/// Returns [`StoreError::InvalidArgument`] if the storage is not
/// [supported](Format::is_storage_supported).
pub fn new(storage: S) -> Result<Store<S>, (StoreError, S)> {
let format = match Format::new(&storage) {
None => return Err((StoreError::InvalidArgument, storage)),
Some(x) => x,
};
let mut store = Store { storage, format };
let mut store = Store {
storage,
format,
head: None,
entries: None,
};
if let Err(error) = store.recover() {
return Err((error, store.storage));
}
@@ -207,31 +241,35 @@ impl<S: Storage> Store<S> {
}
/// Iterates over the entries.
pub fn iter<'a>(&'a self) -> StoreResult<StoreIter<'a, S>> {
StoreIter::new(self)
pub fn iter<'a>(&'a self) -> StoreResult<StoreIter<'a>> {
let head = self.head?;
Ok(Box::new(self.entries.as_ref()?.iter().map(
move |&offset| {
let pos = head + offset as Nat;
match self.parse_entry(&mut pos.clone())? {
ParsedEntry::User(Header {
key, length: len, ..
}) => Ok(StoreHandle { key, pos, len }),
_ => Err(StoreError::InvalidStorage),
}
},
)))
}
/// Returns the current capacity in words.
/// Returns the current and total capacity in words.
///
/// The capacity represents the size of what is stored.
pub fn capacity(&self) -> StoreResult<StoreRatio> {
let total = self.format.total_capacity();
let mut used = 0;
let mut pos = self.head()?;
let end = pos + self.format.virt_size();
while pos < end {
let entry_pos = pos;
match self.parse_entry(&mut pos)? {
ParsedEntry::Tail => break,
ParsedEntry::Padding => (),
ParsedEntry::User(_) => used += pos - entry_pos,
_ => return Err(StoreError::InvalidStorage),
}
for handle in self.iter()? {
let handle = handle?;
used += 1 + self.format.bytes_to_words(handle.len);
}
Ok(StoreRatio { used, total })
}
/// Returns the current lifetime in words.
/// Returns the current and total lifetime in words.
///
/// The lifetime represents the age of the storage. The limit is an over-approximation by at
/// most the maximum length of a value (the actual limit depends on the length of the prefix of
@@ -246,18 +284,22 @@ impl<S: Storage> Store<S> {
///
/// # Errors
///
/// Returns `InvalidArgument` in the following circumstances:
/// - There are too many updates.
/// Returns [`StoreError::InvalidArgument`] in the following circumstances:
/// - There are [too many](Format::max_updates) updates.
/// - The updates overlap, i.e. their keys are not disjoint.
/// - The updates are invalid, e.g. key out of bound or value too long.
pub fn transaction(&mut self, updates: &[StoreUpdate]) -> StoreResult<()> {
/// - The updates are invalid, e.g. key [out of bound](Format::max_key) or value [too
/// long](Format::max_value_len).
pub fn transaction<ByteSlice: Borrow<[u8]>>(
&mut self,
updates: &[StoreUpdate<ByteSlice>],
) -> StoreResult<()> {
let count = usize_to_nat(updates.len());
if count == 0 {
return Ok(());
}
if count == 1 {
match updates[0] {
StoreUpdate::Insert { key, ref value } => return self.insert(key, value),
StoreUpdate::Insert { key, ref value } => return self.insert(key, value.borrow()),
StoreUpdate::Remove { key } => return self.remove(key),
}
}
@@ -270,7 +312,9 @@ impl<S: Storage> Store<S> {
self.reserve(self.format.transaction_capacity(updates))?;
// Write the marker entry.
let marker = self.tail()?;
let entry = self.format.build_internal(InternalEntry::Marker { count });
let entry = self
.format
.build_internal(InternalEntry::Marker { count })?;
self.write_slice(marker, &entry)?;
self.init_page(marker, marker)?;
// Write the updates.
@@ -278,7 +322,7 @@ impl<S: Storage> Store<S> {
for update in updates {
let length = match *update {
StoreUpdate::Insert { key, ref value } => {
let entry = self.format.build_user(usize_to_nat(key), value);
let entry = self.format.build_user(usize_to_nat(key), value.borrow())?;
let word_size = self.format.word_size();
let footer = usize_to_nat(entry.len()) / word_size - 1;
self.write_slice(tail, &entry[..(footer * word_size) as usize])?;
@@ -287,7 +331,7 @@ impl<S: Storage> Store<S> {
}
StoreUpdate::Remove { key } => {
let key = usize_to_nat(key);
let remove = self.format.build_internal(InternalEntry::Remove { key });
let remove = self.format.build_internal(InternalEntry::Remove { key })?;
self.write_slice(tail, &remove)?;
0
}
@@ -307,7 +351,9 @@ impl<S: Storage> Store<S> {
if min_key > self.format.max_key() {
return Err(StoreError::InvalidArgument);
}
let clear = self.format.build_internal(InternalEntry::Clear { min_key });
let clear = self
.format
.build_internal(InternalEntry::Clear { min_key })?;
// We always have one word available. We can't use `reserve` because this is internal
// capacity, not user capacity.
while self.immediate_capacity()? < 1 {
@@ -373,7 +419,7 @@ impl<S: Storage> Store<S> {
if key > self.format.max_key() || value_len > self.format.max_value_len() {
return Err(StoreError::InvalidArgument);
}
let entry = self.format.build_user(key, value);
let entry = self.format.build_user(key, value)?;
let entry_len = usize_to_nat(entry.len());
self.reserve(entry_len / self.format.word_size())?;
let tail = self.tail()?;
@@ -381,6 +427,7 @@ impl<S: Storage> Store<S> {
let footer = entry_len / word_size - 1;
self.write_slice(tail, &entry[..(footer * word_size) as usize])?;
self.write_slice(tail + footer, &entry[(footer * word_size) as usize..])?;
self.push_entry(tail)?;
self.insert_init(tail, footer, key)
}
@@ -398,7 +445,8 @@ impl<S: Storage> Store<S> {
/// Removes an entry given a handle.
pub fn remove_handle(&mut self, handle: &StoreHandle) -> StoreResult<()> {
self.check_handle(handle)?;
self.delete_pos(handle.pos, self.format.bytes_to_words(handle.len))
self.delete_pos(handle.pos, self.format.bytes_to_words(handle.len))?;
self.remove_entry(handle.pos)
}
/// Returns the maximum length in bytes of a value.
@@ -406,6 +454,17 @@ impl<S: Storage> Store<S> {
self.format.max_value_len() as usize
}
/// Returns the length of the value of an entry given its handle.
fn get_length(&self, handle: &StoreHandle) -> StoreResult<usize> {
self.check_handle(handle)?;
let mut pos = handle.pos;
match self.parse_entry(&mut pos)? {
ParsedEntry::User(header) => Ok(header.length as usize),
ParsedEntry::Padding => Err(StoreError::InvalidArgument),
_ => Err(StoreError::InvalidStorage),
}
}
/// Returns the value of an entry given its handle.
fn get_value(&self, handle: &StoreHandle) -> StoreResult<Vec<u8>> {
self.check_handle(handle)?;
@@ -437,7 +496,7 @@ impl<S: Storage> Store<S> {
let init_info = self.format.build_init(InitInfo {
cycle: 0,
prefix: 0,
});
})?;
self.storage_write_slice(index, &init_info)
}
@@ -460,7 +519,9 @@ impl<S: Storage> Store<S> {
/// Recovers a possible compaction interrupted while copying the entries.
fn recover_compaction(&mut self) -> StoreResult<()> {
let head_page = self.head()?.page(&self.format);
let head = self.get_extremum_page_head(Ordering::Less)?;
self.head = Some(head);
let head_page = head.page(&self.format);
match self.parse_compact(head_page)? {
WordState::Erased => Ok(()),
WordState::Partial => self.compact(),
@@ -470,14 +531,15 @@ impl<S: Storage> Store<S> {
/// Recover a possible interrupted operation which is not a compaction.
fn recover_operation(&mut self) -> StoreResult<()> {
let mut pos = self.head()?;
self.entries = Some(Vec::new());
let mut pos = self.head?;
let mut prev_pos = pos;
let end = pos + self.format.virt_size();
while pos < end {
let entry_pos = pos;
match self.parse_entry(&mut pos)? {
ParsedEntry::Tail => break,
ParsedEntry::User(_) => (),
ParsedEntry::User(_) => self.push_entry(entry_pos)?,
ParsedEntry::Padding => {
self.wipe_span(entry_pos + 1, pos - entry_pos - 1)?;
}
@@ -610,7 +672,7 @@ impl<S: Storage> Store<S> {
///
/// In particular, the handle has not been compacted.
fn check_handle(&self, handle: &StoreHandle) -> StoreResult<()> {
if handle.pos < self.head()? {
if handle.pos < self.head? {
Err(StoreError::InvalidArgument)
} else {
Ok(())
@@ -640,20 +702,22 @@ impl<S: Storage> Store<S> {
/// Compacts one page.
fn compact(&mut self) -> StoreResult<()> {
let head = self.head()?;
let head = self.head?;
if head.cycle(&self.format) >= self.format.max_page_erases() {
return Err(StoreError::NoLifetime);
}
let tail = max(self.tail()?, head.next_page(&self.format));
let index = self.format.index_compact(head.page(&self.format));
let compact_info = self.format.build_compact(CompactInfo { tail: tail - head });
let compact_info = self
.format
.build_compact(CompactInfo { tail: tail - head })?;
self.storage_write_slice(index, &compact_info)?;
self.compact_copy()
}
/// Continues a compaction after its compact page info has been written.
fn compact_copy(&mut self) -> StoreResult<()> {
let mut head = self.head()?;
let mut head = self.head?;
let page = head.page(&self.format);
let end = head.next_page(&self.format);
let mut tail = match self.parse_compact(page)? {
@@ -667,8 +731,12 @@ impl<S: Storage> Store<S> {
let pos = head;
match self.parse_entry(&mut head)? {
ParsedEntry::Tail => break,
// This can happen if we copy to the next page. We actually reached the tail but we
// read what we just copied.
ParsedEntry::Partial if head > end => break,
ParsedEntry::User(_) => (),
_ => continue,
ParsedEntry::Padding => continue,
_ => return Err(StoreError::InvalidStorage),
};
let length = head - pos;
// We have to copy the slice for 2 reasons:
@@ -676,11 +744,13 @@ impl<S: Storage> Store<S> {
// 2. We can't pass a flash slice to the kernel. This should get fixed with
// https://github.com/tock/tock/issues/1274.
let entry = self.read_slice(pos, length * self.format.word_size());
self.remove_entry(pos)?;
self.write_slice(tail, &entry)?;
self.push_entry(tail)?;
self.init_page(tail, tail + (length - 1))?;
tail += length;
}
let erase = self.format.build_internal(InternalEntry::Erase { page });
let erase = self.format.build_internal(InternalEntry::Erase { page })?;
self.write_slice(tail, &erase)?;
self.init_page(tail, tail)?;
self.compact_erase(tail)
@@ -688,14 +758,31 @@ impl<S: Storage> Store<S> {
/// Continues a compaction after its erase entry has been written.
fn compact_erase(&mut self, erase: Position) -> StoreResult<()> {
let page = match self.parse_entry(&mut erase.clone())? {
// Read the page to erase from the erase entry.
let mut page = match self.parse_entry(&mut erase.clone())? {
ParsedEntry::Internal(InternalEntry::Erase { page }) => page,
_ => return Err(StoreError::InvalidStorage),
};
// Erase the page.
self.storage_erase_page(page)?;
let head = self.head()?;
// Update the head.
page = (page + 1) % self.format.num_pages();
let init = match self.parse_init(page)? {
WordState::Valid(x) => x,
_ => return Err(StoreError::InvalidStorage),
};
let head = self.format.page_head(init, page);
if let Some(entries) = &mut self.entries {
let head_offset = u16::try_from(head - self.head?).ok()?;
for entry in entries {
*entry = entry.checked_sub(head_offset)?;
}
}
self.head = Some(head);
// Wipe the overlapping entry from the erased page.
let pos = head.page_begin(&self.format);
self.wipe_span(pos, head - pos)?;
// Mark the erase entry as done.
self.set_padding(erase)?;
Ok(())
}
@@ -704,13 +791,13 @@ impl<S: Storage> Store<S> {
fn transaction_apply(&mut self, sorted_keys: &[Nat], marker: Position) -> StoreResult<()> {
self.delete_keys(&sorted_keys, marker)?;
self.set_padding(marker)?;
let end = self.head()? + self.format.virt_size();
let end = self.head? + self.format.virt_size();
let mut pos = marker + 1;
while pos < end {
let entry_pos = pos;
match self.parse_entry(&mut pos)? {
ParsedEntry::Tail => break,
ParsedEntry::User(_) => (),
ParsedEntry::User(_) => self.push_entry(entry_pos)?,
ParsedEntry::Internal(InternalEntry::Remove { .. }) => {
self.set_padding(entry_pos)?
}
@@ -727,37 +814,38 @@ impl<S: Storage> Store<S> {
ParsedEntry::Internal(InternalEntry::Clear { min_key }) => min_key,
_ => return Err(StoreError::InvalidStorage),
};
let mut pos = self.head()?;
let end = pos + self.format.virt_size();
while pos < end {
let entry_pos = pos;
match self.parse_entry(&mut pos)? {
ParsedEntry::Internal(InternalEntry::Clear { .. }) if entry_pos == clear => break,
ParsedEntry::User(header) if header.key >= min_key => {
self.delete_pos(entry_pos, pos - entry_pos - 1)?;
}
ParsedEntry::Padding | ParsedEntry::User(_) => (),
_ => return Err(StoreError::InvalidStorage),
}
}
self.delete_if(clear, |key| key >= min_key)?;
self.set_padding(clear)?;
Ok(())
}
/// Deletes a set of entries up to a certain position.
fn delete_keys(&mut self, sorted_keys: &[Nat], end: Position) -> StoreResult<()> {
let mut pos = self.head()?;
while pos < end {
let entry_pos = pos;
match self.parse_entry(&mut pos)? {
ParsedEntry::Tail => break,
ParsedEntry::User(header) if sorted_keys.binary_search(&header.key).is_ok() => {
self.delete_pos(entry_pos, pos - entry_pos - 1)?;
}
ParsedEntry::Padding | ParsedEntry::User(_) => (),
self.delete_if(end, |key| sorted_keys.binary_search(&key).is_ok())
}
/// Deletes entries matching a predicate up to a certain position.
fn delete_if(&mut self, end: Position, delete: impl Fn(Nat) -> bool) -> StoreResult<()> {
let head = self.head?;
let mut entries = self.entries.take()?;
let mut i = 0;
while i < entries.len() {
let pos = head + entries[i] as Nat;
if pos >= end {
break;
}
let header = match self.parse_entry(&mut pos.clone())? {
ParsedEntry::User(x) => x,
_ => return Err(StoreError::InvalidStorage),
};
if delete(header.key) {
self.delete_pos(pos, self.format.bytes_to_words(header.length))?;
entries.swap_remove(i);
} else {
i += 1;
}
}
self.entries = Some(entries);
Ok(())
}
@@ -792,7 +880,7 @@ impl<S: Storage> Store<S> {
let init_info = self.format.build_init(InitInfo {
cycle: new_first.cycle(&self.format),
prefix: new_first.word(&self.format),
});
})?;
self.storage_write_slice(index, &init_info)?;
Ok(())
}
@@ -800,7 +888,7 @@ impl<S: Storage> Store<S> {
/// Sets the padding bit of a user header.
fn set_padding(&mut self, pos: Position) -> StoreResult<()> {
let mut word = Word::from_slice(self.read_word(pos));
self.format.set_padding(&mut word);
self.format.set_padding(&mut word)?;
self.write_slice(pos, &word.as_slice())?;
Ok(())
}
@@ -836,19 +924,20 @@ impl<S: Storage> Store<S> {
}
}
// There is always at least one initialized page.
best.ok_or(StoreError::InvalidStorage)
Ok(best?)
}
/// Returns the number of words that can be written without compaction.
fn immediate_capacity(&self) -> StoreResult<Nat> {
let tail = self.tail()?;
let end = self.head()? + self.format.virt_size();
let end = self.head? + self.format.virt_size();
Ok(end.get().saturating_sub(tail.get()))
}
/// Returns the position of the first word in the store.
#[cfg(feature = "std")]
pub(crate) fn head(&self) -> StoreResult<Position> {
self.get_extremum_page_head(Ordering::Less)
Ok(self.head?)
}
/// Returns one past the position of the last word in the store.
@@ -863,6 +952,30 @@ impl<S: Storage> Store<S> {
Ok(pos)
}
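    /// Records a user entry in the entries cache, keyed by its word offset from the head.
    ///
    /// Does nothing if the cache is unavailable.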
fn push_entry(&mut self, pos: Position) -> StoreResult<()> {
let entries = match &mut self.entries {
None => return Ok(()),
Some(x) => x,
};
let head = self.head?;
let offset = u16::try_from(pos - head).ok()?;
debug_assert!(!entries.contains(&offset));
entries.push(offset);
Ok(())
}
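    /// Removes a user entry from the entries cache given its position.
    ///
    /// Does nothing if the cache is unavailable.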
fn remove_entry(&mut self, pos: Position) -> StoreResult<()> {
let entries = match &mut self.entries {
None => return Ok(()),
Some(x) => x,
};
let head = self.head?;
let offset = u16::try_from(pos - head).ok()?;
let i = entries.iter().position(|x| *x == offset)?;
entries.swap_remove(i);
Ok(())
}
/// Parses the entry at a given position.
///
/// The position is updated to point to the next entry.
@@ -1061,7 +1174,7 @@ impl Store<BufferStorage> {
/// If the value has been partially compacted, only return the non-compacted part. Returns an
/// empty value if it has been fully compacted.
pub fn inspect_value(&self, handle: &StoreHandle) -> Vec<u8> {
let head = self.head().unwrap();
let head = self.head.unwrap();
let length = self.format.bytes_to_words(handle.len);
if head <= handle.pos {
// The value has not been compacted.
@@ -1087,20 +1200,21 @@ impl Store<BufferStorage> {
store
.iter()
.unwrap()
.map(|x| x.unwrap())
.filter(|x| delete_key(x.key as usize))
.collect::<Vec<_>>()
.filter(|x| x.is_err() || delete_key(x.as_ref().unwrap().key as usize))
.collect::<Result<Vec<_>, _>>()
};
match *operation {
StoreOperation::Transaction { ref updates } => {
let keys: HashSet<usize> = updates.iter().map(|x| x.key()).collect();
let deleted = deleted(self, &|key| keys.contains(&key));
(deleted, self.transaction(updates))
}
StoreOperation::Clear { min_key } => {
let deleted = deleted(self, &|key| key >= min_key);
(deleted, self.clear(min_key))
match deleted(self, &|key| keys.contains(&key)) {
Ok(deleted) => (deleted, self.transaction(updates)),
Err(error) => (Vec::new(), Err(error)),
}
}
StoreOperation::Clear { min_key } => match deleted(self, &|key| key >= min_key) {
Ok(deleted) => (deleted, self.clear(min_key)),
Err(error) => (Vec::new(), Err(error)),
},
StoreOperation::Prepare { length } => (Vec::new(), self.prepare(length)),
}
}
@@ -1110,10 +1224,12 @@ impl Store<BufferStorage> {
let format = Format::new(storage).unwrap();
// Write the init info of the first page.
let mut index = format.index_init(0);
let init_info = format.build_init(InitInfo {
cycle: usize_to_nat(cycle),
prefix: 0,
});
let init_info = format
.build_init(InitInfo {
cycle: usize_to_nat(cycle),
prefix: 0,
})
.unwrap();
storage.write_slice(index, &init_info).unwrap();
        // Pad the first word of the page. This makes the store look used; otherwise we may confuse
// it with a partially initialized store.
@@ -1165,61 +1281,6 @@ enum ParsedEntry {
Tail,
}
/// Iterates over the entries of a store.
pub struct StoreIter<'a, S: Storage> {
/// The store being iterated.
store: &'a Store<S>,
/// The position of the next entry.
pos: Position,
/// Iteration stops when reaching this position.
end: Position,
}
impl<'a, S: Storage> StoreIter<'a, S> {
/// Creates an iterator over the entries of a store.
fn new(store: &'a Store<S>) -> StoreResult<StoreIter<'a, S>> {
let pos = store.head()?;
let end = pos + store.format.virt_size();
Ok(StoreIter { store, pos, end })
}
}
impl<'a, S: Storage> StoreIter<'a, S> {
/// Returns the next entry and advances the iterator.
fn transposed_next(&mut self) -> StoreResult<Option<StoreHandle>> {
if self.pos >= self.end {
return Ok(None);
}
while self.pos < self.end {
let entry_pos = self.pos;
match self.store.parse_entry(&mut self.pos)? {
ParsedEntry::Tail => break,
ParsedEntry::Padding => (),
ParsedEntry::User(header) => {
return Ok(Some(StoreHandle {
key: header.key,
pos: entry_pos,
len: header.length,
}))
}
_ => return Err(StoreError::InvalidStorage),
}
}
self.pos = self.end;
Ok(None)
}
}
impl<'a, S: Storage> Iterator for StoreIter<'a, S> {
type Item = StoreResult<StoreHandle>;
fn next(&mut self) -> Option<StoreResult<StoreHandle>> {
self.transposed_next().transpose()
}
}
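Since `next` yields `StoreResult<StoreHandle>`, iteration surfaces storage errors instead of panicking, which is exactly what the reworked `deleted` helper above relies on; a minimal usage sketch:

// Collect the handles of all live entries, propagating any storage error.
let handles: Vec<StoreHandle> = store.iter()?.collect::<Result<Vec<_>, _>>()?;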
/// Returns whether two slices differ.
///
/// Returns an error if `target` has a bit set to one for which `source` is set to zero.
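The function body lies outside the hunk below; a minimal sketch consistent with this contract and with the `is_write_needed_ok` test added further down (flash writes may only clear bits, so a zero-to-one transition is an error):

fn is_write_needed(source: &[u8], target: &[u8]) -> StoreResult<bool> {
    for (&source, &target) in source.iter().zip(target.iter()) {
        // `target` must not set a bit that is already zero in `source`.
        if target & !source != 0 {
            return Err(StoreError::InvalidStorage);
        }
        // Any other difference means the write must actually happen.
        if source != target {
            return Ok(true);
        }
    }
    Ok(false)
}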
@@ -1239,71 +1300,15 @@ fn is_write_needed(source: &[u8], target: &[u8]) -> StoreResult<bool> {
#[cfg(test)]
mod tests {
use super::*;
use crate::BufferOptions;
#[derive(Clone)]
struct Config {
word_size: usize,
page_size: usize,
num_pages: usize,
max_word_writes: usize,
max_page_erases: usize,
}
impl Config {
fn new_driver(&self) -> StoreDriverOff {
let options = BufferOptions {
word_size: self.word_size,
page_size: self.page_size,
max_word_writes: self.max_word_writes,
max_page_erases: self.max_page_erases,
strict_mode: true,
};
StoreDriverOff::new(options, self.num_pages)
}
}
const MINIMAL: Config = Config {
word_size: 4,
page_size: 64,
num_pages: 5,
max_word_writes: 2,
max_page_erases: 9,
};
const NORDIC: Config = Config {
word_size: 4,
page_size: 0x1000,
num_pages: 20,
max_word_writes: 2,
max_page_erases: 10000,
};
const TITAN: Config = Config {
word_size: 4,
page_size: 0x800,
num_pages: 10,
max_word_writes: 2,
max_page_erases: 10000,
};
use crate::test::MINIMAL;
#[test]
fn nordic_capacity() {
let driver = NORDIC.new_driver().power_on().unwrap();
assert_eq!(driver.model().capacity().total, 19123);
}
#[test]
fn titan_capacity() {
let driver = TITAN.new_driver().power_on().unwrap();
assert_eq!(driver.model().capacity().total, 4315);
}
#[test]
fn minimal_virt_page_size() {
// Make sure a virtual page has 14 words. We use this property in the other tests below to
// know whether entries are spanning, starting, and ending pages.
assert_eq!(MINIMAL.new_driver().model().format().virt_page_size(), 14);
fn is_write_needed_ok() {
assert_eq!(is_write_needed(&[], &[]), Ok(false));
assert_eq!(is_write_needed(&[0], &[0]), Ok(false));
assert_eq!(is_write_needed(&[0], &[1]), Err(StoreError::InvalidStorage));
assert_eq!(is_write_needed(&[1], &[0]), Ok(true));
assert_eq!(is_write_needed(&[1], &[1]), Ok(false));
}
#[test]
@@ -1438,4 +1443,22 @@ mod tests {
driver = driver.power_off().power_on().unwrap();
driver.check().unwrap();
}
#[test]
fn entries_ok() {
let mut driver = MINIMAL.new_driver().power_on().unwrap();
// The store is initially empty.
assert!(driver.store().entries.as_ref().unwrap().is_empty());
// Inserted elements are added.
const LEN: usize = 6;
driver.insert(0, &[0x38; (LEN - 1) * 4]).unwrap();
driver.insert(1, &[0x5c; 4]).unwrap();
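// Offset arithmetic (assuming a one-word entry header): the first value uses
// 5 data words + 1 header word = LEN words, so the second entry begins at
// word offset LEN.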
assert_eq!(driver.store().entries, Some(vec![0, LEN as u16]));
// Deleted elements are removed.
driver.remove(0).unwrap();
assert_eq!(driver.store().entries, Some(vec![LEN as u16]));
}
}

View File

@@ -0,0 +1,84 @@
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::{BufferOptions, BufferStorage, Store, StoreDriverOff};
#[derive(Clone)]
pub struct Config {
word_size: usize,
page_size: usize,
num_pages: usize,
max_word_writes: usize,
max_page_erases: usize,
}
impl Config {
pub fn new_driver(&self) -> StoreDriverOff {
let options = BufferOptions {
word_size: self.word_size,
page_size: self.page_size,
max_word_writes: self.max_word_writes,
max_page_erases: self.max_page_erases,
strict_mode: true,
};
StoreDriverOff::new(options, self.num_pages)
}
pub fn new_store(&self) -> Store<BufferStorage> {
self.new_driver().power_on().unwrap().extract_store()
}
}
pub const MINIMAL: Config = Config {
word_size: 4,
page_size: 64,
num_pages: 5,
max_word_writes: 2,
max_page_erases: 9,
};
const NORDIC: Config = Config {
word_size: 4,
page_size: 0x1000,
num_pages: 20,
max_word_writes: 2,
max_page_erases: 10000,
};
const TITAN: Config = Config {
word_size: 4,
page_size: 0x800,
num_pages: 10,
max_word_writes: 2,
max_page_erases: 10000,
};
#[test]
fn nordic_capacity() {
let driver = NORDIC.new_driver().power_on().unwrap();
assert_eq!(driver.model().capacity().total, 19123);
}
#[test]
fn titan_capacity() {
let driver = TITAN.new_driver().power_on().unwrap();
assert_eq!(driver.model().capacity().total, 4315);
}
#[test]
fn minimal_virt_page_size() {
// Make sure a virtual page has 14 words. The store tests rely on this property to
// know whether entries span, start, or end a page.
assert_eq!(MINIMAL.new_driver().model().format().virt_page_size(), 14);
}
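For `MINIMAL`, the arithmetic behind the 14-word figure works out as follows, assuming the format reserves two metadata words per page:

// 64-byte page / 4-byte word = 16 words; 16 - 2 reserved words = 14 virtual words.
assert_eq!(64 / 4 - 2, 14);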