Merge remote-tracking branch 'origin/develop' into bugfix

Author: Jean-Michel Picod
Date: 2021-04-13 15:04:14 +02:00
58 changed files with 9744 additions and 3432 deletions

View File

@@ -42,12 +42,6 @@ jobs:
          command: check
          args: --target thumbv7em-none-eabi --release --features with_ctap1
-     - name: Check OpenSK with_ctap2_1
-       uses: actions-rs/cargo@v1
-       with:
-         command: check
-         args: --target thumbv7em-none-eabi --release --features with_ctap2_1
      - name: Check OpenSK debug_ctap
        uses: actions-rs/cargo@v1
        with:
@@ -78,17 +72,11 @@ jobs:
          command: check
          args: --target thumbv7em-none-eabi --release --features debug_ctap,with_ctap1
-     - name: Check OpenSK debug_ctap,with_ctap2_1
+     - name: Check OpenSK debug_ctap,with_ctap1,panic_console,debug_allocations,verbose
        uses: actions-rs/cargo@v1
        with:
          command: check
-         args: --target thumbv7em-none-eabi --release --features debug_ctap,with_ctap2_1
+         args: --target thumbv7em-none-eabi --release --features debug_ctap,with_ctap1,panic_console,debug_allocations,verbose
-     - name: Check OpenSK debug_ctap,with_ctap1,with_ctap2_1,panic_console,debug_allocations,verbose
-       uses: actions-rs/cargo@v1
-       with:
-         command: check
-         args: --target thumbv7em-none-eabi --release --features debug_ctap,with_ctap1,with_ctap2_1,panic_console,debug_allocations,verbose
      - name: Check examples
        uses: actions-rs/cargo@v1

View File

@@ -33,10 +33,10 @@ jobs:
        uses: actions-rs/cargo@v1
        with:
          command: test
-         args: --manifest-path libraries/crypto/Cargo.toml --release --features std,derive_debug
+         args: --manifest-path libraries/crypto/Cargo.toml --release --features std
      - name: Unit testing of crypto library (debug mode)
        uses: actions-rs/cargo@v1
        with:
          command: test
-         args: --manifest-path libraries/crypto/Cargo.toml --features std,derive_debug
+         args: --manifest-path libraries/crypto/Cargo.toml --features std

View File

@@ -51,27 +51,3 @@ jobs:
          command: test
          args: --features std,with_ctap1
-     - name: Unit testing of CTAP2 (release mode + CTAP2.1)
-       uses: actions-rs/cargo@v1
-       with:
-         command: test
-         args: --release --features std,with_ctap2_1
-     - name: Unit testing of CTAP2 (debug mode + CTAP2.1)
-       uses: actions-rs/cargo@v1
-       with:
-         command: test
-         args: --features std,with_ctap2_1
-     - name: Unit testing of CTAP2 (release mode + CTAP1 + CTAP2.1)
-       uses: actions-rs/cargo@v1
-       with:
-         command: test
-         args: --release --features std,with_ctap1,with_ctap2_1
-     - name: Unit testing of CTAP2 (debug mode + CTAP1 + CTAP2.1)
-       uses: actions-rs/cargo@v1
-       with:
-         command: test
-         args: --features std,with_ctap1,with_ctap2_1

View File

@@ -13,6 +13,11 @@ jobs:
    steps:
      - uses: actions/checkout@v2
+     - uses: actions-rs/toolchain@v1
+       with:
+         toolchain: nightly
+         override: true
      - name: Unit testing of Persistent store library (release mode)
        uses: actions-rs/cargo@v1
        with:

View File

@@ -22,12 +22,11 @@ subtle = { version = "2.2", default-features = false, features = ["nightly"] }
  [features]
  debug_allocations = ["lang_items/debug_allocations"]
- debug_ctap = ["crypto/derive_debug", "libtock_drivers/debug_ctap"]
+ debug_ctap = ["libtock_drivers/debug_ctap"]
  panic_console = ["lang_items/panic_console"]
- std = ["cbor/std", "crypto/std", "crypto/derive_debug", "lang_items/std", "persistent_store/std"]
+ std = ["cbor/std", "crypto/std", "lang_items/std", "persistent_store/std"]
  verbose = ["debug_ctap", "libtock_drivers/verbose_usb"]
  with_ctap1 = ["crypto/with_ctap1"]
- with_ctap2_1 = []
  with_nfc = ["libtock_drivers/with_nfc"]

  [dev-dependencies]

View File

@@ -94,33 +94,19 @@ If you build your own security key, depending on the hardware you use, there are
  a few things you can personalize:
  1. If you have multiple buttons, choose the buttons responsible for user
-    presence in `main.rs`.
+    presence in `src/main.rs`.
- 2. Decide whether you want to use batch attestation. There is a boolean flag in
-    `ctap/mod.rs`. It is mandatory for U2F, and you can create your own
-    self-signed certificate. The flag is used for FIDO2 and has some privacy
-    implications. Please check
-    [WebAuthn](https://www.w3.org/TR/webauthn/#attestation) for more
-    information.
- 3. Decide whether you want to use signature counters. Currently, only global
-    signature counters are implemented, as they are the default option for U2F.
-    The flag in `ctap/mod.rs` only turns them off for FIDO2. The most privacy
-    preserving solution is individual or no signature counters. Again, please
-    check [WebAuthn](https://www.w3.org/TR/webauthn/#signature-counter) for
-    documentation.
- 4. Depending on your available flash storage, choose an appropriate maximum
-    number of supported residential keys and number of pages in
-    `ctap/storage.rs`.
- 5. Change the default level for the credProtect extension in `ctap/mod.rs`.
-    When changing the default, resident credentials become undiscoverable without
-    user verification. This helps privacy, but can make usage less comfortable
-    for credentials that need less protection.
- 6. Increase the default minimum length for PINs in `ctap/storage.rs`.
-    The current minimum is 4. Values from 4 to 63 are allowed. Requiring longer
-    PINs can help establish trust between users and relying parties. It makes
-    user verification harder to break, but less convenient.
-    NIST recommends at least 6-digit PINs in section 5.1.9.1:
-    https://pages.nist.gov/800-63-3/sp800-63b.html
-    You can add relying parties to the list of readers of the minimum PIN length.
+ 1. If you have colored LEDs, like different blinking patterns and want to play
+    around with the code in `src/main.rs` more, take a look at e.g. `wink_leds`.
+ 1. You find more options and documentation in `src/ctap/customization.rs`,
+    including:
+    - The default level for the credProtect extension.
+    - The default minimum PIN length, and which relying parties can set it.
+    - Whether you want to enforce alwaysUv.
+    - Settings for enterprise attestation.
+    - The maximum PIN retries.
+    - Whether you want to use batch attestation.
+    - Whether you want to use signature counters.
+    - Various constants to adapt to different hardware.
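To make the customization list above concrete, here is a small, hypothetical sketch (not part of this commit) of what a constant in `src/ctap/customization.rs` and its invariant test could look like. The names are illustrative only; the allowed PIN range of 4 to 63 characters and the NIST recommendation of at least 6 digits come from the README text above, and `deploy.py` runs such invariant tests via `cargo test --features=std --lib customization`.

```rust
// Hypothetical example: the constant name below is illustrative, not taken
// from the actual src/ctap/customization.rs.
pub const DEFAULT_MIN_PIN_LENGTH: u8 = 6; // NIST suggests at least 6 digits.

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_min_pin_length_invariant() {
        // CTAP PINs must be between 4 and 63 characters, so a customized
        // default has to stay inside that range.
        assert!(DEFAULT_MIN_PIN_LENGTH >= 4);
        assert!(DEFAULT_MIN_PIN_LENGTH <= 63);
    }
}
```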
  ### 3D printed enclosure

View File

@@ -352,6 +352,7 @@ class OpenSKInstaller:
  def build_opensk(self):
    info("Building OpenSK application")
+   self._check_invariants()
    self._build_app_or_example(is_example=False)

  def _build_app_or_example(self, is_example):
@@ -390,6 +391,11 @@ class OpenSKInstaller:
    # Create a TAB file
    self.create_tab_file({props.arch: app_path})

+ def _check_invariants(self):
+   print("Testing invariants in customization.rs...")
+   self.checked_command_output(
+       ["cargo", "test", "--features=std", "--lib", "customization"])

  def generate_crypto_materials(self, force_regenerate):
    has_error = subprocess.call([
        os.path.join("tools", "gen_key_materials.sh"),
@@ -881,14 +887,6 @@ if __name__ == "__main__":
help=("Compiles the OpenSK application without backward compatible " help=("Compiles the OpenSK application without backward compatible "
"support for U2F/CTAP1 protocol."), "support for U2F/CTAP1 protocol."),
) )
main_parser.add_argument(
"--ctap2.1",
action="append_const",
const="with_ctap2_1",
dest="features",
help=("Compiles the OpenSK application with backward compatible "
"support for CTAP2.1 protocol."),
)
main_parser.add_argument( main_parser.add_argument(
"--nfc", "--nfc",
action="append_const", action="append_const",
@@ -947,7 +945,16 @@ if __name__ == "__main__":
dest="application", dest="application",
action="store_const", action="store_const",
const="store_latency", const="store_latency",
help=("Compiles and installs the store_latency example.")) help=("Compiles and installs the store_latency example which prints "
"latency statistics of the persistent store library."))
apps_group.add_argument(
"--erase_storage",
dest="application",
action="store_const",
const="erase_storage",
help=("Compiles and installs the erase_storage example which erases "
"the storage. During operation the dongle red light is on. Once "
"the operation is completed the dongle green light is on."))
apps_group.add_argument( apps_group.add_argument(
"--panic_test", "--panic_test",
dest="application", dest="application",

examples/erase_storage.rs (new file, 53 lines)
View File

@@ -0,0 +1,53 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![no_std]
extern crate lang_items;
use core::fmt::Write;
use ctap2::embedded_flash::new_storage;
use libtock_drivers::console::Console;
use libtock_drivers::led;
use libtock_drivers::result::FlexUnwrap;
use persistent_store::{Storage, StorageIndex};
fn is_page_erased(storage: &dyn Storage, page: usize) -> bool {
let index = StorageIndex { page, byte: 0 };
let length = storage.page_size();
storage
.read_slice(index, length)
.unwrap()
.iter()
.all(|&x| x == 0xff)
}
fn main() {
led::get(1).flex_unwrap().on().flex_unwrap(); // red on dongle
const NUM_PAGES: usize = 20; // should be at least ctap::storage::NUM_PAGES
let mut storage = new_storage(NUM_PAGES);
writeln!(Console::new(), "Erase {} pages of storage:", NUM_PAGES).unwrap();
for page in 0..NUM_PAGES {
write!(Console::new(), "- Page {} ", page).unwrap();
if is_page_erased(&storage, page) {
writeln!(Console::new(), "skipped (was already erased).").unwrap();
} else {
storage.erase_page(page).unwrap();
writeln!(Console::new(), "erased.").unwrap();
}
}
writeln!(Console::new(), "Done.").unwrap();
led::get(1).flex_unwrap().off().flex_unwrap();
led::get(0).flex_unwrap().on().flex_unwrap(); // green on dongle
}

View File

@@ -124,15 +124,15 @@ fn main() {
compute_latency(&timer, 20, 1, 50); compute_latency(&timer, 20, 1, 50);
// Those overwritten 1 word entries simulate counters. // Those overwritten 1 word entries simulate counters.
compute_latency(&timer, 3, 0, 1); compute_latency(&timer, 3, 0, 1);
compute_latency(&timer, 6, 0, 1); compute_latency(&timer, 20, 0, 1);
writeln!(Console::new(), "\nDone.").unwrap(); writeln!(Console::new(), "\nDone.").unwrap();
// Results on nrf52840dk: // Results on nrf52840dk:
// //
// | Pages | Overwrite | Length | Boot | Compaction | Insert | Remove | // | Pages | Overwrite | Length | Boot | Compaction | Insert | Remove |
// | ----- | --------- | --------- | ------- | ---------- | ------ | ------- | // | ----- | --------- | --------- | ------- | ---------- | ------ | ------ |
// | 3 | no | 50 words | 2.0 ms | 132.5 ms | 4.8 ms | 1.2 ms | // | 3 | no | 50 words | 2.0 ms | 132.8 ms | 4.3 ms | 1.2 ms |
// | 20 | no | 50 words | 7.4 ms | 135.5 ms | 10.2 ms | 3.9 ms | // | 20 | no | 50 words | 7.8 ms | 135.7 ms | 9.9 ms | 4.0 ms |
// | 3 | yes | 1 word | 21.9 ms | 94.5 ms | 12.4 ms | 5.9 ms | // | 3 | yes | 1 word | 19.6 ms | 90.8 ms | 4.7 ms | 2.3 ms |
// | 6 | yes | 1 word | 55.2 ms | 100.8 ms | 24.8 ms | 12.1 ms | // | 20 | yes | 1 word | 183.3 ms | 90.9 ms | 4.8 ms | 2.3 ms |
} }

View File

@@ -13,14 +13,14 @@
  // limitations under the License.

  use crate::values::{KeyType, Value};
- use alloc::collections::btree_map;
+ use alloc::vec;
  use core::cmp::Ordering;
  use core::iter::Peekable;

- /// This macro generates code to extract multiple values from a `BTreeMap<KeyType, Value>` at once
- /// in an optimized manner, consuming the input map.
+ /// This macro generates code to extract multiple values from a `Vec<(KeyType, Value)>` at once
+ /// in an optimized manner, consuming the input vector.
  ///
- /// It takes as input a `BTreeMap` as well as a list of identifiers and keys, and generates code
+ /// It takes as input a `Vec` as well as a list of identifiers and keys, and generates code
  /// that assigns the corresponding values to new variables using the given identifiers. Each of
  /// these variables has type `Option<Value>`, to account for the case where keys aren't found.
  ///
@@ -32,16 +32,14 @@ use core::iter::Peekable;
  /// the keys are indeed sorted. This macro is therefore **not suitable for dynamic keys** that can
  /// change at runtime.
  ///
- /// Semantically, provided that the keys are sorted as specified above, the following two snippets
- /// of code are equivalent, but the `destructure_cbor_map!` version is more optimized, as it doesn't
- /// re-balance the `BTreeMap` for each key, contrary to the `BTreeMap::remove` operations.
+ /// Example usage:
  ///
  /// ```rust
  /// # extern crate alloc;
  /// # use cbor::destructure_cbor_map;
  /// #
  /// # fn main() {
- /// # let map = alloc::collections::BTreeMap::new();
+ /// # let map = alloc::vec::Vec::new();
  /// destructure_cbor_map! {
  ///     let {
  ///         1 => x,
@@ -50,17 +48,6 @@ use core::iter::Peekable;
  ///     }
  /// # }
  /// ```
- ///
- /// ```rust
- /// # extern crate alloc;
- /// #
- /// # fn main() {
- /// # let mut map = alloc::collections::BTreeMap::<cbor::KeyType, _>::new();
- /// use cbor::values::IntoCborKey;
- /// let x: Option<cbor::Value> = map.remove(&1.into_cbor_key());
- /// let y: Option<cbor::Value> = map.remove(&"key".into_cbor_key());
- /// # }
- /// ```
  #[macro_export]
  macro_rules! destructure_cbor_map {
      ( let { $( $key:expr => $variable:ident, )+ } = $map:expr; ) => {
@@ -100,7 +87,7 @@ macro_rules! destructure_cbor_map {
  /// would be inlined for every use case. As of June 2020, this saves ~40KB of binary size for the
  /// CTAP2 application of OpenSK.
  pub fn destructure_cbor_map_peek_value(
-     it: &mut Peekable<btree_map::IntoIter<KeyType, Value>>,
+     it: &mut Peekable<vec::IntoIter<(KeyType, Value)>>,
      needle: KeyType,
  ) -> Option<Value> {
      loop {
@@ -145,6 +132,23 @@ macro_rules! assert_sorted_keys {
      };
  }

+ /// Creates a CBOR Value of type Map with the specified key-value pairs.
+ ///
+ /// Keys and values are expressions and converted into CBOR Keys and Values.
+ /// The syntax for these pairs is `key_expression => value_expression,`.
+ /// Duplicate keys will lead to invalid CBOR, i.e. writing these values fails.
+ /// Keys do not have to be sorted.
+ ///
+ /// Example usage:
+ ///
+ /// ```rust
+ /// # extern crate alloc;
+ /// # use cbor::cbor_map;
+ /// let map = cbor_map! {
+ ///     0x01 => false,
+ ///     "02" => -3,
+ /// };
+ /// ```
  #[macro_export]
  macro_rules! cbor_map {
      // trailing comma case
@@ -157,15 +161,35 @@ macro_rules! cbor_map {
              // The import is unused if the list is empty.
              #[allow(unused_imports)]
              use $crate::values::{IntoCborKey, IntoCborValue};
-             let mut _map = ::alloc::collections::BTreeMap::new();
+             let mut _map = ::alloc::vec::Vec::new();
              $(
-                 _map.insert($key.into_cbor_key(), $value.into_cbor_value());
+                 _map.push(($key.into_cbor_key(), $value.into_cbor_value()));
              )*
              $crate::values::Value::Map(_map)
          }
      };
  }

+ /// Creates a CBOR Value of type Map with key-value pairs where values can be Options.
+ ///
+ /// Keys and values are expressions and converted into CBOR Keys and Value Options.
+ /// The map entry is included iff the Value is not an Option or Option is Some.
+ /// The syntax for these pairs is `key_expression => value_expression,`.
+ /// Duplicate keys will lead to invalid CBOR, i.e. writing these values fails.
+ /// Keys do not have to be sorted.
+ ///
+ /// Example usage:
+ ///
+ /// ```rust
+ /// # extern crate alloc;
+ /// # use cbor::cbor_map_options;
+ /// let missing_value: Option<bool> = None;
+ /// let map = cbor_map_options! {
+ ///     0x01 => Some(false),
+ ///     "02" => -3,
+ ///     "not in map" => missing_value,
+ /// };
+ /// ```
  #[macro_export]
  macro_rules! cbor_map_options {
      // trailing comma case
@@ -178,12 +202,12 @@ macro_rules! cbor_map_options {
              // The import is unused if the list is empty.
              #[allow(unused_imports)]
              use $crate::values::{IntoCborKey, IntoCborValueOption};
-             let mut _map = ::alloc::collections::BTreeMap::<_, $crate::values::Value>::new();
+             let mut _map = ::alloc::vec::Vec::<(_, $crate::values::Value)>::new();
              $(
                  {
                      let opt: Option<$crate::values::Value> = $value.into_cbor_value_option();
                      if let Some(val) = opt {
-                         _map.insert($key.into_cbor_key(), val);
+                         _map.push(($key.into_cbor_key(), val));
                      }
                  }
              )*
@@ -192,13 +216,25 @@ macro_rules! cbor_map_options {
      };
  }

+ /// Creates a CBOR Value of type Map from a Vec<(KeyType, Value)>.
  #[macro_export]
- macro_rules! cbor_map_btree {
-     ( $tree:expr ) => {
-         $crate::values::Value::Map($tree)
-     };
+ macro_rules! cbor_map_collection {
+     ( $tree:expr ) => {{
+         $crate::values::Value::from($tree)
+     }};
  }
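As a side note (not part of this commit), a minimal sketch of how the renamed `cbor_map_collection!` macro can be used from outside the crate; it mirrors the `test_cbor_map_collection_foo` test further down, and the wrapper function is illustrative only.

```rust
extern crate alloc;

use alloc::vec;
use cbor::cbor_map_collection;
use cbor::values::{KeyType, Value};

/// Illustrative helper: wraps an already collected vector of pairs into a CBOR map.
fn map_from_entries() -> Value {
    let entries = vec![(KeyType::Unsigned(2), Value::KeyValue(KeyType::Unsigned(3)))];
    cbor_map_collection!(entries)
}
```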
+ /// Creates a CBOR Value of type Array with the given elements.
+ ///
+ /// Elements are expressions and converted into CBOR Values. Elements are comma-separated.
+ ///
+ /// Example usage:
+ ///
+ /// ```rust
+ /// # extern crate alloc;
+ /// # use cbor::cbor_array;
+ /// let array = cbor_array![1, "2"];
+ /// ```
  #[macro_export]
  macro_rules! cbor_array {
      // trailing comma case
@@ -216,6 +252,7 @@ macro_rules! cbor_array {
      };
  }

+ /// Creates a CBOR Value of type Array from a Vec<Value>.
  #[macro_export]
  macro_rules! cbor_array_vec {
      ( $vec:expr ) => {{
@@ -329,7 +366,6 @@ macro_rules! cbor_key_bytes {
  #[cfg(test)]
  mod test {
      use super::super::values::{KeyType, SimpleValue, Value};
-     use alloc::collections::BTreeMap;

      #[test]
      fn test_cbor_simple_values() {
@@ -421,7 +457,7 @@ mod test {
              Value::KeyValue(KeyType::Unsigned(0)),
              Value::KeyValue(KeyType::Unsigned(1)),
          ]),
-         Value::Map(BTreeMap::new()),
+         Value::Map(Vec::new()),
          Value::Map(
              [(KeyType::Unsigned(2), Value::KeyValue(KeyType::Unsigned(3)))]
                  .iter()
@@ -518,7 +554,7 @@ mod test {
                  Value::KeyValue(KeyType::Unsigned(1)),
              ]),
          ),
-         (KeyType::Unsigned(9), Value::Map(BTreeMap::new())),
+         (KeyType::Unsigned(9), Value::Map(Vec::new())),
          (
              KeyType::Unsigned(10),
              Value::Map(
@@ -589,7 +625,7 @@ mod test {
                  Value::KeyValue(KeyType::Unsigned(1)),
              ]),
          ),
-         (KeyType::Unsigned(9), Value::Map(BTreeMap::new())),
+         (KeyType::Unsigned(9), Value::Map(Vec::new())),
          (
              KeyType::Unsigned(10),
              Value::Map(
@@ -608,30 +644,26 @@ mod test {
      }

      #[test]
-     fn test_cbor_map_btree_empty() {
-         let a = cbor_map_btree!(BTreeMap::new());
-         let b = Value::Map(BTreeMap::new());
+     fn test_cbor_map_collection_empty() {
+         let a = cbor_map_collection!(Vec::<(_, _)>::new());
+         let b = Value::Map(Vec::new());
          assert_eq!(a, b);
      }

      #[test]
-     fn test_cbor_map_btree_foo() {
-         let a = cbor_map_btree!(
-             [(KeyType::Unsigned(2), Value::KeyValue(KeyType::Unsigned(3)))]
-                 .iter()
-                 .cloned()
-                 .collect()
-         );
-         let b = Value::Map(
-             [(KeyType::Unsigned(2), Value::KeyValue(KeyType::Unsigned(3)))]
-                 .iter()
-                 .cloned()
-                 .collect(),
-         );
+     fn test_cbor_map_collection_foo() {
+         let a = cbor_map_collection!(vec![(
+             KeyType::Unsigned(2),
+             Value::KeyValue(KeyType::Unsigned(3))
+         )]);
+         let b = Value::Map(vec![(
+             KeyType::Unsigned(2),
+             Value::KeyValue(KeyType::Unsigned(3)),
+         )]);
          assert_eq!(a, b);
      }

-     fn extract_map(cbor_value: Value) -> BTreeMap<KeyType, Value> {
+     fn extract_map(cbor_value: Value) -> Vec<(KeyType, Value)> {
          match cbor_value {
              Value::Map(map) => map,
              _ => panic!("Expected CBOR map."),

View File

@@ -13,8 +13,7 @@
  // limitations under the License.

  use super::values::{Constants, KeyType, SimpleValue, Value};
- use crate::{cbor_array_vec, cbor_bytes_lit, cbor_map_btree, cbor_text, cbor_unsigned};
+ use crate::{cbor_array_vec, cbor_bytes_lit, cbor_map_collection, cbor_text, cbor_unsigned};
- use alloc::collections::BTreeMap;
  use alloc::str;
  use alloc::vec::Vec;
@@ -174,7 +173,7 @@ impl<'a> Reader<'a> {
          size_value: u64,
          remaining_depth: i8,
      ) -> Result<Value, DecoderError> {
-         let mut value_map = BTreeMap::new();
+         let mut value_map = Vec::new();
          let mut last_key_option = None;
          for _ in 0..size_value {
              let key_value = self.decode_complete_data_item(remaining_depth - 1)?;
@@ -185,12 +184,12 @@ impl<'a> Reader<'a> {
                      }
                  }
                  last_key_option = Some(key.clone());
-                 value_map.insert(key, self.decode_complete_data_item(remaining_depth - 1)?);
+                 value_map.push((key, self.decode_complete_data_item(remaining_depth - 1)?));
              } else {
                  return Err(DecoderError::IncorrectMapKeyType);
              }
          }
-         Ok(cbor_map_btree!(value_map))
+         Ok(cbor_map_collection!(value_map))
      }

      fn decode_to_simple_value(

View File

@@ -12,7 +12,6 @@
  // See the License for the specific language governing permissions and
  // limitations under the License.

- use alloc::collections::BTreeMap;
  use alloc::string::{String, ToString};
  use alloc::vec::Vec;
  use core::cmp::Ordering;
@@ -21,7 +20,7 @@ use core::cmp::Ordering;
  pub enum Value {
      KeyValue(KeyType),
      Array(Vec<Value>),
-     Map(BTreeMap<KeyType, Value>),
+     Map(Vec<(KeyType, Value)>),
      // TAG is omitted
      Simple(SimpleValue),
  }
@@ -183,6 +182,12 @@ where
      }
  }

+ impl From<Vec<(KeyType, Value)>> for Value {
+     fn from(map: Vec<(KeyType, Value)>) -> Self {
+         Value::Map(map)
+     }
+ }

  impl From<bool> for Value {
      fn from(b: bool) -> Self {
          Value::bool_value(b)

View File

@@ -56,8 +56,14 @@ impl<'a> Writer<'a> {
                  }
              }
          }
-         Value::Map(map) => {
-             self.start_item(5, map.len() as u64);
+         Value::Map(mut map) => {
+             map.sort_by(|a, b| a.0.cmp(&b.0));
+             let map_len = map.len();
+             map.dedup_by(|a, b| a.0.eq(&b.0));
+             if map_len != map.len() {
+                 return false;
+             }
+             self.start_item(5, map_len as u64);
              for (k, v) in map {
                  if !self.encode_cbor(Value::KeyValue(k), remaining_depth - 1) {
                      return false;
@@ -209,9 +215,16 @@ mod test {
      #[test]
      fn test_write_map() {
          let value_map = cbor_map! {
-             "aa" => "AA",
-             "e" => "E",
-             "" => ".",
+             0 => "a",
+             23 => "b",
+             24 => "c",
+             std::u8::MAX as i64 => "d",
+             256 => "e",
+             std::u16::MAX as i64 => "f",
+             65536 => "g",
+             std::u32::MAX as i64 => "h",
+             4294967296_i64 => "i",
+             std::i64::MAX => "j",
              -1 => "k",
              -24 => "l",
              -25 => "m",
@@ -224,16 +237,9 @@ mod test {
b"a" => 2, b"a" => 2,
b"bar" => 3, b"bar" => 3,
b"foo" => 4, b"foo" => 4,
0 => "a", "" => ".",
23 => "b", "e" => "E",
24 => "c", "aa" => "AA",
std::u8::MAX as i64 => "d",
256 => "e",
std::u16::MAX as i64 => "f",
65536 => "g",
std::u32::MAX as i64 => "h",
4294967296_i64 => "i",
std::i64::MAX => "j",
}; };
let expected_cbor = vec![ let expected_cbor = vec![
0xb8, 0x19, // map of 25 pairs: 0xb8, 0x19, // map of 25 pairs:
@@ -288,6 +294,67 @@ mod test {
          assert_eq!(write_return(value_map), Some(expected_cbor));
      }
#[test]
fn test_write_map_sorted() {
let sorted_map = cbor_map! {
0 => "a",
1 => "b",
-1 => "c",
-2 => "d",
b"a" => "e",
b"b" => "f",
"" => "g",
"c" => "h",
};
let unsorted_map = cbor_map! {
1 => "b",
-2 => "d",
b"b" => "f",
"c" => "h",
"" => "g",
b"a" => "e",
-1 => "c",
0 => "a",
};
assert_eq!(write_return(sorted_map), write_return(unsorted_map));
}
#[test]
fn test_write_map_duplicates() {
let duplicate0 = cbor_map! {
0 => "a",
-1 => "c",
b"a" => "e",
"c" => "g",
0 => "b",
};
assert_eq!(write_return(duplicate0), None);
let duplicate1 = cbor_map! {
0 => "a",
-1 => "c",
b"a" => "e",
"c" => "g",
-1 => "d",
};
assert_eq!(write_return(duplicate1), None);
let duplicate2 = cbor_map! {
0 => "a",
-1 => "c",
b"a" => "e",
"c" => "g",
b"a" => "f",
};
assert_eq!(write_return(duplicate2), None);
let duplicate3 = cbor_map! {
0 => "a",
-1 => "c",
b"a" => "e",
"c" => "g",
"c" => "h",
};
assert_eq!(write_return(duplicate3), None);
}
      #[test]
      fn test_write_map_with_array() {
          let value_map = cbor_map! {

View File

@@ -25,5 +25,4 @@ regex = { version = "1", optional = true }
  [features]
  std = ["cbor/std", "hex", "rand", "ring", "untrusted", "serde", "serde_json", "regex"]
- derive_debug = []
  with_ctap1 = []

View File

@@ -18,11 +18,10 @@ use core::ops::Mul;
  use subtle::{self, Choice, ConditionallySelectable, CtOption};

  // An exponent on the elliptic curve, that is an element modulo the curve order N.
- #[derive(Clone, Copy, PartialEq, Eq)]
+ #[derive(Clone, Copy, Debug, PartialEq, Eq)]
  // TODO: remove this Default once https://github.com/dalek-cryptography/subtle/issues/63 is
  // resolved.
  #[derive(Default)]
- #[cfg_attr(feature = "derive_debug", derive(Debug))]
  pub struct ExponentP256 {
      int: Int256,
  }
@@ -92,11 +91,10 @@ impl Mul for &ExponentP256 {
  }

  // A non-zero exponent on the elliptic curve.
- #[derive(Clone, Copy, PartialEq, Eq)]
+ #[derive(Clone, Copy, Debug, PartialEq, Eq)]
  // TODO: remove this Default once https://github.com/dalek-cryptography/subtle/issues/63 is
  // resolved.
  #[derive(Default)]
- #[cfg_attr(feature = "derive_debug", derive(Debug))]
  pub struct NonZeroExponentP256 {
      e: ExponentP256,
  }

View File

@@ -111,7 +111,6 @@ impl Mul for &GFP256 {
      }
  }

- #[cfg(feature = "derive_debug")]
  impl core::fmt::Debug for GFP256 {
      fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
          write!(f, "GFP256::{:?}", self.int)

View File

@@ -636,7 +636,6 @@ impl SubAssign<&Int256> for Int256 {
      }
  }

- #[cfg(feature = "derive_debug")]
  impl core::fmt::Debug for Int256 {
      fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
          write!(f, "Int256 {{ digits: {:08x?} }}", self.digits)

View File

@@ -542,7 +542,6 @@ impl Add for &PointProjective {
      }
  }

- #[cfg(feature = "derive_debug")]
  impl core::fmt::Debug for PointP256 {
      fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
          f.debug_struct("PointP256")
@@ -552,7 +551,6 @@ impl core::fmt::Debug for PointP256 {
      }
  }

- #[cfg(feature = "derive_debug")]
  impl PartialEq for PointP256 {
      fn eq(&self, other: &PointP256) -> bool {
          self.x == other.x && self.y == other.y

View File

@@ -17,8 +17,6 @@ use super::ec::int256;
  use super::ec::int256::Int256;
  use super::ec::point::PointP256;
  use super::rng256::Rng256;
- use super::sha256::Sha256;
- use super::Hash256;

  pub const NBYTES: usize = int256::NBYTES;
@@ -26,7 +24,7 @@ pub struct SecKey {
      a: NonZeroExponentP256,
  }

- #[cfg_attr(feature = "derive_debug", derive(Clone, PartialEq, Debug))]
+ #[derive(Clone, Debug, PartialEq)]
  pub struct PubKey {
      p: PointP256,
  }
@@ -62,13 +60,15 @@ impl SecKey {
          // - https://www.secg.org/sec1-v2.pdf
      }

-     // DH key agreement method defined in the FIDO2 specification, Section 5.5.4. "Getting
-     // sharedSecret from Authenticator"
-     pub fn exchange_x_sha256(&self, other: &PubKey) -> [u8; 32] {
+     /// Performs the handshake using the Diffie Hellman key agreement.
+     ///
+     /// This function generates the Z in the PIN protocol v1 specification.
+     /// https://drafts.fidoalliance.org/fido-2/stable-links-to-latest/fido-client-to-authenticator-protocol.html#pinProto1
+     pub fn exchange_x(&self, other: &PubKey) -> [u8; 32] {
          let p = self.exchange_raw(other);
          let mut x: [u8; 32] = [Default::default(); 32];
          p.getx().to_int().to_bin(&mut x);
-         Sha256::hash(&x)
+         x
      }
  }
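For illustration only (not part of this commit), a hedged sketch of how `exchange_x` is typically exercised; it mirrors the symmetry tests further down in this file. The module path of `ThreadRng256` and use of the `crypto` crate with its `std` feature are assumptions.

```rust
use crypto::ecdh::SecKey;
use crypto::rng256::ThreadRng256;

fn shared_secret_sketch() {
    let mut rng = ThreadRng256 {};
    // Each party generates a key pair.
    let sk_a = SecKey::gensk(&mut rng);
    let sk_b = SecKey::gensk(&mut rng);
    // Both sides derive the same x-coordinate Z from the other's public key.
    let z_a = sk_a.exchange_x(&sk_b.genpk());
    let z_b = sk_b.exchange_x(&sk_a.genpk());
    assert_eq!(z_a, z_b);
}
```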
@@ -83,11 +83,13 @@ impl PubKey {
          self.p.to_bytes_uncompressed(bytes);
      }

+     /// Creates a new PubKey from its coordinates on the elliptic curve.
      pub fn from_coordinates(x: &[u8; NBYTES], y: &[u8; NBYTES]) -> Option<PubKey> {
          PointP256::new_checked_vartime(Int256::from_bin(x), Int256::from_bin(y))
              .map(|p| PubKey { p })
      }

+     /// Writes the coordinates into the passed in arrays.
      pub fn to_coordinates(&self, x: &mut [u8; NBYTES], y: &mut [u8; NBYTES]) {
          self.p.getx().to_int().to_bin(x);
          self.p.gety().to_int().to_bin(y);
@@ -119,7 +121,7 @@ mod test {
      /** Test that the exchanged key is the same on both sides **/
      #[test]
-     fn test_exchange_x_sha256_is_symmetric() {
+     fn test_exchange_x_is_symmetric() {
          let mut rng = ThreadRng256 {};
          for _ in 0..ITERATIONS {
@@ -127,12 +129,12 @@ mod test {
              let pk_a = sk_a.genpk();
              let sk_b = SecKey::gensk(&mut rng);
              let pk_b = sk_b.genpk();
-             assert_eq!(sk_a.exchange_x_sha256(&pk_b), sk_b.exchange_x_sha256(&pk_a));
+             assert_eq!(sk_a.exchange_x(&pk_b), sk_b.exchange_x(&pk_a));
          }
      }

      #[test]
-     fn test_exchange_x_sha256_bytes_is_symmetric() {
+     fn test_exchange_x_bytes_is_symmetric() {
          let mut rng = ThreadRng256 {};
          for _ in 0..ITERATIONS {
@@ -146,7 +148,7 @@ mod test {
              let pk_a = PubKey::from_bytes_uncompressed(&pk_bytes_a).unwrap();
              let pk_b = PubKey::from_bytes_uncompressed(&pk_bytes_b).unwrap();
-             assert_eq!(sk_a.exchange_x_sha256(&pk_b), sk_b.exchange_x_sha256(&pk_a));
+             assert_eq!(sk_a.exchange_x(&pk_b), sk_b.exchange_x(&pk_a));
          }
      }

View File

@@ -21,14 +21,16 @@ use super::rng256::Rng256;
  use super::{Hash256, HashBlockSize64Bytes};
  use alloc::vec;
  use alloc::vec::Vec;
+ #[cfg(test)]
+ use arrayref::array_mut_ref;
  #[cfg(feature = "std")]
  use arrayref::array_ref;
- use arrayref::{array_mut_ref, mut_array_refs};
+ use arrayref::mut_array_refs;
- use cbor::{cbor_bytes, cbor_map_options};
  use core::marker::PhantomData;

+ pub const NBYTES: usize = int256::NBYTES;

- #[derive(Clone, PartialEq)]
- #[cfg_attr(feature = "derive_debug", derive(Debug))]
+ #[derive(Clone, Debug, PartialEq)]
  pub struct SecKey {
      k: NonZeroExponentP256,
  }
@@ -38,6 +40,7 @@ pub struct Signature {
      s: NonZeroExponentP256,
  }

+ #[derive(Clone)]
  pub struct PubKey {
      p: PointP256,
  }
@@ -58,10 +61,11 @@ impl SecKey {
          }
      }

-     // ECDSA signature based on a RNG to generate a suitable randomization parameter.
-     // Under the hood, rejection sampling is used to make sure that the randomization parameter is
-     // uniformly distributed.
-     // The provided RNG must be cryptographically secure; otherwise this method is insecure.
+     /// Creates an ECDSA signature based on a RNG.
+     ///
+     /// Under the hood, rejection sampling is used to make sure that the
+     /// randomization parameter is uniformly distributed. The provided RNG must
+     /// be cryptographically secure; otherwise this method is insecure.
      pub fn sign_rng<H, R>(&self, msg: &[u8], rng: &mut R) -> Signature
      where
          H: Hash256,
@@ -77,8 +81,7 @@ impl SecKey {
          }
      }

-     // Deterministic ECDSA signature based on RFC 6979 to generate a suitable randomization
-     // parameter.
+     /// Creates a deterministic ECDSA signature based on RFC 6979.
      pub fn sign_rfc6979<H>(&self, msg: &[u8]) -> Signature
      where
          H: Hash256 + HashBlockSize64Bytes,
@@ -101,8 +104,10 @@ impl SecKey {
          }
      }

-     // Try signing a curve element given a randomization parameter k. If no signature can be
-     // obtained from this k, None is returned and the caller should try again with another value.
+     /// Try signing a curve element given a randomization parameter k.
+     ///
+     /// If no signature can be obtained from this k, None is returned and the
+     /// caller should try again with another value.
      fn try_sign(&self, k: &NonZeroExponentP256, msg: &ExponentP256) -> Option<Signature> {
          let r = ExponentP256::modn(PointP256::base_point_mul(k.as_exponent()).getx().to_int());
          // The branching here is fine because all this reveals is that k generated an unsuitable r.
@@ -214,7 +219,6 @@ impl Signature {
  }

  impl PubKey {
-     pub const ES256_ALGORITHM: i64 = -7;
      #[cfg(feature = "with_ctap1")]
      const UNCOMPRESSED_LENGTH: usize = 1 + 2 * int256::NBYTES;
@@ -242,35 +246,10 @@ impl PubKey {
          representation
      }

-     // Encodes the key according to CBOR Object Signing and Encryption, defined in RFC 8152.
-     pub fn to_cose_key(&self) -> Option<Vec<u8>> {
-         const EC2_KEY_TYPE: i64 = 2;
-         const P_256_CURVE: i64 = 1;
-         let mut x_bytes = vec![0; int256::NBYTES];
-         self.p
-             .getx()
-             .to_int()
-             .to_bin(array_mut_ref![x_bytes.as_mut_slice(), 0, int256::NBYTES]);
-         let x_byte_cbor: cbor::Value = cbor_bytes!(x_bytes);
-         let mut y_bytes = vec![0; int256::NBYTES];
-         self.p
-             .gety()
-             .to_int()
-             .to_bin(array_mut_ref![y_bytes.as_mut_slice(), 0, int256::NBYTES]);
-         let y_byte_cbor: cbor::Value = cbor_bytes!(y_bytes);
-         let cbor_value = cbor_map_options! {
-             1 => EC2_KEY_TYPE,
-             3 => PubKey::ES256_ALGORITHM,
-             -1 => P_256_CURVE,
-             -2 => x_byte_cbor,
-             -3 => y_byte_cbor,
-         };
-         let mut encoded_key = Vec::new();
-         if cbor::write(cbor_value, &mut encoded_key) {
-             Some(encoded_key)
-         } else {
-             None
-         }
+     /// Writes the coordinates into the passed in arrays.
+     pub fn to_coordinates(&self, x: &mut [u8; NBYTES], y: &mut [u8; NBYTES]) {
+         self.p.getx().to_int().to_bin(x);
+         self.p.gety().to_int().to_bin(y);
      }

      #[cfg(feature = "std")]

View File

@@ -0,0 +1,226 @@
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::hmac::hmac_256;
use super::{Hash256, HashBlockSize64Bytes};
const HASH_SIZE: usize = 32;
/// Computes the HKDF with empty salt and 256 bit (one block) output.
///
/// # Arguments
///
/// * `ikm` - Input keying material
/// * `info` - Optional context and application specific information
///
/// This implementation is equivalent to the `hkdf` function below, with `salt` set to the
/// default block of zeros and the output length `l` set to 32.
pub fn hkdf_empty_salt_256<H>(ikm: &[u8], info: &[u8]) -> [u8; HASH_SIZE]
where
H: Hash256 + HashBlockSize64Bytes,
{
// Salt is a zero block here.
let prk = hmac_256::<H>(&[0; HASH_SIZE], ikm);
// l is implicitly the hash size (32 bytes), so we iterate exactly once.
let mut t = info.to_vec();
t.push(1);
hmac_256::<H>(&prk, t.as_slice())
}
/// Computes the HKDF.
///
/// # Arguments
///
/// * `salt` - Optional salt value (a non-secret random value)
/// * `ikm` - Input keying material
/// * `l` - Length of output keying material in octets
/// * `info` - Optional context and application specific information
///
/// Defined in RFC: https://tools.ietf.org/html/rfc5869
///
/// `salt` and `info` can be empty. `salt` then defaults to one block of
/// zeros of size `HASH_SIZE`. Argument order is taken from:
/// https://fidoalliance.org/specs/fido-v2.1-rd-20201208/fido-client-to-authenticator-protocol-v2.1-rd-20201208.html#pinProto2
#[cfg(test)]
pub fn hkdf<H>(salt: &[u8], ikm: &[u8], l: u8, info: &[u8]) -> Vec<u8>
where
H: Hash256 + HashBlockSize64Bytes,
{
let prk = if salt.is_empty() {
hmac_256::<H>(&[0; HASH_SIZE], ikm)
} else {
hmac_256::<H>(salt, ikm)
};
let mut t = vec![];
let mut okm = vec![];
for i in 0..(l as usize + HASH_SIZE - 1) / HASH_SIZE {
t.extend_from_slice(info);
t.push((i + 1) as u8);
t = hmac_256::<H>(&prk, t.as_slice()).to_vec();
okm.extend_from_slice(t.as_slice());
}
okm.truncate(l as usize);
okm
}
#[cfg(test)]
mod test {
use super::super::sha256::Sha256;
use super::*;
use arrayref::array_ref;
#[test]
fn test_hkdf_sha256_vectors() {
// Test vectors taken from https://tools.ietf.org/html/rfc5869.
let ikm = hex::decode("0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b").unwrap();
let salt = hex::decode("000102030405060708090a0b0c").unwrap();
let info = hex::decode("f0f1f2f3f4f5f6f7f8f9").unwrap();
let l = 42;
let okm = hex::decode(
"3cb25f25faacd57a90434f64d0362f2a2d2d0a90cf1a5a4c5db02d56ecc4c5bf34007208d5b887185865",
)
.unwrap();
assert_eq!(
hkdf::<Sha256>(salt.as_slice(), ikm.as_slice(), l, info.as_slice()),
okm
);
let ikm = hex::decode(
"000102030405060708090a0b0c0d0e0f\
101112131415161718191a1b1c1d1e1f\
202122232425262728292a2b2c2d2e2f\
303132333435363738393a3b3c3d3e3f\
404142434445464748494a4b4c4d4e4f",
)
.unwrap();
let salt = hex::decode(
"606162636465666768696a6b6c6d6e6f\
707172737475767778797a7b7c7d7e7f\
808182838485868788898a8b8c8d8e8f\
909192939495969798999a9b9c9d9e9f\
a0a1a2a3a4a5a6a7a8a9aaabacadaeaf",
)
.unwrap();
let info = hex::decode(
"b0b1b2b3b4b5b6b7b8b9babbbcbdbebf\
c0c1c2c3c4c5c6c7c8c9cacbcccdcecf\
d0d1d2d3d4d5d6d7d8d9dadbdcdddedf\
e0e1e2e3e4e5e6e7e8e9eaebecedeeef\
f0f1f2f3f4f5f6f7f8f9fafbfcfdfeff",
)
.unwrap();
let l = 82;
let okm = hex::decode(
"b11e398dc80327a1c8e7f78c596a4934\
4f012eda2d4efad8a050cc4c19afa97c\
59045a99cac7827271cb41c65e590e09\
da3275600c2f09b8367793a9aca3db71\
cc30c58179ec3e87c14c01d5c1f3434f\
1d87",
)
.unwrap();
assert_eq!(
hkdf::<Sha256>(salt.as_slice(), ikm.as_slice(), l, info.as_slice()),
okm
);
let ikm = hex::decode("0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b").unwrap();
let salt = hex::decode("").unwrap();
let info = hex::decode("").unwrap();
let l = 42;
let okm = hex::decode(
"8da4e775a563c18f715f802a063c5a31b8a11f5c5ee1879ec3454e5f3c738d2d9d201395faa4b61a96c8",
)
.unwrap();
assert_eq!(
hkdf::<Sha256>(salt.as_slice(), ikm.as_slice(), l, info.as_slice()),
okm
);
}
#[test]
fn test_hkdf_empty_salt_256_sha256_vectors() {
// Test vectors generated by pycryptodome using:
// HKDF(b'0', 32, b'', SHA256, context=b'\x00').hex()
let test_okms = [
hex::decode("f9be72116cb97f41828210289caafeabde1f3dfb9723bf43538ab18f3666783a")
.unwrap(),
hex::decode("f50f964f5b94d62fd1da9356ab8662b0a0f5b8e36e277178b69b6ffecf50cf44")
.unwrap(),
hex::decode("fc8772ceb5592d67442dcb4353cdd28519e82d6e55b4cf664b5685252c2d2998")
.unwrap(),
hex::decode("62831b924839a180f53be5461eeea1b89dc21779f50142b5a54df0f0cc86d61a")
.unwrap(),
hex::decode("6991f00a12946a4e3b8315cdcf0132c2ca508fd17b769f08d1454d92d33733e0")
.unwrap(),
hex::decode("0f9bb7dddd1ec61f91d8c4f5369b5870f9d44c4ceabccca1b83f06fec115e4e3")
.unwrap(),
hex::decode("235367e2ab6cca2aba1a666825458dba6b272a215a2537c05feebe4b80dab709")
.unwrap(),
hex::decode("96e8edad661da48d1a133b38c255d33e05555bc9aa442579dea1cd8d8b8d2aef")
.unwrap(),
];
for (i, okm) in test_okms.iter().enumerate() {
// String of number i.
let ikm = i.to_string();
// Byte i.
let info = [i as u8];
assert_eq!(
&hkdf_empty_salt_256::<Sha256>(&ikm.as_bytes(), &info[..]),
array_ref!(okm, 0, 32)
);
}
}
#[test]
fn test_hkdf_length() {
let salt = [];
let mut input = Vec::new();
for l in 0..128 {
assert_eq!(
hkdf::<Sha256>(&salt, input.as_slice(), l, input.as_slice()).len(),
l as usize
);
input.push(b'A');
}
}
#[test]
fn test_hkdf_empty_salt() {
let salt = [];
let mut input = Vec::new();
for l in 0..128 {
assert_eq!(
hkdf::<Sha256>(&salt, input.as_slice(), l, input.as_slice()),
hkdf::<Sha256>(&[0; 32], input.as_slice(), l, input.as_slice())
);
input.push(b'A');
}
}
#[test]
fn test_hkdf_compare_implementations() {
let salt = [];
let l = 32;
let mut input = Vec::new();
for _ in 0..128 {
assert_eq!(
hkdf::<Sha256>(&salt, input.as_slice(), l, input.as_slice()),
hkdf_empty_salt_256::<Sha256>(input.as_slice(), input.as_slice())
);
input.push(b'A');
}
}
}

View File

@@ -22,6 +22,7 @@ pub mod cbc;
  mod ec;
  pub mod ecdh;
  pub mod ecdsa;
+ pub mod hkdf;
  pub mod hmac;
  pub mod rng256;
  pub mod sha256;

View File

@@ -303,7 +303,7 @@ impl<'a> Fuzzer<'a> {
      }

      /// Generates a possibly invalid update.
-     fn update(&mut self) -> StoreUpdate {
+     fn update(&mut self) -> StoreUpdate<Vec<u8>> {
          match self.entropy.read_range(0, 1) {
              0 => {
                  let key = self.key();

View File

@@ -12,6 +12,11 @@
  // See the License for the specific language governing permissions and
  // limitations under the License.

+ //! Flash storage for testing.
+ //!
+ //! [`BufferStorage`] implements the flash [`Storage`] interface but doesn't interface with an
+ //! actual flash storage. Instead it uses a buffer in memory to represent the storage state.

  use crate::{Storage, StorageError, StorageIndex, StorageResult};
  use alloc::borrow::Borrow;
  use alloc::boxed::Box;
@@ -63,8 +68,8 @@ pub struct BufferOptions {
      ///
      /// When set, the following conditions would panic:
      /// - A bit is written from 0 to 1.
-     /// - A word is written more than `max_word_writes`.
-     /// - A page is erased more than `max_page_erases`.
+     /// - A word is written more than [`Self::max_word_writes`].
+     /// - A page is erased more than [`Self::max_page_erases`].
      pub strict_mode: bool,
  }
@@ -110,15 +115,13 @@ impl BufferStorage {
      ///
      /// Before each subsequent mutable operation (write or erase), the delay is decremented if
      /// positive. If the delay is elapsed, the operation is saved and an error is returned.
-     /// Subsequent operations will panic until the interrupted operation is [corrupted] or the
-     /// interruption is [reset].
+     /// Subsequent operations will panic until either of:
+     /// - The interrupted operation is [corrupted](BufferStorage::corrupt_operation).
+     /// - The interruption is [reset](BufferStorage::reset_interruption).
      ///
      /// # Panics
      ///
      /// Panics if an interruption is already armed.
-     ///
-     /// [corrupted]: struct.BufferStorage.html#method.corrupt_operation
-     /// [reset]: struct.BufferStorage.html#method.reset_interruption
      pub fn arm_interruption(&mut self, delay: usize) {
          self.interruption.arm(delay);
      }
@@ -130,10 +133,8 @@ impl BufferStorage {
      /// # Panics
      ///
      /// Panics if any of the following conditions hold:
-     /// - An interruption was not [armed].
+     /// - An interruption was not [armed](BufferStorage::arm_interruption).
      /// - An interruption was armed and it has triggered.
-     ///
-     /// [armed]: struct.BufferStorage.html#method.arm_interruption
      pub fn disarm_interruption(&mut self) -> usize {
          self.interruption.get().err().unwrap()
      }
@@ -142,16 +143,14 @@ impl BufferStorage {
      ///
      /// # Panics
      ///
-     /// Panics if an interruption was not [armed].
-     ///
-     /// [armed]: struct.BufferStorage.html#method.arm_interruption
+     /// Panics if an interruption was not [armed](BufferStorage::arm_interruption).
      pub fn reset_interruption(&mut self) {
          let _ = self.interruption.get();
      }

      /// Corrupts an interrupted operation.
      ///
-     /// Applies the [corruption function] to the storage. Counters are updated accordingly:
+     /// Applies the corruption function to the storage. Counters are updated accordingly:
      /// - If a word is fully written, its counter is incremented regardless of whether other words
      ///   of the same operation have been fully written.
      /// - If a page is fully erased, its counter is incremented (and its word counters are reset).
@@ -159,13 +158,10 @@ impl BufferStorage {
      /// # Panics
      ///
      /// Panics if any of the following conditions hold:
-     /// - An interruption was not [armed].
+     /// - An interruption was not [armed](BufferStorage::arm_interruption).
      /// - An interruption was armed but did not trigger.
      /// - The corruption function corrupts more bits than allowed.
      /// - The interrupted operation itself would have panicked.
-     ///
-     /// [armed]: struct.BufferStorage.html#method.arm_interruption
-     /// [corruption function]: type.BufferCorruptFunction.html
      pub fn corrupt_operation(&mut self, corrupt: BufferCorruptFunction) {
          let operation = self.interruption.get().unwrap();
          let range = self.operation_range(&operation).unwrap();
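A hedged sketch (not part of this commit) of the arm/disarm flow documented above. Only the calls described in this hunk are used; the crate-root export of `BufferStorage` and how it is constructed are assumptions.

```rust
use persistent_store::BufferStorage;

/// Illustrative only: interrupt the third mutable operation, then recover.
fn interruption_sketch(storage: &mut BufferStorage) {
    // Arm: the third write or erase from now on will fail with an error.
    storage.arm_interruption(3);
    // ... drive writes/erases through the store under test here ...
    // If the interruption never triggered, disarm and get the remaining delay.
    let _remaining_delay = storage.disarm_interruption();
}
```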
@@ -217,7 +213,8 @@ impl BufferStorage {
      ///
      /// # Panics
      ///
-     /// Panics if the maximum number of erase cycles per page is reached.
+     /// Panics if the [maximum number of erase cycles per page](BufferOptions::max_page_erases) is
+     /// reached.
      fn incr_page_erases(&mut self, page: usize) {
          // Check that pages are not erased too many times.
          if self.options.strict_mode {
@@ -243,7 +240,8 @@ impl BufferStorage {
      ///
      /// # Panics
      ///
-     /// Panics if the maximum number of writes per word is reached.
+     /// Panics if the [maximum number of writes per word](BufferOptions::max_word_writes) is
+     /// reached.
      fn incr_word_writes(&mut self, index: usize, value: &[u8], complete: &[u8]) {
          let word_size = self.word_size();
          for i in 0..value.len() / word_size {

View File

@@ -12,6 +12,10 @@
  // See the License for the specific language governing permissions and
  // limitations under the License.

+ //! Store wrapper for testing.
+ //!
+ //! [`StoreDriver`] wraps a [`Store`] and compares its behavior with its associated [`StoreModel`].

  use crate::format::{Format, Position};
  #[cfg(test)]
  use crate::StoreUpdate;

View File

@@ -12,6 +12,8 @@
  // See the License for the specific language governing permissions and
  // limitations under the License.

+ //! Storage representation of a store.

  #[macro_use]
  mod bitfield;
@@ -20,18 +22,20 @@ use self::bitfield::Length;
  use self::bitfield::{count_zeros, num_bits, Bit, Checksum, ConstField, Field};
  use crate::{usize_to_nat, Nat, Storage, StorageIndex, StoreError, StoreResult, StoreUpdate};
  use alloc::vec::Vec;
+ use core::borrow::Borrow;
  use core::cmp::min;
  use core::convert::TryFrom;

  /// Internal representation of a word in flash.
  ///
- /// Currently, the store only supports storages where a word is 32 bits.
+ /// Currently, the store only supports storages where a word is 32 bits, i.e. the [word
+ /// size](Storage::word_size) is 4 bytes.
  type WORD = u32;

  /// Abstract representation of a word in flash.
  ///
- /// This type is kept abstract to avoid possible confusion with `Nat` if they happen to have the
- /// same representation. This is because they have different semantics, `Nat` represents natural
+ /// This type is kept abstract to avoid possible confusion with [`Nat`] if they happen to have the
+ /// same representation. This is because they have different semantics, [`Nat`] represents natural
  /// numbers while `Word` represents sequences of bits (and thus has no arithmetic).
  #[derive(Clone, Copy, Debug, PartialEq, Eq)]
  pub struct Word(WORD);
@@ -46,7 +50,7 @@ impl Word {
      ///
      /// # Panics
      ///
-     /// Panics if `slice.len() != WORD_SIZE`.
+     /// Panics if `slice.len()` is not [`WORD_SIZE`] bytes.
      pub fn from_slice(slice: &[u8]) -> Word {
          Word(WORD::from_le_bytes(<WordSlice>::try_from(slice).unwrap()))
      }
@@ -59,47 +63,49 @@ impl Word {
  /// Size of a word in bytes.
  ///
- /// Currently, the store only supports storages where a word is 4 bytes.
+ /// Currently, the store only supports storages where the [word size](Storage::word_size) is 4
+ /// bytes.
  const WORD_SIZE: Nat = core::mem::size_of::<WORD>() as Nat;

  /// Minimum number of words per page.
  ///
- /// Currently, the store only supports storages where pages have at least 8 words.
- const MIN_NUM_WORDS_PER_PAGE: Nat = 8;
+ /// Currently, the store only supports storages where pages have at least 8 [words](WORD_SIZE), i.e.
+ /// the [page size](Storage::page_size) is at least 32 bytes.
+ const MIN_PAGE_SIZE: Nat = 8;

  /// Maximum size of a page in bytes.
  ///
- /// Currently, the store only supports storages where pages are between 8 and 1024 [words].
- ///
- /// [words]: constant.WORD_SIZE.html
+ /// Currently, the store only supports storages where pages have at most 1024 [words](WORD_SIZE),
+ /// i.e. the [page size](Storage::page_size) is at most 4096 bytes.
  const MAX_PAGE_SIZE: Nat = 4096;

  /// Maximum number of erase cycles.
  ///
- /// Currently, the store only supports storages where the maximum number of erase cycles fits on 16
- /// bits.
+ /// Currently, the store only supports storages where the [maximum number of erase
+ /// cycles](Storage::max_page_erases) fits in 16 bits, i.e. it is at most 65535.
  const MAX_ERASE_CYCLE: Nat = 65535;

  /// Minimum number of pages.
  ///
- /// Currently, the store only supports storages with at least 3 pages.
+ /// Currently, the store only supports storages where the [number of pages](Storage::num_pages) is
+ /// at least 3.
  const MIN_NUM_PAGES: Nat = 3;

  /// Maximum page index.
  ///
- /// Thus the maximum number of pages is one more than this number. Currently, the store only
- /// supports storages where the number of pages is between 3 and 64.
+ /// Currently, the store only supports storages where the [number of pages](Storage::num_pages) is
+ /// at most 64, i.e. the maximum page index is 63.
  const MAX_PAGE_INDEX: Nat = 63;

  /// Maximum key index.
  ///
- /// Thus the number of keys is one more than this number. Currently, the store only supports 4096
- /// keys.
+ /// Currently, the store only supports 4096 keys, i.e. the maximum key index is 4095.
  const MAX_KEY_INDEX: Nat = 4095;

  /// Maximum length in bytes of a user payload.
  ///
- /// Currently, the store only supports values smaller than 1024 bytes.
+ /// Currently, the store only supports values at most 1023 bytes long. This may be further reduced
+ /// depending on the [page size](Storage::page_size), see [`Format::max_value_len`].
  const MAX_VALUE_LEN: Nat = 1023;

  /// Maximum number of updates per transaction.
@@ -108,9 +114,15 @@ const MAX_VALUE_LEN: Nat = 1023;
const MAX_UPDATES: Nat = 31; const MAX_UPDATES: Nat = 31;
/// Maximum number of words per virtual page. /// Maximum number of words per virtual page.
const MAX_VIRT_PAGE_SIZE: Nat = div_ceil(MAX_PAGE_SIZE, WORD_SIZE) - CONTENT_WORD; ///
/// A virtual page has [`CONTENT_WORD`] fewer [words](WORD_SIZE) than the storage [page
/// size](Storage::page_size). Those words are used to store the page header. Since a page has at
/// least [8](MIN_PAGE_SIZE) words, a virtual page has at least 6 words.
const MAX_VIRT_PAGE_SIZE: Nat = MAX_PAGE_SIZE / WORD_SIZE - CONTENT_WORD;
/// Word with all bits set to one. /// Word with all bits set to one.
///
/// After a page is erased, all words are equal to this value.
const ERASED_WORD: Word = Word(!(0 as WORD)); const ERASED_WORD: Word = Word(!(0 as WORD));
/// Helpers for a given storage configuration. /// Helpers for a given storage configuration.
@@ -120,33 +132,31 @@ pub struct Format {
/// ///
/// # Invariant /// # Invariant
/// ///
/// - Words divide a page evenly. /// - [Words](WORD_SIZE) divide a page evenly.
/// - There are at least 8 words in a page. /// - There are at least [`MIN_PAGE_SIZE`] words in a page.
/// - There are at most `MAX_PAGE_SIZE` bytes in a page. /// - There are at most [`MAX_PAGE_SIZE`] bytes in a page.
page_size: Nat, page_size: Nat,
/// The number of pages in the storage. /// The number of pages in the storage.
/// ///
/// # Invariant /// # Invariant
/// ///
/// - There are at least 3 pages. /// - There are at least [`MIN_NUM_PAGES`] pages.
/// - There are at most `MAX_PAGE_INDEX + 1` pages. /// - There are at most [`MAX_PAGE_INDEX`] + 1 pages.
num_pages: Nat, num_pages: Nat,
/// The maximum number of times a page can be erased. /// The maximum number of times a page can be erased.
/// ///
/// # Invariant /// # Invariant
/// ///
/// - A page can be erased at most `MAX_ERASE_CYCLE` times. /// - A page can be erased at most [`MAX_ERASE_CYCLE`] times.
max_page_erases: Nat, max_page_erases: Nat,
} }
impl Format { impl Format {
/// Extracts the format from a storage. /// Extracts the format from a storage.
/// ///
/// Returns `None` if the storage is not [supported]. /// Returns `None` if the storage is not [supported](Format::is_storage_supported).
///
/// [supported]: struct.Format.html#method.is_storage_supported
pub fn new<S: Storage>(storage: &S) -> Option<Format> { pub fn new<S: Storage>(storage: &S) -> Option<Format> {
if Format::is_storage_supported(storage) { if Format::is_storage_supported(storage) {
Some(Format { Some(Format {
@@ -162,21 +172,12 @@ impl Format {
/// Returns whether a storage is supported. /// Returns whether a storage is supported.
/// ///
/// A storage is supported if the following conditions hold: /// A storage is supported if the following conditions hold:
/// - The size of a word is [`WORD_SIZE`] bytes. /// - The [`Storage::word_size`] is [`WORD_SIZE`] bytes.
/// - The size of a word evenly divides the size of a page. /// - The [`Storage::word_size`] evenly divides the [`Storage::page_size`].
/// - A page contains at least [`MIN_NUM_WORDS_PER_PAGE`] words. /// - The [`Storage::page_size`] is between [`MIN_PAGE_SIZE`] words and [`MAX_PAGE_SIZE`] bytes.
/// - A page contains at most [`MAX_PAGE_SIZE`] bytes. /// - The [`Storage::num_pages`] is between [`MIN_NUM_PAGES`] and [`MAX_PAGE_INDEX`] + 1.
/// - There are at least [`MIN_NUM_PAGES`] pages. /// - The [`Storage::max_word_writes`] is at least 2.
/// - There are at most [`MAX_PAGE_INDEX`]` + 1` pages. /// - The [`Storage::max_page_erases`] is at most [`MAX_ERASE_CYCLE`].
/// - A word can be written at least twice between erase cycles.
/// - The maximum number of erase cycles is at most [`MAX_ERASE_CYCLE`].
///
/// [`WORD_SIZE`]: constant.WORD_SIZE.html
/// [`MIN_NUM_WORDS_PER_PAGE`]: constant.MIN_NUM_WORDS_PER_PAGE.html
/// [`MAX_PAGE_SIZE`]: constant.MAX_PAGE_SIZE.html
/// [`MIN_NUM_PAGES`]: constant.MIN_NUM_PAGES.html
/// [`MAX_PAGE_INDEX`]: constant.MAX_PAGE_INDEX.html
/// [`MAX_ERASE_CYCLE`]: constant.MAX_ERASE_CYCLE.html
fn is_storage_supported<S: Storage>(storage: &S) -> bool { fn is_storage_supported<S: Storage>(storage: &S) -> bool {
let word_size = usize_to_nat(storage.word_size()); let word_size = usize_to_nat(storage.word_size());
let page_size = usize_to_nat(storage.page_size()); let page_size = usize_to_nat(storage.page_size());
@@ -185,7 +186,7 @@ impl Format {
let max_page_erases = usize_to_nat(storage.max_page_erases()); let max_page_erases = usize_to_nat(storage.max_page_erases());
word_size == WORD_SIZE word_size == WORD_SIZE
&& page_size % word_size == 0 && page_size % word_size == 0
&& (MIN_NUM_WORDS_PER_PAGE * word_size <= page_size && page_size <= MAX_PAGE_SIZE) && (MIN_PAGE_SIZE * word_size <= page_size && page_size <= MAX_PAGE_SIZE)
&& (MIN_NUM_PAGES <= num_pages && num_pages <= MAX_PAGE_INDEX + 1) && (MIN_NUM_PAGES <= num_pages && num_pages <= MAX_PAGE_INDEX + 1)
&& max_word_writes >= 2 && max_word_writes >= 2
&& max_page_erases <= MAX_ERASE_CYCLE && max_page_erases <= MAX_ERASE_CYCLE
@@ -198,28 +199,28 @@ impl Format {
/// The size of a page in bytes. /// The size of a page in bytes.
/// ///
/// We have `MIN_NUM_WORDS_PER_PAGE * self.word_size() <= self.page_size() <= MAX_PAGE_SIZE`. /// This is at least [`MIN_PAGE_SIZE`] [words](WORD_SIZE) and at most [`MAX_PAGE_SIZE`] bytes.
pub fn page_size(&self) -> Nat { pub fn page_size(&self) -> Nat {
self.page_size self.page_size
} }
/// The number of pages in the storage, denoted by `N`. /// The number of pages in the storage, denoted by N.
/// ///
/// We have `MIN_NUM_PAGES <= N <= MAX_PAGE_INDEX + 1`. /// We have [`MIN_NUM_PAGES`] ≤ N ≤ [`MAX_PAGE_INDEX`] + 1.
pub fn num_pages(&self) -> Nat { pub fn num_pages(&self) -> Nat {
self.num_pages self.num_pages
} }
/// The maximum page index. /// The maximum page index.
/// ///
/// We have `2 <= self.max_page() <= MAX_PAGE_INDEX`. /// This is at least [`MIN_NUM_PAGES`] - 1 and at most [`MAX_PAGE_INDEX`].
pub fn max_page(&self) -> Nat { pub fn max_page(&self) -> Nat {
self.num_pages - 1 self.num_pages - 1
} }
/// The maximum number of times a page can be erased, denoted by `E`. /// The maximum number of times a page can be erased, denoted by E.
/// ///
/// We have `E <= MAX_ERASE_CYCLE`. /// We have E ≤ [`MAX_ERASE_CYCLE`].
pub fn max_page_erases(&self) -> Nat { pub fn max_page_erases(&self) -> Nat {
self.max_page_erases self.max_page_erases
} }
@@ -234,19 +235,18 @@ impl Format {
MAX_UPDATES MAX_UPDATES
} }
/// The size of a virtual page in words, denoted by `Q`. /// The size of a virtual page in words, denoted by Q.
/// ///
/// A virtual page is stored in a physical page after the page header. /// A virtual page is stored in a physical page after the page header.
/// ///
/// We have `MIN_NUM_WORDS_PER_PAGE - 2 <= Q <= MAX_VIRT_PAGE_SIZE`. /// We have [`MIN_PAGE_SIZE`] - 2 ≤ Q ≤ [`MAX_VIRT_PAGE_SIZE`].
pub fn virt_page_size(&self) -> Nat { pub fn virt_page_size(&self) -> Nat {
self.page_size() / self.word_size() - CONTENT_WORD self.page_size() / self.word_size() - CONTENT_WORD
} }
/// The maximum length in bytes of a user payload. /// The maximum length in bytes of a user payload.
/// ///
/// We have `(MIN_NUM_WORDS_PER_PAGE - 3) * self.word_size() <= self.max_value_len() <= /// This is at least [`MIN_PAGE_SIZE`] - 3 [words](WORD_SIZE) and at most [`MAX_VALUE_LEN`].
/// MAX_VALUE_LEN`.
pub fn max_value_len(&self) -> Nat { pub fn max_value_len(&self) -> Nat {
min( min(
(self.virt_page_size() - 1) * self.word_size(), (self.virt_page_size() - 1) * self.word_size(),
@@ -254,57 +254,50 @@ impl Format {
) )
} }
/// The maximum prefix length in words, denoted by `M`. /// The maximum prefix length in words, denoted by M.
/// ///
/// A prefix is the first words of a virtual page that belong to the last entry of the previous /// A prefix is the first words of a virtual page that belong to the last entry of the previous
/// virtual page. This happens because entries may overlap up to 2 virtual pages. /// virtual page. This happens because entries may overlap up to 2 virtual pages.
/// ///
/// We have `MIN_NUM_WORDS_PER_PAGE - 3 <= M < Q`. /// We have [`MIN_PAGE_SIZE`] - 3 ≤ M < Q.
pub fn max_prefix_len(&self) -> Nat { pub fn max_prefix_len(&self) -> Nat {
self.bytes_to_words(self.max_value_len()) self.bytes_to_words(self.max_value_len())
} }
/// The total virtual capacity in words, denoted by `V`. /// The total virtual capacity in words, denoted by V.
/// ///
/// We have `V = (N - 1) * (Q - 1) - M`. /// We have V = (N - 1) × (Q - 1) - M.
/// ///
/// We can show `V >= (N - 2) * (Q - 1)` with the following steps: /// We can show V ≥ (N - 2) × (Q - 1) with the following steps:
/// - `M <= Q - 1` from `M < Q` from [`M`] definition /// - M ≤ Q - 1 from M < Q from [M](Format::max_prefix_len)'s definition
/// - `-M >= -(Q - 1)` from above /// - -M ≥ -(Q - 1) from above
/// - `V >= (N - 1) * (Q - 1) - (Q - 1)` from `V` definition /// - V ≥ (N - 1) × (Q - 1) - (Q - 1) from V's definition
///
/// [`M`]: struct.Format.html#method.max_prefix_len
pub fn virt_size(&self) -> Nat { pub fn virt_size(&self) -> Nat {
(self.num_pages() - 1) * (self.virt_page_size() - 1) - self.max_prefix_len() (self.num_pages() - 1) * (self.virt_page_size() - 1) - self.max_prefix_len()
} }
/// The total user capacity in words, denoted by `C`. /// The total user capacity in words, denoted by C.
/// ///
/// We have `C = V - N = (N - 1) * (Q - 2) - M - 1`. /// We have C = V - N = (N - 1) × (Q - 2) - M - 1.
/// ///
/// We can show `C >= (N - 2) * (Q - 2) - 2` with the following steps: /// We can show C ≥ (N - 2) × (Q - 2) - 2 with the following steps:
/// - `V >= (N - 2) * (Q - 1)` from [`V`] definition /// - V ≥ (N - 2) × (Q - 1) from [V](Format::virt_size)'s definition
/// - `C >= (N - 2) * (Q - 1) - N` from `C` definition /// - C ≥ (N - 2) × (Q - 1) - N from C's definition
/// - `(N - 2) * (Q - 1) - N = (N - 2) * (Q - 2) - 2` by calculus /// - (N - 2) × (Q - 1) - N = (N - 2) × (Q - 2) - 2 by calculus
///
/// [`V`]: struct.Format.html#method.virt_size
pub fn total_capacity(&self) -> Nat { pub fn total_capacity(&self) -> Nat {
// From the virtual capacity, we reserve N - 1 words for `Erase` entries and 1 word for a // From the virtual capacity, we reserve N - 1 words for `Erase` entries and 1 word for a
// `Clear` entry. // `Clear` entry.
self.virt_size() - self.num_pages() self.virt_size() - self.num_pages()
} }
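To make the capacity formulas above concrete, here is a hedged worked example; the geometry (3 pages of 64 bytes with 4-byte words) is an assumption chosen for illustration, not taken from this diff.

fn main() {
    // Assumed geometry: P = 64 / 4 = 16 words per page, so Q = P - 2 = 14.
    let (n, q, word_size): (u32, u32, u32) = (3, 14, 4);
    // max_value_len = min((Q - 1) * 4, 1023) = 52 bytes, hence M = ceil(52 / 4) = 13.
    let max_value_len = core::cmp::min((q - 1) * word_size, 1023);
    let m = (max_value_len + word_size - 1) / word_size;
    let v = (n - 1) * (q - 1) - m; // V = (N - 1) * (Q - 1) - M = 13 words
    let c = v - n;                 // C = V - N = 10 words of user capacity
    assert_eq!((m, v, c), (13, 13, 10));
}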
/// The total virtual lifetime in words, denoted by `L`. /// The total virtual lifetime in words, denoted by L.
/// ///
/// We have `L = (E * N + N - 1) * Q`. /// We have L = (E × N + N - 1) × Q.
pub fn total_lifetime(&self) -> Position { pub fn total_lifetime(&self) -> Position {
Position::new(self, self.max_page_erases(), self.num_pages() - 1, 0) Position::new(self, self.max_page_erases(), self.num_pages() - 1, 0)
} }
/// Returns the word position of the first entry of a page. /// Returns the word position of the first entry of a page.
///
/// The init info of the page must be provided to know where the first entry of the page
/// starts.
pub fn page_head(&self, init: InitInfo, page: Nat) -> Position { pub fn page_head(&self, init: InitInfo, page: Nat) -> Position {
Position::new(self, init.cycle, page, init.prefix) Position::new(self, init.cycle, page, init.prefix)
} }
@@ -335,12 +328,12 @@ impl Format {
} }
/// Builds the storage representation of an init info. /// Builds the storage representation of an init info.
pub fn build_init(&self, init: InitInfo) -> WordSlice { pub fn build_init(&self, init: InitInfo) -> StoreResult<WordSlice> {
let mut word = ERASED_WORD; let mut word = ERASED_WORD;
INIT_CYCLE.set(&mut word, init.cycle); INIT_CYCLE.set(&mut word, init.cycle)?;
INIT_PREFIX.set(&mut word, init.prefix); INIT_PREFIX.set(&mut word, init.prefix)?;
WORD_CHECKSUM.set(&mut word, 0); WORD_CHECKSUM.set(&mut word, 0)?;
word.as_slice() Ok(word.as_slice())
} }
/// Returns the storage index of the compact info of a page. /// Returns the storage index of the compact info of a page.
@@ -368,36 +361,36 @@ impl Format {
} }
/// Builds the storage representation of a compact info. /// Builds the storage representation of a compact info.
pub fn build_compact(&self, compact: CompactInfo) -> WordSlice { pub fn build_compact(&self, compact: CompactInfo) -> StoreResult<WordSlice> {
let mut word = ERASED_WORD; let mut word = ERASED_WORD;
COMPACT_TAIL.set(&mut word, compact.tail); COMPACT_TAIL.set(&mut word, compact.tail)?;
WORD_CHECKSUM.set(&mut word, 0); WORD_CHECKSUM.set(&mut word, 0)?;
word.as_slice() Ok(word.as_slice())
} }
/// Builds the storage representation of an internal entry. /// Builds the storage representation of an internal entry.
pub fn build_internal(&self, internal: InternalEntry) -> WordSlice { pub fn build_internal(&self, internal: InternalEntry) -> StoreResult<WordSlice> {
let mut word = ERASED_WORD; let mut word = ERASED_WORD;
match internal { match internal {
InternalEntry::Erase { page } => { InternalEntry::Erase { page } => {
ID_ERASE.set(&mut word); ID_ERASE.set(&mut word)?;
ERASE_PAGE.set(&mut word, page); ERASE_PAGE.set(&mut word, page)?;
} }
InternalEntry::Clear { min_key } => { InternalEntry::Clear { min_key } => {
ID_CLEAR.set(&mut word); ID_CLEAR.set(&mut word)?;
CLEAR_MIN_KEY.set(&mut word, min_key); CLEAR_MIN_KEY.set(&mut word, min_key)?;
} }
InternalEntry::Marker { count } => { InternalEntry::Marker { count } => {
ID_MARKER.set(&mut word); ID_MARKER.set(&mut word)?;
MARKER_COUNT.set(&mut word, count); MARKER_COUNT.set(&mut word, count)?;
} }
InternalEntry::Remove { key } => { InternalEntry::Remove { key } => {
ID_REMOVE.set(&mut word); ID_REMOVE.set(&mut word)?;
REMOVE_KEY.set(&mut word, key); REMOVE_KEY.set(&mut word, key)?;
} }
} }
WORD_CHECKSUM.set(&mut word, 0); WORD_CHECKSUM.set(&mut word, 0)?;
word.as_slice() Ok(word.as_slice())
} }
/// Parses the first word of an entry from its storage representation. /// Parses the first word of an entry from its storage representation.
@@ -459,31 +452,31 @@ impl Format {
} }
/// Builds the storage representation of a user entry. /// Builds the storage representation of a user entry.
pub fn build_user(&self, key: Nat, value: &[u8]) -> Vec<u8> { pub fn build_user(&self, key: Nat, value: &[u8]) -> StoreResult<Vec<u8>> {
let length = usize_to_nat(value.len()); let length = usize_to_nat(value.len());
let word_size = self.word_size(); let word_size = self.word_size();
let footer = self.bytes_to_words(length); let footer = self.bytes_to_words(length);
let mut result = vec![0xff; ((1 + footer) * word_size) as usize]; let mut result = vec![0xff; ((1 + footer) * word_size) as usize];
result[word_size as usize..][..length as usize].copy_from_slice(value); result[word_size as usize..][..length as usize].copy_from_slice(value);
let mut word = ERASED_WORD; let mut word = ERASED_WORD;
ID_HEADER.set(&mut word); ID_HEADER.set(&mut word)?;
if footer > 0 && is_erased(&result[(footer * word_size) as usize..]) { if footer > 0 && is_erased(&result[(footer * word_size) as usize..]) {
HEADER_FLIPPED.set(&mut word); HEADER_FLIPPED.set(&mut word)?;
*result.last_mut().unwrap() = 0x7f; *result.last_mut().unwrap() = 0x7f;
} }
HEADER_LENGTH.set(&mut word, length); HEADER_LENGTH.set(&mut word, length)?;
HEADER_KEY.set(&mut word, key); HEADER_KEY.set(&mut word, key)?;
HEADER_CHECKSUM.set( HEADER_CHECKSUM.set(
&mut word, &mut word,
count_zeros(&result[(footer * word_size) as usize..]), count_zeros(&result[(footer * word_size) as usize..]),
); )?;
result[..word_size as usize].copy_from_slice(&word.as_slice()); result[..word_size as usize].copy_from_slice(&word.as_slice());
result Ok(result)
} }
/// Sets the padding bit in the first word of a user entry. /// Sets the padding bit in the first word of a user entry.
pub fn set_padding(&self, word: &mut Word) { pub fn set_padding(&self, word: &mut Word) -> StoreResult<()> {
ID_PADDING.set(word); ID_PADDING.set(word)
} }
/// Sets the deleted bit in the first word of a user entry. /// Sets the deleted bit in the first word of a user entry.
@@ -492,13 +485,16 @@ impl Format {
} }
/// Returns the capacity required by a transaction. /// Returns the capacity required by a transaction.
pub fn transaction_capacity(&self, updates: &[StoreUpdate]) -> Nat { pub fn transaction_capacity<ByteSlice: Borrow<[u8]>>(
&self,
updates: &[StoreUpdate<ByteSlice>],
) -> Nat {
match updates.len() { match updates.len() {
// An empty transaction doesn't consume anything. // An empty transaction doesn't consume anything.
0 => 0, 0 => 0,
// Transactions with a single update are optimized by avoiding a marker entry. // Transactions with a single update are optimized by avoiding a marker entry.
1 => match &updates[0] { 1 => match &updates[0] {
StoreUpdate::Insert { value, .. } => self.entry_size(value), StoreUpdate::Insert { value, .. } => self.entry_size(value.borrow()),
// Transactions with a single update which is a removal don't consume anything. // Transactions with a single update which is a removal don't consume anything.
StoreUpdate::Remove { .. } => 0, StoreUpdate::Remove { .. } => 0,
}, },
@@ -508,9 +504,9 @@ impl Format {
} }
/// Returns the capacity of an update. /// Returns the capacity of an update.
fn update_capacity(&self, update: &StoreUpdate) -> Nat { fn update_capacity<ByteSlice: Borrow<[u8]>>(&self, update: &StoreUpdate<ByteSlice>) -> Nat {
match update { match update {
StoreUpdate::Insert { value, .. } => self.entry_size(value), StoreUpdate::Insert { value, .. } => self.entry_size(value.borrow()),
StoreUpdate::Remove { .. } => 1, StoreUpdate::Remove { .. } => 1,
} }
} }
@@ -523,7 +519,10 @@ impl Format {
/// Checks if a transaction is valid and returns its sorted keys. /// Checks if a transaction is valid and returns its sorted keys.
/// ///
/// Returns `None` if the transaction is invalid. /// Returns `None` if the transaction is invalid.
pub fn transaction_valid(&self, updates: &[StoreUpdate]) -> Option<Vec<Nat>> { pub fn transaction_valid<ByteSlice: Borrow<[u8]>>(
&self,
updates: &[StoreUpdate<ByteSlice>],
) -> Option<Vec<Nat>> {
if usize_to_nat(updates.len()) > self.max_updates() { if usize_to_nat(updates.len()) > self.max_updates() {
return None; return None;
} }
@@ -550,7 +549,7 @@ impl Format {
/// ///
/// # Preconditions /// # Preconditions
/// ///
/// - `bytes + self.word_size()` does not overflow. /// - `bytes` + [`Self::word_size`] does not overflow.
pub fn bytes_to_words(&self, bytes: Nat) -> Nat { pub fn bytes_to_words(&self, bytes: Nat) -> Nat {
div_ceil(bytes, self.word_size()) div_ceil(bytes, self.word_size())
} }
@@ -564,7 +563,7 @@ const COMPACT_WORD: Nat = 1;
/// The word index of the content of a page. /// The word index of the content of a page.
/// ///
/// Since a page is at least 8 words, there is always at least 6 words of content. /// This is also the length in words of the page header.
const CONTENT_WORD: Nat = 2; const CONTENT_WORD: Nat = 2;
/// The checksum for a single word. /// The checksum for a single word.
@@ -711,21 +710,21 @@ bitfield! {
/// The position of a word in the virtual storage. /// The position of a word in the virtual storage.
/// ///
/// With the notations defined in `Format`, let: /// With the notations defined in [`Format`], let:
/// - `w` a virtual word offset in a page which is between `0` and `Q - 1` /// - w denote a word offset in a virtual page, thus between 0 and Q - 1
/// - `p` a page offset which is between `0` and `N - 1` /// - p denote a page offset, thus between 0 and N - 1
/// - `c` the number of erase cycles of a page which is between `0` and `E` /// - c denote the number of times a page was erased, thus between 0 and E
/// ///
/// Then the position of a word is `(c*N + p)*Q + w`. This position monotonically increases and /// The position of a word is (c × N + p) × Q + w. This position monotonically increases and
/// represents the consumed lifetime of the storage. /// represents the consumed lifetime of the storage.
/// ///
/// This type is kept abstract to avoid possible confusion with `Nat` and `Word` if they happen to /// This type is kept abstract to avoid possible confusion with [`Nat`] and [`Word`] if they happen
/// have the same representation. Here is an overview of their semantics: /// to have the same representation. Here is an overview of their semantics:
/// ///
/// | Name | Semantics | Arithmetic operations | Bit-wise operations | /// | Name | Semantics | Arithmetic operations | Bit-wise operations |
/// | ---------- | --------------------------- | --------------------- | ------------------- | /// | ---------- | --------------------------- | --------------------- | ------------------- |
/// | `Nat` | Natural numbers | Yes (no overflow) | No | /// | [`Nat`] | Natural numbers | Yes (no overflow) | No |
/// | `Word` | Word in flash | No | Yes | /// | [`Word`] | Word in flash | No | Yes |
/// | `Position` | Position in virtual storage | Yes (no overflow) | No | /// | `Position` | Position in virtual storage | Yes (no overflow) | No |
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)] #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct Position(Nat); pub struct Position(Nat);
@@ -756,9 +755,9 @@ impl Position {
/// Create a word position given its coordinates. /// Create a word position given its coordinates.
/// ///
/// The coordinates of a word are: /// The coordinates of a word are:
/// - Its word index in its page. /// - Its word index in its virtual page.
/// - Its page index in the storage. /// - Its page index in the storage.
/// - The number of times that page was erased. /// - The number of times its page was erased.
pub fn new(format: &Format, cycle: Nat, page: Nat, word: Nat) -> Position { pub fn new(format: &Format, cycle: Nat, page: Nat, word: Nat) -> Position {
Position((cycle * format.num_pages() + page) * format.virt_page_size() + word) Position((cycle * format.num_pages() + page) * format.virt_page_size() + word)
} }
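As a sanity check of the formula above, here is a hedged standalone sketch; the helper only mirrors the documented arithmetic and the values N = 3 and Q = 14 are assumptions for illustration, not the store's API.

// Mirrors (c * N + p) * Q + w without depending on the store types.
fn position(cycle: u32, page: u32, word: u32, num_pages: u32, virt_page_size: u32) -> u32 {
    (cycle * num_pages + page) * virt_page_size + word
}

fn main() {
    let (n, q) = (3, 14);
    assert_eq!(position(0, 0, 0, n, q), 0);  // first word ever written
    assert_eq!(position(1, 1, 0, n, q), 56); // (1 * 3 + 1) * 14 + 0
}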
@@ -921,11 +920,11 @@ pub fn is_erased(slice: &[u8]) -> bool {
/// Divides then takes ceiling. /// Divides then takes ceiling.
/// ///
/// Returns `ceil(x / m)` in mathematical notations (not Rust code). /// Returns ⌈x / m⌉, i.e. the lowest natural number r such that r ≥ x / m.
/// ///
/// # Preconditions /// # Preconditions
/// ///
/// - `x + m` does not overflow. /// - x + m does not overflow.
const fn div_ceil(x: Nat, m: Nat) -> Nat { const fn div_ceil(x: Nat, m: Nat) -> Nat {
(x + m - 1) / m (x + m - 1) / m
} }
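A hedged sketch of how this ceiling division behaves when converting byte lengths to words, assuming the 4-byte word size used throughout; the helper below simply restates the formula above.

const fn div_ceil(x: u32, m: u32) -> u32 {
    (x + m - 1) / m
}

fn main() {
    assert_eq!(div_ceil(0, 4), 0); // an empty value needs no data word
    assert_eq!(div_ceil(5, 4), 2); // 5 bytes round up to 2 words
    assert_eq!(div_ceil(8, 4), 2); // exact multiples do not round up
}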
@@ -1077,4 +1076,15 @@ mod tests {
0xff800000 0xff800000
); );
} }
#[test]
fn position_offsets_fit_in_a_halfword() {
// The store stores in RAM the entry positions as their offset from the head. Those offsets
// are represented as u16. The bound below is a large over-approximation of the maximal
// offset. We first make sure it fits in a u16.
const MAX_POS: Nat = (MAX_PAGE_INDEX + 1) * MAX_VIRT_PAGE_SIZE;
assert!(MAX_POS <= u16::MAX as Nat);
// We also check the actual value for up-to-date documentation, since it's a constant.
assert_eq!(MAX_POS, 0xff80);
}
} }


@@ -42,15 +42,20 @@ impl Field {
/// Sets the value of a bit field. /// Sets the value of a bit field.
/// ///
/// # Preconditions /// # Errors
/// ///
/// - The value must fit in the bit field: `num_bits(value) < self.len`. /// - The value must fit in the bit field: `num_bits(value) < self.len`.
/// - The value must only change bits from 1 to 0: `self.get(*word) & value == value`. /// - The value must only change bits from 1 to 0: `self.get(*word) & value == value`.
pub fn set(&self, word: &mut Word, value: Nat) { pub fn set(&self, word: &mut Word, value: Nat) -> StoreResult<()> {
debug_assert_eq!(value & self.mask(), value); if value & self.mask() != value {
return Err(StoreError::InvalidStorage);
}
let mask = !(self.mask() << self.pos); let mask = !(self.mask() << self.pos);
word.0 &= mask | (value << self.pos); word.0 &= mask | (value << self.pos);
debug_assert_eq!(self.get(*word), value); if self.get(*word) != value {
return Err(StoreError::InvalidStorage);
}
Ok(())
} }
/// Returns a bit mask the length of the bit field. /// Returns a bit mask the length of the bit field.
@@ -82,8 +87,8 @@ impl ConstField {
} }
/// Sets the bit field to its value. /// Sets the bit field to its value.
pub fn set(&self, word: &mut Word) { pub fn set(&self, word: &mut Word) -> StoreResult<()> {
self.field.set(word, self.value); self.field.set(word, self.value)
} }
} }
@@ -135,15 +140,15 @@ impl Checksum {
/// Sets the checksum to the external increment value. /// Sets the checksum to the external increment value.
/// ///
/// # Preconditions /// # Errors
/// ///
/// - The bits of the checksum bit field should be set to one: `self.field.get(*word) == /// - The bits of the checksum bit field should be set to one: `self.field.get(*word) ==
/// self.field.mask()`. /// self.field.mask()`.
/// - The checksum value should fit in the checksum bit field: `num_bits(word.count_zeros() + /// - The checksum value should fit in the checksum bit field: `num_bits(word.count_zeros() +
/// value) < self.field.len`. /// value) < self.field.len`.
pub fn set(&self, word: &mut Word, value: Nat) { pub fn set(&self, word: &mut Word, value: Nat) -> StoreResult<()> {
debug_assert_eq!(self.field.get(*word), self.field.mask()); debug_assert_eq!(self.field.get(*word), self.field.mask());
self.field.set(word, word.0.count_zeros() + value); self.field.set(word, word.0.count_zeros() + value)
} }
} }
@@ -290,7 +295,7 @@ mod tests {
assert_eq!(field.get(Word(0x000000f8)), 0x1f); assert_eq!(field.get(Word(0x000000f8)), 0x1f);
assert_eq!(field.get(Word(0x0000ff37)), 6); assert_eq!(field.get(Word(0x0000ff37)), 6);
let mut word = Word(0xffffffff); let mut word = Word(0xffffffff);
field.set(&mut word, 3); field.set(&mut word, 3).unwrap();
assert_eq!(word, Word(0xffffff1f)); assert_eq!(word, Word(0xffffff1f));
} }
@@ -305,7 +310,7 @@ mod tests {
assert!(field.check(Word(0x00000048))); assert!(field.check(Word(0x00000048)));
assert!(field.check(Word(0x0000ff4f))); assert!(field.check(Word(0x0000ff4f)));
let mut word = Word(0xffffffff); let mut word = Word(0xffffffff);
field.set(&mut word); field.set(&mut word).unwrap();
assert_eq!(word, Word(0xffffff4f)); assert_eq!(word, Word(0xffffff4f));
} }
@@ -333,7 +338,7 @@ mod tests {
assert_eq!(field.get(Word(0x00ffff67)), Ok(4)); assert_eq!(field.get(Word(0x00ffff67)), Ok(4));
assert_eq!(field.get(Word(0x7fffff07)), Err(StoreError::InvalidStorage)); assert_eq!(field.get(Word(0x7fffff07)), Err(StoreError::InvalidStorage));
let mut word = Word(0x0fffffff); let mut word = Word(0x0fffffff);
field.set(&mut word, 4); field.set(&mut word, 4).unwrap();
assert_eq!(word, Word(0x0fffff47)); assert_eq!(word, Word(0x0fffff47));
} }


@@ -0,0 +1,345 @@
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Support for fragmented entries.
//!
//! This module makes it possible to handle entries larger than the [maximum value
//! length](Store::max_value_length) by storing ordered consecutive fragments in a sequence of keys.
//! The first keys hold fragments of maximal length, followed by a possibly partial fragment. The
//! remaining keys are not used.
use crate::{Storage, Store, StoreError, StoreHandle, StoreResult, StoreUpdate};
use alloc::vec::Vec;
use core::ops::Range;
/// Represents a sequence of keys.
#[allow(clippy::len_without_is_empty)]
pub trait Keys {
/// Returns the number of keys.
fn len(&self) -> usize;
/// Returns the position of a key in the sequence.
fn pos(&self, key: usize) -> Option<usize>;
/// Returns the key of a position in the sequence.
///
/// # Preconditions
///
/// The position must be within the length: `pos` < [`Self::len`].
fn key(&self, pos: usize) -> usize;
}
impl Keys for Range<usize> {
fn len(&self) -> usize {
self.end - self.start
}
fn pos(&self, key: usize) -> Option<usize> {
if self.start <= key && key < self.end {
Some(key - self.start)
} else {
None
}
}
fn key(&self, pos: usize) -> usize {
debug_assert!(pos < Keys::len(self));
self.start + pos
}
}
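A hedged usage sketch of the fragment helpers over the key range 0..4, in the style of the tests at the end of this module; it assumes the MINIMAL test storage they use, where a 60-byte value splits into a 52-byte chunk under key 0 and an 8-byte chunk under key 1.

#[test]
fn fragment_round_trip_sketch() {
    let mut store = MINIMAL.new_store();
    let value: Vec<u8> = (0..60).collect();
    // `write` splits the value into chunks of at most `max_value_length` bytes.
    assert_eq!(write(&mut store, &(0..4), &value), Ok(()));
    // `read` concatenates the fragments back in order.
    assert_eq!(read(&store, &(0..4)), Ok(Some(value)));
    // `delete` removes every fragment in a single transaction.
    assert_eq!(delete(&mut store, &(0..4)), Ok(()));
    assert_eq!(read(&store, &(0..4)), Ok(None));
}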
/// Reads the concatenated value of a sequence of keys.
pub fn read(store: &Store<impl Storage>, keys: &impl Keys) -> StoreResult<Option<Vec<u8>>> {
let handles = get_handles(store, keys)?;
if handles.is_empty() {
return Ok(None);
}
let mut result = Vec::with_capacity(handles.len() * store.max_value_length());
for handle in handles {
result.extend(handle.get_value(store)?);
}
Ok(Some(result))
}
/// Reads a range from the concatenated value of a sequence of keys.
///
/// This is equivalent to calling [`read`] then taking the range except that:
/// - Only the needed chunks are read.
/// - The range is truncated to fit in the value.
pub fn read_range(
store: &Store<impl Storage>,
keys: &impl Keys,
range: Range<usize>,
) -> StoreResult<Option<Vec<u8>>> {
let range_len = match range.end.checked_sub(range.start) {
None => return Err(StoreError::InvalidArgument),
Some(x) => x,
};
let handles = get_handles(store, keys)?;
if handles.is_empty() {
return Ok(None);
}
let mut result = Vec::with_capacity(range_len);
let mut offset = 0;
for handle in handles {
let start = range.start.saturating_sub(offset);
let length = handle.get_length(store)?;
let end = core::cmp::min(range.end.saturating_sub(offset), length);
offset += length;
if start < end {
result.extend(&handle.get_value(store)?[start..end]);
}
}
Ok(Some(result))
}
/// Writes a value to a sequence of keys as chunks.
pub fn write(store: &mut Store<impl Storage>, keys: &impl Keys, value: &[u8]) -> StoreResult<()> {
let handles = get_handles(store, keys)?;
let keys_len = keys.len();
let mut updates = Vec::with_capacity(keys_len);
let mut chunks = value.chunks(store.max_value_length());
for pos in 0..keys_len {
let key = keys.key(pos);
match (handles.get(pos), chunks.next()) {
// No existing handle and no new chunk: nothing to do.
(None, None) => (),
// Existing handle and no new chunk: remove old handle.
(Some(_), None) => updates.push(StoreUpdate::Remove { key }),
// Existing handle with same value as new chunk: nothing to do.
(Some(handle), Some(value)) if handle.get_value(store)? == value => (),
// New chunk: Write (or overwrite) the new value.
(_, Some(value)) => updates.push(StoreUpdate::Insert { key, value }),
}
}
if chunks.next().is_some() {
// The value is too long.
return Err(StoreError::InvalidArgument);
}
store.transaction(&updates)
}
/// Deletes the value of a sequence of keys.
pub fn delete(store: &mut Store<impl Storage>, keys: &impl Keys) -> StoreResult<()> {
let updates: Vec<StoreUpdate<Vec<u8>>> = get_handles(store, keys)?
.iter()
.map(|handle| StoreUpdate::Remove {
key: handle.get_key(),
})
.collect();
store.transaction(&updates)
}
/// Returns the handles of a sequence of keys.
///
/// The handles are truncated to the keys that are present.
fn get_handles(store: &Store<impl Storage>, keys: &impl Keys) -> StoreResult<Vec<StoreHandle>> {
let keys_len = keys.len();
let mut handles: Vec<Option<StoreHandle>> = vec![None; keys_len as usize];
for handle in store.iter()? {
let handle = handle?;
let pos = match keys.pos(handle.get_key()) {
Some(pos) => pos,
None => continue,
};
if pos >= keys_len {
return Err(StoreError::InvalidArgument);
}
if let Some(old_handle) = &handles[pos] {
if old_handle.get_key() != handle.get_key() {
// The user provided a non-injective `pos` function.
return Err(StoreError::InvalidArgument);
} else {
return Err(StoreError::InvalidStorage);
}
}
handles[pos] = Some(handle);
}
let num_handles = handles.iter().filter(|x| x.is_some()).count();
let mut result = Vec::with_capacity(num_handles);
for (i, handle) in handles.into_iter().enumerate() {
match (i < num_handles, handle) {
(true, Some(handle)) => result.push(handle),
(false, None) => (),
// We should have `num_handles` Somes followed by Nones.
_ => return Err(StoreError::InvalidStorage),
}
}
Ok(result)
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test::MINIMAL;
#[test]
fn read_empty_entry() {
let store = MINIMAL.new_store();
assert_eq!(read(&store, &(0..4)), Ok(None));
}
#[test]
fn read_single_chunk() {
let mut store = MINIMAL.new_store();
let value = b"hello".to_vec();
assert_eq!(store.insert(0, &value), Ok(()));
assert_eq!(read(&store, &(0..4)), Ok(Some(value)));
}
#[test]
fn read_multiple_chunks() {
let mut store = MINIMAL.new_store();
let value: Vec<_> = (0..60).collect();
assert_eq!(store.insert(0, &value[..52]), Ok(()));
assert_eq!(store.insert(1, &value[52..]), Ok(()));
assert_eq!(read(&store, &(0..4)), Ok(Some(value)));
}
#[test]
fn read_range_first_chunk() {
let mut store = MINIMAL.new_store();
let value: Vec<_> = (0..60).collect();
assert_eq!(store.insert(0, &value[..52]), Ok(()));
assert_eq!(store.insert(1, &value[52..]), Ok(()));
assert_eq!(
read_range(&store, &(0..4), 0..10),
Ok(Some((0..10).collect()))
);
assert_eq!(
read_range(&store, &(0..4), 10..20),
Ok(Some((10..20).collect()))
);
assert_eq!(
read_range(&store, &(0..4), 40..52),
Ok(Some((40..52).collect()))
);
}
#[test]
fn read_range_second_chunk() {
let mut store = MINIMAL.new_store();
let value: Vec<_> = (0..60).collect();
assert_eq!(store.insert(0, &value[..52]), Ok(()));
assert_eq!(store.insert(1, &value[52..]), Ok(()));
assert_eq!(read_range(&store, &(0..4), 52..53), Ok(Some(vec![52])));
assert_eq!(read_range(&store, &(0..4), 53..54), Ok(Some(vec![53])));
assert_eq!(read_range(&store, &(0..4), 59..60), Ok(Some(vec![59])));
}
#[test]
fn read_range_both_chunks() {
let mut store = MINIMAL.new_store();
let value: Vec<_> = (0..60).collect();
assert_eq!(store.insert(0, &value[..52]), Ok(()));
assert_eq!(store.insert(1, &value[52..]), Ok(()));
assert_eq!(
read_range(&store, &(0..4), 40..60),
Ok(Some((40..60).collect()))
);
assert_eq!(
read_range(&store, &(0..4), 0..60),
Ok(Some((0..60).collect()))
);
}
#[test]
fn read_range_outside() {
let mut store = MINIMAL.new_store();
let value: Vec<_> = (0..60).collect();
assert_eq!(store.insert(0, &value[..52]), Ok(()));
assert_eq!(store.insert(1, &value[52..]), Ok(()));
assert_eq!(
read_range(&store, &(0..4), 40..100),
Ok(Some((40..60).collect()))
);
assert_eq!(read_range(&store, &(0..4), 60..100), Ok(Some(vec![])));
}
#[test]
fn write_single_chunk() {
let mut store = MINIMAL.new_store();
let value = b"hello".to_vec();
assert_eq!(write(&mut store, &(0..4), &value), Ok(()));
assert_eq!(store.find(0), Ok(Some(value)));
assert_eq!(store.find(1), Ok(None));
assert_eq!(store.find(2), Ok(None));
assert_eq!(store.find(3), Ok(None));
}
#[test]
fn write_multiple_chunks() {
let mut store = MINIMAL.new_store();
let value: Vec<_> = (0..60).collect();
assert_eq!(write(&mut store, &(0..4), &value), Ok(()));
assert_eq!(store.find(0), Ok(Some((0..52).collect())));
assert_eq!(store.find(1), Ok(Some((52..60).collect())));
assert_eq!(store.find(2), Ok(None));
assert_eq!(store.find(3), Ok(None));
}
#[test]
fn overwrite_less_chunks() {
let mut store = MINIMAL.new_store();
let value: Vec<_> = (0..60).collect();
assert_eq!(store.insert(0, &value[..52]), Ok(()));
assert_eq!(store.insert(1, &value[52..]), Ok(()));
let value: Vec<_> = (42..69).collect();
assert_eq!(write(&mut store, &(0..4), &value), Ok(()));
assert_eq!(store.find(0), Ok(Some((42..69).collect())));
assert_eq!(store.find(1), Ok(None));
assert_eq!(store.find(2), Ok(None));
assert_eq!(store.find(3), Ok(None));
}
#[test]
fn overwrite_needed_chunks() {
let mut store = MINIMAL.new_store();
let mut value: Vec<_> = (0..60).collect();
assert_eq!(store.insert(0, &value[..52]), Ok(()));
assert_eq!(store.insert(1, &value[52..]), Ok(()));
// Current lifetime is 2 words of overhead (2 insert) and 60 bytes of data.
let mut lifetime = 2 + 60 / 4;
assert_eq!(store.lifetime().unwrap().used(), lifetime);
// Update the value.
value.extend(60..80);
assert_eq!(write(&mut store, &(0..4), &value), Ok(()));
// Added lifetime is 1 word of overhead (1 insert) and (80 - 52) bytes of data.
lifetime += 1 + (80 - 52) / 4;
assert_eq!(store.lifetime().unwrap().used(), lifetime);
}
#[test]
fn delete_empty() {
let mut store = MINIMAL.new_store();
assert_eq!(delete(&mut store, &(0..4)), Ok(()));
assert_eq!(store.find(0), Ok(None));
assert_eq!(store.find(1), Ok(None));
assert_eq!(store.find(2), Ok(None));
assert_eq!(store.find(3), Ok(None));
}
#[test]
fn delete_chunks() {
let mut store = MINIMAL.new_store();
let value: Vec<_> = (0..60).collect();
assert_eq!(store.insert(0, &value[..52]), Ok(()));
assert_eq!(store.insert(1, &value[52..]), Ok(()));
assert_eq!(delete(&mut store, &(0..4)), Ok(()));
assert_eq!(store.find(0), Ok(None));
assert_eq!(store.find(1), Ok(None));
assert_eq!(store.find(2), Ok(None));
assert_eq!(store.find(3), Ok(None));
}
}


@@ -1,4 +1,4 @@
// Copyright 2019-2020 Google LLC // Copyright 2019-2021 Google LLC
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@@ -12,191 +12,191 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
// TODO(ia0): Add links once the code is complete. // The documentation is easier to read from a browser:
// - Run: cargo doc --document-private-items --features=std
// - Open: target/doc/persistent_store/index.html
//! Store abstraction for flash storage //! Store abstraction for flash storage
//! //!
//! # Specification //! # Specification
//! //!
//! The store provides a partial function from keys to values on top of a storage //! The [store](Store) provides a partial function from keys to values on top of a
//! interface. The store total capacity depends on the size of the storage. Store //! [storage](Storage) interface. The store total [capacity](Store::capacity) depends on the size of
//! updates may be bundled in transactions. Mutable operations are atomic, including //! the storage. Store [updates](StoreUpdate) may be bundled in [transactions](Store::transaction).
//! when interrupted. //! Mutable operations are atomic, including when interrupted.
//! //!
//! The store is flash-efficient in the sense that it uses the storage lifetime //! The store is flash-efficient in the sense that it uses the storage [lifetime](Store::lifetime)
//! efficiently. For each page, all words are written at least once between erase //! efficiently. For each page, all words are written at least once between erase cycles and all
//! cycles and all erase cycles are used. However, not all written words are user //! erase cycles are used. However, not all written words are user content: Lifetime is also
//! content: lifetime is also consumed with metadata and compaction. //! consumed with metadata and compaction.
//! //!
//! The store is extendable with other entries than key-values. It is essentially a //! The store is extendable with other entries than key-values. It is essentially a framework
//! framework providing access to the storage lifetime. The partial function is //! providing access to the storage lifetime. The partial function is simply the most common usage
//! simply the most common usage and can be used to encode other usages. //! and can be used to encode other usages.
//! //!
//! ## Definitions //! ## Definitions
//! //!
//! An _entry_ is a pair of a key and a value. A _key_ is a number between 0 //! An _entry_ is a pair of a key and a value. A _key_ is a number between 0 and
//! and 4095. A _value_ is a byte slice with a length between 0 and 1023 bytes (for //! [4095](format::MAX_KEY_INDEX). A _value_ is a byte slice with a length between 0 and
//! large enough pages). //! [1023](format::Format::max_value_len) bytes (for large enough pages).
//! //!
//! The store provides the following _updates_: //! The store provides the following _updates_:
//! - Given a key and a value, `Insert` updates the store such that the value is //! - Given a key and a value, [`StoreUpdate::Insert`] updates the store such that the value is
//! associated with the key. The values for other keys are left unchanged. //! associated with the key. The values for other keys are left unchanged.
//! - Given a key, `Remove` updates the store such that no value is associated with //! - Given a key, [`StoreUpdate::Remove`] updates the store such that no value is associated with
//! the key. The values for other keys are left unchanged. Additionally, if there //! the key. The values for other keys are left unchanged. Additionally, if there was a value
//! was a value associated with the key, the value is wiped from the storage //! associated with the key, the value is wiped from the storage (all its bits are set to 0).
//! (all its bits are set to 0).
//! //!
//! The store provides the following _read-only operations_: //! The store provides the following _read-only operations_:
//! - `Iter` iterates through the store returning all entries exactly once. The //! - [`Store::iter`] iterates through the store returning all entries exactly once. The iteration
//! iteration order is not specified but stable between mutable operations. //! order is not specified but stable between mutable operations.
//! - `Capacity` returns how many words can be stored before the store is full. //! - [`Store::capacity`] returns how many words can be stored before the store is full.
//! - `Lifetime` returns how many words can be written before the storage lifetime //! - [`Store::lifetime`] returns how many words can be written before the storage lifetime is
//! is consumed. //! consumed.
//! //!
//! The store provides the following _mutable operations_: //! The store provides the following _mutable operations_:
//! - Given a set of independent updates, `Transaction` applies the sequence of //! - Given a set of independent updates, [`Store::transaction`] applies the sequence of updates.
//! updates. //! - Given a threshold, [`Store::clear`] removes all entries with a key greater or equal to the
//! - Given a threshold, `Clear` removes all entries with a key greater or equal //! threshold.
//! to the threshold. //! - Given a length in words, [`Store::prepare`] makes one step of compaction unless that many
//! - Given a length in words, `Prepare` makes one step of compaction unless that //! words can be written without compaction. This operation has no effect on the store but may
//! many words can be written without compaction. This operation has no effect //! still mutate its storage. In particular, the store has the same capacity but a possibly
//! on the store but may still mutate its storage. In particular, the store has //! reduced lifetime.
//! the same capacity but a possibly reduced lifetime.
//! //!
//! A mutable operation is _atomic_ if, when power is lost during the operation, the //! A mutable operation is _atomic_ if, when power is lost during the operation, the store is either
//! store is either updated (as if the operation succeeded) or left unchanged (as if //! updated (as if the operation succeeded) or left unchanged (as if the operation did not occur).
//! the operation did not occur). If the store is left unchanged, lifetime may still //! If the store is left unchanged, lifetime may still be consumed.
//! be consumed.
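To ground the updates and operations listed above, here is a hedged usage sketch; it reuses the MINIMAL test storage that appears in the fragment tests later in this diff, and the `clear` call is an assumption based on the description above rather than a confirmed signature.

fn store_sketch() -> StoreResult<()> {
    let mut store = MINIMAL.new_store();
    // Insert an entry and read it back.
    store.insert(0, b"hello")?;
    assert_eq!(store.find(0)?, Some(b"hello".to_vec()));
    // Apply two independent updates atomically.
    store.transaction(&[
        StoreUpdate::Insert { key: 1, value: b"world".to_vec() },
        StoreUpdate::Remove { key: 0 },
    ])?;
    // Remove every entry whose key is at least 1 (assumed signature).
    store.clear(1)?;
    Ok(())
}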
//! //!
//! The store relies on the following _storage interface_: //! The store relies on the following _storage interface_:
//! - It is possible to read a byte slice. The slice won't span multiple pages. //! - It is possible to [read](Storage::read_slice) a byte slice. The slice won't span multiple
//! - It is possible to write a word slice. The slice won't span multiple pages. //! pages.
//! - It is possible to erase a page. //! - It is possible to [write](Storage::write_slice) a word slice. The slice won't span multiple
//! - The pages are sequentially indexed from 0. If the actual underlying storage //! pages.
//! is segmented, then the storage layer should translate those indices to //! - It is possible to [erase](Storage::erase_page) a page.
//! actual page addresses. //! - The pages are sequentially indexed from 0. If the actual underlying storage is segmented,
//! then the storage layer should translate those indices to actual page addresses.
//! //!
//! The store has a _total capacity_ of `C = (N - 1) * (P - 4) - M - 1` words, where //! The store has a _total capacity_ of C = (N - 1) × (P - 4) - M - 1 words, where:
//! `P` is the number of words per page, `N` is the number of pages, and `M` is the //! - P is the number of words per page
//! maximum length in words of a value (256 for large enough pages). The capacity //! - [N](format::Format::num_pages) is the number of pages
//! used by each mutable operation is given below (a transient word only uses //! - [M](format::Format::max_prefix_len) is the maximum length in words of a value (256 for large
//! capacity during the operation): //! enough pages)
//! - `Insert` uses `1 + ceil(len / 4)` words where `len` is the length of the
//! value in bytes. If an entry was replaced, the words used by its insertion
//! are freed.
//! - `Remove` doesn't use capacity if alone in the transaction and 1 transient
//! word otherwise. If an entry was deleted, the words used by its insertion are
//! freed.
//! - `Transaction` uses 1 transient word. In addition, the updates of the
//! transaction use and free words as described above.
//! - `Clear` doesn't use capacity and frees the words used by the insertion of
//! the deleted entries.
//! - `Prepare` doesn't use capacity.
//! //!
//! The _total lifetime_ of the store is below `L = ((E + 1) * N - 1) * (P - 2)` and //! The capacity used by each mutable operation is given below (a transient word only uses capacity
//! above `L - M` words, where `E` is the maximum number of erase cycles. The //! during the operation):
//! lifetime is used when capacity is used, including transiently, as well as when
//! compaction occurs. Compaction frequency and lifetime consumption are positively
//! correlated to the store load factor (the ratio of used capacity to total capacity).
//! //!
//! It is possible to approximate the cost of transient words in terms of capacity: //! | Operation/Update | Used capacity | Freed capacity | Transient capacity |
//! `L` transient words are equivalent to `C - x` words of capacity where `x` is the //! | ----------------------- | ---------------- | ----------------- | ------------------ |
//! average capacity (including transient) of operations. //! | [`StoreUpdate::Insert`] | 1 + value length | overwritten entry | 0 |
//! | [`StoreUpdate::Remove`] | 0 | deleted entry | see below\* |
//! | [`Store::transaction`] | 0 + updates | 0 + updates | 1 |
//! | [`Store::clear`] | 0 | deleted entries | 0 |
//! | [`Store::prepare`] | 0 | 0 | 0 |
//!
//! \*0 if the update is alone in the transaction, otherwise 1.
//!
//! The _total lifetime_ of the store is below L = ((E + 1) × N - 1) × (P - 2) and above L - M
//! words, where E is the maximum number of erase cycles. The lifetime is used when capacity is
//! used, including transiently, as well as when compaction occurs. Compaction frequency and
//! lifetime consumption are positively correlated to the store load factor (the ratio of used
//! capacity to total capacity).
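As a hedged numeric instance of the lifetime bound above, with assumed parameters E = 2 erase cycles, N = 3 pages and P = 16 words per page:

fn main() {
    let (e, n, p): (u32, u32, u32) = (2, 3, 16); // assumed storage parameters
    let l = ((e + 1) * n - 1) * (p - 2);         // L = ((E + 1) * N - 1) * (P - 2)
    assert_eq!(l, 112);                          // 8 virtual pages of 14 words each
}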
//!
//! It is possible to approximate the cost of transient words in terms of capacity: L transient
//! words are equivalent to C - x words of capacity where x is the average capacity (including
//! transient) of operations.
//! //!
//! ## Preconditions //! ## Preconditions
//! //!
//! The following assumptions need to hold, or the store may behave in unexpected ways: //! The following assumptions need to hold, or the store may behave in unexpected ways:
//! - A word can be written twice between erase cycles. //! - A word can be written [twice](Storage::max_word_writes) between erase cycles.
//! - A page can be erased `E` times after the first boot of the store. //! - A page can be erased [E](Storage::max_page_erases) times after the first boot of the store.
//! - When power is lost while writing a slice or erasing a page, the next read //! - When power is lost while writing a slice or erasing a page, the next read returns a slice
//! returns a slice where a subset (possibly none or all) of the bits that //! where a subset (possibly none or all) of the bits that should have been modified have been
//! should have been modified have been modified. //! modified.
//! - Reading a slice is deterministic. When power is lost while writing a slice //! - Reading a slice is deterministic. When power is lost while writing a slice or erasing a
//! or erasing a slice (erasing a page containing that slice), reading that //! slice (erasing a page containing that slice), reading that slice repeatedly returns the same
//! slice repeatedly returns the same result (until it is overwritten or its //! result (until it is overwritten or its page is erased).
//! page is erased). //! - To decide whether a page has been erased, it is enough to test if all its bits are equal
//! - To decide whether a page has been erased, it is enough to test if all its //! to 1.
//! bits are equal to 1. //! - When power is lost while writing a slice or erasing a page, that operation does not count
//! - When power is lost while writing a slice or erasing a page, that operation //! towards the limits. However, completing that write or erase operation would count towards
//! does not count towards the limits. However, completing that write or erase //! the limits, as if the number of writes per word and number of erase cycles could be
//! operation would count towards the limits, as if the number of writes per //! fractional.
//! word and number of erase cycles could be fractional. //! - The storage is only modified by the store. Note that completely erasing the storage is
//! - The storage is only modified by the store. Note that completely erasing the //! supported, essentially losing all content and lifetime tracking. It is preferred to use
//! storage is supported, essentially losing all content and lifetime tracking. //! [`Store::clear`] with a threshold of 0 to keep the lifetime tracking.
//! It is preferred to use `Clear` with a threshold of 0 to keep the lifetime
//! tracking.
//! //!
//! The store properties may still hold outside some of those assumptions, but with //! The store properties may still hold outside some of those assumptions, but with an increasing
//! an increasing chance of failure. //! chance of failure.
//! //!
//! # Implementation //! # Implementation
//! //!
//! We define the following constants: //! We define the following constants:
//! - `E < 65536` the number of times a page can be erased. //! - [E](format::Format::max_page_erases) ≤ [65535](format::MAX_ERASE_CYCLE) the number of times
//! - `3 <= N < 64` the number of pages in the storage. //! a page can be erased.
//! - `8 <= P <= 1024` the number of words in a page. //! - 3 ≤ [N](format::Format::num_pages) < 64 the number of pages in the storage.
//! - `Q = P - 2` the number of words in a virtual page. //! - 8 ≤ P ≤ 1024 the number of words in a page.
//! - `K = 4096` the maximum number of keys. //! - [Q](format::Format::virt_page_size) = P - 2 the number of words in a virtual page.
//! - `M = min(Q - 1, 256)` the maximum length in words of a value. //! - [M](format::Format::max_prefix_len) = min(Q - 1, 256) the maximum length in words of a
//! - `V = (N - 1) * (Q - 1) - M` the virtual capacity. //! value.
//! - `C = V - N` the user capacity. //! - [V](format::Format::virt_size) = (N - 1) × (Q - 1) - M the virtual capacity.
//! - [C](format::Format::total_capacity) = V - N the user capacity.
//! //!
//! We build a virtual storage from the physical storage using the first 2 words of //! We build a virtual storage from the physical storage using the first 2 words of each page:
//! each page:
//! - The first word contains the number of times the page has been erased. //! - The first word contains the number of times the page has been erased.
//! - The second word contains the starting word to which this page is being moved //! - The second word contains the starting word to which this page is being moved during
//! during compaction. //! compaction.
//! //!
//! The virtual storage has a length of `(E + 1) * N * Q` words and represents the //! The virtual storage has a length of (E + 1) × N × Q words and represents the lifetime of the
//! lifetime of the store. (We reserve the last `Q + M` words to support adding //! store. (We reserve the last Q + M words to support adding emergency lifetime.) This virtual
//! emergency lifetime.) This virtual storage has a linear address space. //! storage has a linear address space.
//! //!
//! We define a set of overlapping windows of `N * Q` words at each `Q`-aligned //! We define a set of overlapping windows of N × Q words at each Q-aligned boundary. We call i the
//! boundary. We call `i` the window spanning from `i * Q` to `(i + N) * Q`. Only //! window spanning from i × Q to (i + N) × Q. Only those windows actually exist in the underlying
//! those windows actually exist in the underlying storage. We use compaction to //! storage. We use compaction to shift the current window from i to i + 1, preserving the content
//! shift the current window from `i` to `i + 1`, preserving the content of the //! of the store.
//! store.
//! //!
//! For a given state of the virtual storage, we define `h_i` as the position of the //! For a given state of the virtual storage, we define h\_i as the position of the first entry of
//! first entry of the window `i`. We call it the head of the window `i`. Because //! the window i. We call it the head of the window i. Because entries are at most M + 1 words, they
//! entries are at most `M + 1` words, they can overlap on the next page only by `M` //! can overlap on the next page only by M words. So we have i × Q ≤ h_i ≤ i × Q + M . Since there
//! words. So we have `i * Q <= h_i <= i * Q + M` . Since there are no entries //! are no entries before the first page, we have h\_0 = 0.
//! before the first page, we have `h_0 = 0`.
//! //!
//! We define `t_i` as one past the last entry of the window `i`. If there are no //! We define t\_i as one past the last entry of the window i. If there are no entries in that
//! entries in that window, we have `t_i = h_i`. We call `t_i` the tail of the //! window, we have t\_i = h\_i. We call t\_i the tail of the window i. We define the compaction
//! window `i`. We define the compaction invariant as `t_i - h_i <= V`. //! invariant as t\_i - h\_i ≤ V.
//! //!
//! We define `|x|` as the capacity used before position `x`. We have `|x| <= x`. We //! We define |x| as the capacity used before position x. We have |x| ≤ x. We define the capacity
//! define the capacity invariant as `|t_i| - |h_i| <= C`. //! invariant as |t\_i| - |h\_i| ≤ C.
//! //!
//! Using this virtual storage, entries are appended to the tail as long as there is //! Using this virtual storage, entries are appended to the tail as long as there is both virtual
//! both virtual capacity to preserve the compaction invariant and capacity to //! capacity to preserve the compaction invariant and capacity to preserve the capacity invariant.
//! preserve the capacity invariant. When virtual capacity runs out, the first page //! When virtual capacity runs out, the first page of the window is compacted and the window is
//! of the window is compacted and the window is shifted. //! shifted.
//! //!
//! Entries are identified by a prefix of bits. The prefix has to contain at least //! Entries are identified by a prefix of bits. The prefix has to contain at least one bit set to
//! one bit set to zero to differentiate from the tail. Entries can be one of: //! zero to differentiate from the tail. Entries can be one of:
//! - Padding: A word whose first bit is set to zero. The rest is arbitrary. This //! - [Padding](format::ID_PADDING): A word whose first bit is set to zero. The rest is arbitrary.
//! entry is used to mark words partially written after an interrupted operation //! This entry is used to mark words partially written after an interrupted operation as padding
//! as padding such that they are ignored by future operations. //! such that they are ignored by future operations.
//! - Header: A word whose second bit is set to zero. It contains the following fields: //! - [Header](format::ID_HEADER): A word whose second bit is set to zero. It contains the
//! - A bit indicating whether the entry is deleted. //! following fields:
//! - A bit indicating whether the value is word-aligned and has all bits set //! - A [bit](format::HEADER_DELETED) indicating whether the entry is deleted.
//! to 1 in its last word. The last word of an entry is used to detect that //! - A [bit](format::HEADER_FLIPPED) indicating whether the value is word-aligned and has all
//! an entry has been fully written. As such it must contain at least one //! bits set to 1 in its last word. The last word of an entry is used to detect that an
//! bit equal to zero. //! entry has been fully written. As such it must contain at least one bit equal to zero.
//! - The key of the entry. //! - The [key](format::HEADER_KEY) of the entry.
//! - The length in bytes of the value. The value follows the header. The //! - The [length](format::HEADER_LENGTH) in bytes of the value. The value follows the header.
//! entry is word-aligned if the value is not. //! The entry is word-aligned if the value is not.
//! - The checksum of the first and last word of the entry. //! - The [checksum](format::HEADER_CHECKSUM) of the first and last word of the entry.
//! - Erase: A word used during compaction. It contains the page to be erased and //! - [Erase](format::ID_ERASE): A word used during compaction. It contains the
//! a checksum. //! [page](format::ERASE_PAGE) to be erased and a [checksum](format::WORD_CHECKSUM).
//! - Clear: A word used during the `Clear` operation. It contains the threshold //! - [Clear](format::ID_CLEAR): A word used during the clear operation. It contains the
//! and a checksum. //! [threshold](format::CLEAR_MIN_KEY) and a [checksum](format::WORD_CHECKSUM).
//! - Marker: A word used during the `Transaction` operation. It contains the //! - [Marker](format::ID_MARKER): A word used during a transaction. It contains the [number of
//! number of updates following the marker and a checksum. //! updates](format::MARKER_COUNT) following the marker and a [checksum](format::WORD_CHECKSUM).
//! - Remove: A word used during the `Transaction` operation. It contains the key //! - [Remove](format::ID_REMOVE): A word used inside a transaction. It contains the
//! of the entry to be removed and a checksum. //! [key](format::REMOVE_KEY) of the entry to be removed and a
//! [checksum](format::WORD_CHECKSUM).
//! //!
//! Checksums are the number of bits equal to 0. //! Checksums are the number of bits equal to 0.
//! //!
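As a small illustration of that convention (a sketch, not the library's actual helper), the checksum of a single 32-bit word is just its number of zero bits, which `count_zeros` gives directly:

```rust
// Sketch: a checksum is the number of bits equal to 0 in the covered words
// (here a single 32-bit word).
fn word_checksum(word: u32) -> u32 {
    word.count_zeros()
}

fn main() {
    assert_eq!(word_checksum(0xFFFF_FFFF), 0); // fully erased word
    assert_eq!(word_checksum(0xFFFF_FFFE), 1); // one bit programmed to 0
    assert_eq!(word_checksum(0x0000_0000), 32); // fully programmed word
}
```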
@@ -204,107 +204,105 @@
//! //!
//! ## Compaction //! ## Compaction
//! //!
//! It should always be possible to fully compact the store, after which the //! It should always be possible to fully compact the store, after which the remaining capacity
//! remaining capacity should be available in the current window (restoring the //! should be available in the current window (restoring the compaction invariant). We consider all
//! compaction invariant). We consider all notations on the virtual storage after //! notations on the virtual storage after the full compaction. We will use the |x| notation
//! the full compaction. We will use the `|x|` notation although we update the state //! although we update the state of the virtual storage. This is fine because compaction doesn't
//! of the virtual storage. This is fine because compaction doesn't change the //! change the status of an existing word.
//! status of an existing word.
//! //!
//! We want to show that the next `N - 1` compactions won't move the tail past the //! We want to show that the next N - 1 compactions won't move the tail past the last page of their
//! last page of their window, with `I` the initial window: //! window, with I the initial window:
//! //!
//! ```text //! | | | | |
//! forall 1 <= i <= N - 1, t_{I + i} <= (I + i + N - 1) * Q //! | ----------------:| ----------:|:-:|:------------------- |
//! ``` //! | ∀(1 ≤ i ≤ N - 1) | t\_{I + i} | ≤ | (I + i + N - 1) × Q |
//! //!
//! We assume `i` is between `1` and `N - 1`. //! We assume i is between 1 and N - 1.
//! //!
//! One step of compaction advances the tail by how many words were used in the //! One step of compaction advances the tail by how many words were used in the first page of the
//! first page of the window with the last entry possibly overlapping on the next //! window with the last entry possibly overlapping on the next page.
//! page.
//! //!
//! ```text //! | | | | |
//! forall j, t_{j + 1} = t_j + |h_{j + 1}| - |h_j| + 1 //! | --:| ----------:|:-:|:------------------------------------ |
//! ``` //! | ∀j | t\_{j + 1} | = | t\_j + \|h\_{j + 1}\| - \|h\_j\| + 1 |
//! //!
//! By induction, we have: //! By induction, we have:
//! //!
//! ```text //! | | | |
//! t_{I + i} <= t_I + |h_{I + i}| - |h_I| + i //! | ----------:|:-:|:------------------------------------ |
//! ``` //! | t\_{I + i} | ≤ | t\_I + \|h\_{I + i}\| - \|h\_I\| + i |
//! //!
//! We have the following properties: //! We have the following properties:
//! //!
//! ```text //! | | | |
//! t_I <= h_I + V //! | -------------------------:|:-:|:----------------- |
//! |h_{I + i}| - |h_I| <= h_{I + i} - h_I //! | t\_I | ≤ | h\_I + V |
//! h_{I + i} <= (I + i) * Q + M //! | \|h\_{I + i}\| - \|h\_I\| | ≤ | h\_{I + i} - h\_I |
//! ``` //! | h\_{I + i} | ≤ | (I + i) × Q + M |
//! //!
//! Replacing into our previous equality, we can conclude: //! Replacing into our previous equality, we can conclude:
//! //!
//! ```text //! | | | |
//! t_{I + i} = t_I + |h_{I + i}| - |h_I| + i //! | ----------:|:-:| ------------------------------------------- |
//! <= h_I + V + (I + i) * Q + M - h_I + i //! | t\_{I + i} | = | t\_I + \|h\_{I + i}\| - \|h\_I\| + i |
//! = (N - 1) * (Q - 1) - M + (I + i) * Q + M + i //! | | ≤ | h\_I + V + (I + i) × Q + M - h\_I + i |
//! = (N - 1) * (Q - 1) + (I + i) * Q + i //! | | = | (N - 1) × (Q - 1) - M + (I + i) × Q + M + i |
//! = (I + i + N - 1) * Q + i - (N - 1) //! | | = | (N - 1) × (Q - 1) + (I + i) × Q + i |
//! <= (I + i + N - 1) * Q //! | | = | (I + i + N - 1) × Q + i - (N - 1) |
//! ``` //! | | ≤ | (I + i + N - 1) × Q |
//! //!
//! We also want to show that after `N - 1` compactions, the remaining capacity is //! We also want to show that after N - 1 compactions, the remaining capacity is available without
//! available without compaction. //! compaction.
//! //!
//! ```text //! | | | |
//! V - (t_{I + N - 1} - h_{I + N - 1}) >= // The available words in the window. //! | -:| --------------------------------------------- | --------------------------------- |
//! C - (|t_{I + N - 1}| - |h_{I + N - 1}|) // The remaining capacity. //! | | V - (t\_{I + N - 1} - h\_{I + N - 1}) | The available words in the window |
//! + 1 // Reserved for Clear. //! | ≥ | C - (\|t\_{I + N - 1}\| - \|h\_{I + N - 1}\|) | The remaining capacity |
//! ``` //! | + | 1 | Reserved for clear |
//! //!
//! We can replace the definition of `C` and simplify: //! We can replace the definition of C and simplify:
//! //!
//! ```text //! | | | | |
//! V - (t_{I + N - 1} - h_{I + N - 1}) >= V - N - (|t_{I + N - 1}| - |h_{I + N - 1}|) + 1 //! | ---:| -------------------------------------:|:-:|:----------------------------------------------------- |
//! iff t_{I + N - 1} - h_{I + N - 1} <= |t_{I + N - 1}| - |h_{I + N - 1}| + N - 1 //! | | V - (t\_{I + N - 1} - h\_{I + N - 1}) | ≥ | V - N - (\|t\_{I + N - 1}\| - \|h\_{I + N - 1}\|) + 1 |
//! ``` //! | iff | t\_{I + N - 1} - h\_{I + N - 1} | ≤ | \|t\_{I + N - 1}\| - \|h\_{I + N - 1}\| + N - 1 |
//! //!
//! We have the following properties: //! We have the following properties:
//! //!
//! ```text //!
//! t_{I + N - 1} = t_I + |h_{I + N - 1}| - |h_I| + N - 1 //! | | | | |
//! |t_{I + N - 1}| - |h_{I + N - 1}| = |t_I| - |h_I| // Compaction preserves capacity. //! | ---------------------------------------:|:-:|:-------------------------------------------- |:------ |
//! |h_{I + N - 1}| - |t_I| <= h_{I + N - 1} - t_I //! | t\_{I + N - 1} | = | t\_I + \|h\_{I + N - 1}\| - \|h\_I\| + N - 1 | |
//! ``` //! | \|t\_{I + N - 1}\| - \|h\_{I + N - 1}\| | = | \|t\_I\| - \|h\_I\| | Compaction preserves capacity |
//! | \|h\_{I + N - 1}\| - \|t\_I\| | ≤ | h\_{I + N - 1} - t\_I | |
//! //!
//! From which we conclude: //! From which we conclude:
//! //!
//! ```text //! | | | | |
//! t_{I + N - 1} - h_{I + N - 1} <= |t_{I + N - 1}| - |h_{I + N - 1}| + N - 1 //! | ---:| -------------------------------:|:-:|:----------------------------------------------- |
//! iff t_I + |h_{I + N - 1}| - |h_I| + N - 1 - h_{I + N - 1} <= |t_I| - |h_I| + N - 1 //! | | t\_{I + N - 1} - h\_{I + N - 1} | ≤ | \|t\_{I + N - 1}\| - \|h\_{I + N - 1}\| + N - 1 |
//! iff t_I + |h_{I + N - 1}| - h_{I + N - 1} <= |t_I| //! | iff | t\_I + \|h\_{I + N - 1}\| - \|h\_I\| + N - 1 - h\_{I + N - 1} | ≤ | \|t\_I\| - \|h\_I\| + N - 1 |
//! iff |h_{I + N - 1}| - |t_I| <= h_{I + N - 1} - t_I //! | iff | t\_I + \|h\_{I + N - 1}\| - h\_{I + N - 1} | ≤ | \|t\_I\| |
//! ``` //! | iff | \|h\_{I + N - 1}\| - \|t\_I\| | ≤ | h\_{I + N - 1} - t\_I |
//! //!
//! //!
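As a numeric sanity check of this bound (a sketch using the hypothetical geometry N = 5, Q = 14, M = 13, V = 39 derived earlier), take the worst case t\_I = h\_I + V and h\_j = j × Q + M:

```rust
// Sketch: check t_{I+i} <= (I+i+N-1)*Q for the worst case of the induction above.
fn main() {
    let (n, q, m, v) = (5u32, 14u32, 13u32, 39u32);
    let i0 = 3u32; // some initial window I
    let h = |j: u32| j * q + m; // worst-case head of window j
    let t_i0 = h(i0) + v; // worst-case tail of window I
    for i in 1..n {
        let t = t_i0 + h(i0 + i) - h(i0) + i; // upper bound on t_{I+i}
        assert!(t <= (i0 + i + n - 1) * q);
    }
}
```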
//! ## Checksum //! ## Checksum
//! //!
//! The main property we want is that all partially written/erased words are either //! The main property we want is that all partially written/erased words are either the initial
//! the initial word, the final word, or invalid. //! word, the final word, or invalid.
//! //!
//! We say that a bit sequence `TARGET` is reachable from a bit sequence `SOURCE` if //! We say that a bit sequence `TARGET` is reachable from a bit sequence `SOURCE` if both have the
//! both have the same length and `SOURCE & TARGET == TARGET` where `&` is the //! same length and `SOURCE & TARGET == TARGET` where `&` is the bitwise AND operation on bit
//! bitwise AND operation on bit sequences of that length. In other words, when //! sequences of that length. In other words, when `SOURCE` has a bit equal to 0 then `TARGET` also
//! `SOURCE` has a bit equal to 0 then `TARGET` also has that bit equal to 0. //! has that bit equal to 0.
//! //!
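Stated in code, the relation is a single bitwise check; a minimal sketch over 32-bit words:

```rust
/// Sketch: `target` is reachable from `source` when programming bits (1 -> 0)
/// can turn `source` into `target`, i.e. `source & target == target`.
fn is_reachable(source: u32, target: u32) -> bool {
    source & target == target
}

fn main() {
    assert!(is_reachable(0xFFFF_FFFF, 0x1234_5678)); // anything is reachable from an erased word
    assert!(is_reachable(0b1110, 0b1010)); // clearing one more bit
    assert!(!is_reachable(0b1010, 0b1110)); // a bit cannot go back from 0 to 1
}
```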
//! The only written entries start with `101` or `110` and are written from an //! The only written entries start with `101` or `110` and are written from an erased word. Marking
//! erased word. Marking an entry as padding or deleted is a single bit operation, //! an entry as padding or deleted is a single bit operation, so the property trivially holds. For
//! so the property trivially holds. For those cases, the proof relies on the fact //! those cases, the proof relies on the fact that there is exactly one bit equal to 0 in the 3
//! that there is exactly one bit equal to 0 in the 3 first bits. Either the 3 first //! first bits. Either the 3 first bits are still `111` in which case we expect the remaining bits
//! bits are still `111` in which case we expect the remaining bits to be equal //! to be equal to 1. Otherwise we can use the checksum of the given type of entry because those 2
//! to 1. Otherwise we can use the checksum of the given type of entry because those //! types of entries are not reachable from each other. Here is a visualization of the partitioning
//! 2 types of entries are not reachable from each other. Here is a visualization of //! based on the first 3 bits:
//! the partitioning based on the first 3 bits:
//! //!
//! | First 3 bits | Description | How to check | //! | First 3 bits | Description | How to check |
//! | ------------:| ------------------ | ---------------------------- | //! | ------------:| ------------------ | ---------------------------- |
@@ -314,36 +312,30 @@
//! | `100` | Deleted user entry | No check, atomically written | //! | `100` | Deleted user entry | No check, atomically written |
//! | `0??` | Padding entry | No check, atomically written | //! | `0??` | Padding entry | No check, atomically written |
//! //!
//! To show that valid entries of a given type are not reachable from each other, we //! To show that valid entries of a given type are not reachable from each other, we show 3 lemmas:
//! show 3 lemmas:
//! //!
//! 1. A bit sequence is not reachable from another if its number of bits equal to //! 1. A bit sequence is not reachable from another if its number of bits equal to 0 is smaller.
//! 0 is smaller. //! 2. A bit sequence is not reachable from another if they have the same number of bits equal to
//! 0 and are different.
//! 3. A bit sequence is not reachable from another if it is bigger when they are interpreted as
//! numbers in binary representation.
//! //!
//! 2. A bit sequence is not reachable from another if they have the same number of //! From those lemmas we consider the 2 cases. If both entries have the same number of bits equal to
//! bits equal to 0 and are different. //! 0, they are either equal or not reachable from each other because of the second lemma. If they
//! //! don't have the same number of bits equal to 0, then the one with less bits equal to 0 is not
//! 3. A bit sequence is not reachable from another if it is bigger when they are //! reachable from the other because of the first lemma and the one with more bits equal to 0 is not
//! interpreted as numbers in binary representation. //! reachable from the other because of the third lemma and the definition of the checksum.
//!
//! From those lemmas we consider the 2 cases. If both entries have the same number
//! of bits equal to 0, they are either equal or not reachable from each other
//! because of the second lemma. If they don't have the same number of bits equal to
//! 0, then the one with less bits equal to 0 is not reachable from the other
//! because of the first lemma and the one with more bits equal to 0 is not
//! reachable from the other because of the third lemma and the definition of the
//! checksum.
//! //!
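These lemmas are easy to spot-check on small examples; a self-contained sketch (repeating the reachability helper from the sketch above):

```rust
fn is_reachable(source: u32, target: u32) -> bool {
    source & target == target
}

fn main() {
    // Lemma 1: the target has fewer bits equal to 0 than the source, so it is not reachable.
    let (source, target) = (0xFFFF_FFFCu32, 0xFFFF_FFFEu32);
    assert!(target.count_zeros() < source.count_zeros());
    assert!(!is_reachable(source, target));

    // Lemma 2: same number of bits equal to 0 but different words, so neither reaches the other.
    let (a, b) = (0xFFFF_FFFAu32, 0xFFFF_FFF5u32);
    assert_eq!(a.count_zeros(), b.count_zeros());
    assert!(!is_reachable(a, b) && !is_reachable(b, a));
}
```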
//! # Fuzzing //! # Fuzzing
//! //!
//! For any sequence of operations and interruptions starting from an erased //! For any sequence of operations and interruptions starting from an erased storage, the store is
//! storage, the store is checked against its model and some internal invariant at //! checked against its model and some internal invariant at each step.
//! each step.
//! //!
//! For any sequence of operations and interruptions starting from an arbitrary //! For any sequence of operations and interruptions starting from an arbitrary storage, the store
//! storage, the store is checked not to crash. //! is checked not to crash.
#![cfg_attr(not(feature = "std"), no_std)] #![cfg_attr(not(feature = "std"), no_std)]
#![feature(try_trait)]
#[macro_use] #[macro_use]
extern crate alloc; extern crate alloc;
@@ -353,10 +345,13 @@ mod buffer;
#[cfg(feature = "std")] #[cfg(feature = "std")]
mod driver; mod driver;
mod format; mod format;
pub mod fragment;
#[cfg(feature = "std")] #[cfg(feature = "std")]
mod model; mod model;
mod storage; mod storage;
mod store; mod store;
#[cfg(test)]
mod test;
#[cfg(feature = "std")] #[cfg(feature = "std")]
pub use self::buffer::{BufferCorruptFunction, BufferOptions, BufferStorage}; pub use self::buffer::{BufferCorruptFunction, BufferOptions, BufferStorage};

View File

@@ -12,13 +12,16 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
//! Store specification.
use crate::format::Format; use crate::format::Format;
use crate::{usize_to_nat, StoreError, StoreRatio, StoreResult, StoreUpdate}; use crate::{usize_to_nat, StoreError, StoreRatio, StoreResult, StoreUpdate};
use std::collections::HashMap; use std::collections::HashMap;
/// Models the mutable operations of a store. /// Models the mutable operations of a store.
/// ///
/// The model doesn't model the storage and read-only operations. This is done by the driver. /// The model doesn't model the storage and read-only operations. This is done by the
/// [driver](crate::StoreDriver).
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct StoreModel { pub struct StoreModel {
/// Represents the content of the store. /// Represents the content of the store.
@@ -34,7 +37,7 @@ pub enum StoreOperation {
/// Applies a transaction. /// Applies a transaction.
Transaction { Transaction {
/// The list of updates to be applied. /// The list of updates to be applied.
updates: Vec<StoreUpdate>, updates: Vec<StoreUpdate<Vec<u8>>>,
}, },
/// Deletes all keys above a threshold. /// Deletes all keys above a threshold.
@@ -89,7 +92,7 @@ impl StoreModel {
} }
/// Applies a transaction. /// Applies a transaction.
fn transaction(&mut self, updates: Vec<StoreUpdate>) -> StoreResult<()> { fn transaction(&mut self, updates: Vec<StoreUpdate<Vec<u8>>>) -> StoreResult<()> {
// Fail if the transaction is invalid. // Fail if the transaction is invalid.
if self.format.transaction_valid(&updates).is_none() { if self.format.transaction_valid(&updates).is_none() {
return Err(StoreError::InvalidArgument); return Err(StoreError::InvalidArgument);

View File

@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
//! Flash storage abstraction.
/// Represents a byte position in a storage. /// Represents a byte position in a storage.
#[derive(Copy, Clone, Debug, PartialEq, Eq)] #[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct StorageIndex { pub struct StorageIndex {
@@ -65,12 +67,14 @@ pub trait Storage {
/// The following pre-conditions must hold: /// The following pre-conditions must hold:
/// - The `index` must designate `value.len()` bytes in the storage. /// - The `index` must designate `value.len()` bytes in the storage.
/// - Both `index` and `value.len()` must be word-aligned. /// - Both `index` and `value.len()` must be word-aligned.
/// - The written words should not have been written too many times since last page erasure. /// - The written words should not have been written [too many](Self::max_word_writes) times
/// since the last page erasure.
fn write_slice(&mut self, index: StorageIndex, value: &[u8]) -> StorageResult<()>; fn write_slice(&mut self, index: StorageIndex, value: &[u8]) -> StorageResult<()>;
/// Erases a page of the storage. /// Erases a page of the storage.
/// ///
/// The `page` must be in the storage. /// The `page` must be in the storage, i.e. less than [`Storage::num_pages`]. And the page
/// should not have been erased [too many](Self::max_page_erases) times.
fn erase_page(&mut self, page: usize) -> StorageResult<()>; fn erase_page(&mut self, page: usize) -> StorageResult<()>;
} }
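The erase precondition can be respected with nothing more than the trait methods shown here. A hedged sketch of a helper that wipes a whole storage, where the page count is passed in by the caller rather than assumed from the trait:

```rust
// Sketch: erase every page of a storage. `num_pages` is provided by the
// caller; each page index stays below it, as `erase_page` requires.
// Note: every call counts against that page's erase budget.
fn wipe_storage<S: Storage>(storage: &mut S, num_pages: usize) -> StorageResult<()> {
    for page in 0..num_pages {
        storage.erase_page(page)?;
    }
    Ok(())
}
```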

View File

@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
//! Store implementation.
use crate::format::{ use crate::format::{
is_erased, CompactInfo, Format, Header, InitInfo, InternalEntry, Padding, ParsedWord, Position, is_erased, CompactInfo, Format, Header, InitInfo, InternalEntry, Padding, ParsedWord, Position,
Word, WordState, Word, WordState,
@@ -23,8 +25,12 @@ use crate::{usize_to_nat, Nat, Storage, StorageError, StorageIndex};
pub use crate::{ pub use crate::{
BufferStorage, StoreDriver, StoreDriverOff, StoreDriverOn, StoreInterruption, StoreInvariant, BufferStorage, StoreDriver, StoreDriverOff, StoreDriverOn, StoreInterruption, StoreInvariant,
}; };
use alloc::boxed::Box;
use alloc::vec::Vec; use alloc::vec::Vec;
use core::borrow::Borrow;
use core::cmp::{max, min, Ordering}; use core::cmp::{max, min, Ordering};
use core::convert::TryFrom;
use core::option::NoneError;
#[cfg(feature = "std")] #[cfg(feature = "std")]
use std::collections::HashSet; use std::collections::HashSet;
@@ -51,17 +57,14 @@ pub enum StoreError {
/// ///
/// The consequences depend on the storage failure. In particular, the operation may or may not /// The consequences depend on the storage failure. In particular, the operation may or may not
/// have succeeded, and the storage may have become invalid. Before doing any other operation, /// have succeeded, and the storage may have become invalid. Before doing any other operation,
/// the store should be [recovered]. The operation may then be retried if idempotent. /// the store should be [recovered](Store::recover). The operation may then be retried if
/// /// idempotent.
/// [recovered]: struct.Store.html#method.recover
StorageError, StorageError,
/// Storage is invalid. /// Storage is invalid.
/// ///
/// The storage should be erased and the store [recovered]. The store would be empty and have /// The storage should be erased and the store [recovered](Store::recover). The store would be
/// lost track of lifetime. /// empty and have lost track of lifetime.
///
/// [recovered]: struct.Store.html#method.recover
InvalidStorage, InvalidStorage,
} }
@@ -75,20 +78,26 @@ impl From<StorageError> for StoreError {
} }
} }
impl From<NoneError> for StoreError {
fn from(error: NoneError) -> StoreError {
match error {
NoneError => StoreError::InvalidStorage,
}
}
}
/// Result of store operations. /// Result of store operations.
pub type StoreResult<T> = Result<T, StoreError>; pub type StoreResult<T> = Result<T, StoreError>;
/// Progression ratio for store metrics. /// Progression ratio for store metrics.
/// ///
/// This is used for the [capacity] and [lifetime] metrics. Those metrics are measured in words. /// This is used for the [`Store::capacity`] and [`Store::lifetime`] metrics. Those metrics are
/// measured in words.
/// ///
/// # Invariant /// # Invariant
/// ///
/// - The used value does not exceed the total: `used <= total`. /// - The used value does not exceed the total: `used` ≤ `total`.
/// #[derive(Copy, Clone, Debug, PartialEq, Eq)]
/// [capacity]: struct.Store.html#method.capacity
/// [lifetime]: struct.Store.html#method.lifetime
#[derive(Copy, Clone, PartialEq, Eq)]
pub struct StoreRatio { pub struct StoreRatio {
/// How much of the metric is used. /// How much of the metric is used.
pub(crate) used: Nat, pub(crate) used: Nat,
@@ -136,11 +145,20 @@ impl StoreHandle {
self.key as usize self.key as usize
} }
/// Returns the value length of the entry.
///
/// # Errors
///
/// Returns [`StoreError::InvalidArgument`] if the entry has been deleted or compacted.
pub fn get_length<S: Storage>(&self, store: &Store<S>) -> StoreResult<usize> {
store.get_length(self)
}
/// Returns the value of the entry. /// Returns the value of the entry.
/// ///
/// # Errors /// # Errors
/// ///
/// Returns `InvalidArgument` if the entry has been deleted or compacted. /// Returns [`StoreError::InvalidArgument`] if the entry has been deleted or compacted.
pub fn get_value<S: Storage>(&self, store: &Store<S>) -> StoreResult<Vec<u8>> { pub fn get_value<S: Storage>(&self, store: &Store<S>) -> StoreResult<Vec<u8>> {
store.get_value(self) store.get_value(self)
} }
@@ -148,15 +166,15 @@ impl StoreHandle {
/// Represents an update to the store as part of a transaction. /// Represents an update to the store as part of a transaction.
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub enum StoreUpdate { pub enum StoreUpdate<ByteSlice: Borrow<[u8]>> {
/// Inserts or replaces an entry in the store. /// Inserts or replaces an entry in the store.
Insert { key: usize, value: Vec<u8> }, Insert { key: usize, value: ByteSlice },
/// Removes an entry from the store. /// Removes an entry from the store.
Remove { key: usize }, Remove { key: usize },
} }
impl StoreUpdate { impl<ByteSlice: Borrow<[u8]>> StoreUpdate<ByteSlice> {
/// Returns the key affected by the update. /// Returns the key affected by the update.
pub fn key(&self) -> usize { pub fn key(&self) -> usize {
match *self { match *self {
@@ -168,12 +186,14 @@ impl StoreUpdate {
/// Returns the value written by the update. /// Returns the value written by the update.
pub fn value(&self) -> Option<&[u8]> { pub fn value(&self) -> Option<&[u8]> {
match self { match self {
StoreUpdate::Insert { value, .. } => Some(value), StoreUpdate::Insert { value, .. } => Some(value.borrow()),
StoreUpdate::Remove { .. } => None, StoreUpdate::Remove { .. } => None,
} }
} }
} }
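With the new `ByteSlice: Borrow<[u8]>` parameter, a transaction can be built from borrowed as well as owned values. A usage sketch, assuming the crate is imported as `persistent_store` and that these items are re-exported at the crate root:

```rust
use persistent_store::{Storage, Store, StoreResult, StoreUpdate};

// Sketch: atomically insert one entry and remove another. Either both
// updates are applied or neither is.
fn rotate_keys<S: Storage>(store: &mut Store<S>) -> StoreResult<()> {
    let updates: [StoreUpdate<&[u8]>; 2] = [
        StoreUpdate::Insert { key: 1, value: &[0xAB; 8] },
        StoreUpdate::Remove { key: 2 },
    ];
    store.transaction(&updates)
}
```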
pub type StoreIter<'a> = Box<dyn Iterator<Item = StoreResult<StoreHandle>> + 'a>;
/// Implements a store with a map interface over a storage. /// Implements a store with a map interface over a storage.
#[derive(Clone)] #[derive(Clone)]
pub struct Store<S: Storage> { pub struct Store<S: Storage> {
@@ -182,6 +202,14 @@ pub struct Store<S: Storage> {
/// The storage configuration. /// The storage configuration.
format: Format, format: Format,
/// The position of the first word in the store.
head: Option<Position>,
/// The list of the position of the user entries.
///
/// The position is encoded as the word offset from the [head](Store::head).
entries: Option<Vec<u16>>,
} }
impl<S: Storage> Store<S> { impl<S: Storage> Store<S> {
@@ -193,13 +221,19 @@ impl<S: Storage> Store<S> {
/// ///
/// # Errors /// # Errors
/// ///
/// Returns `InvalidArgument` if the storage is not supported. /// Returns [`StoreError::InvalidArgument`] if the storage is not
/// [supported](Format::is_storage_supported).
pub fn new(storage: S) -> Result<Store<S>, (StoreError, S)> { pub fn new(storage: S) -> Result<Store<S>, (StoreError, S)> {
let format = match Format::new(&storage) { let format = match Format::new(&storage) {
None => return Err((StoreError::InvalidArgument, storage)), None => return Err((StoreError::InvalidArgument, storage)),
Some(x) => x, Some(x) => x,
}; };
let mut store = Store { storage, format }; let mut store = Store {
storage,
format,
head: None,
entries: None,
};
if let Err(error) = store.recover() { if let Err(error) = store.recover() {
return Err((error, store.storage)); return Err((error, store.storage));
} }
@@ -207,31 +241,35 @@ impl<S: Storage> Store<S> {
} }
/// Iterates over the entries. /// Iterates over the entries.
pub fn iter<'a>(&'a self) -> StoreResult<StoreIter<'a, S>> { pub fn iter<'a>(&'a self) -> StoreResult<StoreIter<'a>> {
StoreIter::new(self) let head = self.head?;
Ok(Box::new(self.entries.as_ref()?.iter().map(
move |&offset| {
let pos = head + offset as Nat;
match self.parse_entry(&mut pos.clone())? {
ParsedEntry::User(Header {
key, length: len, ..
}) => Ok(StoreHandle { key, pos, len }),
_ => Err(StoreError::InvalidStorage),
}
},
)))
} }
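Since `iter` now serves handles straight from the in-memory `entries` index, reading the whole store back is a plain loop; a sketch with the same import assumptions as above:

```rust
use persistent_store::{Storage, Store, StoreResult};

// Sketch: collect every entry as (key, length, value).
fn dump<S: Storage>(store: &Store<S>) -> StoreResult<Vec<(usize, usize, Vec<u8>)>> {
    let mut out = Vec::new();
    for handle in store.iter()? {
        let handle = handle?;
        let len = handle.get_length(store)?; // length without copying the value
        let value = handle.get_value(store)?;
        out.push((handle.key(), len, value));
    }
    Ok(out)
}
```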
/// Returns the current capacity in words. /// Returns the current and total capacity in words.
/// ///
/// The capacity represents the size of what is stored. /// The capacity represents the size of what is stored.
pub fn capacity(&self) -> StoreResult<StoreRatio> { pub fn capacity(&self) -> StoreResult<StoreRatio> {
let total = self.format.total_capacity(); let total = self.format.total_capacity();
let mut used = 0; let mut used = 0;
let mut pos = self.head()?; for handle in self.iter()? {
let end = pos + self.format.virt_size(); let handle = handle?;
while pos < end { used += 1 + self.format.bytes_to_words(handle.len);
let entry_pos = pos;
match self.parse_entry(&mut pos)? {
ParsedEntry::Tail => break,
ParsedEntry::Padding => (),
ParsedEntry::User(_) => used += pos - entry_pos,
_ => return Err(StoreError::InvalidStorage),
}
} }
Ok(StoreRatio { used, total }) Ok(StoreRatio { used, total })
} }
/// Returns the current lifetime in words. /// Returns the current and total lifetime in words.
/// ///
/// The lifetime represents the age of the storage. The limit is an over-approximation by at /// The lifetime represents the age of the storage. The limit is an over-approximation by at
/// most the maximum length of a value (the actual limit depends on the length of the prefix of /// most the maximum length of a value (the actual limit depends on the length of the prefix of
@@ -246,18 +284,22 @@ impl<S: Storage> Store<S> {
/// ///
/// # Errors /// # Errors
/// ///
/// Returns `InvalidArgument` in the following circumstances: /// Returns [`StoreError::InvalidArgument`] in the following circumstances:
/// - There are too many updates. /// - There are [too many](Format::max_updates) updates.
/// - The updates overlap, i.e. their keys are not disjoint. /// - The updates overlap, i.e. their keys are not disjoint.
/// - The updates are invalid, e.g. key out of bound or value too long. /// - The updates are invalid, e.g. key [out of bound](Format::max_key) or value [too
pub fn transaction(&mut self, updates: &[StoreUpdate]) -> StoreResult<()> { /// long](Format::max_value_len).
pub fn transaction<ByteSlice: Borrow<[u8]>>(
&mut self,
updates: &[StoreUpdate<ByteSlice>],
) -> StoreResult<()> {
let count = usize_to_nat(updates.len()); let count = usize_to_nat(updates.len());
if count == 0 { if count == 0 {
return Ok(()); return Ok(());
} }
if count == 1 { if count == 1 {
match updates[0] { match updates[0] {
StoreUpdate::Insert { key, ref value } => return self.insert(key, value), StoreUpdate::Insert { key, ref value } => return self.insert(key, value.borrow()),
StoreUpdate::Remove { key } => return self.remove(key), StoreUpdate::Remove { key } => return self.remove(key),
} }
} }
@@ -270,7 +312,9 @@ impl<S: Storage> Store<S> {
self.reserve(self.format.transaction_capacity(updates))?; self.reserve(self.format.transaction_capacity(updates))?;
// Write the marker entry. // Write the marker entry.
let marker = self.tail()?; let marker = self.tail()?;
let entry = self.format.build_internal(InternalEntry::Marker { count }); let entry = self
.format
.build_internal(InternalEntry::Marker { count })?;
self.write_slice(marker, &entry)?; self.write_slice(marker, &entry)?;
self.init_page(marker, marker)?; self.init_page(marker, marker)?;
// Write the updates. // Write the updates.
@@ -278,7 +322,7 @@ impl<S: Storage> Store<S> {
for update in updates { for update in updates {
let length = match *update { let length = match *update {
StoreUpdate::Insert { key, ref value } => { StoreUpdate::Insert { key, ref value } => {
let entry = self.format.build_user(usize_to_nat(key), value); let entry = self.format.build_user(usize_to_nat(key), value.borrow())?;
let word_size = self.format.word_size(); let word_size = self.format.word_size();
let footer = usize_to_nat(entry.len()) / word_size - 1; let footer = usize_to_nat(entry.len()) / word_size - 1;
self.write_slice(tail, &entry[..(footer * word_size) as usize])?; self.write_slice(tail, &entry[..(footer * word_size) as usize])?;
@@ -287,7 +331,7 @@ impl<S: Storage> Store<S> {
} }
StoreUpdate::Remove { key } => { StoreUpdate::Remove { key } => {
let key = usize_to_nat(key); let key = usize_to_nat(key);
let remove = self.format.build_internal(InternalEntry::Remove { key }); let remove = self.format.build_internal(InternalEntry::Remove { key })?;
self.write_slice(tail, &remove)?; self.write_slice(tail, &remove)?;
0 0
} }
@@ -307,7 +351,9 @@ impl<S: Storage> Store<S> {
if min_key > self.format.max_key() { if min_key > self.format.max_key() {
return Err(StoreError::InvalidArgument); return Err(StoreError::InvalidArgument);
} }
let clear = self.format.build_internal(InternalEntry::Clear { min_key }); let clear = self
.format
.build_internal(InternalEntry::Clear { min_key })?;
// We always have one word available. We can't use `reserve` because this is internal // We always have one word available. We can't use `reserve` because this is internal
// capacity, not user capacity. // capacity, not user capacity.
while self.immediate_capacity()? < 1 { while self.immediate_capacity()? < 1 {
@@ -373,7 +419,7 @@ impl<S: Storage> Store<S> {
if key > self.format.max_key() || value_len > self.format.max_value_len() { if key > self.format.max_key() || value_len > self.format.max_value_len() {
return Err(StoreError::InvalidArgument); return Err(StoreError::InvalidArgument);
} }
let entry = self.format.build_user(key, value); let entry = self.format.build_user(key, value)?;
let entry_len = usize_to_nat(entry.len()); let entry_len = usize_to_nat(entry.len());
self.reserve(entry_len / self.format.word_size())?; self.reserve(entry_len / self.format.word_size())?;
let tail = self.tail()?; let tail = self.tail()?;
@@ -381,6 +427,7 @@ impl<S: Storage> Store<S> {
let footer = entry_len / word_size - 1; let footer = entry_len / word_size - 1;
self.write_slice(tail, &entry[..(footer * word_size) as usize])?; self.write_slice(tail, &entry[..(footer * word_size) as usize])?;
self.write_slice(tail + footer, &entry[(footer * word_size) as usize..])?; self.write_slice(tail + footer, &entry[(footer * word_size) as usize..])?;
self.push_entry(tail)?;
self.insert_init(tail, footer, key) self.insert_init(tail, footer, key)
} }
@@ -398,7 +445,8 @@ impl<S: Storage> Store<S> {
/// Removes an entry given a handle. /// Removes an entry given a handle.
pub fn remove_handle(&mut self, handle: &StoreHandle) -> StoreResult<()> { pub fn remove_handle(&mut self, handle: &StoreHandle) -> StoreResult<()> {
self.check_handle(handle)?; self.check_handle(handle)?;
self.delete_pos(handle.pos, self.format.bytes_to_words(handle.len)) self.delete_pos(handle.pos, self.format.bytes_to_words(handle.len))?;
self.remove_entry(handle.pos)
} }
/// Returns the maximum length in bytes of a value. /// Returns the maximum length in bytes of a value.
@@ -406,6 +454,17 @@ impl<S: Storage> Store<S> {
self.format.max_value_len() as usize self.format.max_value_len() as usize
} }
/// Returns the length of the value of an entry given its handle.
fn get_length(&self, handle: &StoreHandle) -> StoreResult<usize> {
self.check_handle(handle)?;
let mut pos = handle.pos;
match self.parse_entry(&mut pos)? {
ParsedEntry::User(header) => Ok(header.length as usize),
ParsedEntry::Padding => Err(StoreError::InvalidArgument),
_ => Err(StoreError::InvalidStorage),
}
}
/// Returns the value of an entry given its handle. /// Returns the value of an entry given its handle.
fn get_value(&self, handle: &StoreHandle) -> StoreResult<Vec<u8>> { fn get_value(&self, handle: &StoreHandle) -> StoreResult<Vec<u8>> {
self.check_handle(handle)?; self.check_handle(handle)?;
@@ -437,7 +496,7 @@ impl<S: Storage> Store<S> {
let init_info = self.format.build_init(InitInfo { let init_info = self.format.build_init(InitInfo {
cycle: 0, cycle: 0,
prefix: 0, prefix: 0,
}); })?;
self.storage_write_slice(index, &init_info) self.storage_write_slice(index, &init_info)
} }
@@ -460,7 +519,9 @@ impl<S: Storage> Store<S> {
/// Recovers a possible compaction interrupted while copying the entries. /// Recovers a possible compaction interrupted while copying the entries.
fn recover_compaction(&mut self) -> StoreResult<()> { fn recover_compaction(&mut self) -> StoreResult<()> {
let head_page = self.head()?.page(&self.format); let head = self.get_extremum_page_head(Ordering::Less)?;
self.head = Some(head);
let head_page = head.page(&self.format);
match self.parse_compact(head_page)? { match self.parse_compact(head_page)? {
WordState::Erased => Ok(()), WordState::Erased => Ok(()),
WordState::Partial => self.compact(), WordState::Partial => self.compact(),
@@ -470,14 +531,15 @@ impl<S: Storage> Store<S> {
/// Recover a possible interrupted operation which is not a compaction. /// Recover a possible interrupted operation which is not a compaction.
fn recover_operation(&mut self) -> StoreResult<()> { fn recover_operation(&mut self) -> StoreResult<()> {
let mut pos = self.head()?; self.entries = Some(Vec::new());
let mut pos = self.head?;
let mut prev_pos = pos; let mut prev_pos = pos;
let end = pos + self.format.virt_size(); let end = pos + self.format.virt_size();
while pos < end { while pos < end {
let entry_pos = pos; let entry_pos = pos;
match self.parse_entry(&mut pos)? { match self.parse_entry(&mut pos)? {
ParsedEntry::Tail => break, ParsedEntry::Tail => break,
ParsedEntry::User(_) => (), ParsedEntry::User(_) => self.push_entry(entry_pos)?,
ParsedEntry::Padding => { ParsedEntry::Padding => {
self.wipe_span(entry_pos + 1, pos - entry_pos - 1)?; self.wipe_span(entry_pos + 1, pos - entry_pos - 1)?;
} }
@@ -610,7 +672,7 @@ impl<S: Storage> Store<S> {
/// ///
/// In particular, the handle has not been compacted. /// In particular, the handle has not been compacted.
fn check_handle(&self, handle: &StoreHandle) -> StoreResult<()> { fn check_handle(&self, handle: &StoreHandle) -> StoreResult<()> {
if handle.pos < self.head()? { if handle.pos < self.head? {
Err(StoreError::InvalidArgument) Err(StoreError::InvalidArgument)
} else { } else {
Ok(()) Ok(())
@@ -640,20 +702,22 @@ impl<S: Storage> Store<S> {
/// Compacts one page. /// Compacts one page.
fn compact(&mut self) -> StoreResult<()> { fn compact(&mut self) -> StoreResult<()> {
let head = self.head()?; let head = self.head?;
if head.cycle(&self.format) >= self.format.max_page_erases() { if head.cycle(&self.format) >= self.format.max_page_erases() {
return Err(StoreError::NoLifetime); return Err(StoreError::NoLifetime);
} }
let tail = max(self.tail()?, head.next_page(&self.format)); let tail = max(self.tail()?, head.next_page(&self.format));
let index = self.format.index_compact(head.page(&self.format)); let index = self.format.index_compact(head.page(&self.format));
let compact_info = self.format.build_compact(CompactInfo { tail: tail - head }); let compact_info = self
.format
.build_compact(CompactInfo { tail: tail - head })?;
self.storage_write_slice(index, &compact_info)?; self.storage_write_slice(index, &compact_info)?;
self.compact_copy() self.compact_copy()
} }
/// Continues a compaction after its compact page info has been written. /// Continues a compaction after its compact page info has been written.
fn compact_copy(&mut self) -> StoreResult<()> { fn compact_copy(&mut self) -> StoreResult<()> {
let mut head = self.head()?; let mut head = self.head?;
let page = head.page(&self.format); let page = head.page(&self.format);
let end = head.next_page(&self.format); let end = head.next_page(&self.format);
let mut tail = match self.parse_compact(page)? { let mut tail = match self.parse_compact(page)? {
@@ -667,8 +731,12 @@ impl<S: Storage> Store<S> {
let pos = head; let pos = head;
match self.parse_entry(&mut head)? { match self.parse_entry(&mut head)? {
ParsedEntry::Tail => break, ParsedEntry::Tail => break,
// This can happen if we copy to the next page. We actually reached the tail but we
// read what we just copied.
ParsedEntry::Partial if head > end => break,
ParsedEntry::User(_) => (), ParsedEntry::User(_) => (),
_ => continue, ParsedEntry::Padding => continue,
_ => return Err(StoreError::InvalidStorage),
}; };
let length = head - pos; let length = head - pos;
// We have to copy the slice for 2 reasons: // We have to copy the slice for 2 reasons:
@@ -676,11 +744,13 @@ impl<S: Storage> Store<S> {
// 2. We can't pass a flash slice to the kernel. This should get fixed with // 2. We can't pass a flash slice to the kernel. This should get fixed with
// https://github.com/tock/tock/issues/1274. // https://github.com/tock/tock/issues/1274.
let entry = self.read_slice(pos, length * self.format.word_size()); let entry = self.read_slice(pos, length * self.format.word_size());
self.remove_entry(pos)?;
self.write_slice(tail, &entry)?; self.write_slice(tail, &entry)?;
self.push_entry(tail)?;
self.init_page(tail, tail + (length - 1))?; self.init_page(tail, tail + (length - 1))?;
tail += length; tail += length;
} }
let erase = self.format.build_internal(InternalEntry::Erase { page }); let erase = self.format.build_internal(InternalEntry::Erase { page })?;
self.write_slice(tail, &erase)?; self.write_slice(tail, &erase)?;
self.init_page(tail, tail)?; self.init_page(tail, tail)?;
self.compact_erase(tail) self.compact_erase(tail)
@@ -688,14 +758,31 @@ impl<S: Storage> Store<S> {
/// Continues a compaction after its erase entry has been written. /// Continues a compaction after its erase entry has been written.
fn compact_erase(&mut self, erase: Position) -> StoreResult<()> { fn compact_erase(&mut self, erase: Position) -> StoreResult<()> {
let page = match self.parse_entry(&mut erase.clone())? { // Read the page to erase from the erase entry.
let mut page = match self.parse_entry(&mut erase.clone())? {
ParsedEntry::Internal(InternalEntry::Erase { page }) => page, ParsedEntry::Internal(InternalEntry::Erase { page }) => page,
_ => return Err(StoreError::InvalidStorage), _ => return Err(StoreError::InvalidStorage),
}; };
// Erase the page.
self.storage_erase_page(page)?; self.storage_erase_page(page)?;
let head = self.head()?; // Update the head.
page = (page + 1) % self.format.num_pages();
let init = match self.parse_init(page)? {
WordState::Valid(x) => x,
_ => return Err(StoreError::InvalidStorage),
};
let head = self.format.page_head(init, page);
if let Some(entries) = &mut self.entries {
let head_offset = u16::try_from(head - self.head?).ok()?;
for entry in entries {
*entry = entry.checked_sub(head_offset)?;
}
}
self.head = Some(head);
// Wipe the overlapping entry from the erased page.
let pos = head.page_begin(&self.format); let pos = head.page_begin(&self.format);
self.wipe_span(pos, head - pos)?; self.wipe_span(pos, head - pos)?;
// Mark the erase entry as done.
self.set_padding(erase)?; self.set_padding(erase)?;
Ok(()) Ok(())
} }
@@ -704,13 +791,13 @@ impl<S: Storage> Store<S> {
fn transaction_apply(&mut self, sorted_keys: &[Nat], marker: Position) -> StoreResult<()> { fn transaction_apply(&mut self, sorted_keys: &[Nat], marker: Position) -> StoreResult<()> {
self.delete_keys(&sorted_keys, marker)?; self.delete_keys(&sorted_keys, marker)?;
self.set_padding(marker)?; self.set_padding(marker)?;
let end = self.head()? + self.format.virt_size(); let end = self.head? + self.format.virt_size();
let mut pos = marker + 1; let mut pos = marker + 1;
while pos < end { while pos < end {
let entry_pos = pos; let entry_pos = pos;
match self.parse_entry(&mut pos)? { match self.parse_entry(&mut pos)? {
ParsedEntry::Tail => break, ParsedEntry::Tail => break,
ParsedEntry::User(_) => (), ParsedEntry::User(_) => self.push_entry(entry_pos)?,
ParsedEntry::Internal(InternalEntry::Remove { .. }) => { ParsedEntry::Internal(InternalEntry::Remove { .. }) => {
self.set_padding(entry_pos)? self.set_padding(entry_pos)?
} }
@@ -727,37 +814,38 @@ impl<S: Storage> Store<S> {
ParsedEntry::Internal(InternalEntry::Clear { min_key }) => min_key, ParsedEntry::Internal(InternalEntry::Clear { min_key }) => min_key,
_ => return Err(StoreError::InvalidStorage), _ => return Err(StoreError::InvalidStorage),
}; };
let mut pos = self.head()?; self.delete_if(clear, |key| key >= min_key)?;
let end = pos + self.format.virt_size();
while pos < end {
let entry_pos = pos;
match self.parse_entry(&mut pos)? {
ParsedEntry::Internal(InternalEntry::Clear { .. }) if entry_pos == clear => break,
ParsedEntry::User(header) if header.key >= min_key => {
self.delete_pos(entry_pos, pos - entry_pos - 1)?;
}
ParsedEntry::Padding | ParsedEntry::User(_) => (),
_ => return Err(StoreError::InvalidStorage),
}
}
self.set_padding(clear)?; self.set_padding(clear)?;
Ok(()) Ok(())
} }
/// Deletes a set of entries up to a certain position. /// Deletes a set of entries up to a certain position.
fn delete_keys(&mut self, sorted_keys: &[Nat], end: Position) -> StoreResult<()> { fn delete_keys(&mut self, sorted_keys: &[Nat], end: Position) -> StoreResult<()> {
let mut pos = self.head()?; self.delete_if(end, |key| sorted_keys.binary_search(&key).is_ok())
while pos < end {
let entry_pos = pos;
match self.parse_entry(&mut pos)? {
ParsedEntry::Tail => break,
ParsedEntry::User(header) if sorted_keys.binary_search(&header.key).is_ok() => {
self.delete_pos(entry_pos, pos - entry_pos - 1)?;
} }
ParsedEntry::Padding | ParsedEntry::User(_) => (),
/// Deletes entries matching a predicate up to a certain position.
fn delete_if(&mut self, end: Position, delete: impl Fn(Nat) -> bool) -> StoreResult<()> {
let head = self.head?;
let mut entries = self.entries.take()?;
let mut i = 0;
while i < entries.len() {
let pos = head + entries[i] as Nat;
if pos >= end {
break;
}
let header = match self.parse_entry(&mut pos.clone())? {
ParsedEntry::User(x) => x,
_ => return Err(StoreError::InvalidStorage), _ => return Err(StoreError::InvalidStorage),
};
if delete(header.key) {
self.delete_pos(pos, self.format.bytes_to_words(header.length))?;
entries.swap_remove(i);
} else {
i += 1;
} }
} }
self.entries = Some(entries);
Ok(()) Ok(())
} }
@@ -792,7 +880,7 @@ impl<S: Storage> Store<S> {
let init_info = self.format.build_init(InitInfo { let init_info = self.format.build_init(InitInfo {
cycle: new_first.cycle(&self.format), cycle: new_first.cycle(&self.format),
prefix: new_first.word(&self.format), prefix: new_first.word(&self.format),
}); })?;
self.storage_write_slice(index, &init_info)?; self.storage_write_slice(index, &init_info)?;
Ok(()) Ok(())
} }
@@ -800,7 +888,7 @@ impl<S: Storage> Store<S> {
/// Sets the padding bit of a user header. /// Sets the padding bit of a user header.
fn set_padding(&mut self, pos: Position) -> StoreResult<()> { fn set_padding(&mut self, pos: Position) -> StoreResult<()> {
let mut word = Word::from_slice(self.read_word(pos)); let mut word = Word::from_slice(self.read_word(pos));
self.format.set_padding(&mut word); self.format.set_padding(&mut word)?;
self.write_slice(pos, &word.as_slice())?; self.write_slice(pos, &word.as_slice())?;
Ok(()) Ok(())
} }
@@ -836,19 +924,20 @@ impl<S: Storage> Store<S> {
} }
} }
// There is always at least one initialized page. // There is always at least one initialized page.
best.ok_or(StoreError::InvalidStorage) Ok(best?)
} }
/// Returns the number of words that can be written without compaction. /// Returns the number of words that can be written without compaction.
fn immediate_capacity(&self) -> StoreResult<Nat> { fn immediate_capacity(&self) -> StoreResult<Nat> {
let tail = self.tail()?; let tail = self.tail()?;
let end = self.head()? + self.format.virt_size(); let end = self.head? + self.format.virt_size();
Ok(end.get().saturating_sub(tail.get())) Ok(end.get().saturating_sub(tail.get()))
} }
/// Returns the position of the first word in the store. /// Returns the position of the first word in the store.
#[cfg(feature = "std")]
pub(crate) fn head(&self) -> StoreResult<Position> { pub(crate) fn head(&self) -> StoreResult<Position> {
self.get_extremum_page_head(Ordering::Less) Ok(self.head?)
} }
/// Returns one past the position of the last word in the store. /// Returns one past the position of the last word in the store.
@@ -863,6 +952,30 @@ impl<S: Storage> Store<S> {
Ok(pos) Ok(pos)
} }
fn push_entry(&mut self, pos: Position) -> StoreResult<()> {
let entries = match &mut self.entries {
None => return Ok(()),
Some(x) => x,
};
let head = self.head?;
let offset = u16::try_from(pos - head).ok()?;
debug_assert!(!entries.contains(&offset));
entries.push(offset);
Ok(())
}
fn remove_entry(&mut self, pos: Position) -> StoreResult<()> {
let entries = match &mut self.entries {
None => return Ok(()),
Some(x) => x,
};
let head = self.head?;
let offset = u16::try_from(pos - head).ok()?;
let i = entries.iter().position(|x| *x == offset)?;
entries.swap_remove(i);
Ok(())
}
/// Parses the entry at a given position. /// Parses the entry at a given position.
/// ///
/// The position is updated to point to the next entry. /// The position is updated to point to the next entry.
@@ -1061,7 +1174,7 @@ impl Store<BufferStorage> {
/// If the value has been partially compacted, only return the non-compacted part. Returns an /// If the value has been partially compacted, only return the non-compacted part. Returns an
/// empty value if it has been fully compacted. /// empty value if it has been fully compacted.
pub fn inspect_value(&self, handle: &StoreHandle) -> Vec<u8> { pub fn inspect_value(&self, handle: &StoreHandle) -> Vec<u8> {
let head = self.head().unwrap(); let head = self.head.unwrap();
let length = self.format.bytes_to_words(handle.len); let length = self.format.bytes_to_words(handle.len);
if head <= handle.pos { if head <= handle.pos {
// The value has not been compacted. // The value has not been compacted.
@@ -1087,20 +1200,21 @@ impl Store<BufferStorage> {
store store
.iter() .iter()
.unwrap() .unwrap()
.map(|x| x.unwrap()) .filter(|x| x.is_err() || delete_key(x.as_ref().unwrap().key as usize))
.filter(|x| delete_key(x.key as usize)) .collect::<Result<Vec<_>, _>>()
.collect::<Vec<_>>()
}; };
match *operation { match *operation {
StoreOperation::Transaction { ref updates } => { StoreOperation::Transaction { ref updates } => {
let keys: HashSet<usize> = updates.iter().map(|x| x.key()).collect(); let keys: HashSet<usize> = updates.iter().map(|x| x.key()).collect();
let deleted = deleted(self, &|key| keys.contains(&key)); match deleted(self, &|key| keys.contains(&key)) {
(deleted, self.transaction(updates)) Ok(deleted) => (deleted, self.transaction(updates)),
Err(error) => (Vec::new(), Err(error)),
} }
StoreOperation::Clear { min_key } => {
let deleted = deleted(self, &|key| key >= min_key);
(deleted, self.clear(min_key))
} }
StoreOperation::Clear { min_key } => match deleted(self, &|key| key >= min_key) {
Ok(deleted) => (deleted, self.clear(min_key)),
Err(error) => (Vec::new(), Err(error)),
},
StoreOperation::Prepare { length } => (Vec::new(), self.prepare(length)), StoreOperation::Prepare { length } => (Vec::new(), self.prepare(length)),
} }
} }
@@ -1110,10 +1224,12 @@ impl Store<BufferStorage> {
let format = Format::new(storage).unwrap(); let format = Format::new(storage).unwrap();
// Write the init info of the first page. // Write the init info of the first page.
let mut index = format.index_init(0); let mut index = format.index_init(0);
let init_info = format.build_init(InitInfo { let init_info = format
.build_init(InitInfo {
cycle: usize_to_nat(cycle), cycle: usize_to_nat(cycle),
prefix: 0, prefix: 0,
}); })
.unwrap();
storage.write_slice(index, &init_info).unwrap(); storage.write_slice(index, &init_info).unwrap();
// Pad the first word of the page. This makes the store look used, otherwise we may confuse // Pad the first word of the page. This makes the store look used, otherwise we may confuse
// it with a partially initialized store. // it with a partially initialized store.
@@ -1165,61 +1281,6 @@ enum ParsedEntry {
Tail, Tail,
} }
/// Iterates over the entries of a store.
pub struct StoreIter<'a, S: Storage> {
/// The store being iterated.
store: &'a Store<S>,
/// The position of the next entry.
pos: Position,
/// Iteration stops when reaching this position.
end: Position,
}
impl<'a, S: Storage> StoreIter<'a, S> {
/// Creates an iterator over the entries of a store.
fn new(store: &'a Store<S>) -> StoreResult<StoreIter<'a, S>> {
let pos = store.head()?;
let end = pos + store.format.virt_size();
Ok(StoreIter { store, pos, end })
}
}
impl<'a, S: Storage> StoreIter<'a, S> {
/// Returns the next entry and advances the iterator.
fn transposed_next(&mut self) -> StoreResult<Option<StoreHandle>> {
if self.pos >= self.end {
return Ok(None);
}
while self.pos < self.end {
let entry_pos = self.pos;
match self.store.parse_entry(&mut self.pos)? {
ParsedEntry::Tail => break,
ParsedEntry::Padding => (),
ParsedEntry::User(header) => {
return Ok(Some(StoreHandle {
key: header.key,
pos: entry_pos,
len: header.length,
}))
}
_ => return Err(StoreError::InvalidStorage),
}
}
self.pos = self.end;
Ok(None)
}
}
impl<'a, S: Storage> Iterator for StoreIter<'a, S> {
type Item = StoreResult<StoreHandle>;
fn next(&mut self) -> Option<StoreResult<StoreHandle>> {
self.transposed_next().transpose()
}
}
/// Returns whether 2 slices are different. /// Returns whether 2 slices are different.
/// ///
/// Returns an error if `target` has a bit set to one for which `source` is set to zero. /// Returns an error if `target` has a bit set to one for which `source` is set to zero.
@@ -1239,71 +1300,15 @@ fn is_write_needed(source: &[u8], target: &[u8]) -> StoreResult<bool> {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::BufferOptions; use crate::test::MINIMAL;
#[derive(Clone)]
struct Config {
word_size: usize,
page_size: usize,
num_pages: usize,
max_word_writes: usize,
max_page_erases: usize,
}
impl Config {
fn new_driver(&self) -> StoreDriverOff {
let options = BufferOptions {
word_size: self.word_size,
page_size: self.page_size,
max_word_writes: self.max_word_writes,
max_page_erases: self.max_page_erases,
strict_mode: true,
};
StoreDriverOff::new(options, self.num_pages)
}
}
const MINIMAL: Config = Config {
word_size: 4,
page_size: 64,
num_pages: 5,
max_word_writes: 2,
max_page_erases: 9,
};
const NORDIC: Config = Config {
word_size: 4,
page_size: 0x1000,
num_pages: 20,
max_word_writes: 2,
max_page_erases: 10000,
};
const TITAN: Config = Config {
word_size: 4,
page_size: 0x800,
num_pages: 10,
max_word_writes: 2,
max_page_erases: 10000,
};
#[test] #[test]
fn nordic_capacity() { fn is_write_needed_ok() {
let driver = NORDIC.new_driver().power_on().unwrap(); assert_eq!(is_write_needed(&[], &[]), Ok(false));
assert_eq!(driver.model().capacity().total, 19123); assert_eq!(is_write_needed(&[0], &[0]), Ok(false));
} assert_eq!(is_write_needed(&[0], &[1]), Err(StoreError::InvalidStorage));
assert_eq!(is_write_needed(&[1], &[0]), Ok(true));
#[test] assert_eq!(is_write_needed(&[1], &[1]), Ok(false));
fn titan_capacity() {
let driver = TITAN.new_driver().power_on().unwrap();
assert_eq!(driver.model().capacity().total, 4315);
}
#[test]
fn minimal_virt_page_size() {
// Make sure a virtual page has 14 words. We use this property in the other tests below to
// know whether entries are spanning, starting, and ending pages.
assert_eq!(MINIMAL.new_driver().model().format().virt_page_size(), 14);
} }
#[test] #[test]
@@ -1438,4 +1443,22 @@ mod tests {
driver = driver.power_off().power_on().unwrap(); driver = driver.power_off().power_on().unwrap();
driver.check().unwrap(); driver.check().unwrap();
} }
#[test]
fn entries_ok() {
let mut driver = MINIMAL.new_driver().power_on().unwrap();
// The store is initially empty.
assert!(driver.store().entries.as_ref().unwrap().is_empty());
// Inserted elements are added.
const LEN: usize = 6;
driver.insert(0, &[0x38; (LEN - 1) * 4]).unwrap();
driver.insert(1, &[0x5c; 4]).unwrap();
assert_eq!(driver.store().entries, Some(vec![0, LEN as u16]));
// Deleted elements are removed.
driver.remove(0).unwrap();
assert_eq!(driver.store().entries, Some(vec![LEN as u16]));
}
} }

View File

@@ -0,0 +1,84 @@
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::{BufferOptions, BufferStorage, Store, StoreDriverOff};
#[derive(Clone)]
pub struct Config {
word_size: usize,
page_size: usize,
num_pages: usize,
max_word_writes: usize,
max_page_erases: usize,
}
impl Config {
pub fn new_driver(&self) -> StoreDriverOff {
let options = BufferOptions {
word_size: self.word_size,
page_size: self.page_size,
max_word_writes: self.max_word_writes,
max_page_erases: self.max_page_erases,
strict_mode: true,
};
StoreDriverOff::new(options, self.num_pages)
}
pub fn new_store(&self) -> Store<BufferStorage> {
self.new_driver().power_on().unwrap().extract_store()
}
}
pub const MINIMAL: Config = Config {
word_size: 4,
page_size: 64,
num_pages: 5,
max_word_writes: 2,
max_page_erases: 9,
};
const NORDIC: Config = Config {
word_size: 4,
page_size: 0x1000,
num_pages: 20,
max_word_writes: 2,
max_page_erases: 10000,
};
const TITAN: Config = Config {
word_size: 4,
page_size: 0x800,
num_pages: 10,
max_word_writes: 2,
max_page_erases: 10000,
};
#[test]
fn nordic_capacity() {
let driver = NORDIC.new_driver().power_on().unwrap();
assert_eq!(driver.model().capacity().total, 19123);
}
#[test]
fn titan_capacity() {
let driver = TITAN.new_driver().power_on().unwrap();
assert_eq!(driver.model().capacity().total, 4315);
}
#[test]
fn minimal_virt_page_size() {
// Make sure a virtual page has 14 words. We use this property in the other tests below to
// know whether entries are spanning, starting, and ending pages.
assert_eq!(MINIMAL.new_driver().model().format().virt_page_size(), 14);
}
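As a sketch of how the helpers above might be used in a store test; the key, the value, and the find accessor are illustrative assumptions rather than part of this diff:
#[test]
fn minimal_store_smoke_test() {
    // Build a store backed by the MINIMAL buffer configuration.
    let mut store = MINIMAL.new_store();
    // Insert, read back, and delete an arbitrary 4-byte value under key 0.
    store.insert(0, &[0x5C; 4]).unwrap();
    assert_eq!(store.find(0).unwrap(), Some(vec![0x5C; 4]));
    store.remove(0).unwrap();
    assert_eq!(store.find(0).unwrap(), None);
}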


@@ -44,7 +44,6 @@ cargo test --manifest-path tools/heapviz/Cargo.toml
echo "Checking that CTAP2 builds properly..." echo "Checking that CTAP2 builds properly..."
cargo check --release --target=thumbv7em-none-eabi cargo check --release --target=thumbv7em-none-eabi
cargo check --release --target=thumbv7em-none-eabi --features with_ctap1 cargo check --release --target=thumbv7em-none-eabi --features with_ctap1
cargo check --release --target=thumbv7em-none-eabi --features with_ctap2_1
cargo check --release --target=thumbv7em-none-eabi --features debug_ctap cargo check --release --target=thumbv7em-none-eabi --features debug_ctap
cargo check --release --target=thumbv7em-none-eabi --features panic_console cargo check --release --target=thumbv7em-none-eabi --features panic_console
cargo check --release --target=thumbv7em-none-eabi --features debug_allocations cargo check --release --target=thumbv7em-none-eabi --features debug_allocations
@@ -92,7 +91,7 @@ then
cargo test --release --features std cargo test --release --features std
cd ../.. cd ../..
cd libraries/crypto cd libraries/crypto
RUSTFLAGS='-C target-feature=+aes' cargo test --release --features std,derive_debug RUSTFLAGS='-C target-feature=+aes' cargo test --release --features std
cd ../.. cd ../..
cd libraries/persistent_store cd libraries/persistent_store
cargo test --release --features std cargo test --release --features std
@@ -104,7 +103,7 @@ then
cargo test --features std cargo test --features std
cd ../.. cd ../..
cd libraries/crypto cd libraries/crypto
RUSTFLAGS='-C target-feature=+aes' cargo test --features std,derive_debug RUSTFLAGS='-C target-feature=+aes' cargo test --features std
cd ../.. cd ../..
cd libraries/persistent_store cd libraries/persistent_store
cargo test --features std cargo test --features std
@@ -116,16 +115,4 @@ then
echo "Running unit tests on the desktop (debug mode + CTAP1)..." echo "Running unit tests on the desktop (debug mode + CTAP1)..."
cargo test --features std,with_ctap1 cargo test --features std,with_ctap1
echo "Running unit tests on the desktop (release mode + CTAP2.1)..."
cargo test --release --features std,with_ctap2_1
echo "Running unit tests on the desktop (debug mode + CTAP2.1)..."
cargo test --features std,with_ctap2_1
echo "Running unit tests on the desktop (release mode + CTAP1 + CTAP2.1)..."
cargo test --release --features std,with_ctap1,with_ctap2_1
echo "Running unit tests on the desktop (debug mode + CTAP1 + CTAP2.1)..."
cargo test --features std,with_ctap1,with_ctap2_1
fi fi


@@ -1,4 +1,4 @@
// Copyright 2020 Google LLC // Copyright 2020-2021 Google LLC
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@@ -18,9 +18,8 @@ use core::convert::TryFrom;
const APDU_HEADER_LEN: usize = 4; const APDU_HEADER_LEN: usize = 4;
#[cfg_attr(test, derive(Clone, Debug))] #[derive(Clone, Debug, PartialEq)]
#[allow(non_camel_case_types, dead_code)] #[allow(non_camel_case_types, dead_code)]
#[derive(PartialEq)]
pub enum ApduStatusCode { pub enum ApduStatusCode {
SW_SUCCESS = 0x90_00, SW_SUCCESS = 0x90_00,
/// Command successfully executed; 'XX' bytes of data are /// Command successfully executed; 'XX' bytes of data are
@@ -30,6 +29,7 @@ pub enum ApduStatusCode {
SW_WRONG_DATA = 0x6a_80, SW_WRONG_DATA = 0x6a_80,
SW_WRONG_LENGTH = 0x67_00, SW_WRONG_LENGTH = 0x67_00,
SW_COND_USE_NOT_SATISFIED = 0x69_85, SW_COND_USE_NOT_SATISFIED = 0x69_85,
SW_COMMAND_NOT_ALLOWED = 0x69_86,
SW_FILE_NOT_FOUND = 0x6a_82, SW_FILE_NOT_FOUND = 0x6a_82,
SW_INCORRECT_P1P2 = 0x6a_86, SW_INCORRECT_P1P2 = 0x6a_86,
/// Instruction code not supported or invalid /// Instruction code not supported or invalid
@@ -51,9 +51,8 @@ pub enum ApduInstructions {
GetResponse = 0xC0, GetResponse = 0xC0,
} }
#[cfg_attr(test, derive(Clone, Debug))] #[derive(Clone, Debug, Default, PartialEq)]
#[allow(dead_code)] #[allow(dead_code)]
#[derive(Default, PartialEq)]
pub struct ApduHeader { pub struct ApduHeader {
pub cla: u8, pub cla: u8,
pub ins: u8, pub ins: u8,
@@ -72,8 +71,7 @@ impl From<&[u8; APDU_HEADER_LEN]> for ApduHeader {
} }
} }
#[cfg_attr(test, derive(Clone, Debug))] #[derive(Clone, Debug, PartialEq)]
#[derive(PartialEq)]
/// The APDU cases /// The APDU cases
pub enum Case { pub enum Case {
Le1, Le1,
@@ -85,18 +83,16 @@ pub enum Case {
Le3, Le3,
} }
#[cfg_attr(test, derive(Clone, Debug))] #[derive(Clone, Debug, PartialEq)]
#[allow(dead_code)] #[allow(dead_code)]
#[derive(PartialEq)]
pub enum ApduType { pub enum ApduType {
Instruction, Instruction,
Short(Case), Short(Case),
Extended(Case), Extended(Case),
} }
#[cfg_attr(test, derive(Clone, Debug))] #[derive(Clone, Debug, PartialEq)]
#[allow(dead_code)] #[allow(dead_code)]
#[derive(PartialEq)]
pub struct APDU { pub struct APDU {
pub header: ApduHeader, pub header: ApduHeader,
pub lc: u16, pub lc: u16,

src/ctap/client_pin.rs (new file, 1760 lines): diff suppressed because it is too large.


@@ -1,4 +1,4 @@
// Copyright 2019 Google LLC // Copyright 2019-2021 Google LLC
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@@ -12,12 +12,14 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
use super::customization::{MAX_CREDENTIAL_COUNT_IN_LIST, MAX_LARGE_BLOB_ARRAY_SIZE};
use super::data_formats::{ use super::data_formats::{
extract_array, extract_bool, extract_byte_string, extract_map, extract_text_string, extract_array, extract_bool, extract_byte_string, extract_map, extract_text_string,
extract_unsigned, ok_or_missing, ClientPinSubCommand, CoseKey, GetAssertionExtensions, extract_unsigned, ok_or_missing, ClientPinSubCommand, ConfigSubCommand, ConfigSubCommandParams,
GetAssertionOptions, MakeCredentialExtensions, MakeCredentialOptions, CoseKey, CredentialManagementSubCommand, CredentialManagementSubCommandParameters,
PublicKeyCredentialDescriptor, PublicKeyCredentialParameter, PublicKeyCredentialRpEntity, GetAssertionExtensions, GetAssertionOptions, MakeCredentialExtensions, MakeCredentialOptions,
PublicKeyCredentialUserEntity, PinUvAuthProtocol, PublicKeyCredentialDescriptor, PublicKeyCredentialParameter,
PublicKeyCredentialRpEntity, PublicKeyCredentialUserEntity, SetMinPinLengthParams,
}; };
use super::key_material; use super::key_material;
use super::status_code::Ctap2StatusCode; use super::status_code::Ctap2StatusCode;
@@ -27,13 +29,11 @@ use arrayref::array_ref;
use cbor::destructure_cbor_map; use cbor::destructure_cbor_map;
use core::convert::TryFrom; use core::convert::TryFrom;
// Depending on your memory, you can use Some(n) to limit request sizes in // This constant is a consequence of the structure of messages.
// MakeCredential and GetAssertion. This affects allowList and excludeList. const MIN_LARGE_BLOB_LEN: usize = 17;
// You might also want to set the max credential size in process_get_info then.
pub const MAX_CREDENTIAL_COUNT_IN_LIST: Option<usize> = None;
// CTAP specification (version 20190130) section 6.1 // CTAP specification (version 20190130) section 6.1
#[cfg_attr(any(test, feature = "debug_ctap"), derive(Debug, PartialEq))] #[derive(Debug, PartialEq)]
pub enum Command { pub enum Command {
AuthenticatorMakeCredential(AuthenticatorMakeCredentialParameters), AuthenticatorMakeCredential(AuthenticatorMakeCredentialParameters),
AuthenticatorGetAssertion(AuthenticatorGetAssertionParameters), AuthenticatorGetAssertion(AuthenticatorGetAssertionParameters),
@@ -41,9 +41,10 @@ pub enum Command {
AuthenticatorClientPin(AuthenticatorClientPinParameters), AuthenticatorClientPin(AuthenticatorClientPinParameters),
AuthenticatorReset, AuthenticatorReset,
AuthenticatorGetNextAssertion, AuthenticatorGetNextAssertion,
#[cfg(feature = "with_ctap2_1")] AuthenticatorCredentialManagement(AuthenticatorCredentialManagementParameters),
AuthenticatorSelection, AuthenticatorSelection,
// TODO(kaczmarczyck) implement FIDO 2.1 commands (see below consts) AuthenticatorLargeBlobs(AuthenticatorLargeBlobsParameters),
AuthenticatorConfig(AuthenticatorConfigParameters),
// Vendor specific commands // Vendor specific commands
AuthenticatorVendorConfigure(AuthenticatorVendorConfigureParameters), AuthenticatorVendorConfigure(AuthenticatorVendorConfigureParameters),
} }
@@ -54,8 +55,6 @@ impl From<cbor::reader::DecoderError> for Ctap2StatusCode {
} }
} }
// TODO: Remove this `allow(dead_code)` once the constants are used.
#[allow(dead_code)]
impl Command { impl Command {
const AUTHENTICATOR_MAKE_CREDENTIAL: u8 = 0x01; const AUTHENTICATOR_MAKE_CREDENTIAL: u8 = 0x01;
const AUTHENTICATOR_GET_ASSERTION: u8 = 0x02; const AUTHENTICATOR_GET_ASSERTION: u8 = 0x02;
@@ -63,8 +62,8 @@ impl Command {
const AUTHENTICATOR_CLIENT_PIN: u8 = 0x06; const AUTHENTICATOR_CLIENT_PIN: u8 = 0x06;
const AUTHENTICATOR_RESET: u8 = 0x07; const AUTHENTICATOR_RESET: u8 = 0x07;
const AUTHENTICATOR_GET_NEXT_ASSERTION: u8 = 0x08; const AUTHENTICATOR_GET_NEXT_ASSERTION: u8 = 0x08;
// TODO(kaczmarczyck) use or remove those constants // Implement Bio Enrollment when your hardware supports biometrics.
const AUTHENTICATOR_BIO_ENROLLMENT: u8 = 0x09; const _AUTHENTICATOR_BIO_ENROLLMENT: u8 = 0x09;
const AUTHENTICATOR_CREDENTIAL_MANAGEMENT: u8 = 0x0A; const AUTHENTICATOR_CREDENTIAL_MANAGEMENT: u8 = 0x0A;
const AUTHENTICATOR_SELECTION: u8 = 0x0B; const AUTHENTICATOR_SELECTION: u8 = 0x0B;
const AUTHENTICATOR_LARGE_BLOBS: u8 = 0x0C; const AUTHENTICATOR_LARGE_BLOBS: u8 = 0x0C;
@@ -111,11 +110,28 @@ impl Command {
// Parameters are ignored. // Parameters are ignored.
Ok(Command::AuthenticatorGetNextAssertion) Ok(Command::AuthenticatorGetNextAssertion)
} }
#[cfg(feature = "with_ctap2_1")] Command::AUTHENTICATOR_CREDENTIAL_MANAGEMENT => {
let decoded_cbor = cbor::read(&bytes[1..])?;
Ok(Command::AuthenticatorCredentialManagement(
AuthenticatorCredentialManagementParameters::try_from(decoded_cbor)?,
))
}
Command::AUTHENTICATOR_SELECTION => { Command::AUTHENTICATOR_SELECTION => {
// Parameters are ignored. // Parameters are ignored.
Ok(Command::AuthenticatorSelection) Ok(Command::AuthenticatorSelection)
} }
Command::AUTHENTICATOR_LARGE_BLOBS => {
let decoded_cbor = cbor::read(&bytes[1..])?;
Ok(Command::AuthenticatorLargeBlobs(
AuthenticatorLargeBlobsParameters::try_from(decoded_cbor)?,
))
}
Command::AUTHENTICATOR_CONFIG => {
let decoded_cbor = cbor::read(&bytes[1..])?;
Ok(Command::AuthenticatorConfig(
AuthenticatorConfigParameters::try_from(decoded_cbor)?,
))
}
Command::AUTHENTICATOR_VENDOR_CONFIGURE => { Command::AUTHENTICATOR_VENDOR_CONFIGURE => {
let decoded_cbor = cbor::read(&bytes[1..])?; let decoded_cbor = cbor::read(&bytes[1..])?;
Ok(Command::AuthenticatorVendorConfigure( Ok(Command::AuthenticatorVendorConfigure(
@@ -127,18 +143,20 @@ impl Command {
} }
} }
#[cfg_attr(any(test, feature = "debug_ctap"), derive(Debug, PartialEq))] #[derive(Clone, Debug, PartialEq)]
pub struct AuthenticatorMakeCredentialParameters { pub struct AuthenticatorMakeCredentialParameters {
pub client_data_hash: Vec<u8>, pub client_data_hash: Vec<u8>,
pub rp: PublicKeyCredentialRpEntity, pub rp: PublicKeyCredentialRpEntity,
pub user: PublicKeyCredentialUserEntity, pub user: PublicKeyCredentialUserEntity,
pub pub_key_cred_params: Vec<PublicKeyCredentialParameter>, pub pub_key_cred_params: Vec<PublicKeyCredentialParameter>,
pub exclude_list: Option<Vec<PublicKeyCredentialDescriptor>>, pub exclude_list: Option<Vec<PublicKeyCredentialDescriptor>>,
pub extensions: Option<MakeCredentialExtensions>, // Extensions are optional, but we can use defaults for all missing fields.
// Even though options are optional, we can use the default if not present. pub extensions: MakeCredentialExtensions,
// Same for options, use defaults when not present.
pub options: MakeCredentialOptions, pub options: MakeCredentialOptions,
pub pin_uv_auth_param: Option<Vec<u8>>, pub pin_uv_auth_param: Option<Vec<u8>>,
pub pin_uv_auth_protocol: Option<u64>, pub pin_uv_auth_protocol: Option<PinUvAuthProtocol>,
pub enterprise_attestation: Option<u64>,
} }
impl TryFrom<cbor::Value> for AuthenticatorMakeCredentialParameters { impl TryFrom<cbor::Value> for AuthenticatorMakeCredentialParameters {
@@ -147,15 +165,16 @@ impl TryFrom<cbor::Value> for AuthenticatorMakeCredentialParameters {
fn try_from(cbor_value: cbor::Value) -> Result<Self, Ctap2StatusCode> { fn try_from(cbor_value: cbor::Value) -> Result<Self, Ctap2StatusCode> {
destructure_cbor_map! { destructure_cbor_map! {
let { let {
1 => client_data_hash, 0x01 => client_data_hash,
2 => rp, 0x02 => rp,
3 => user, 0x03 => user,
4 => cred_param_vec, 0x04 => cred_param_vec,
5 => exclude_list, 0x05 => exclude_list,
6 => extensions, 0x06 => extensions,
7 => options, 0x07 => options,
8 => pin_uv_auth_param, 0x08 => pin_uv_auth_param,
9 => pin_uv_auth_protocol, 0x09 => pin_uv_auth_protocol,
0x0A => enterprise_attestation,
} = extract_map(cbor_value)?; } = extract_map(cbor_value)?;
} }
@@ -185,18 +204,19 @@ impl TryFrom<cbor::Value> for AuthenticatorMakeCredentialParameters {
let extensions = extensions let extensions = extensions
.map(MakeCredentialExtensions::try_from) .map(MakeCredentialExtensions::try_from)
.transpose()?; .transpose()?
.unwrap_or_default();
let options = match options { let options = options
Some(entry) => MakeCredentialOptions::try_from(entry)?, .map(MakeCredentialOptions::try_from)
None => MakeCredentialOptions { .transpose()?
rk: false, .unwrap_or_default();
uv: false,
},
};
let pin_uv_auth_param = pin_uv_auth_param.map(extract_byte_string).transpose()?; let pin_uv_auth_param = pin_uv_auth_param.map(extract_byte_string).transpose()?;
let pin_uv_auth_protocol = pin_uv_auth_protocol.map(extract_unsigned).transpose()?; let pin_uv_auth_protocol = pin_uv_auth_protocol
.map(PinUvAuthProtocol::try_from)
.transpose()?;
let enterprise_attestation = enterprise_attestation.map(extract_unsigned).transpose()?;
Ok(AuthenticatorMakeCredentialParameters { Ok(AuthenticatorMakeCredentialParameters {
client_data_hash, client_data_hash,
@@ -208,20 +228,22 @@ impl TryFrom<cbor::Value> for AuthenticatorMakeCredentialParameters {
options, options,
pin_uv_auth_param, pin_uv_auth_param,
pin_uv_auth_protocol, pin_uv_auth_protocol,
enterprise_attestation,
}) })
} }
} }
#[cfg_attr(any(test, feature = "debug_ctap"), derive(Debug, PartialEq))] #[derive(Debug, PartialEq)]
pub struct AuthenticatorGetAssertionParameters { pub struct AuthenticatorGetAssertionParameters {
pub rp_id: String, pub rp_id: String,
pub client_data_hash: Vec<u8>, pub client_data_hash: Vec<u8>,
pub allow_list: Option<Vec<PublicKeyCredentialDescriptor>>, pub allow_list: Option<Vec<PublicKeyCredentialDescriptor>>,
pub extensions: Option<GetAssertionExtensions>, // Extensions are optional, but we can use defaults for all missing fields.
// Even though options are optional, we can use the default if not present. pub extensions: GetAssertionExtensions,
// Same for options, use defaults when not present.
pub options: GetAssertionOptions, pub options: GetAssertionOptions,
pub pin_uv_auth_param: Option<Vec<u8>>, pub pin_uv_auth_param: Option<Vec<u8>>,
pub pin_uv_auth_protocol: Option<u64>, pub pin_uv_auth_protocol: Option<PinUvAuthProtocol>,
} }
impl TryFrom<cbor::Value> for AuthenticatorGetAssertionParameters { impl TryFrom<cbor::Value> for AuthenticatorGetAssertionParameters {
@@ -230,13 +252,13 @@ impl TryFrom<cbor::Value> for AuthenticatorGetAssertionParameters {
fn try_from(cbor_value: cbor::Value) -> Result<Self, Ctap2StatusCode> { fn try_from(cbor_value: cbor::Value) -> Result<Self, Ctap2StatusCode> {
destructure_cbor_map! { destructure_cbor_map! {
let { let {
1 => rp_id, 0x01 => rp_id,
2 => client_data_hash, 0x02 => client_data_hash,
3 => allow_list, 0x03 => allow_list,
4 => extensions, 0x04 => extensions,
5 => options, 0x05 => options,
6 => pin_uv_auth_param, 0x06 => pin_uv_auth_param,
7 => pin_uv_auth_protocol, 0x07 => pin_uv_auth_protocol,
} = extract_map(cbor_value)?; } = extract_map(cbor_value)?;
} }
@@ -259,18 +281,18 @@ impl TryFrom<cbor::Value> for AuthenticatorGetAssertionParameters {
let extensions = extensions let extensions = extensions
.map(GetAssertionExtensions::try_from) .map(GetAssertionExtensions::try_from)
.transpose()?; .transpose()?
.unwrap_or_default();
let options = match options { let options = options
Some(entry) => GetAssertionOptions::try_from(entry)?, .map(GetAssertionOptions::try_from)
None => GetAssertionOptions { .transpose()?
up: true, .unwrap_or_default();
uv: false,
},
};
let pin_uv_auth_param = pin_uv_auth_param.map(extract_byte_string).transpose()?; let pin_uv_auth_param = pin_uv_auth_param.map(extract_byte_string).transpose()?;
let pin_uv_auth_protocol = pin_uv_auth_protocol.map(extract_unsigned).transpose()?; let pin_uv_auth_protocol = pin_uv_auth_protocol
.map(PinUvAuthProtocol::try_from)
.transpose()?;
Ok(AuthenticatorGetAssertionParameters { Ok(AuthenticatorGetAssertionParameters {
rp_id, rp_id,
@@ -284,21 +306,15 @@ impl TryFrom<cbor::Value> for AuthenticatorGetAssertionParameters {
} }
} }
#[cfg_attr(any(test, feature = "debug_ctap"), derive(Debug, PartialEq))] #[derive(Clone, Debug, PartialEq)]
pub struct AuthenticatorClientPinParameters { pub struct AuthenticatorClientPinParameters {
pub pin_protocol: u64, pub pin_uv_auth_protocol: PinUvAuthProtocol,
pub sub_command: ClientPinSubCommand, pub sub_command: ClientPinSubCommand,
pub key_agreement: Option<CoseKey>, pub key_agreement: Option<CoseKey>,
pub pin_auth: Option<Vec<u8>>, pub pin_uv_auth_param: Option<Vec<u8>>,
pub new_pin_enc: Option<Vec<u8>>, pub new_pin_enc: Option<Vec<u8>>,
pub pin_hash_enc: Option<Vec<u8>>, pub pin_hash_enc: Option<Vec<u8>>,
#[cfg(feature = "with_ctap2_1")]
pub min_pin_length: Option<u8>,
#[cfg(feature = "with_ctap2_1")]
pub min_pin_length_rp_ids: Option<Vec<String>>,
#[cfg(feature = "with_ctap2_1")]
pub permissions: Option<u8>, pub permissions: Option<u8>,
#[cfg(feature = "with_ctap2_1")]
pub permissions_rp_id: Option<String>, pub permissions_rp_id: Option<String>,
} }
@@ -306,86 +322,167 @@ impl TryFrom<cbor::Value> for AuthenticatorClientPinParameters {
type Error = Ctap2StatusCode; type Error = Ctap2StatusCode;
fn try_from(cbor_value: cbor::Value) -> Result<Self, Ctap2StatusCode> { fn try_from(cbor_value: cbor::Value) -> Result<Self, Ctap2StatusCode> {
#[cfg(not(feature = "with_ctap2_1"))]
destructure_cbor_map! { destructure_cbor_map! {
let { let {
1 => pin_protocol, 0x01 => pin_uv_auth_protocol,
2 => sub_command, 0x02 => sub_command,
3 => key_agreement, 0x03 => key_agreement,
4 => pin_auth, 0x04 => pin_uv_auth_param,
5 => new_pin_enc, 0x05 => new_pin_enc,
6 => pin_hash_enc, 0x06 => pin_hash_enc,
} = extract_map(cbor_value)?; 0x09 => permissions,
} 0x0A => permissions_rp_id,
#[cfg(feature = "with_ctap2_1")]
destructure_cbor_map! {
let {
1 => pin_protocol,
2 => sub_command,
3 => key_agreement,
4 => pin_auth,
5 => new_pin_enc,
6 => pin_hash_enc,
7 => min_pin_length,
8 => min_pin_length_rp_ids,
9 => permissions,
10 => permissions_rp_id,
} = extract_map(cbor_value)?; } = extract_map(cbor_value)?;
} }
let pin_protocol = extract_unsigned(ok_or_missing(pin_protocol)?)?; let pin_uv_auth_protocol =
PinUvAuthProtocol::try_from(ok_or_missing(pin_uv_auth_protocol)?)?;
let sub_command = ClientPinSubCommand::try_from(ok_or_missing(sub_command)?)?; let sub_command = ClientPinSubCommand::try_from(ok_or_missing(sub_command)?)?;
let key_agreement = key_agreement.map(extract_map).transpose()?.map(CoseKey); let key_agreement = key_agreement.map(CoseKey::try_from).transpose()?;
let pin_auth = pin_auth.map(extract_byte_string).transpose()?; let pin_uv_auth_param = pin_uv_auth_param.map(extract_byte_string).transpose()?;
let new_pin_enc = new_pin_enc.map(extract_byte_string).transpose()?; let new_pin_enc = new_pin_enc.map(extract_byte_string).transpose()?;
let pin_hash_enc = pin_hash_enc.map(extract_byte_string).transpose()?; let pin_hash_enc = pin_hash_enc.map(extract_byte_string).transpose()?;
#[cfg(feature = "with_ctap2_1")]
let min_pin_length = min_pin_length
.map(extract_unsigned)
.transpose()?
.map(u8::try_from)
.transpose()
.map_err(|_| Ctap2StatusCode::CTAP2_ERR_PIN_POLICY_VIOLATION)?;
#[cfg(feature = "with_ctap2_1")]
let min_pin_length_rp_ids = match min_pin_length_rp_ids {
Some(entry) => Some(
extract_array(entry)?
.into_iter()
.map(extract_text_string)
.collect::<Result<Vec<String>, Ctap2StatusCode>>()?,
),
None => None,
};
#[cfg(feature = "with_ctap2_1")]
// We expect a bit field of 8 bits, and drop everything else. // We expect a bit field of 8 bits, and drop everything else.
// This means we ignore extensions in future versions. // This means we ignore extensions in future versions.
let permissions = permissions let permissions = permissions
.map(extract_unsigned) .map(extract_unsigned)
.transpose()? .transpose()?
.map(|p| p as u8); .map(|p| p as u8);
#[cfg(feature = "with_ctap2_1")]
let permissions_rp_id = permissions_rp_id.map(extract_text_string).transpose()?; let permissions_rp_id = permissions_rp_id.map(extract_text_string).transpose()?;
Ok(AuthenticatorClientPinParameters { Ok(AuthenticatorClientPinParameters {
pin_protocol, pin_uv_auth_protocol,
sub_command, sub_command,
key_agreement, key_agreement,
pin_auth, pin_uv_auth_param,
new_pin_enc, new_pin_enc,
pin_hash_enc, pin_hash_enc,
#[cfg(feature = "with_ctap2_1")]
min_pin_length,
#[cfg(feature = "with_ctap2_1")]
min_pin_length_rp_ids,
#[cfg(feature = "with_ctap2_1")]
permissions, permissions,
#[cfg(feature = "with_ctap2_1")]
permissions_rp_id, permissions_rp_id,
}) })
} }
} }
#[cfg_attr(any(test, feature = "debug_ctap"), derive(Debug, PartialEq))] #[derive(Debug, PartialEq)]
pub struct AuthenticatorLargeBlobsParameters {
pub get: Option<usize>,
pub set: Option<Vec<u8>>,
pub offset: usize,
pub length: Option<usize>,
pub pin_uv_auth_param: Option<Vec<u8>>,
pub pin_uv_auth_protocol: Option<PinUvAuthProtocol>,
}
impl TryFrom<cbor::Value> for AuthenticatorLargeBlobsParameters {
type Error = Ctap2StatusCode;
fn try_from(cbor_value: cbor::Value) -> Result<Self, Ctap2StatusCode> {
destructure_cbor_map! {
let {
0x01 => get,
0x02 => set,
0x03 => offset,
0x04 => length,
0x05 => pin_uv_auth_param,
0x06 => pin_uv_auth_protocol,
} = extract_map(cbor_value)?;
}
// Careful: some missing parameters here result in CTAP1_ERR_INVALID_PARAMETER.
let get = get.map(extract_unsigned).transpose()?.map(|u| u as usize);
let set = set.map(extract_byte_string).transpose()?;
let offset =
extract_unsigned(offset.ok_or(Ctap2StatusCode::CTAP1_ERR_INVALID_PARAMETER)?)? as usize;
let length = length
.map(extract_unsigned)
.transpose()?
.map(|u| u as usize);
let pin_uv_auth_param = pin_uv_auth_param.map(extract_byte_string).transpose()?;
let pin_uv_auth_protocol = pin_uv_auth_protocol
.map(PinUvAuthProtocol::try_from)
.transpose()?;
if get.is_none() && set.is_none() {
return Err(Ctap2StatusCode::CTAP1_ERR_INVALID_PARAMETER);
}
if get.is_some() && set.is_some() {
return Err(Ctap2StatusCode::CTAP1_ERR_INVALID_PARAMETER);
}
if get.is_some()
&& (length.is_some() || pin_uv_auth_param.is_some() || pin_uv_auth_protocol.is_some())
{
return Err(Ctap2StatusCode::CTAP1_ERR_INVALID_PARAMETER);
}
if set.is_some() && offset == 0 {
match length {
None => return Err(Ctap2StatusCode::CTAP1_ERR_INVALID_PARAMETER),
Some(len) if len > MAX_LARGE_BLOB_ARRAY_SIZE => {
return Err(Ctap2StatusCode::CTAP2_ERR_LARGE_BLOB_STORAGE_FULL)
}
Some(len) if len < MIN_LARGE_BLOB_LEN => {
return Err(Ctap2StatusCode::CTAP1_ERR_INVALID_PARAMETER)
}
Some(_) => (),
}
}
if set.is_some() && offset != 0 && length.is_some() {
return Err(Ctap2StatusCode::CTAP1_ERR_INVALID_PARAMETER);
}
Ok(AuthenticatorLargeBlobsParameters {
get,
set,
offset,
length,
pin_uv_auth_param,
pin_uv_auth_protocol,
})
}
}
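For orientation, the 17-byte minimum enforced above matches one reading of the large-blob format, stated here as an assumption: the smallest serialized large-blob array is an empty CBOR array followed by a 16-byte truncated SHA-256 checksum.
// Sketch of the assumed minimal serialization: 0x80 (empty CBOR array) plus
// the first 16 bytes of its SHA-256 digest, i.e. 1 + 16 = 17 bytes.
use crypto::sha256::Sha256;
use crypto::Hash256;
fn assumed_min_large_blob() -> Vec<u8> {
    let mut blob = vec![0x80];
    let digest = Sha256::hash(&blob);
    blob.extend_from_slice(&digest[..16]);
    debug_assert_eq!(blob.len(), MIN_LARGE_BLOB_LEN);
    blob
}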
#[derive(Debug, PartialEq)]
pub struct AuthenticatorConfigParameters {
pub sub_command: ConfigSubCommand,
pub sub_command_params: Option<ConfigSubCommandParams>,
pub pin_uv_auth_param: Option<Vec<u8>>,
pub pin_uv_auth_protocol: Option<PinUvAuthProtocol>,
}
impl TryFrom<cbor::Value> for AuthenticatorConfigParameters {
type Error = Ctap2StatusCode;
fn try_from(cbor_value: cbor::Value) -> Result<Self, Ctap2StatusCode> {
destructure_cbor_map! {
let {
0x01 => sub_command,
0x02 => sub_command_params,
0x03 => pin_uv_auth_param,
0x04 => pin_uv_auth_protocol,
} = extract_map(cbor_value)?;
}
let sub_command = ConfigSubCommand::try_from(ok_or_missing(sub_command)?)?;
let sub_command_params = match sub_command {
ConfigSubCommand::SetMinPinLength => Some(ConfigSubCommandParams::SetMinPinLength(
SetMinPinLengthParams::try_from(ok_or_missing(sub_command_params)?)?,
)),
_ => None,
};
let pin_uv_auth_param = pin_uv_auth_param.map(extract_byte_string).transpose()?;
let pin_uv_auth_protocol = pin_uv_auth_protocol
.map(PinUvAuthProtocol::try_from)
.transpose()?;
Ok(AuthenticatorConfigParameters {
sub_command,
sub_command_params,
pin_uv_auth_param,
pin_uv_auth_protocol,
})
}
}
#[derive(Debug, PartialEq)]
pub struct AuthenticatorAttestationMaterial { pub struct AuthenticatorAttestationMaterial {
pub certificate: Vec<u8>, pub certificate: Vec<u8>,
pub private_key: [u8; key_material::ATTESTATION_PRIVATE_KEY_LENGTH], pub private_key: [u8; key_material::ATTESTATION_PRIVATE_KEY_LENGTH],
@@ -397,8 +494,8 @@ impl TryFrom<cbor::Value> for AuthenticatorAttestationMaterial {
fn try_from(cbor_value: cbor::Value) -> Result<Self, Ctap2StatusCode> { fn try_from(cbor_value: cbor::Value) -> Result<Self, Ctap2StatusCode> {
destructure_cbor_map! { destructure_cbor_map! {
let { let {
1 => certificate, 0x01 => certificate,
2 => private_key, 0x02 => private_key,
} = extract_map(cbor_value)?; } = extract_map(cbor_value)?;
} }
let certificate = extract_byte_string(ok_or_missing(certificate)?)?; let certificate = extract_byte_string(ok_or_missing(certificate)?)?;
@@ -414,7 +511,46 @@ impl TryFrom<cbor::Value> for AuthenticatorAttestationMaterial {
} }
} }
#[cfg_attr(any(test, feature = "debug_ctap"), derive(Debug, PartialEq))] #[derive(Debug, PartialEq)]
pub struct AuthenticatorCredentialManagementParameters {
pub sub_command: CredentialManagementSubCommand,
pub sub_command_params: Option<CredentialManagementSubCommandParameters>,
pub pin_uv_auth_protocol: Option<PinUvAuthProtocol>,
pub pin_uv_auth_param: Option<Vec<u8>>,
}
impl TryFrom<cbor::Value> for AuthenticatorCredentialManagementParameters {
type Error = Ctap2StatusCode;
fn try_from(cbor_value: cbor::Value) -> Result<Self, Ctap2StatusCode> {
destructure_cbor_map! {
let {
0x01 => sub_command,
0x02 => sub_command_params,
0x03 => pin_uv_auth_protocol,
0x04 => pin_uv_auth_param,
} = extract_map(cbor_value)?;
}
let sub_command = CredentialManagementSubCommand::try_from(ok_or_missing(sub_command)?)?;
let sub_command_params = sub_command_params
.map(CredentialManagementSubCommandParameters::try_from)
.transpose()?;
let pin_uv_auth_protocol = pin_uv_auth_protocol
.map(PinUvAuthProtocol::try_from)
.transpose()?;
let pin_uv_auth_param = pin_uv_auth_param.map(extract_byte_string).transpose()?;
Ok(AuthenticatorCredentialManagementParameters {
sub_command,
sub_command_params,
pin_uv_auth_protocol,
pin_uv_auth_param,
})
}
}
#[derive(Debug, PartialEq)]
pub struct AuthenticatorVendorConfigureParameters { pub struct AuthenticatorVendorConfigureParameters {
pub lockdown: bool, pub lockdown: bool,
pub attestation_material: Option<AuthenticatorAttestationMaterial>, pub attestation_material: Option<AuthenticatorAttestationMaterial>,
@@ -426,8 +562,8 @@ impl TryFrom<cbor::Value> for AuthenticatorVendorConfigureParameters {
fn try_from(cbor_value: cbor::Value) -> Result<Self, Ctap2StatusCode> { fn try_from(cbor_value: cbor::Value) -> Result<Self, Ctap2StatusCode> {
destructure_cbor_map! { destructure_cbor_map! {
let { let {
1 => lockdown, 0x01 => lockdown,
2 => attestation_material, 0x02 => attestation_material,
} = extract_map(cbor_value)?; } = extract_map(cbor_value)?;
} }
let lockdown = lockdown.map_or(Ok(false), extract_bool)?; let lockdown = lockdown.map_or(Ok(false), extract_bool)?;
@@ -449,28 +585,29 @@ mod test {
}; };
use super::super::ES256_CRED_PARAM; use super::super::ES256_CRED_PARAM;
use super::*; use super::*;
use alloc::collections::BTreeMap;
use cbor::{cbor_array, cbor_map}; use cbor::{cbor_array, cbor_map};
use crypto::rng256::ThreadRng256;
#[test] #[test]
fn test_from_cbor_make_credential_parameters() { fn test_from_cbor_make_credential_parameters() {
let cbor_value = cbor_map! { let cbor_value = cbor_map! {
1 => vec![0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F], 0x01 => vec![0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F],
2 => cbor_map! { 0x02 => cbor_map! {
"id" => "example.com", "id" => "example.com",
"name" => "Example",
"icon" => "example.com/icon.png", "icon" => "example.com/icon.png",
"name" => "Example",
}, },
3 => cbor_map! { 0x03 => cbor_map! {
"id" => vec![0x1D, 0x1D, 0x1D, 0x1D], "id" => vec![0x1D, 0x1D, 0x1D, 0x1D],
"icon" => "example.com/foo/icon.png",
"name" => "foo", "name" => "foo",
"displayName" => "bar", "displayName" => "bar",
"icon" => "example.com/foo/icon.png",
}, },
4 => cbor_array![ES256_CRED_PARAM], 0x04 => cbor_array![ES256_CRED_PARAM],
5 => cbor_array![], 0x05 => cbor_array![],
8 => vec![0x12, 0x34], 0x08 => vec![0x12, 0x34],
9 => 1, 0x09 => 1,
0x0A => 2,
}; };
let returned_make_credential_parameters = let returned_make_credential_parameters =
AuthenticatorMakeCredentialParameters::try_from(cbor_value).unwrap(); AuthenticatorMakeCredentialParameters::try_from(cbor_value).unwrap();
@@ -500,10 +637,11 @@ mod test {
user, user,
pub_key_cred_params: vec![ES256_CRED_PARAM], pub_key_cred_params: vec![ES256_CRED_PARAM],
exclude_list: Some(vec![]), exclude_list: Some(vec![]),
extensions: None, extensions: MakeCredentialExtensions::default(),
options, options,
pin_uv_auth_param: Some(vec![0x12, 0x34]), pin_uv_auth_param: Some(vec![0x12, 0x34]),
pin_uv_auth_protocol: Some(1), pin_uv_auth_protocol: Some(PinUvAuthProtocol::V1),
enterprise_attestation: Some(2),
}; };
assert_eq!( assert_eq!(
@@ -515,15 +653,15 @@ mod test {
#[test] #[test]
fn test_from_cbor_get_assertion_parameters() { fn test_from_cbor_get_assertion_parameters() {
let cbor_value = cbor_map! { let cbor_value = cbor_map! {
1 => "example.com", 0x01 => "example.com",
2 => vec![0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F], 0x02 => vec![0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F],
3 => cbor_array![ cbor_map! { 0x03 => cbor_array![ cbor_map! {
"type" => "public-key",
"id" => vec![0x2D, 0x2D, 0x2D, 0x2D], "id" => vec![0x2D, 0x2D, 0x2D, 0x2D],
"type" => "public-key",
"transports" => cbor_array!["usb"], "transports" => cbor_array!["usb"],
} ], } ],
6 => vec![0x12, 0x34], 0x06 => vec![0x12, 0x34],
7 => 1, 0x07 => 1,
}; };
let returned_get_assertion_parameters = let returned_get_assertion_parameters =
AuthenticatorGetAssertionParameters::try_from(cbor_value).unwrap(); AuthenticatorGetAssertionParameters::try_from(cbor_value).unwrap();
@@ -546,10 +684,10 @@ mod test {
rp_id, rp_id,
client_data_hash, client_data_hash,
allow_list: Some(vec![pub_key_cred_descriptor]), allow_list: Some(vec![pub_key_cred_descriptor]),
extensions: None, extensions: GetAssertionExtensions::default(),
options, options,
pin_uv_auth_param: Some(vec![0x12, 0x34]), pin_uv_auth_param: Some(vec![0x12, 0x34]),
pin_uv_auth_protocol: Some(1), pin_uv_auth_protocol: Some(PinUvAuthProtocol::V1),
}; };
assert_eq!( assert_eq!(
@@ -560,53 +698,38 @@ mod test {
#[test] #[test]
fn test_from_cbor_client_pin_parameters() { fn test_from_cbor_client_pin_parameters() {
// TODO(kaczmarczyck) inline the #cfg when #128 is resolved: let mut rng = ThreadRng256 {};
// https://github.com/google/OpenSK/issues/128 let sk = crypto::ecdh::SecKey::gensk(&mut rng);
#[cfg(not(feature = "with_ctap2_1"))] let pk = sk.genpk();
let cose_key = CoseKey::from(pk);
let cbor_value = cbor_map! { let cbor_value = cbor_map! {
1 => 1, 0x01 => 1,
2 => ClientPinSubCommand::GetPinRetries, 0x02 => ClientPinSubCommand::GetPinRetries,
3 => cbor_map!{}, 0x03 => cbor::Value::from(cose_key.clone()),
4 => vec! [0xBB], 0x04 => vec! [0xBB],
5 => vec! [0xCC], 0x05 => vec! [0xCC],
6 => vec! [0xDD], 0x06 => vec! [0xDD],
0x09 => 0x03,
0x0A => "example.com",
}; };
#[cfg(feature = "with_ctap2_1")] let returned_client_pin_parameters =
let cbor_value = cbor_map! {
1 => 1,
2 => ClientPinSubCommand::GetPinRetries,
3 => cbor_map!{},
4 => vec! [0xBB],
5 => vec! [0xCC],
6 => vec! [0xDD],
7 => 4,
8 => cbor_array!["example.com"],
9 => 0x03,
10 => "example.com",
};
let returned_pin_protocol_parameters =
AuthenticatorClientPinParameters::try_from(cbor_value).unwrap(); AuthenticatorClientPinParameters::try_from(cbor_value).unwrap();
let expected_pin_protocol_parameters = AuthenticatorClientPinParameters { let expected_client_pin_parameters = AuthenticatorClientPinParameters {
pin_protocol: 1, pin_uv_auth_protocol: PinUvAuthProtocol::V1,
sub_command: ClientPinSubCommand::GetPinRetries, sub_command: ClientPinSubCommand::GetPinRetries,
key_agreement: Some(CoseKey(BTreeMap::new())), key_agreement: Some(cose_key),
pin_auth: Some(vec![0xBB]), pin_uv_auth_param: Some(vec![0xBB]),
new_pin_enc: Some(vec![0xCC]), new_pin_enc: Some(vec![0xCC]),
pin_hash_enc: Some(vec![0xDD]), pin_hash_enc: Some(vec![0xDD]),
#[cfg(feature = "with_ctap2_1")]
min_pin_length: Some(4),
#[cfg(feature = "with_ctap2_1")]
min_pin_length_rp_ids: Some(vec!["example.com".to_string()]),
#[cfg(feature = "with_ctap2_1")]
permissions: Some(0x03), permissions: Some(0x03),
#[cfg(feature = "with_ctap2_1")]
permissions_rp_id: Some("example.com".to_string()), permissions_rp_id: Some("example.com".to_string()),
}; };
assert_eq!( assert_eq!(
returned_pin_protocol_parameters, returned_client_pin_parameters,
expected_pin_protocol_parameters expected_client_pin_parameters
); );
} }
@@ -632,7 +755,37 @@ mod test {
assert_eq!(command, Ok(Command::AuthenticatorGetNextAssertion)); assert_eq!(command, Ok(Command::AuthenticatorGetNextAssertion));
} }
#[cfg(feature = "with_ctap2_1")] #[test]
fn test_from_cbor_cred_management_parameters() {
let cbor_value = cbor_map! {
0x01 => CredentialManagementSubCommand::EnumerateCredentialsBegin as u64,
0x02 => cbor_map!{
0x01 => vec![0x1D; 32],
},
0x03 => 1,
0x04 => vec! [0x9A; 16],
};
let returned_cred_management_parameters =
AuthenticatorCredentialManagementParameters::try_from(cbor_value).unwrap();
let params = CredentialManagementSubCommandParameters {
rp_id_hash: Some(vec![0x1D; 32]),
credential_id: None,
user: None,
};
let expected_cred_management_parameters = AuthenticatorCredentialManagementParameters {
sub_command: CredentialManagementSubCommand::EnumerateCredentialsBegin,
sub_command_params: Some(params),
pin_uv_auth_protocol: Some(PinUvAuthProtocol::V1),
pin_uv_auth_param: Some(vec![0x9A; 16]),
};
assert_eq!(
returned_cred_management_parameters,
expected_cred_management_parameters
);
}
#[test] #[test]
fn test_deserialize_selection() { fn test_deserialize_selection() {
let cbor_bytes = [Command::AUTHENTICATOR_SELECTION]; let cbor_bytes = [Command::AUTHENTICATOR_SELECTION];
@@ -640,6 +793,149 @@ mod test {
assert_eq!(command, Ok(Command::AuthenticatorSelection)); assert_eq!(command, Ok(Command::AuthenticatorSelection));
} }
#[test]
fn test_from_cbor_large_blobs_parameters() {
// successful get
let cbor_value = cbor_map! {
0x01 => 2,
0x03 => 4,
};
let returned_large_blobs_parameters =
AuthenticatorLargeBlobsParameters::try_from(cbor_value).unwrap();
let expected_large_blobs_parameters = AuthenticatorLargeBlobsParameters {
get: Some(2),
set: None,
offset: 4,
length: None,
pin_uv_auth_param: None,
pin_uv_auth_protocol: None,
};
assert_eq!(
returned_large_blobs_parameters,
expected_large_blobs_parameters
);
// successful first set
let cbor_value = cbor_map! {
0x02 => vec! [0x5E],
0x03 => 0,
0x04 => MIN_LARGE_BLOB_LEN as u64,
0x05 => vec! [0xA9],
0x06 => 1,
};
let returned_large_blobs_parameters =
AuthenticatorLargeBlobsParameters::try_from(cbor_value).unwrap();
let expected_large_blobs_parameters = AuthenticatorLargeBlobsParameters {
get: None,
set: Some(vec![0x5E]),
offset: 0,
length: Some(MIN_LARGE_BLOB_LEN),
pin_uv_auth_param: Some(vec![0xA9]),
pin_uv_auth_protocol: Some(PinUvAuthProtocol::V1),
};
assert_eq!(
returned_large_blobs_parameters,
expected_large_blobs_parameters
);
// successful next set
let cbor_value = cbor_map! {
0x02 => vec! [0x5E],
0x03 => 1,
0x05 => vec! [0xA9],
0x06 => 1,
};
let returned_large_blobs_parameters =
AuthenticatorLargeBlobsParameters::try_from(cbor_value).unwrap();
let expected_large_blobs_parameters = AuthenticatorLargeBlobsParameters {
get: None,
set: Some(vec![0x5E]),
offset: 1,
length: None,
pin_uv_auth_param: Some(vec![0xA9]),
pin_uv_auth_protocol: Some(PinUvAuthProtocol::V1),
};
assert_eq!(
returned_large_blobs_parameters,
expected_large_blobs_parameters
);
// failing with neither get nor set
let cbor_value = cbor_map! {
0x03 => 4,
0x05 => vec! [0xA9],
0x06 => 1,
};
assert_eq!(
AuthenticatorLargeBlobsParameters::try_from(cbor_value),
Err(Ctap2StatusCode::CTAP1_ERR_INVALID_PARAMETER)
);
// failing with get and set
let cbor_value = cbor_map! {
0x01 => 2,
0x02 => vec! [0x5E],
0x03 => 4,
0x05 => vec! [0xA9],
0x06 => 1,
};
assert_eq!(
AuthenticatorLargeBlobsParameters::try_from(cbor_value),
Err(Ctap2StatusCode::CTAP1_ERR_INVALID_PARAMETER)
);
// failing with get and length
let cbor_value = cbor_map! {
0x01 => 2,
0x03 => 4,
0x04 => MIN_LARGE_BLOB_LEN as u64,
0x05 => vec! [0xA9],
0x06 => 1,
};
assert_eq!(
AuthenticatorLargeBlobsParameters::try_from(cbor_value),
Err(Ctap2StatusCode::CTAP1_ERR_INVALID_PARAMETER)
);
// failing with zero offset and no length present
let cbor_value = cbor_map! {
0x02 => vec! [0x5E],
0x03 => 0,
0x05 => vec! [0xA9],
0x06 => 1,
};
assert_eq!(
AuthenticatorLargeBlobsParameters::try_from(cbor_value),
Err(Ctap2StatusCode::CTAP1_ERR_INVALID_PARAMETER)
);
// failing with length smaller than minimum
let cbor_value = cbor_map! {
0x02 => vec! [0x5E],
0x03 => 0,
0x04 => MIN_LARGE_BLOB_LEN as u64 - 1,
0x05 => vec! [0xA9],
0x06 => 1,
};
assert_eq!(
AuthenticatorLargeBlobsParameters::try_from(cbor_value),
Err(Ctap2StatusCode::CTAP1_ERR_INVALID_PARAMETER)
);
// failing with non-zero offset and length present
let cbor_value = cbor_map! {
0x02 => vec! [0x5E],
0x03 => 4,
0x04 => MIN_LARGE_BLOB_LEN as u64,
0x05 => vec! [0xA9],
0x06 => 1,
};
assert_eq!(
AuthenticatorLargeBlobsParameters::try_from(cbor_value),
Err(Ctap2StatusCode::CTAP1_ERR_INVALID_PARAMETER)
);
}
#[test] #[test]
fn test_vendor_configure() { fn test_vendor_configure() {
// Incomplete command // Incomplete command
@@ -664,10 +960,10 @@ mod test {
// Attestation key is too short. // Attestation key is too short.
let cbor_value = cbor_map! { let cbor_value = cbor_map! {
1 => false, 0x01 => false,
2 => cbor_map! { 0x02 => cbor_map! {
1 => dummy_cert, 0x01 => dummy_cert,
2 => dummy_pkey[..key_material::ATTESTATION_PRIVATE_KEY_LENGTH - 1] 0x02 => dummy_pkey[..key_material::ATTESTATION_PRIVATE_KEY_LENGTH - 1]
} }
}; };
assert_eq!( assert_eq!(
@@ -677,9 +973,9 @@ mod test {
// Missing private key // Missing private key
let cbor_value = cbor_map! { let cbor_value = cbor_map! {
1 => false, 0x01 => false,
2 => cbor_map! { 0x02 => cbor_map! {
1 => dummy_cert 0x01 => dummy_cert
} }
}; };
assert_eq!( assert_eq!(
@@ -689,9 +985,9 @@ mod test {
// Missing certificate // Missing certificate
let cbor_value = cbor_map! { let cbor_value = cbor_map! {
1 => false, 0x01 => false,
2 => cbor_map! { 0x02 => cbor_map! {
2 => dummy_pkey 0x02 => dummy_pkey
} }
}; };
assert_eq!( assert_eq!(
@@ -701,10 +997,10 @@ mod test {
// Valid // Valid
let cbor_value = cbor_map! { let cbor_value = cbor_map! {
1 => false, 0x01 => false,
2 => cbor_map! { 0x02 => cbor_map! {
1 => dummy_cert, 0x01 => dummy_cert,
2 => dummy_pkey 0x02 => dummy_pkey
} }
}; };
assert_eq!( assert_eq!(

src/ctap/config_command.rs (new file, 472 lines)

@@ -0,0 +1,472 @@
// Copyright 2020-2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::client_pin::{ClientPin, PinPermission};
use super::command::AuthenticatorConfigParameters;
use super::customization::ENTERPRISE_ATTESTATION_MODE;
use super::data_formats::{ConfigSubCommand, ConfigSubCommandParams, SetMinPinLengthParams};
use super::response::ResponseData;
use super::status_code::Ctap2StatusCode;
use super::storage::PersistentStore;
use alloc::vec;
/// Processes the subcommand enableEnterpriseAttestation for AuthenticatorConfig.
fn process_enable_enterprise_attestation(
persistent_store: &mut PersistentStore,
) -> Result<ResponseData, Ctap2StatusCode> {
if ENTERPRISE_ATTESTATION_MODE.is_some() {
persistent_store.enable_enterprise_attestation()?;
Ok(ResponseData::AuthenticatorConfig)
} else {
Err(Ctap2StatusCode::CTAP1_ERR_INVALID_PARAMETER)
}
}
/// Processes the subcommand toggleAlwaysUv for AuthenticatorConfig.
fn process_toggle_always_uv(
persistent_store: &mut PersistentStore,
) -> Result<ResponseData, Ctap2StatusCode> {
persistent_store.toggle_always_uv()?;
Ok(ResponseData::AuthenticatorConfig)
}
/// Processes the subcommand setMinPINLength for AuthenticatorConfig.
fn process_set_min_pin_length(
persistent_store: &mut PersistentStore,
params: SetMinPinLengthParams,
) -> Result<ResponseData, Ctap2StatusCode> {
let SetMinPinLengthParams {
new_min_pin_length,
min_pin_length_rp_ids,
force_change_pin,
} = params;
let store_min_pin_length = persistent_store.min_pin_length()?;
let new_min_pin_length = new_min_pin_length.unwrap_or(store_min_pin_length);
if new_min_pin_length < store_min_pin_length {
return Err(Ctap2StatusCode::CTAP2_ERR_PIN_POLICY_VIOLATION);
}
let mut force_change_pin = force_change_pin.unwrap_or(false);
if force_change_pin && persistent_store.pin_hash()?.is_none() {
return Err(Ctap2StatusCode::CTAP2_ERR_PIN_NOT_SET);
}
if let Some(old_length) = persistent_store.pin_code_point_length()? {
force_change_pin |= new_min_pin_length > old_length;
}
if force_change_pin {
persistent_store.force_pin_change()?;
}
persistent_store.set_min_pin_length(new_min_pin_length)?;
if let Some(min_pin_length_rp_ids) = min_pin_length_rp_ids {
persistent_store.set_min_pin_length_rp_ids(min_pin_length_rp_ids)?;
}
Ok(ResponseData::AuthenticatorConfig)
}
/// Processes the AuthenticatorConfig command.
pub fn process_config(
persistent_store: &mut PersistentStore,
client_pin: &mut ClientPin,
params: AuthenticatorConfigParameters,
) -> Result<ResponseData, Ctap2StatusCode> {
let AuthenticatorConfigParameters {
sub_command,
sub_command_params,
pin_uv_auth_param,
pin_uv_auth_protocol,
} = params;
let enforce_uv = match sub_command {
ConfigSubCommand::ToggleAlwaysUv => false,
_ => true,
} && persistent_store.has_always_uv()?;
if persistent_store.pin_hash()?.is_some() || enforce_uv {
let pin_uv_auth_param =
pin_uv_auth_param.ok_or(Ctap2StatusCode::CTAP2_ERR_PUAT_REQUIRED)?;
let pin_uv_auth_protocol =
pin_uv_auth_protocol.ok_or(Ctap2StatusCode::CTAP2_ERR_MISSING_PARAMETER)?;
// Constants are taken from the specification, section 6.11, step 4.2.
let mut config_data = vec![0xFF; 32];
config_data.extend(&[0x0D, sub_command as u8]);
if let Some(sub_command_params) = sub_command_params.clone() {
if !cbor::write(sub_command_params.into(), &mut config_data) {
return Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR);
}
}
client_pin.verify_pin_uv_auth_token(
&config_data,
&pin_uv_auth_param,
pin_uv_auth_protocol,
)?;
client_pin.has_permission(PinPermission::AuthenticatorConfiguration)?;
}
match sub_command {
ConfigSubCommand::EnableEnterpriseAttestation => {
process_enable_enterprise_attestation(persistent_store)
}
ConfigSubCommand::ToggleAlwaysUv => process_toggle_always_uv(persistent_store),
ConfigSubCommand::SetMinPinLength => {
if let Some(ConfigSubCommandParams::SetMinPinLength(params)) = sub_command_params {
process_set_min_pin_length(persistent_store, params)
} else {
Err(Ctap2StatusCode::CTAP2_ERR_MISSING_PARAMETER)
}
}
_ => Err(Ctap2StatusCode::CTAP1_ERR_INVALID_PARAMETER),
}
}
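The byte string authenticated above has a fixed shape; the small helper below mirrors the code above purely for illustration and is not an API introduced by this diff:
// Message that pinUvAuthParam must authenticate for authenticatorConfig:
// 32 bytes of 0xFF, then 0x0D, the sub-command byte, and the CBOR-encoded
// sub-command parameters, if present.
fn assumed_config_message(
    sub_command: ConfigSubCommand,
    sub_command_params: Option<ConfigSubCommandParams>,
) -> Option<Vec<u8>> {
    let mut config_data = vec![0xFF; 32];
    config_data.extend(&[0x0D, sub_command as u8]);
    if let Some(params) = sub_command_params {
        if !cbor::write(params.into(), &mut config_data) {
            return None;
        }
    }
    Some(config_data)
}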
#[cfg(test)]
mod test {
use super::*;
use crate::ctap::customization::ENFORCE_ALWAYS_UV;
use crate::ctap::data_formats::PinUvAuthProtocol;
use crate::ctap::pin_protocol::authenticate_pin_uv_auth_token;
use crypto::rng256::ThreadRng256;
#[test]
fn test_process_enable_enterprise_attestation() {
let mut rng = ThreadRng256 {};
let mut persistent_store = PersistentStore::new(&mut rng);
let key_agreement_key = crypto::ecdh::SecKey::gensk(&mut rng);
let pin_uv_auth_token = [0x55; 32];
let mut client_pin =
ClientPin::new_test(key_agreement_key, pin_uv_auth_token, PinUvAuthProtocol::V1);
let config_params = AuthenticatorConfigParameters {
sub_command: ConfigSubCommand::EnableEnterpriseAttestation,
sub_command_params: None,
pin_uv_auth_param: None,
pin_uv_auth_protocol: None,
};
let config_response = process_config(&mut persistent_store, &mut client_pin, config_params);
if ENTERPRISE_ATTESTATION_MODE.is_some() {
assert_eq!(config_response, Ok(ResponseData::AuthenticatorConfig));
assert_eq!(persistent_store.enterprise_attestation(), Ok(true));
} else {
assert_eq!(
config_response,
Err(Ctap2StatusCode::CTAP1_ERR_INVALID_PARAMETER)
);
}
}
#[test]
fn test_process_toggle_always_uv() {
let mut rng = ThreadRng256 {};
let mut persistent_store = PersistentStore::new(&mut rng);
let key_agreement_key = crypto::ecdh::SecKey::gensk(&mut rng);
let pin_uv_auth_token = [0x55; 32];
let mut client_pin =
ClientPin::new_test(key_agreement_key, pin_uv_auth_token, PinUvAuthProtocol::V1);
let config_params = AuthenticatorConfigParameters {
sub_command: ConfigSubCommand::ToggleAlwaysUv,
sub_command_params: None,
pin_uv_auth_param: None,
pin_uv_auth_protocol: None,
};
let config_response = process_config(&mut persistent_store, &mut client_pin, config_params);
assert_eq!(config_response, Ok(ResponseData::AuthenticatorConfig));
assert!(persistent_store.has_always_uv().unwrap());
let config_params = AuthenticatorConfigParameters {
sub_command: ConfigSubCommand::ToggleAlwaysUv,
sub_command_params: None,
pin_uv_auth_param: None,
pin_uv_auth_protocol: None,
};
let config_response = process_config(&mut persistent_store, &mut client_pin, config_params);
if ENFORCE_ALWAYS_UV {
assert_eq!(
config_response,
Err(Ctap2StatusCode::CTAP2_ERR_OPERATION_DENIED)
);
} else {
assert_eq!(config_response, Ok(ResponseData::AuthenticatorConfig));
assert!(!persistent_store.has_always_uv().unwrap());
}
}
fn test_helper_process_toggle_always_uv_with_pin(pin_uv_auth_protocol: PinUvAuthProtocol) {
let mut rng = ThreadRng256 {};
let mut persistent_store = PersistentStore::new(&mut rng);
let key_agreement_key = crypto::ecdh::SecKey::gensk(&mut rng);
let pin_uv_auth_token = [0x55; 32];
let mut client_pin =
ClientPin::new_test(key_agreement_key, pin_uv_auth_token, pin_uv_auth_protocol);
persistent_store.set_pin(&[0x88; 16], 4).unwrap();
let mut config_data = vec![0xFF; 32];
config_data.extend(&[0x0D, ConfigSubCommand::ToggleAlwaysUv as u8]);
let pin_uv_auth_param =
authenticate_pin_uv_auth_token(&pin_uv_auth_token, &config_data, pin_uv_auth_protocol);
let config_params = AuthenticatorConfigParameters {
sub_command: ConfigSubCommand::ToggleAlwaysUv,
sub_command_params: None,
pin_uv_auth_param: Some(pin_uv_auth_param.clone()),
pin_uv_auth_protocol: Some(pin_uv_auth_protocol),
};
let config_response = process_config(&mut persistent_store, &mut client_pin, config_params);
if ENFORCE_ALWAYS_UV {
assert_eq!(
config_response,
Err(Ctap2StatusCode::CTAP2_ERR_OPERATION_DENIED)
);
return;
}
assert_eq!(config_response, Ok(ResponseData::AuthenticatorConfig));
assert!(persistent_store.has_always_uv().unwrap());
let config_params = AuthenticatorConfigParameters {
sub_command: ConfigSubCommand::ToggleAlwaysUv,
sub_command_params: None,
pin_uv_auth_param: Some(pin_uv_auth_param),
pin_uv_auth_protocol: Some(pin_uv_auth_protocol),
};
let config_response = process_config(&mut persistent_store, &mut client_pin, config_params);
assert_eq!(config_response, Ok(ResponseData::AuthenticatorConfig));
assert!(!persistent_store.has_always_uv().unwrap());
}
#[test]
fn test_process_toggle_always_uv_with_pin_v1() {
test_helper_process_toggle_always_uv_with_pin(PinUvAuthProtocol::V1);
}
#[test]
fn test_process_toggle_always_uv_with_pin_v2() {
test_helper_process_toggle_always_uv_with_pin(PinUvAuthProtocol::V2);
}
fn create_min_pin_config_params(
min_pin_length: u8,
min_pin_length_rp_ids: Option<Vec<String>>,
) -> AuthenticatorConfigParameters {
let set_min_pin_length_params = SetMinPinLengthParams {
new_min_pin_length: Some(min_pin_length),
min_pin_length_rp_ids,
force_change_pin: None,
};
AuthenticatorConfigParameters {
sub_command: ConfigSubCommand::SetMinPinLength,
sub_command_params: Some(ConfigSubCommandParams::SetMinPinLength(
set_min_pin_length_params,
)),
pin_uv_auth_param: None,
pin_uv_auth_protocol: Some(PinUvAuthProtocol::V1),
}
}
#[test]
fn test_process_set_min_pin_length() {
let mut rng = ThreadRng256 {};
let mut persistent_store = PersistentStore::new(&mut rng);
let key_agreement_key = crypto::ecdh::SecKey::gensk(&mut rng);
let pin_uv_auth_token = [0x55; 32];
let mut client_pin =
ClientPin::new_test(key_agreement_key, pin_uv_auth_token, PinUvAuthProtocol::V1);
// First, increase minimum PIN length from 4 to 6 without PIN auth.
let min_pin_length = 6;
let config_params = create_min_pin_config_params(min_pin_length, None);
let config_response = process_config(&mut persistent_store, &mut client_pin, config_params);
assert_eq!(config_response, Ok(ResponseData::AuthenticatorConfig));
assert_eq!(persistent_store.min_pin_length(), Ok(min_pin_length));
// Second, increase minimum PIN length from 6 to 8 with PIN auth.
// The stored PIN and its length don't matter since we control the token.
persistent_store.set_pin(&[0x88; 16], 8).unwrap();
let min_pin_length = 8;
let mut config_params = create_min_pin_config_params(min_pin_length, None);
let pin_uv_auth_param = vec![
0x5C, 0x69, 0x71, 0x29, 0xBD, 0xCC, 0x53, 0xE8, 0x3C, 0x97, 0x62, 0xDD, 0x90, 0x29,
0xB2, 0xDE,
];
config_params.pin_uv_auth_param = Some(pin_uv_auth_param);
let config_response = process_config(&mut persistent_store, &mut client_pin, config_params);
assert_eq!(config_response, Ok(ResponseData::AuthenticatorConfig));
assert_eq!(persistent_store.min_pin_length(), Ok(min_pin_length));
// Third, decreasing the minimum PIN length from 8 to 7 fails.
let mut config_params = create_min_pin_config_params(7, None);
let pin_uv_auth_param = vec![
0xC5, 0xEA, 0xC1, 0x5E, 0x7F, 0x80, 0x70, 0x1A, 0x4E, 0xC4, 0xAD, 0x85, 0x35, 0xD8,
0xA7, 0x71,
];
config_params.pin_uv_auth_param = Some(pin_uv_auth_param);
let config_response = process_config(&mut persistent_store, &mut client_pin, config_params);
assert_eq!(
config_response,
Err(Ctap2StatusCode::CTAP2_ERR_PIN_POLICY_VIOLATION)
);
assert_eq!(persistent_store.min_pin_length(), Ok(min_pin_length));
}
#[test]
fn test_process_set_min_pin_length_rp_ids() {
let mut rng = ThreadRng256 {};
let mut persistent_store = PersistentStore::new(&mut rng);
let key_agreement_key = crypto::ecdh::SecKey::gensk(&mut rng);
let pin_uv_auth_token = [0x55; 32];
let mut client_pin =
ClientPin::new_test(key_agreement_key, pin_uv_auth_token, PinUvAuthProtocol::V1);
// First, set RP IDs without PIN auth.
let min_pin_length = 6;
let min_pin_length_rp_ids = vec!["example.com".to_string()];
let config_params =
create_min_pin_config_params(min_pin_length, Some(min_pin_length_rp_ids.clone()));
let config_response = process_config(&mut persistent_store, &mut client_pin, config_params);
assert_eq!(config_response, Ok(ResponseData::AuthenticatorConfig));
assert_eq!(persistent_store.min_pin_length(), Ok(min_pin_length));
assert_eq!(
persistent_store.min_pin_length_rp_ids(),
Ok(min_pin_length_rp_ids)
);
// Second, change the RP IDs with PIN auth.
let min_pin_length = 8;
let min_pin_length_rp_ids = vec!["another.example.com".to_string()];
// The stored PIN and its length don't matter since we control the token.
persistent_store.set_pin(&[0x88; 16], 8).unwrap();
let mut config_params =
create_min_pin_config_params(min_pin_length, Some(min_pin_length_rp_ids.clone()));
let pin_uv_auth_param = vec![
0x40, 0x51, 0x2D, 0xAC, 0x2D, 0xE2, 0x15, 0x77, 0x5C, 0xF9, 0x5B, 0x62, 0x9A, 0x2D,
0xD6, 0xDA,
];
config_params.pin_uv_auth_param = Some(pin_uv_auth_param.clone());
let config_response = process_config(&mut persistent_store, &mut client_pin, config_params);
assert_eq!(config_response, Ok(ResponseData::AuthenticatorConfig));
assert_eq!(persistent_store.min_pin_length(), Ok(min_pin_length));
assert_eq!(
persistent_store.min_pin_length_rp_ids(),
Ok(min_pin_length_rp_ids.clone())
);
// Third, changing RP IDs with bad PIN auth fails.
// One PIN auth shouldn't work for different lengths.
let mut config_params =
create_min_pin_config_params(9, Some(min_pin_length_rp_ids.clone()));
config_params.pin_uv_auth_param = Some(pin_uv_auth_param.clone());
let config_response = process_config(&mut persistent_store, &mut client_pin, config_params);
assert_eq!(
config_response,
Err(Ctap2StatusCode::CTAP2_ERR_PIN_AUTH_INVALID)
);
assert_eq!(persistent_store.min_pin_length(), Ok(min_pin_length));
assert_eq!(
persistent_store.min_pin_length_rp_ids(),
Ok(min_pin_length_rp_ids.clone())
);
// Fourth, changing RP IDs with bad PIN auth fails.
// One PIN auth shouldn't work for different RP IDs.
let mut config_params = create_min_pin_config_params(
min_pin_length,
Some(vec!["counter.example.com".to_string()]),
);
config_params.pin_uv_auth_param = Some(pin_uv_auth_param);
let config_response = process_config(&mut persistent_store, &mut client_pin, config_params);
assert_eq!(
config_response,
Err(Ctap2StatusCode::CTAP2_ERR_PIN_AUTH_INVALID)
);
assert_eq!(persistent_store.min_pin_length(), Ok(min_pin_length));
assert_eq!(
persistent_store.min_pin_length_rp_ids(),
Ok(min_pin_length_rp_ids)
);
}
#[test]
fn test_process_set_min_pin_length_force_pin_change_implicit() {
let mut rng = ThreadRng256 {};
let mut persistent_store = PersistentStore::new(&mut rng);
let key_agreement_key = crypto::ecdh::SecKey::gensk(&mut rng);
let pin_uv_auth_token = [0x55; 32];
let mut client_pin =
ClientPin::new_test(key_agreement_key, pin_uv_auth_token, PinUvAuthProtocol::V1);
persistent_store.set_pin(&[0x88; 16], 4).unwrap();
// Increase min PIN, force PIN change.
let min_pin_length = 6;
let mut config_params = create_min_pin_config_params(min_pin_length, None);
let pin_uv_auth_param = Some(vec![
0x81, 0x37, 0x37, 0xF3, 0xD8, 0x69, 0xBD, 0x74, 0xFE, 0x88, 0x30, 0x8C, 0xC4, 0x2E,
0xA8, 0xC8,
]);
config_params.pin_uv_auth_param = pin_uv_auth_param;
let config_response = process_config(&mut persistent_store, &mut client_pin, config_params);
assert_eq!(config_response, Ok(ResponseData::AuthenticatorConfig));
assert_eq!(persistent_store.min_pin_length(), Ok(min_pin_length));
assert_eq!(persistent_store.has_force_pin_change(), Ok(true));
}
#[test]
fn test_process_set_min_pin_length_force_pin_change_explicit() {
let mut rng = ThreadRng256 {};
let mut persistent_store = PersistentStore::new(&mut rng);
let key_agreement_key = crypto::ecdh::SecKey::gensk(&mut rng);
let pin_uv_auth_token = [0x55; 32];
let mut client_pin =
ClientPin::new_test(key_agreement_key, pin_uv_auth_token, PinUvAuthProtocol::V1);
persistent_store.set_pin(&[0x88; 16], 4).unwrap();
let pin_uv_auth_param = Some(vec![
0xE3, 0x74, 0xF4, 0x27, 0xBE, 0x7D, 0x40, 0xB5, 0x71, 0xB6, 0xB4, 0x1A, 0xD2, 0xC1,
0x53, 0xD7,
]);
let set_min_pin_length_params = SetMinPinLengthParams {
new_min_pin_length: Some(persistent_store.min_pin_length().unwrap()),
min_pin_length_rp_ids: None,
force_change_pin: Some(true),
};
let config_params = AuthenticatorConfigParameters {
sub_command: ConfigSubCommand::SetMinPinLength,
sub_command_params: Some(ConfigSubCommandParams::SetMinPinLength(
set_min_pin_length_params,
)),
pin_uv_auth_param,
pin_uv_auth_protocol: Some(PinUvAuthProtocol::V1),
};
let config_response = process_config(&mut persistent_store, &mut client_pin, config_params);
assert_eq!(config_response, Ok(ResponseData::AuthenticatorConfig));
assert_eq!(persistent_store.has_force_pin_change(), Ok(true));
}
#[test]
fn test_process_config_vendor_prototype() {
let mut rng = ThreadRng256 {};
let mut persistent_store = PersistentStore::new(&mut rng);
let key_agreement_key = crypto::ecdh::SecKey::gensk(&mut rng);
let pin_uv_auth_token = [0x55; 32];
let mut client_pin =
ClientPin::new_test(key_agreement_key, pin_uv_auth_token, PinUvAuthProtocol::V1);
let config_params = AuthenticatorConfigParameters {
sub_command: ConfigSubCommand::VendorPrototype,
sub_command_params: None,
pin_uv_auth_param: None,
pin_uv_auth_protocol: None,
};
let config_response = process_config(&mut persistent_store, &mut client_pin, config_params);
assert_eq!(
config_response,
Err(Ctap2StatusCode::CTAP1_ERR_INVALID_PARAMETER)
);
}
}

View File

@@ -0,0 +1,928 @@
// Copyright 2020-2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::client_pin::{ClientPin, PinPermission};
use super::command::AuthenticatorCredentialManagementParameters;
use super::data_formats::{
CoseKey, CredentialManagementSubCommand, CredentialManagementSubCommandParameters,
PublicKeyCredentialDescriptor, PublicKeyCredentialRpEntity, PublicKeyCredentialSource,
PublicKeyCredentialUserEntity,
};
use super::response::{AuthenticatorCredentialManagementResponse, ResponseData};
use super::status_code::Ctap2StatusCode;
use super::storage::PersistentStore;
use super::{StatefulCommand, StatefulPermission};
use alloc::collections::BTreeSet;
use alloc::string::String;
use alloc::vec;
use alloc::vec::Vec;
use crypto::sha256::Sha256;
use crypto::Hash256;
use libtock_drivers::timer::ClockValue;
/// Generates a set with all existing RP IDs.
fn get_stored_rp_ids(
persistent_store: &PersistentStore,
) -> Result<BTreeSet<String>, Ctap2StatusCode> {
let mut rp_set = BTreeSet::new();
let mut iter_result = Ok(());
for (_, credential) in persistent_store.iter_credentials(&mut iter_result)? {
rp_set.insert(credential.rp_id);
}
iter_result?;
Ok(rp_set)
}
/// Generates the response for subcommands enumerating RPs.
fn enumerate_rps_response(
rp_id: String,
total_rps: Option<u64>,
) -> Result<AuthenticatorCredentialManagementResponse, Ctap2StatusCode> {
let rp_id_hash = Some(Sha256::hash(rp_id.as_bytes()).to_vec());
let rp = Some(PublicKeyCredentialRpEntity {
rp_id,
rp_name: None,
rp_icon: None,
});
Ok(AuthenticatorCredentialManagementResponse {
rp,
rp_id_hash,
total_rps,
..Default::default()
})
}
/// Generates the response for subcommands enumerating credentials.
fn enumerate_credentials_response(
credential: PublicKeyCredentialSource,
total_credentials: Option<u64>,
) -> Result<AuthenticatorCredentialManagementResponse, Ctap2StatusCode> {
let PublicKeyCredentialSource {
key_type,
credential_id,
private_key,
rp_id: _,
user_handle,
user_display_name,
cred_protect_policy,
creation_order: _,
user_name,
user_icon,
cred_blob: _,
large_blob_key,
} = credential;
let user = PublicKeyCredentialUserEntity {
user_id: user_handle,
user_name,
user_display_name,
user_icon,
};
let credential_id = PublicKeyCredentialDescriptor {
key_type,
key_id: credential_id,
transports: None, // You can set USB as a hint here.
};
let public_key = CoseKey::from(private_key.genpk());
Ok(AuthenticatorCredentialManagementResponse {
user: Some(user),
credential_id: Some(credential_id),
public_key: Some(public_key),
total_credentials,
cred_protect: cred_protect_policy,
large_blob_key,
..Default::default()
})
}
/// Checks if the token permissions have the correct associated RP ID.
///
/// Either no RP ID is associated, or the RP ID matches the stored credential.
fn check_rp_id_permissions(
persistent_store: &mut PersistentStore,
client_pin: &mut ClientPin,
credential_id: &[u8],
) -> Result<(), Ctap2StatusCode> {
// Pre-check a sufficient condition before calling the store.
if client_pin.has_no_rp_id_permission().is_ok() {
return Ok(());
}
let (_, credential) = persistent_store.find_credential_item(credential_id)?;
client_pin.has_no_or_rp_id_permission(&credential.rp_id)
}
/// Processes the subcommand getCredsMetadata for CredentialManagement.
fn process_get_creds_metadata(
persistent_store: &PersistentStore,
) -> Result<AuthenticatorCredentialManagementResponse, Ctap2StatusCode> {
Ok(AuthenticatorCredentialManagementResponse {
existing_resident_credentials_count: Some(persistent_store.count_credentials()? as u64),
max_possible_remaining_resident_credentials_count: Some(
persistent_store.remaining_credentials()? as u64,
),
..Default::default()
})
}
/// Processes the subcommand enumerateRPsBegin for CredentialManagement.
fn process_enumerate_rps_begin(
persistent_store: &PersistentStore,
stateful_command_permission: &mut StatefulPermission,
now: ClockValue,
) -> Result<AuthenticatorCredentialManagementResponse, Ctap2StatusCode> {
let rp_set = get_stored_rp_ids(persistent_store)?;
let total_rps = rp_set.len();
if total_rps > 1 {
stateful_command_permission.set_command(now, StatefulCommand::EnumerateRps(1));
}
// TODO https://github.com/rust-lang/rust/issues/62924 replace with pop_first()
let rp_id = rp_set
.into_iter()
.next()
.ok_or(Ctap2StatusCode::CTAP2_ERR_NO_CREDENTIALS)?;
enumerate_rps_response(rp_id, Some(total_rps as u64))
}
/// Processes the subcommand enumerateRPsGetNextRP for CredentialManagement.
fn process_enumerate_rps_get_next_rp(
persistent_store: &PersistentStore,
stateful_command_permission: &mut StatefulPermission,
) -> Result<AuthenticatorCredentialManagementResponse, Ctap2StatusCode> {
let rp_id_index = stateful_command_permission.next_enumerate_rp()?;
let rp_set = get_stored_rp_ids(persistent_store)?;
// A BTreeSet is already sorted.
let rp_id = rp_set
.into_iter()
.nth(rp_id_index)
.ok_or(Ctap2StatusCode::CTAP2_ERR_NOT_ALLOWED)?;
enumerate_rps_response(rp_id, None)
}
/// Processes the subcommand enumerateCredentialsBegin for CredentialManagement.
fn process_enumerate_credentials_begin(
persistent_store: &PersistentStore,
stateful_command_permission: &mut StatefulPermission,
client_pin: &mut ClientPin,
sub_command_params: CredentialManagementSubCommandParameters,
now: ClockValue,
) -> Result<AuthenticatorCredentialManagementResponse, Ctap2StatusCode> {
let rp_id_hash = sub_command_params
.rp_id_hash
.ok_or(Ctap2StatusCode::CTAP2_ERR_MISSING_PARAMETER)?;
client_pin.has_no_or_rp_id_hash_permission(&rp_id_hash[..])?;
let mut iter_result = Ok(());
let iter = persistent_store.iter_credentials(&mut iter_result)?;
let mut rp_credentials: Vec<usize> = iter
.filter_map(|(key, credential)| {
let cred_rp_id_hash = Sha256::hash(credential.rp_id.as_bytes());
if cred_rp_id_hash == rp_id_hash.as_slice() {
Some(key)
} else {
None
}
})
.collect();
iter_result?;
let total_credentials = rp_credentials.len();
let current_key = rp_credentials
.pop()
.ok_or(Ctap2StatusCode::CTAP2_ERR_NO_CREDENTIALS)?;
let credential = persistent_store.get_credential(current_key)?;
if total_credentials > 1 {
stateful_command_permission
.set_command(now, StatefulCommand::EnumerateCredentials(rp_credentials));
}
enumerate_credentials_response(credential, Some(total_credentials as u64))
}
/// Processes the subcommand enumerateCredentialsGetNextCredential for CredentialManagement.
fn process_enumerate_credentials_get_next_credential(
persistent_store: &PersistentStore,
stateful_command_permission: &mut StatefulPermission,
) -> Result<AuthenticatorCredentialManagementResponse, Ctap2StatusCode> {
let credential_key = stateful_command_permission.next_enumerate_credential()?;
let credential = persistent_store.get_credential(credential_key)?;
enumerate_credentials_response(credential, None)
}
/// Processes the subcommand deleteCredential for CredentialManagement.
fn process_delete_credential(
persistent_store: &mut PersistentStore,
client_pin: &mut ClientPin,
sub_command_params: CredentialManagementSubCommandParameters,
) -> Result<(), Ctap2StatusCode> {
let credential_id = sub_command_params
.credential_id
.ok_or(Ctap2StatusCode::CTAP2_ERR_MISSING_PARAMETER)?
.key_id;
check_rp_id_permissions(persistent_store, client_pin, &credential_id)?;
persistent_store.delete_credential(&credential_id)
}
/// Processes the subcommand updateUserInformation for CredentialManagement.
fn process_update_user_information(
persistent_store: &mut PersistentStore,
client_pin: &mut ClientPin,
sub_command_params: CredentialManagementSubCommandParameters,
) -> Result<(), Ctap2StatusCode> {
let credential_id = sub_command_params
.credential_id
.ok_or(Ctap2StatusCode::CTAP2_ERR_MISSING_PARAMETER)?
.key_id;
let user = sub_command_params
.user
.ok_or(Ctap2StatusCode::CTAP2_ERR_MISSING_PARAMETER)?;
check_rp_id_permissions(persistent_store, client_pin, &credential_id)?;
persistent_store.update_credential(&credential_id, user)
}
/// Processes the CredentialManagement command and all its subcommands.
pub fn process_credential_management(
persistent_store: &mut PersistentStore,
stateful_command_permission: &mut StatefulPermission,
client_pin: &mut ClientPin,
cred_management_params: AuthenticatorCredentialManagementParameters,
now: ClockValue,
) -> Result<ResponseData, Ctap2StatusCode> {
let AuthenticatorCredentialManagementParameters {
sub_command,
sub_command_params,
pin_uv_auth_protocol,
pin_uv_auth_param,
} = cred_management_params;
match (sub_command, stateful_command_permission.get_command()) {
(
CredentialManagementSubCommand::EnumerateRpsGetNextRp,
Ok(StatefulCommand::EnumerateRps(_)),
)
| (
CredentialManagementSubCommand::EnumerateCredentialsGetNextCredential,
Ok(StatefulCommand::EnumerateCredentials(_)),
) => stateful_command_permission.check_command_permission(now)?,
(_, _) => {
stateful_command_permission.clear();
}
}
match sub_command {
CredentialManagementSubCommand::GetCredsMetadata
| CredentialManagementSubCommand::EnumerateRpsBegin
| CredentialManagementSubCommand::EnumerateCredentialsBegin
| CredentialManagementSubCommand::DeleteCredential
| CredentialManagementSubCommand::UpdateUserInformation => {
let pin_uv_auth_param =
pin_uv_auth_param.ok_or(Ctap2StatusCode::CTAP2_ERR_PUAT_REQUIRED)?;
let pin_uv_auth_protocol =
pin_uv_auth_protocol.ok_or(Ctap2StatusCode::CTAP2_ERR_MISSING_PARAMETER)?;
let mut management_data = vec![sub_command as u8];
if let Some(sub_command_params) = sub_command_params.clone() {
if !cbor::write(sub_command_params.into(), &mut management_data) {
return Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR);
}
}
client_pin.verify_pin_uv_auth_token(
&management_data,
&pin_uv_auth_param,
pin_uv_auth_protocol,
)?;
// The RP ID permission is handled differently per subcommand below.
client_pin.has_permission(PinPermission::CredentialManagement)?;
}
CredentialManagementSubCommand::EnumerateRpsGetNextRp
| CredentialManagementSubCommand::EnumerateCredentialsGetNextCredential => {}
}
let response = match sub_command {
CredentialManagementSubCommand::GetCredsMetadata => {
client_pin.has_no_rp_id_permission()?;
Some(process_get_creds_metadata(persistent_store)?)
}
CredentialManagementSubCommand::EnumerateRpsBegin => {
client_pin.has_no_rp_id_permission()?;
Some(process_enumerate_rps_begin(
persistent_store,
stateful_command_permission,
now,
)?)
}
CredentialManagementSubCommand::EnumerateRpsGetNextRp => Some(
process_enumerate_rps_get_next_rp(persistent_store, stateful_command_permission)?,
),
CredentialManagementSubCommand::EnumerateCredentialsBegin => {
Some(process_enumerate_credentials_begin(
persistent_store,
stateful_command_permission,
client_pin,
sub_command_params.ok_or(Ctap2StatusCode::CTAP2_ERR_MISSING_PARAMETER)?,
now,
)?)
}
CredentialManagementSubCommand::EnumerateCredentialsGetNextCredential => {
Some(process_enumerate_credentials_get_next_credential(
persistent_store,
stateful_command_permission,
)?)
}
CredentialManagementSubCommand::DeleteCredential => {
process_delete_credential(
persistent_store,
client_pin,
sub_command_params.ok_or(Ctap2StatusCode::CTAP2_ERR_MISSING_PARAMETER)?,
)?;
None
}
CredentialManagementSubCommand::UpdateUserInformation => {
process_update_user_information(
persistent_store,
client_pin,
sub_command_params.ok_or(Ctap2StatusCode::CTAP2_ERR_MISSING_PARAMETER)?,
)?;
None
}
};
Ok(ResponseData::AuthenticatorCredentialManagement(response))
}
#[cfg(test)]
mod test {
use super::super::data_formats::{PinUvAuthProtocol, PublicKeyCredentialType};
use super::super::pin_protocol::authenticate_pin_uv_auth_token;
use super::super::CtapState;
use super::*;
use crypto::rng256::{Rng256, ThreadRng256};
const CLOCK_FREQUENCY_HZ: usize = 32768;
const DUMMY_CLOCK_VALUE: ClockValue = ClockValue::new(0, CLOCK_FREQUENCY_HZ);
fn create_credential_source(rng: &mut impl Rng256) -> PublicKeyCredentialSource {
let private_key = crypto::ecdsa::SecKey::gensk(rng);
PublicKeyCredentialSource {
key_type: PublicKeyCredentialType::PublicKey,
credential_id: rng.gen_uniform_u8x32().to_vec(),
private_key,
rp_id: String::from("example.com"),
user_handle: vec![0x01],
user_display_name: Some("display_name".to_string()),
cred_protect_policy: None,
creation_order: 0,
user_name: Some("name".to_string()),
user_icon: Some("icon".to_string()),
cred_blob: None,
large_blob_key: None,
}
}
fn test_helper_process_get_creds_metadata(pin_uv_auth_protocol: PinUvAuthProtocol) {
let mut rng = ThreadRng256 {};
let key_agreement_key = crypto::ecdh::SecKey::gensk(&mut rng);
let pin_uv_auth_token = [0x55; 32];
let client_pin =
ClientPin::new_test(key_agreement_key, pin_uv_auth_token, pin_uv_auth_protocol);
let credential_source = create_credential_source(&mut rng);
let user_immediately_present = |_| Ok(());
let mut ctap_state = CtapState::new(&mut rng, user_immediately_present, DUMMY_CLOCK_VALUE);
ctap_state.client_pin = client_pin;
ctap_state.persistent_store.set_pin(&[0u8; 16], 4).unwrap();
let management_data = vec![CredentialManagementSubCommand::GetCredsMetadata as u8];
let pin_uv_auth_param = authenticate_pin_uv_auth_token(
&pin_uv_auth_token,
&management_data,
pin_uv_auth_protocol,
);
let cred_management_params = AuthenticatorCredentialManagementParameters {
sub_command: CredentialManagementSubCommand::GetCredsMetadata,
sub_command_params: None,
pin_uv_auth_protocol: Some(pin_uv_auth_protocol),
pin_uv_auth_param: Some(pin_uv_auth_param.clone()),
};
let cred_management_response = process_credential_management(
&mut ctap_state.persistent_store,
&mut ctap_state.stateful_command_permission,
&mut ctap_state.client_pin,
cred_management_params,
DUMMY_CLOCK_VALUE,
);
let initial_capacity = match cred_management_response.unwrap() {
ResponseData::AuthenticatorCredentialManagement(Some(response)) => {
assert_eq!(response.existing_resident_credentials_count, Some(0));
response
.max_possible_remaining_resident_credentials_count
.unwrap()
}
_ => panic!("Invalid response type"),
};
ctap_state
.persistent_store
.store_credential(credential_source)
.unwrap();
let cred_management_params = AuthenticatorCredentialManagementParameters {
sub_command: CredentialManagementSubCommand::GetCredsMetadata,
sub_command_params: None,
pin_uv_auth_protocol: Some(pin_uv_auth_protocol),
pin_uv_auth_param: Some(pin_uv_auth_param),
};
let cred_management_response = process_credential_management(
&mut ctap_state.persistent_store,
&mut ctap_state.stateful_command_permission,
&mut ctap_state.client_pin,
cred_management_params,
DUMMY_CLOCK_VALUE,
);
match cred_management_response.unwrap() {
ResponseData::AuthenticatorCredentialManagement(Some(response)) => {
assert_eq!(response.existing_resident_credentials_count, Some(1));
assert_eq!(
response.max_possible_remaining_resident_credentials_count,
Some(initial_capacity - 1)
);
}
_ => panic!("Invalid response type"),
};
}
#[test]
fn test_process_get_creds_metadata_v1() {
test_helper_process_get_creds_metadata(PinUvAuthProtocol::V1);
}
#[test]
fn test_process_get_creds_metadata_v2() {
test_helper_process_get_creds_metadata(PinUvAuthProtocol::V2);
}
#[test]
fn test_process_enumerate_rps_with_uv() {
let mut rng = ThreadRng256 {};
let key_agreement_key = crypto::ecdh::SecKey::gensk(&mut rng);
let pin_uv_auth_token = [0x55; 32];
let client_pin =
ClientPin::new_test(key_agreement_key, pin_uv_auth_token, PinUvAuthProtocol::V1);
let credential_source1 = create_credential_source(&mut rng);
let mut credential_source2 = create_credential_source(&mut rng);
credential_source2.rp_id = "another.example.com".to_string();
let user_immediately_present = |_| Ok(());
let mut ctap_state = CtapState::new(&mut rng, user_immediately_present, DUMMY_CLOCK_VALUE);
ctap_state.client_pin = client_pin;
ctap_state
.persistent_store
.store_credential(credential_source1)
.unwrap();
ctap_state
.persistent_store
.store_credential(credential_source2)
.unwrap();
ctap_state.persistent_store.set_pin(&[0u8; 16], 4).unwrap();
let pin_uv_auth_param = Some(vec![
0x1A, 0xA4, 0x96, 0xDA, 0x62, 0x80, 0x28, 0x13, 0xEB, 0x32, 0xB9, 0xF1, 0xD2, 0xA9,
0xD0, 0xD1,
]);
let cred_management_params = AuthenticatorCredentialManagementParameters {
sub_command: CredentialManagementSubCommand::EnumerateRpsBegin,
sub_command_params: None,
pin_uv_auth_protocol: Some(PinUvAuthProtocol::V1),
pin_uv_auth_param,
};
let cred_management_response = process_credential_management(
&mut ctap_state.persistent_store,
&mut ctap_state.stateful_command_permission,
&mut ctap_state.client_pin,
cred_management_params,
DUMMY_CLOCK_VALUE,
);
let first_rp_id = match cred_management_response.unwrap() {
ResponseData::AuthenticatorCredentialManagement(Some(response)) => {
assert_eq!(response.total_rps, Some(2));
let rp_id = response.rp.unwrap().rp_id;
let rp_id_hash = Sha256::hash(rp_id.as_bytes());
assert_eq!(rp_id_hash, response.rp_id_hash.unwrap().as_slice());
rp_id
}
_ => panic!("Invalid response type"),
};
let cred_management_params = AuthenticatorCredentialManagementParameters {
sub_command: CredentialManagementSubCommand::EnumerateRpsGetNextRp,
sub_command_params: None,
pin_uv_auth_protocol: None,
pin_uv_auth_param: None,
};
let cred_management_response = process_credential_management(
&mut ctap_state.persistent_store,
&mut ctap_state.stateful_command_permission,
&mut ctap_state.client_pin,
cred_management_params,
DUMMY_CLOCK_VALUE,
);
let second_rp_id = match cred_management_response.unwrap() {
ResponseData::AuthenticatorCredentialManagement(Some(response)) => {
assert_eq!(response.total_rps, None);
let rp_id = response.rp.unwrap().rp_id;
let rp_id_hash = Sha256::hash(rp_id.as_bytes());
assert_eq!(rp_id_hash, response.rp_id_hash.unwrap().as_slice());
rp_id
}
_ => panic!("Invalid response type"),
};
assert!(first_rp_id != second_rp_id);
let cred_management_params = AuthenticatorCredentialManagementParameters {
sub_command: CredentialManagementSubCommand::EnumerateRpsGetNextRp,
sub_command_params: None,
pin_uv_auth_protocol: None,
pin_uv_auth_param: None,
};
let cred_management_response = process_credential_management(
&mut ctap_state.persistent_store,
&mut ctap_state.stateful_command_permission,
&mut ctap_state.client_pin,
cred_management_params,
DUMMY_CLOCK_VALUE,
);
assert_eq!(
cred_management_response,
Err(Ctap2StatusCode::CTAP2_ERR_NOT_ALLOWED)
);
}
#[test]
fn test_process_enumerate_rps_completeness() {
let mut rng = ThreadRng256 {};
let key_agreement_key = crypto::ecdh::SecKey::gensk(&mut rng);
let pin_uv_auth_token = [0x55; 32];
let client_pin =
ClientPin::new_test(key_agreement_key, pin_uv_auth_token, PinUvAuthProtocol::V1);
let credential_source = create_credential_source(&mut rng);
let user_immediately_present = |_| Ok(());
let mut ctap_state = CtapState::new(&mut rng, user_immediately_present, DUMMY_CLOCK_VALUE);
ctap_state.client_pin = client_pin;
const NUM_CREDENTIALS: usize = 20;
for i in 0..NUM_CREDENTIALS {
let mut credential = credential_source.clone();
credential.rp_id = i.to_string();
ctap_state
.persistent_store
.store_credential(credential)
.unwrap();
}
ctap_state.persistent_store.set_pin(&[0u8; 16], 4).unwrap();
let pin_uv_auth_param = Some(vec![
0x1A, 0xA4, 0x96, 0xDA, 0x62, 0x80, 0x28, 0x13, 0xEB, 0x32, 0xB9, 0xF1, 0xD2, 0xA9,
0xD0, 0xD1,
]);
let mut rp_set = BTreeSet::new();
// This mut is just to make the test code shorter.
// The command is different on the first loop iteration.
let mut cred_management_params = AuthenticatorCredentialManagementParameters {
sub_command: CredentialManagementSubCommand::EnumerateRpsBegin,
sub_command_params: None,
pin_uv_auth_protocol: Some(PinUvAuthProtocol::V1),
pin_uv_auth_param,
};
for _ in 0..NUM_CREDENTIALS {
let cred_management_response = process_credential_management(
&mut ctap_state.persistent_store,
&mut ctap_state.stateful_command_permission,
&mut ctap_state.client_pin,
cred_management_params,
DUMMY_CLOCK_VALUE,
);
match cred_management_response.unwrap() {
ResponseData::AuthenticatorCredentialManagement(Some(response)) => {
if rp_set.is_empty() {
assert_eq!(response.total_rps, Some(NUM_CREDENTIALS as u64));
} else {
assert_eq!(response.total_rps, None);
}
let rp_id = response.rp.unwrap().rp_id;
let rp_id_hash = Sha256::hash(rp_id.as_bytes());
assert_eq!(rp_id_hash, response.rp_id_hash.unwrap().as_slice());
assert!(!rp_set.contains(&rp_id));
rp_set.insert(rp_id);
}
_ => panic!("Invalid response type"),
};
cred_management_params = AuthenticatorCredentialManagementParameters {
sub_command: CredentialManagementSubCommand::EnumerateRpsGetNextRp,
sub_command_params: None,
pin_uv_auth_protocol: None,
pin_uv_auth_param: None,
};
}
let cred_management_response = process_credential_management(
&mut ctap_state.persistent_store,
&mut ctap_state.stateful_command_permission,
&mut ctap_state.client_pin,
cred_management_params,
DUMMY_CLOCK_VALUE,
);
assert_eq!(
cred_management_response,
Err(Ctap2StatusCode::CTAP2_ERR_NOT_ALLOWED)
);
}
#[test]
fn test_process_enumerate_credentials_with_uv() {
let mut rng = ThreadRng256 {};
let key_agreement_key = crypto::ecdh::SecKey::gensk(&mut rng);
let pin_uv_auth_token = [0x55; 32];
let client_pin =
ClientPin::new_test(key_agreement_key, pin_uv_auth_token, PinUvAuthProtocol::V1);
let credential_source1 = create_credential_source(&mut rng);
let mut credential_source2 = create_credential_source(&mut rng);
credential_source2.user_handle = vec![0x02];
credential_source2.user_name = Some("user2".to_string());
credential_source2.user_display_name = Some("User Two".to_string());
credential_source2.user_icon = Some("icon2".to_string());
let user_immediately_present = |_| Ok(());
let mut ctap_state = CtapState::new(&mut rng, user_immediately_present, DUMMY_CLOCK_VALUE);
ctap_state.client_pin = client_pin;
ctap_state
.persistent_store
.store_credential(credential_source1)
.unwrap();
ctap_state
.persistent_store
.store_credential(credential_source2)
.unwrap();
ctap_state.persistent_store.set_pin(&[0u8; 16], 4).unwrap();
let pin_uv_auth_param = Some(vec![
0xF8, 0xB0, 0x3C, 0xC1, 0xD5, 0x58, 0x9C, 0xB7, 0x4D, 0x42, 0xA1, 0x64, 0x14, 0x28,
0x2B, 0x68,
]);
let sub_command_params = CredentialManagementSubCommandParameters {
rp_id_hash: Some(Sha256::hash(b"example.com").to_vec()),
credential_id: None,
user: None,
};
// RP ID hash:
// A379A6F6EEAFB9A55E378C118034E2751E682FAB9F2D30AB13D2125586CE1947
let cred_management_params = AuthenticatorCredentialManagementParameters {
sub_command: CredentialManagementSubCommand::EnumerateCredentialsBegin,
sub_command_params: Some(sub_command_params),
pin_uv_auth_protocol: Some(PinUvAuthProtocol::V1),
pin_uv_auth_param,
};
let cred_management_response = process_credential_management(
&mut ctap_state.persistent_store,
&mut ctap_state.stateful_command_permission,
&mut ctap_state.client_pin,
cred_management_params,
DUMMY_CLOCK_VALUE,
);
let first_credential_id = match cred_management_response.unwrap() {
ResponseData::AuthenticatorCredentialManagement(Some(response)) => {
assert!(response.user.is_some());
assert!(response.public_key.is_some());
assert_eq!(response.total_credentials, Some(2));
response.credential_id.unwrap().key_id
}
_ => panic!("Invalid response type"),
};
let cred_management_params = AuthenticatorCredentialManagementParameters {
sub_command: CredentialManagementSubCommand::EnumerateCredentialsGetNextCredential,
sub_command_params: None,
pin_uv_auth_protocol: None,
pin_uv_auth_param: None,
};
let cred_management_response = process_credential_management(
&mut ctap_state.persistent_store,
&mut ctap_state.stateful_command_permission,
&mut ctap_state.client_pin,
cred_management_params,
DUMMY_CLOCK_VALUE,
);
let second_credential_id = match cred_management_response.unwrap() {
ResponseData::AuthenticatorCredentialManagement(Some(response)) => {
assert!(response.user.is_some());
assert!(response.public_key.is_some());
assert_eq!(response.total_credentials, None);
response.credential_id.unwrap().key_id
}
_ => panic!("Invalid response type"),
};
assert!(first_credential_id != second_credential_id);
let cred_management_params = AuthenticatorCredentialManagementParameters {
sub_command: CredentialManagementSubCommand::EnumerateCredentialsGetNextCredential,
sub_command_params: None,
pin_uv_auth_protocol: None,
pin_uv_auth_param: None,
};
let cred_management_response = process_credential_management(
&mut ctap_state.persistent_store,
&mut ctap_state.stateful_command_permission,
&mut ctap_state.client_pin,
cred_management_params,
DUMMY_CLOCK_VALUE,
);
assert_eq!(
cred_management_response,
Err(Ctap2StatusCode::CTAP2_ERR_NOT_ALLOWED)
);
}
#[test]
fn test_process_delete_credential() {
let mut rng = ThreadRng256 {};
let key_agreement_key = crypto::ecdh::SecKey::gensk(&mut rng);
let pin_uv_auth_token = [0x55; 32];
let client_pin =
ClientPin::new_test(key_agreement_key, pin_uv_auth_token, PinUvAuthProtocol::V1);
let mut credential_source = create_credential_source(&mut rng);
credential_source.credential_id = vec![0x1D; 32];
let user_immediately_present = |_| Ok(());
let mut ctap_state = CtapState::new(&mut rng, user_immediately_present, DUMMY_CLOCK_VALUE);
ctap_state.client_pin = client_pin;
ctap_state
.persistent_store
.store_credential(credential_source)
.unwrap();
ctap_state.persistent_store.set_pin(&[0u8; 16], 4).unwrap();
let pin_uv_auth_param = Some(vec![
0xBD, 0xE3, 0xEF, 0x8A, 0x77, 0x01, 0xB1, 0x69, 0x19, 0xE6, 0x62, 0xB9, 0x9B, 0x89,
0x9C, 0x64,
]);
let credential_id = PublicKeyCredentialDescriptor {
key_type: PublicKeyCredentialType::PublicKey,
key_id: vec![0x1D; 32],
transports: None, // You can set USB as a hint here.
};
let sub_command_params = CredentialManagementSubCommandParameters {
rp_id_hash: None,
credential_id: Some(credential_id),
user: None,
};
let cred_management_params = AuthenticatorCredentialManagementParameters {
sub_command: CredentialManagementSubCommand::DeleteCredential,
sub_command_params: Some(sub_command_params.clone()),
pin_uv_auth_protocol: Some(PinUvAuthProtocol::V1),
pin_uv_auth_param: pin_uv_auth_param.clone(),
};
let cred_management_response = process_credential_management(
&mut ctap_state.persistent_store,
&mut ctap_state.stateful_command_permission,
&mut ctap_state.client_pin,
cred_management_params,
DUMMY_CLOCK_VALUE,
);
assert_eq!(
cred_management_response,
Ok(ResponseData::AuthenticatorCredentialManagement(None))
);
let cred_management_params = AuthenticatorCredentialManagementParameters {
sub_command: CredentialManagementSubCommand::DeleteCredential,
sub_command_params: Some(sub_command_params),
pin_uv_auth_protocol: Some(PinUvAuthProtocol::V1),
pin_uv_auth_param,
};
let cred_management_response = process_credential_management(
&mut ctap_state.persistent_store,
&mut ctap_state.stateful_command_permission,
&mut ctap_state.client_pin,
cred_management_params,
DUMMY_CLOCK_VALUE,
);
assert_eq!(
cred_management_response,
Err(Ctap2StatusCode::CTAP2_ERR_NO_CREDENTIALS)
);
}
#[test]
fn test_process_update_user_information() {
let mut rng = ThreadRng256 {};
let key_agreement_key = crypto::ecdh::SecKey::gensk(&mut rng);
let pin_uv_auth_token = [0x55; 32];
let client_pin =
ClientPin::new_test(key_agreement_key, pin_uv_auth_token, PinUvAuthProtocol::V1);
let mut credential_source = create_credential_source(&mut rng);
credential_source.credential_id = vec![0x1D; 32];
let user_immediately_present = |_| Ok(());
let mut ctap_state = CtapState::new(&mut rng, user_immediately_present, DUMMY_CLOCK_VALUE);
ctap_state.client_pin = client_pin;
ctap_state
.persistent_store
.store_credential(credential_source)
.unwrap();
ctap_state.persistent_store.set_pin(&[0u8; 16], 4).unwrap();
let pin_uv_auth_param = Some(vec![
0xA5, 0x55, 0x8F, 0x03, 0xC3, 0xD3, 0x73, 0x1C, 0x07, 0xDA, 0x1F, 0x8C, 0xC7, 0xBD,
0x9D, 0xB7,
]);
let credential_id = PublicKeyCredentialDescriptor {
key_type: PublicKeyCredentialType::PublicKey,
key_id: vec![0x1D; 32],
transports: None, // You can set USB as a hint here.
};
let new_user = PublicKeyCredentialUserEntity {
user_id: vec![0xFF],
user_name: Some("new_name".to_string()),
user_display_name: Some("new_display_name".to_string()),
user_icon: Some("new_icon".to_string()),
};
let sub_command_params = CredentialManagementSubCommandParameters {
rp_id_hash: None,
credential_id: Some(credential_id),
user: Some(new_user),
};
let cred_management_params = AuthenticatorCredentialManagementParameters {
sub_command: CredentialManagementSubCommand::UpdateUserInformation,
sub_command_params: Some(sub_command_params),
pin_uv_auth_protocol: Some(PinUvAuthProtocol::V1),
pin_uv_auth_param,
};
let cred_management_response = process_credential_management(
&mut ctap_state.persistent_store,
&mut ctap_state.stateful_command_permission,
&mut ctap_state.client_pin,
cred_management_params,
DUMMY_CLOCK_VALUE,
);
assert_eq!(
cred_management_response,
Ok(ResponseData::AuthenticatorCredentialManagement(None))
);
let updated_credential = ctap_state
.persistent_store
.find_credential("example.com", &[0x1D; 32], false)
.unwrap()
.unwrap();
assert_eq!(updated_credential.user_handle, vec![0x01]);
assert_eq!(&updated_credential.user_name.unwrap(), "new_name");
assert_eq!(
&updated_credential.user_display_name.unwrap(),
"new_display_name"
);
assert_eq!(&updated_credential.user_icon.unwrap(), "new_icon");
}
#[test]
fn test_process_credential_management_invalid_pin_uv_auth_param() {
let mut rng = ThreadRng256 {};
let user_immediately_present = |_| Ok(());
let mut ctap_state = CtapState::new(&mut rng, user_immediately_present, DUMMY_CLOCK_VALUE);
ctap_state.persistent_store.set_pin(&[0u8; 16], 4).unwrap();
let cred_management_params = AuthenticatorCredentialManagementParameters {
sub_command: CredentialManagementSubCommand::GetCredsMetadata,
sub_command_params: None,
pin_uv_auth_protocol: Some(PinUvAuthProtocol::V1),
pin_uv_auth_param: Some(vec![0u8; 16]),
};
let cred_management_response = process_credential_management(
&mut ctap_state.persistent_store,
&mut ctap_state.stateful_command_permission,
&mut ctap_state.client_pin,
cred_management_params,
DUMMY_CLOCK_VALUE,
);
assert_eq!(
cred_management_response,
Err(Ctap2StatusCode::CTAP2_ERR_PIN_AUTH_INVALID)
);
}
}

147
src/ctap/crypto_wrapper.rs Normal file
View File

@@ -0,0 +1,147 @@
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::ctap::status_code::Ctap2StatusCode;
use alloc::vec;
use alloc::vec::Vec;
use crypto::cbc::{cbc_decrypt, cbc_encrypt};
use crypto::rng256::Rng256;
/// Wraps the AES256-CBC encryption to match what we need in CTAP.
pub fn aes256_cbc_encrypt(
rng: &mut dyn Rng256,
aes_enc_key: &crypto::aes256::EncryptionKey,
plaintext: &[u8],
embeds_iv: bool,
) -> Result<Vec<u8>, Ctap2StatusCode> {
if plaintext.len() % 16 != 0 {
return Err(Ctap2StatusCode::CTAP1_ERR_INVALID_PARAMETER);
}
let iv = if embeds_iv {
let random_bytes = rng.gen_uniform_u8x32();
*array_ref!(random_bytes, 0, 16)
} else {
[0u8; 16]
};
let mut blocks = Vec::with_capacity(plaintext.len() / 16);
// TODO(https://github.com/rust-lang/rust/issues/74985) Use array_chunks when stable.
for block in plaintext.chunks_exact(16) {
blocks.push(*array_ref!(block, 0, 16));
}
cbc_encrypt(aes_enc_key, iv, &mut blocks);
let mut ciphertext = if embeds_iv { iv.to_vec() } else { vec![] };
ciphertext.extend(blocks.iter().flatten());
Ok(ciphertext)
}
/// Wraps the AES256-CBC decryption to match what we need in CTAP.
pub fn aes256_cbc_decrypt(
aes_enc_key: &crypto::aes256::EncryptionKey,
ciphertext: &[u8],
embeds_iv: bool,
) -> Result<Vec<u8>, Ctap2StatusCode> {
if ciphertext.len() % 16 != 0 {
return Err(Ctap2StatusCode::CTAP1_ERR_INVALID_PARAMETER);
}
let mut block_len = ciphertext.len() / 16;
// TODO(https://github.com/rust-lang/rust/issues/74985) Use array_chunks when stable.
let mut block_iter = ciphertext.chunks_exact(16);
let iv = if embeds_iv {
block_len -= 1;
let iv_block = block_iter
.next()
.ok_or(Ctap2StatusCode::CTAP1_ERR_INVALID_PARAMETER)?;
*array_ref!(iv_block, 0, 16)
} else {
[0u8; 16]
};
let mut blocks = Vec::with_capacity(block_len);
for block in block_iter {
blocks.push(*array_ref!(block, 0, 16));
}
let aes_dec_key = crypto::aes256::DecryptionKey::new(aes_enc_key);
cbc_decrypt(&aes_dec_key, iv, &mut blocks);
Ok(blocks.iter().flatten().cloned().collect::<Vec<u8>>())
}
#[cfg(test)]
mod test {
use super::*;
use crypto::rng256::ThreadRng256;
#[test]
fn test_encrypt_decrypt_with_iv() {
let mut rng = ThreadRng256 {};
let aes_enc_key = crypto::aes256::EncryptionKey::new(&[0xC2; 32]);
let plaintext = vec![0xAA; 64];
let ciphertext = aes256_cbc_encrypt(&mut rng, &aes_enc_key, &plaintext, true).unwrap();
let decrypted = aes256_cbc_decrypt(&aes_enc_key, &ciphertext, true).unwrap();
assert_eq!(decrypted, plaintext);
}
#[test]
fn test_encrypt_decrypt_without_iv() {
let mut rng = ThreadRng256 {};
let aes_enc_key = crypto::aes256::EncryptionKey::new(&[0xC2; 32]);
let plaintext = vec![0xAA; 64];
let ciphertext = aes256_cbc_encrypt(&mut rng, &aes_enc_key, &plaintext, false).unwrap();
let decrypted = aes256_cbc_decrypt(&aes_enc_key, &ciphertext, false).unwrap();
assert_eq!(decrypted, plaintext);
}
#[test]
fn test_correct_iv_usage() {
let mut rng = ThreadRng256 {};
let aes_enc_key = crypto::aes256::EncryptionKey::new(&[0xC2; 32]);
let plaintext = vec![0xAA; 64];
let mut ciphertext_no_iv =
aes256_cbc_encrypt(&mut rng, &aes_enc_key, &plaintext, false).unwrap();
let mut ciphertext_with_iv = vec![0u8; 16];
ciphertext_with_iv.append(&mut ciphertext_no_iv);
let decrypted = aes256_cbc_decrypt(&aes_enc_key, &ciphertext_with_iv, true).unwrap();
assert_eq!(decrypted, plaintext);
}
#[test]
fn test_iv_manipulation_property() {
let mut rng = ThreadRng256 {};
let aes_enc_key = crypto::aes256::EncryptionKey::new(&[0xC2; 32]);
let plaintext = vec![0xAA; 64];
let mut ciphertext = aes256_cbc_encrypt(&mut rng, &aes_enc_key, &plaintext, true).unwrap();
let mut expected_plaintext = plaintext;
for i in 0..16 {
ciphertext[i] ^= 0xBB;
expected_plaintext[i] ^= 0xBB;
}
let decrypted = aes256_cbc_decrypt(&aes_enc_key, &ciphertext, true).unwrap();
assert_eq!(decrypted, expected_plaintext);
}
#[test]
fn test_chaining() {
let mut rng = ThreadRng256 {};
let aes_enc_key = crypto::aes256::EncryptionKey::new(&[0xC2; 32]);
let plaintext = vec![0xAA; 64];
let ciphertext1 = aes256_cbc_encrypt(&mut rng, &aes_enc_key, &plaintext, true).unwrap();
let ciphertext2 = aes256_cbc_encrypt(&mut rng, &aes_enc_key, &plaintext, true).unwrap();
assert_eq!(ciphertext1.len(), 80);
assert_eq!(ciphertext2.len(), 80);
// With a different IV, every block of the ciphertext should differ.
let block_iter1 = ciphertext1.chunks_exact(16);
let block_iter2 = ciphertext2.chunks_exact(16);
for (block1, block2) in block_iter1.zip(block_iter2) {
assert_ne!(block1, block2);
}
}
}

View File

@@ -1,4 +1,4 @@
// Copyright 2019 Google LLC // Copyright 2019-2021 Google LLC
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@@ -29,8 +29,7 @@ pub type Ctap1StatusCode = ApduStatusCode;
// The specification referenced in this file is at: // The specification referenced in this file is at:
// https://fidoalliance.org/specs/fido-u2f-v1.2-ps-20170411/fido-u2f-raw-message-formats-v1.2-ps-20170411.pdf // https://fidoalliance.org/specs/fido-u2f-v1.2-ps-20170411/fido-u2f-raw-message-formats-v1.2-ps-20170411.pdf
#[cfg_attr(any(test, feature = "debug_ctap"), derive(Clone, Debug))] #[derive(Clone, Debug, PartialEq)]
#[derive(PartialEq)]
pub enum Ctap1Flags { pub enum Ctap1Flags {
CheckOnly = 0x07, CheckOnly = 0x07,
EnforceUpAndSign = 0x03, EnforceUpAndSign = 0x03,
@@ -56,7 +55,7 @@ impl Into<u8> for Ctap1Flags {
} }
} }
#[cfg_attr(any(test, feature = "debug_ctap"), derive(Debug, PartialEq))] #[derive(Debug, PartialEq)]
// TODO: remove #allow when https://github.com/rust-lang/rust/issues/64362 is fixed // TODO: remove #allow when https://github.com/rust-lang/rust/issues/64362 is fixed
enum U2fCommand { enum U2fCommand {
#[allow(dead_code)] #[allow(dead_code)]
@@ -190,6 +189,12 @@ impl Ctap1Command {
R: Rng256, R: Rng256,
CheckUserPresence: Fn(ChannelID) -> Result<(), Ctap2StatusCode>, CheckUserPresence: Fn(ChannelID) -> Result<(), Ctap2StatusCode>,
{ {
if !ctap_state
.allows_ctap1()
.map_err(|_| Ctap1StatusCode::SW_INTERNAL_EXCEPTION)?
{
return Err(Ctap1StatusCode::SW_COMMAND_NOT_ALLOWED);
}
let command = U2fCommand::try_from(message)?; let command = U2fCommand::try_from(message)?;
match command { match command {
U2fCommand::Register { U2fCommand::Register {
@@ -399,6 +404,21 @@ mod test {
message message
} }
#[test]
fn test_process_allowed() {
let mut rng = ThreadRng256 {};
let dummy_user_presence = |_| panic!("Unexpected user presence check in CTAP1");
let mut ctap_state = CtapState::new(&mut rng, dummy_user_presence, START_CLOCK_VALUE);
ctap_state.persistent_store.toggle_always_uv().unwrap();
let application = [0x0A; 32];
let message = create_register_message(&application);
ctap_state.u2f_up_state.consume_up(START_CLOCK_VALUE);
ctap_state.u2f_up_state.grant_up(START_CLOCK_VALUE);
let response = Ctap1Command::process_command(&message, &mut ctap_state, START_CLOCK_VALUE);
assert_eq!(response, Err(Ctap1StatusCode::SW_COMMAND_NOT_ALLOWED));
}
#[test] #[test]
fn test_process_register() { fn test_process_register() {
let mut rng = ThreadRng256 {}; let mut rng = ThreadRng256 {};

280
src/ctap/customization.rs Normal file
View File

@@ -0,0 +1,280 @@
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! This file contains all customizable constants.
//!
//! If you adapt them, make sure to run the tests before flashing the firmware.
//! Our deploy script enforces the invariants.
use crate::ctap::data_formats::{CredentialProtectionPolicy, EnterpriseAttestationMode};
// ###########################################################################
// Constants for adjusting privacy and protection levels.
// ###########################################################################
/// Changes the default level for the credProtect extension.
///
/// You can change this value to one of the following for more privacy:
/// - CredentialProtectionPolicy::UserVerificationOptionalWithCredentialIdList
/// - CredentialProtectionPolicy::UserVerificationRequired
///
/// UserVerificationOptionalWithCredentialIdList
/// Resident credentials are discoverable with
/// - an allowList,
/// - an excludeList,
/// - user verification.
///
/// UserVerificationRequired
/// Resident credentials are discoverable with user verification only.
///
/// This can improve privacy, but can make usage less comfortable.
pub const DEFAULT_CRED_PROTECT: Option<CredentialProtectionPolicy> = None;
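// Example (illustrative only, not the shipped default): to make resident
// credentials discoverable only with user verification, the constant above
// could instead be set to:
//
//     pub const DEFAULT_CRED_PROTECT: Option<CredentialProtectionPolicy> =
//         Some(CredentialProtectionPolicy::UserVerificationRequired);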
/// Sets the initial minimum PIN length in code points.
///
/// # Invariant
///
/// - The minimum PIN length must be at least 4.
/// - The minimum PIN length must be at most 63.
/// - DEFAULT_MIN_PIN_LENGTH_RP_IDS must be non-empty if MAX_RP_IDS_LENGTH is 0.
///
/// Requiring longer PINs can help establish trust between users and relying
/// parties. It makes user verification harder to break, but less convenient.
/// NIST recommends at least 6-digit PINs in section 5.1.9.1:
/// https://pages.nist.gov/800-63-3/sp800-63b.html
///
/// Reset reverts the minimum PIN length to this DEFAULT_MIN_PIN_LENGTH.
pub const DEFAULT_MIN_PIN_LENGTH: u8 = 4;
/// Lists relying parties that can read the minimum PIN length.
///
/// # Invariant
///
/// - DEFAULT_MIN_PIN_LENGTH_RP_IDS must be non-empty if MAX_RP_IDS_LENGTH is 0
///
/// Only the RP IDs listed in DEFAULT_MIN_PIN_LENGTH_RP_IDS are allowed to read
/// the minimum PIN length with the minPinLength extension.
pub const DEFAULT_MIN_PIN_LENGTH_RP_IDS: &[&str] = &[];
/// Enforces the alwaysUv option.
///
/// When set to true, commands require a PIN.
/// Also, alwaysUv cannot be disabled by commands.
///
/// A certification (additional to FIDO Alliance's) might require enforcing
/// alwaysUv. Otherwise, users should have the choice to configure alwaysUv.
/// Calling toggleAlwaysUv is preferred over enforcing alwaysUv here.
pub const ENFORCE_ALWAYS_UV: bool = false;
/// Allows usage of enterprise attestation.
///
/// # Invariant
///
/// - Enterprise and batch attestation can not both be active.
/// - If the mode is VendorFacilitated, ENTERPRISE_RP_ID_LIST must be non-empty.
///
/// For privacy reasons, it is disabled by default. You can choose between:
/// - EnterpriseAttestationMode::VendorFacilitated
/// - EnterpriseAttestationMode::PlatformManaged
///
/// VendorFacilitated
/// Enterprise attestation is restricted to ENTERPRISE_RP_ID_LIST. Add your
/// enterprise's domain, e.g. "example.com", to the list below.
///
/// PlatformManaged
/// All relying parties can request an enterprise attestation. The authenticator
/// trusts the platform to filter requests.
///
/// To enable the feature, send the subcommand enableEnterpriseAttestation in
/// AuthenticatorConfig. An enterprise might want to customize the type of
/// attestation that is used. OpenSK defaults to batch attestation. Configuring
/// individual certificates then makes authenticators identifiable.
///
/// OpenSK prevents activating batch and enterprise attestation together. The
/// current implementation uses the same key material for both, and these
/// two modes have conflicting privacy guarantees.
/// If you implement your own enterprise attestation mechanism, and you want
/// batch attestation at the same time, proceed carefully and remove the
/// assertion.
pub const ENTERPRISE_ATTESTATION_MODE: Option<EnterpriseAttestationMode> = None;
/// Lists relying party IDs that can perform enterprise attestation.
///
/// # Invariant
///
/// - If the mode is VendorFacilitated, ENTERPRISE_RP_ID_LIST must be non-empty.
///
/// This list is only considered if the enterprise attestation mode is
/// VendorFacilitated.
pub const ENTERPRISE_RP_ID_LIST: &[&str] = &[];
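// Example (illustrative only): a vendor-facilitated setup restricted to a
// single enterprise domain would combine the two constants above like this:
//
//     pub const ENTERPRISE_ATTESTATION_MODE: Option<EnterpriseAttestationMode> =
//         Some(EnterpriseAttestationMode::VendorFacilitated);
//     pub const ENTERPRISE_RP_ID_LIST: &[&str] = &["example.com"];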
/// Maximum message size for CTAP commands.
///
/// The maximum value is 7609, as HID packets can not encode longer messages.
/// 1024 is the default mentioned in the authenticatorLargeBlobs command.
/// Larger values are preferred, as that allows more parameters in commands.
/// If long commands are too unreliable on your hardware, consider decreasing
/// this value.
pub const MAX_MSG_SIZE: usize = 7609;
/// Sets the number of consecutive failed PINs before blocking interaction.
///
/// # Invariant
///
/// - CTAP2.0: Maximum PIN retries must be 8.
/// - CTAP2.1: Maximum PIN retries must be 8 at most.
///
/// The fail retry counter is reset after entering the correct PIN.
pub const MAX_PIN_RETRIES: u8 = 8;
/// Enables or disables basic attestation for FIDO2.
///
/// # Invariant
///
/// - Enterprise and batch attestation can not both be active (see above).
///
/// The basic attestation uses the signing key configured with a vendor command
/// as a batch key. If you turn batch attestation on, be aware that it is your
/// responsibility to safely generate and store the key material. Also, the
/// batches must have a size of at least 100k authenticators before using new key
/// material.
/// U2F is unaffected by this setting.
///
/// https://www.w3.org/TR/webauthn/#attestation
pub const USE_BATCH_ATTESTATION: bool = false;
/// Enables or disables signature counters.
///
/// The signature counter is currently implemented as a global counter.
/// The specification strongly suggests having per-credential counters.
/// Implementing those means you can no longer have an unlimited number of
/// server-side credentials. Also, since counters need frequent writes to
/// persistent storage, we might need a flash-friendly implementation. This
/// solution is a compromise: it stays compatible with U2F without wasting
/// storage.
///
/// https://www.w3.org/TR/webauthn/#signature-counter
pub const USE_SIGNATURE_COUNTER: bool = true;
// ###########################################################################
// Constants for performance optimization or adapting to different hardware.
//
// Those constants may be modified before compilation to tune the behavior of
// the key.
// ###########################################################################
/// Sets the maximum blob size stored with the credBlob extension.
///
/// # Invariant
///
/// - The length must be at least 32.
pub const MAX_CRED_BLOB_LENGTH: usize = 32;
/// Limits the number of considered entries in credential lists.
///
/// # Invariant
///
/// - This value, if present, must be at least 1 (more is preferred).
///
/// Depending on your memory, you can use Some(n) to limit request sizes in
/// MakeCredential and GetAssertion. This affects allowList and excludeList.
pub const MAX_CREDENTIAL_COUNT_IN_LIST: Option<usize> = None;
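// Example (illustrative only): a memory-constrained build could cap credential
// lists at, say, 8 entries:
//
//     pub const MAX_CREDENTIAL_COUNT_IN_LIST: Option<usize> = Some(8);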
/// Limits the size of largeBlobs the authenticator stores.
///
/// # Invariant
///
/// - The allowed size must be at least 1024.
/// - The array must fit into the shards reserved in storage/key.rs.
pub const MAX_LARGE_BLOB_ARRAY_SIZE: usize = 2048;
/// Limits the number of RP IDs that can change the minimum PIN length.
///
/// # Invariant
///
/// - If this value is 0, DEFAULT_MIN_PIN_LENGTH_RP_IDS must be non-empty.
///
/// You can use this constant to put an upper limit on storage requirements.
/// This might be useful if you want to predict the remaining storage more
/// reliably. Stored strings can still be of arbitrary length though, until RP ID
/// truncation is implemented.
/// Outside of memory considerations, you can set this value to 0 if only RP IDs
/// in DEFAULT_MIN_PIN_LENGTH_RP_IDS should be allowed to change the minimum PIN
/// length.
pub const MAX_RP_IDS_LENGTH: usize = 8;
/// Sets the number of resident keys you can store.
///
/// # Invariant
///
/// - The storage key CREDENTIALS must fit at least this number of credentials.
///
/// This value has implications on the flash lifetime, please see the
/// documentation for NUM_PAGES below.
pub const MAX_SUPPORTED_RESIDENT_KEYS: usize = 150;
/// Sets the number of pages used for persistent storage.
///
/// The number of pages should be at least 3 and at most what the flash can
/// hold. There should be no reason to put a small number here, except that the
/// latency of flash operations is linear in the number of pages. This may
/// improve in the future. Currently, using 20 pages gives between 20ms and
/// 240ms per operation. The rule of thumb is between 1ms and 12ms per
/// additional page.
///
/// Limiting the number of resident keys makes it possible to guarantee a
/// minimum number of counter increments.
/// Let:
/// - P the number of pages (NUM_PAGES)
/// - K the maximum number of resident keys (MAX_SUPPORTED_RESIDENT_KEYS)
/// - S the maximum size of a resident key (about 500)
/// - C the number of erase cycles (10000)
/// - I the minimum number of counter increments
///
/// We have: I = (P * 4084 - 5107 - K * S) / 8 * C
///
/// With P=20 and K=150, we have I=2M which is enough for 500 increments per day
/// for 10 years.
pub const NUM_PAGES: usize = 20;
#[cfg(test)]
mod test {
use super::*;
#[test]
#[allow(clippy::assertions_on_constants)]
fn test_invariants() {
// Two invariants are currently tested in different files:
// - storage.rs: if MAX_LARGE_BLOB_ARRAY_SIZE fits the shards
// - storage/key.rs: if MAX_SUPPORTED_RESIDENT_KEYS fits CREDENTIALS
assert!(DEFAULT_MIN_PIN_LENGTH >= 4);
assert!(DEFAULT_MIN_PIN_LENGTH <= 63);
assert!(!USE_BATCH_ATTESTATION || ENTERPRISE_ATTESTATION_MODE.is_none());
if let Some(EnterpriseAttestationMode::VendorFacilitated) = ENTERPRISE_ATTESTATION_MODE {
assert!(!ENTERPRISE_RP_ID_LIST.is_empty());
} else {
assert!(ENTERPRISE_RP_ID_LIST.is_empty());
}
assert!(MAX_MSG_SIZE >= 1024);
assert!(MAX_MSG_SIZE <= 7609);
assert!(MAX_PIN_RETRIES <= 8);
assert!(MAX_CRED_BLOB_LENGTH >= 32);
if let Some(count) = MAX_CREDENTIAL_COUNT_IN_LIST {
assert!(count >= 1);
}
assert!(MAX_LARGE_BLOB_ARRAY_SIZE >= 1024);
if MAX_RP_IDS_LENGTH == 0 {
assert!(!DEFAULT_MIN_PIN_LENGTH_RP_IDS.is_empty());
}
}
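/// Sketch of the counter increment estimate from the NUM_PAGES documentation,
/// assuming S = 500 bytes per resident key and C = 10000 erase cycles as
/// stated there.
#[test]
fn test_counter_increment_estimate() {
// S and C are taken from the doc comment above NUM_PAGES, not from constants
// defined elsewhere in this file.
const KEY_SIZE: usize = 500;
const ERASE_CYCLES: usize = 10_000;
let increments =
(NUM_PAGES * 4084 - 5107 - MAX_SUPPORTED_RESIDENT_KEYS * KEY_SIZE) / 8 * ERASE_CYCLES;
// With NUM_PAGES = 20 and MAX_SUPPORTED_RESIDENT_KEYS = 150 this is roughly
// 1.96 million increments, enough for 500 increments per day over 10 years.
assert!(increments >= 500 * 365 * 10);
}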
}

File diff suppressed because it is too large

View File

@@ -1,4 +1,4 @@
// Copyright 2019 Google LLC // Copyright 2019-2021 Google LLC
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@@ -177,7 +177,7 @@ impl CtapHid {
match message.cmd { match message.cmd {
// CTAP specification (version 20190130) section 8.1.9.1.1 // CTAP specification (version 20190130) section 8.1.9.1.1
CtapHid::COMMAND_MSG => { CtapHid::COMMAND_MSG => {
// If we don't have CTAP1 backward compatibility, this command in invalid. // If we don't have CTAP1 backward compatibility, this command is invalid.
#[cfg(not(feature = "with_ctap1"))] #[cfg(not(feature = "with_ctap1"))]
return CtapHid::error_message(cid, CtapHid::ERR_INVALID_CMD); return CtapHid::error_message(cid, CtapHid::ERR_INVALID_CMD);
@@ -219,7 +219,7 @@ impl CtapHid {
cid, cid,
cmd: CtapHid::COMMAND_CBOR, cmd: CtapHid::COMMAND_CBOR,
payload: vec![ payload: vec![
Ctap2StatusCode::CTAP2_ERR_VENDOR_RESPONSE_TOO_LONG as u8, Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR as u8,
], ],
}) })
.unwrap() .unwrap()
@@ -322,6 +322,9 @@ impl CtapHid {
receive::Error::UnexpectedSeq => { receive::Error::UnexpectedSeq => {
CtapHid::error_message(cid, CtapHid::ERR_INVALID_SEQ) CtapHid::error_message(cid, CtapHid::ERR_INVALID_SEQ)
} }
receive::Error::UnexpectedLen => {
CtapHid::error_message(cid, CtapHid::ERR_INVALID_LEN)
}
receive::Error::Timeout => { receive::Error::Timeout => {
CtapHid::error_message(cid, CtapHid::ERR_MSG_TIMEOUT) CtapHid::error_message(cid, CtapHid::ERR_MSG_TIMEOUT)
} }

View File

@@ -1,4 +1,4 @@
// Copyright 2019 Google LLC // Copyright 2019-2021 Google LLC
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
use super::super::customization::MAX_MSG_SIZE;
use super::{ChannelID, CtapHid, HidPacket, Message, ProcessedPacket}; use super::{ChannelID, CtapHid, HidPacket, Message, ProcessedPacket};
use alloc::vec::Vec; use alloc::vec::Vec;
use core::mem::swap; use core::mem::swap;
@@ -45,6 +46,8 @@ pub enum Error {
UnexpectedContinuation, UnexpectedContinuation,
// Expected a continuation packet with a specific sequence number, got another sequence number. // Expected a continuation packet with a specific sequence number, got another sequence number.
UnexpectedSeq, UnexpectedSeq,
// The length of a message is too big.
UnexpectedLen,
// This packet arrived after a timeout. // This packet arrived after a timeout.
Timeout, Timeout,
} }
@@ -107,7 +110,7 @@ impl MessageAssembler {
// Expecting an initialization packet. // Expecting an initialization packet.
match processed_packet { match processed_packet {
ProcessedPacket::InitPacket { cmd, len, data } => { ProcessedPacket::InitPacket { cmd, len, data } => {
Ok(self.accept_init_packet(*cid, cmd, len, data, timestamp)) self.parse_init_packet(*cid, cmd, len, data, timestamp)
} }
ProcessedPacket::ContinuationPacket { .. } => { ProcessedPacket::ContinuationPacket { .. } => {
// CTAP specification (version 20190130) section 8.1.5.4 // CTAP specification (version 20190130) section 8.1.5.4
@@ -129,7 +132,7 @@ impl MessageAssembler {
ProcessedPacket::InitPacket { cmd, len, data } => { ProcessedPacket::InitPacket { cmd, len, data } => {
self.reset(); self.reset();
if cmd == CtapHid::COMMAND_INIT { if cmd == CtapHid::COMMAND_INIT {
Ok(self.accept_init_packet(*cid, cmd, len, data, timestamp)) self.parse_init_packet(*cid, cmd, len, data, timestamp)
} else { } else {
Err((*cid, Error::UnexpectedInit)) Err((*cid, Error::UnexpectedInit))
} }
@@ -151,24 +154,25 @@ impl MessageAssembler {
} }
} }
fn accept_init_packet( fn parse_init_packet(
&mut self, &mut self,
cid: ChannelID, cid: ChannelID,
cmd: u8, cmd: u8,
len: usize, len: usize,
data: &[u8], data: &[u8],
timestamp: Timestamp<isize>, timestamp: Timestamp<isize>,
) -> Option<Message> { ) -> Result<Option<Message>, (ChannelID, Error)> {
// TODO: Should invalid commands/payload lengths be rejected early, i.e. as soon as the // Reject invalid lengths early to reduce the risk of running out of memory.
// initialization packet is received, or should we build a message and then catch the // TODO: also reject invalid commands early?
// error? if len > MAX_MSG_SIZE {
// The specification (version 20190130) isn't clear on this point. return Err((cid, Error::UnexpectedLen));
}
self.cid = cid; self.cid = cid;
self.last_timestamp = timestamp; self.last_timestamp = timestamp;
self.cmd = cmd; self.cmd = cmd;
self.seq = 0; self.seq = 0;
self.remaining_payload_len = len; self.remaining_payload_len = len;
self.append_payload(data) Ok(self.append_payload(data))
} }
fn append_payload(&mut self, data: &[u8]) -> Option<Message> { fn append_payload(&mut self, data: &[u8]) -> Option<Message> {

View File

@@ -1,4 +1,4 @@
-// Copyright 2019 Google LLC
+// Copyright 2019-2021 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.


@@ -1,4 +1,4 @@
-// Copyright 2019 Google LLC
+// Copyright 2019-2021 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.

src/ctap/large_blobs.rs (new file, 411 lines)

@@ -0,0 +1,411 @@
// Copyright 2020-2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::client_pin::{ClientPin, PinPermission};
use super::command::AuthenticatorLargeBlobsParameters;
use super::customization::MAX_MSG_SIZE;
use super::response::{AuthenticatorLargeBlobsResponse, ResponseData};
use super::status_code::Ctap2StatusCode;
use super::storage::PersistentStore;
use alloc::vec;
use alloc::vec::Vec;
use byteorder::{ByteOrder, LittleEndian};
use crypto::sha256::Sha256;
use crypto::Hash256;
/// The length of the truncated hash that is appended to the large blob data.
const TRUNCATED_HASH_LEN: usize = 16;
pub struct LargeBlobs {
buffer: Vec<u8>,
expected_length: usize,
expected_next_offset: usize,
}
/// Implements the logic for the AuthenticatorLargeBlobs command and keeps its state.
impl LargeBlobs {
pub fn new() -> LargeBlobs {
LargeBlobs {
buffer: Vec::new(),
expected_length: 0,
expected_next_offset: 0,
}
}
/// Process the large blob command.
pub fn process_command(
&mut self,
persistent_store: &mut PersistentStore,
client_pin: &mut ClientPin,
large_blobs_params: AuthenticatorLargeBlobsParameters,
) -> Result<ResponseData, Ctap2StatusCode> {
let AuthenticatorLargeBlobsParameters {
get,
set,
offset,
length,
pin_uv_auth_param,
pin_uv_auth_protocol,
} = large_blobs_params;
const MAX_FRAGMENT_LENGTH: usize = MAX_MSG_SIZE - 64;
if let Some(get) = get {
if get > MAX_FRAGMENT_LENGTH {
return Err(Ctap2StatusCode::CTAP1_ERR_INVALID_LENGTH);
}
let config = persistent_store.get_large_blob_array(offset, get)?;
return Ok(ResponseData::AuthenticatorLargeBlobs(Some(
AuthenticatorLargeBlobsResponse { config },
)));
}
if let Some(mut set) = set {
if set.len() > MAX_FRAGMENT_LENGTH {
return Err(Ctap2StatusCode::CTAP1_ERR_INVALID_LENGTH);
}
if offset == 0 {
// Checks for offset and length are already done in command.
self.expected_length =
length.ok_or(Ctap2StatusCode::CTAP1_ERR_INVALID_PARAMETER)?;
self.expected_next_offset = 0;
}
if offset != self.expected_next_offset {
return Err(Ctap2StatusCode::CTAP1_ERR_INVALID_SEQ);
}
if persistent_store.pin_hash()?.is_some() || persistent_store.has_always_uv()? {
let pin_uv_auth_param =
pin_uv_auth_param.ok_or(Ctap2StatusCode::CTAP2_ERR_PUAT_REQUIRED)?;
let pin_uv_auth_protocol =
pin_uv_auth_protocol.ok_or(Ctap2StatusCode::CTAP2_ERR_MISSING_PARAMETER)?;
let mut large_blob_data = vec![0xFF; 32];
large_blob_data.extend(&[0x0C, 0x00]);
let mut offset_bytes = [0u8; 4];
LittleEndian::write_u32(&mut offset_bytes, offset as u32);
large_blob_data.extend(&offset_bytes);
large_blob_data.extend(&Sha256::hash(set.as_slice()));
client_pin.verify_pin_uv_auth_token(
&large_blob_data,
&pin_uv_auth_param,
pin_uv_auth_protocol,
)?;
client_pin.has_permission(PinPermission::LargeBlobWrite)?;
}
if offset + set.len() > self.expected_length {
return Err(Ctap2StatusCode::CTAP1_ERR_INVALID_PARAMETER);
}
if offset == 0 {
self.buffer = Vec::with_capacity(self.expected_length);
}
self.buffer.append(&mut set);
self.expected_next_offset = self.buffer.len();
if self.expected_next_offset == self.expected_length {
self.expected_length = 0;
self.expected_next_offset = 0;
// Must be a positive number.
let buffer_hash_index = self.buffer.len() - TRUNCATED_HASH_LEN;
if Sha256::hash(&self.buffer[..buffer_hash_index])[..TRUNCATED_HASH_LEN]
!= self.buffer[buffer_hash_index..]
{
self.buffer = Vec::new();
return Err(Ctap2StatusCode::CTAP2_ERR_INTEGRITY_FAILURE);
}
persistent_store.commit_large_blob_array(&self.buffer)?;
self.buffer = Vec::new();
}
return Ok(ResponseData::AuthenticatorLargeBlobs(None));
}
// This should be unreachable, since the command has either get or set.
Err(Ctap2StatusCode::CTAP1_ERR_INVALID_PARAMETER)
}
}
#[cfg(test)]
mod test {
use super::super::data_formats::PinUvAuthProtocol;
use super::super::pin_protocol::authenticate_pin_uv_auth_token;
use super::*;
use crypto::rng256::ThreadRng256;
#[test]
fn test_process_command_get_empty() {
let mut rng = ThreadRng256 {};
let mut persistent_store = PersistentStore::new(&mut rng);
let key_agreement_key = crypto::ecdh::SecKey::gensk(&mut rng);
let pin_uv_auth_token = [0x55; 32];
let mut client_pin =
ClientPin::new_test(key_agreement_key, pin_uv_auth_token, PinUvAuthProtocol::V1);
let mut large_blobs = LargeBlobs::new();
let large_blob = vec![
0x80, 0x76, 0xBE, 0x8B, 0x52, 0x8D, 0x00, 0x75, 0xF7, 0xAA, 0xE9, 0x8D, 0x6F, 0xA5,
0x7A, 0x6D, 0x3C,
];
let large_blobs_params = AuthenticatorLargeBlobsParameters {
get: Some(large_blob.len()),
set: None,
offset: 0,
length: None,
pin_uv_auth_param: None,
pin_uv_auth_protocol: None,
};
let large_blobs_response =
large_blobs.process_command(&mut persistent_store, &mut client_pin, large_blobs_params);
match large_blobs_response.unwrap() {
ResponseData::AuthenticatorLargeBlobs(Some(response)) => {
assert_eq!(response.config, large_blob);
}
_ => panic!("Invalid response type"),
};
}
#[test]
fn test_process_command_commit_and_get() {
let mut rng = ThreadRng256 {};
let mut persistent_store = PersistentStore::new(&mut rng);
let key_agreement_key = crypto::ecdh::SecKey::gensk(&mut rng);
let pin_uv_auth_token = [0x55; 32];
let mut client_pin =
ClientPin::new_test(key_agreement_key, pin_uv_auth_token, PinUvAuthProtocol::V1);
let mut large_blobs = LargeBlobs::new();
const BLOB_LEN: usize = 200;
const DATA_LEN: usize = BLOB_LEN - TRUNCATED_HASH_LEN;
let mut large_blob = vec![0x1B; DATA_LEN];
large_blob.extend_from_slice(&Sha256::hash(&large_blob[..])[..TRUNCATED_HASH_LEN]);
let large_blobs_params = AuthenticatorLargeBlobsParameters {
get: None,
set: Some(large_blob[..BLOB_LEN / 2].to_vec()),
offset: 0,
length: Some(BLOB_LEN),
pin_uv_auth_param: None,
pin_uv_auth_protocol: None,
};
let large_blobs_response =
large_blobs.process_command(&mut persistent_store, &mut client_pin, large_blobs_params);
assert_eq!(
large_blobs_response,
Ok(ResponseData::AuthenticatorLargeBlobs(None))
);
let large_blobs_params = AuthenticatorLargeBlobsParameters {
get: None,
set: Some(large_blob[BLOB_LEN / 2..].to_vec()),
offset: BLOB_LEN / 2,
length: None,
pin_uv_auth_param: None,
pin_uv_auth_protocol: None,
};
let large_blobs_response =
large_blobs.process_command(&mut persistent_store, &mut client_pin, large_blobs_params);
assert_eq!(
large_blobs_response,
Ok(ResponseData::AuthenticatorLargeBlobs(None))
);
let large_blobs_params = AuthenticatorLargeBlobsParameters {
get: Some(BLOB_LEN),
set: None,
offset: 0,
length: None,
pin_uv_auth_param: None,
pin_uv_auth_protocol: None,
};
let large_blobs_response =
large_blobs.process_command(&mut persistent_store, &mut client_pin, large_blobs_params);
match large_blobs_response.unwrap() {
ResponseData::AuthenticatorLargeBlobs(Some(response)) => {
assert_eq!(response.config, large_blob);
}
_ => panic!("Invalid response type"),
};
}
#[test]
fn test_process_command_commit_unexpected_offset() {
let mut rng = ThreadRng256 {};
let mut persistent_store = PersistentStore::new(&mut rng);
let key_agreement_key = crypto::ecdh::SecKey::gensk(&mut rng);
let pin_uv_auth_token = [0x55; 32];
let mut client_pin =
ClientPin::new_test(key_agreement_key, pin_uv_auth_token, PinUvAuthProtocol::V1);
let mut large_blobs = LargeBlobs::new();
const BLOB_LEN: usize = 200;
const DATA_LEN: usize = BLOB_LEN - TRUNCATED_HASH_LEN;
let mut large_blob = vec![0x1B; DATA_LEN];
large_blob.extend_from_slice(&Sha256::hash(&large_blob[..])[..TRUNCATED_HASH_LEN]);
let large_blobs_params = AuthenticatorLargeBlobsParameters {
get: None,
set: Some(large_blob[..BLOB_LEN / 2].to_vec()),
offset: 0,
length: Some(BLOB_LEN),
pin_uv_auth_param: None,
pin_uv_auth_protocol: None,
};
let large_blobs_response =
large_blobs.process_command(&mut persistent_store, &mut client_pin, large_blobs_params);
assert_eq!(
large_blobs_response,
Ok(ResponseData::AuthenticatorLargeBlobs(None))
);
let large_blobs_params = AuthenticatorLargeBlobsParameters {
get: None,
set: Some(large_blob[BLOB_LEN / 2..].to_vec()),
// The offset is 1 too big.
offset: BLOB_LEN / 2 + 1,
length: None,
pin_uv_auth_param: None,
pin_uv_auth_protocol: None,
};
let large_blobs_response =
large_blobs.process_command(&mut persistent_store, &mut client_pin, large_blobs_params);
assert_eq!(
large_blobs_response,
Err(Ctap2StatusCode::CTAP1_ERR_INVALID_SEQ),
);
}
#[test]
fn test_process_command_commit_unexpected_length() {
let mut rng = ThreadRng256 {};
let mut persistent_store = PersistentStore::new(&mut rng);
let key_agreement_key = crypto::ecdh::SecKey::gensk(&mut rng);
let pin_uv_auth_token = [0x55; 32];
let mut client_pin =
ClientPin::new_test(key_agreement_key, pin_uv_auth_token, PinUvAuthProtocol::V1);
let mut large_blobs = LargeBlobs::new();
const BLOB_LEN: usize = 200;
const DATA_LEN: usize = BLOB_LEN - TRUNCATED_HASH_LEN;
let mut large_blob = vec![0x1B; DATA_LEN];
large_blob.extend_from_slice(&Sha256::hash(&large_blob[..])[..TRUNCATED_HASH_LEN]);
let large_blobs_params = AuthenticatorLargeBlobsParameters {
get: None,
set: Some(large_blob[..BLOB_LEN / 2].to_vec()),
offset: 0,
// The length is 1 too small.
length: Some(BLOB_LEN - 1),
pin_uv_auth_param: None,
pin_uv_auth_protocol: None,
};
let large_blobs_response =
large_blobs.process_command(&mut persistent_store, &mut client_pin, large_blobs_params);
assert_eq!(
large_blobs_response,
Ok(ResponseData::AuthenticatorLargeBlobs(None))
);
let large_blobs_params = AuthenticatorLargeBlobsParameters {
get: None,
set: Some(large_blob[BLOB_LEN / 2..].to_vec()),
offset: BLOB_LEN / 2,
length: None,
pin_uv_auth_param: None,
pin_uv_auth_protocol: None,
};
let large_blobs_response =
large_blobs.process_command(&mut persistent_store, &mut client_pin, large_blobs_params);
assert_eq!(
large_blobs_response,
Err(Ctap2StatusCode::CTAP1_ERR_INVALID_PARAMETER),
);
}
#[test]
fn test_process_command_commit_unexpected_hash() {
let mut rng = ThreadRng256 {};
let mut persistent_store = PersistentStore::new(&mut rng);
let key_agreement_key = crypto::ecdh::SecKey::gensk(&mut rng);
let pin_uv_auth_token = [0x55; 32];
let mut client_pin =
ClientPin::new_test(key_agreement_key, pin_uv_auth_token, PinUvAuthProtocol::V1);
let mut large_blobs = LargeBlobs::new();
const BLOB_LEN: usize = 20;
// This blob does not have an appropriate hash.
let large_blob = vec![0x1B; BLOB_LEN];
let large_blobs_params = AuthenticatorLargeBlobsParameters {
get: None,
set: Some(large_blob.to_vec()),
offset: 0,
length: Some(BLOB_LEN),
pin_uv_auth_param: None,
pin_uv_auth_protocol: None,
};
let large_blobs_response =
large_blobs.process_command(&mut persistent_store, &mut client_pin, large_blobs_params);
assert_eq!(
large_blobs_response,
Err(Ctap2StatusCode::CTAP2_ERR_INTEGRITY_FAILURE),
);
}
fn test_helper_process_command_commit_with_pin(pin_uv_auth_protocol: PinUvAuthProtocol) {
let mut rng = ThreadRng256 {};
let mut persistent_store = PersistentStore::new(&mut rng);
let key_agreement_key = crypto::ecdh::SecKey::gensk(&mut rng);
let pin_uv_auth_token = [0x55; 32];
let mut client_pin =
ClientPin::new_test(key_agreement_key, pin_uv_auth_token, pin_uv_auth_protocol);
let mut large_blobs = LargeBlobs::new();
const BLOB_LEN: usize = 20;
const DATA_LEN: usize = BLOB_LEN - TRUNCATED_HASH_LEN;
let mut large_blob = vec![0x1B; DATA_LEN];
large_blob.extend_from_slice(&Sha256::hash(&large_blob[..])[..TRUNCATED_HASH_LEN]);
persistent_store.set_pin(&[0u8; 16], 4).unwrap();
let mut large_blob_data = vec![0xFF; 32];
// Command constant and offset bytes.
large_blob_data.extend(&[0x0C, 0x00, 0x00, 0x00, 0x00, 0x00]);
large_blob_data.extend(&Sha256::hash(&large_blob));
let pin_uv_auth_param = authenticate_pin_uv_auth_token(
&pin_uv_auth_token,
&large_blob_data,
pin_uv_auth_protocol,
);
let large_blobs_params = AuthenticatorLargeBlobsParameters {
get: None,
set: Some(large_blob),
offset: 0,
length: Some(BLOB_LEN),
pin_uv_auth_param: Some(pin_uv_auth_param),
pin_uv_auth_protocol: Some(pin_uv_auth_protocol),
};
let large_blobs_response =
large_blobs.process_command(&mut persistent_store, &mut client_pin, large_blobs_params);
assert_eq!(
large_blobs_response,
Ok(ResponseData::AuthenticatorLargeBlobs(None))
);
}
#[test]
fn test_process_command_commit_with_pin_v1() {
test_helper_process_command_commit_with_pin(PinUvAuthProtocol::V1);
}
#[test]
fn test_process_command_commit_with_pin_v2() {
test_helper_process_command_commit_with_pin(PinUvAuthProtocol::V2);
}
}
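The serialized large-blob array committed above is the raw data followed by the first 16 bytes of its SHA-256 digest, and each authenticated fragment write covers the message 32 bytes of 0xFF, then 0x0C 0x00, then the little-endian offset, then SHA-256 of the fragment. A rough platform-side sketch of both framings, reusing the crypto::sha256::Sha256 API from this file (the helper names are illustrative, not part of the codebase):

// Append the truncated SHA-256 checksum that the integrity check above expects.
fn frame_large_blob_array(data: &[u8]) -> Vec<u8> {
    let mut framed = data.to_vec();
    framed.extend_from_slice(&Sha256::hash(data)[..TRUNCATED_HASH_LEN]);
    framed
}

// Build the message that pinUvAuthParam must authenticate for one fragment write.
fn large_blob_auth_message(offset: u32, fragment: &[u8]) -> Vec<u8> {
    let mut message = vec![0xFF; 32];
    message.extend(&[0x0C, 0x00]);
    message.extend(&offset.to_le_bytes());
    message.extend(&Sha256::hash(fragment));
    message
}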

File diff suppressed because it is too large

src/ctap/pin_protocol.rs (new file, 408 lines)

@@ -0,0 +1,408 @@
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::ctap::client_pin::PIN_TOKEN_LENGTH;
use crate::ctap::crypto_wrapper::{aes256_cbc_decrypt, aes256_cbc_encrypt};
use crate::ctap::data_formats::{CoseKey, PinUvAuthProtocol};
use crate::ctap::status_code::Ctap2StatusCode;
use alloc::boxed::Box;
use alloc::vec::Vec;
use core::convert::TryInto;
use crypto::hkdf::hkdf_empty_salt_256;
#[cfg(test)]
use crypto::hmac::hmac_256;
use crypto::hmac::{verify_hmac_256, verify_hmac_256_first_128bits};
use crypto::rng256::Rng256;
use crypto::sha256::Sha256;
use crypto::Hash256;
/// Implements common functions between existing PIN protocols for handshakes.
pub struct PinProtocol {
key_agreement_key: crypto::ecdh::SecKey,
pin_uv_auth_token: [u8; PIN_TOKEN_LENGTH],
}
impl PinProtocol {
/// This process is run by the authenticator at power-on.
///
/// This function implements "initialize" from the specification.
pub fn new(rng: &mut impl Rng256) -> PinProtocol {
let key_agreement_key = crypto::ecdh::SecKey::gensk(rng);
let pin_uv_auth_token = rng.gen_uniform_u8x32();
PinProtocol {
key_agreement_key,
pin_uv_auth_token,
}
}
/// Generates a fresh public key.
pub fn regenerate(&mut self, rng: &mut impl Rng256) {
self.key_agreement_key = crypto::ecdh::SecKey::gensk(rng);
}
/// Generates a fresh pinUvAuthToken.
pub fn reset_pin_uv_auth_token(&mut self, rng: &mut impl Rng256) {
self.pin_uv_auth_token = rng.gen_uniform_u8x32();
}
/// Returns the authenticator's public key as a CoseKey structure.
pub fn get_public_key(&self) -> CoseKey {
CoseKey::from(self.key_agreement_key.genpk())
}
/// Processes the peer's encapsulated CoseKey and returns the shared secret.
pub fn decapsulate(
&self,
peer_cose_key: CoseKey,
pin_uv_auth_protocol: PinUvAuthProtocol,
) -> Result<Box<dyn SharedSecret>, Ctap2StatusCode> {
let pk: crypto::ecdh::PubKey = CoseKey::try_into(peer_cose_key)?;
let handshake = self.key_agreement_key.exchange_x(&pk);
match pin_uv_auth_protocol {
PinUvAuthProtocol::V1 => Ok(Box::new(SharedSecretV1::new(handshake))),
PinUvAuthProtocol::V2 => Ok(Box::new(SharedSecretV2::new(handshake))),
}
}
/// Getter for pinUvAuthToken.
pub fn get_pin_uv_auth_token(&self) -> &[u8; PIN_TOKEN_LENGTH] {
&self.pin_uv_auth_token
}
/// This is used for debugging to inject key material.
#[cfg(test)]
pub fn new_test(
key_agreement_key: crypto::ecdh::SecKey,
pin_uv_auth_token: [u8; PIN_TOKEN_LENGTH],
) -> PinProtocol {
PinProtocol {
key_agreement_key,
pin_uv_auth_token,
}
}
}
/// Authenticates the pinUvAuthToken for the given PIN protocol.
#[cfg(test)]
pub fn authenticate_pin_uv_auth_token(
token: &[u8; PIN_TOKEN_LENGTH],
message: &[u8],
pin_uv_auth_protocol: PinUvAuthProtocol,
) -> Vec<u8> {
match pin_uv_auth_protocol {
PinUvAuthProtocol::V1 => hmac_256::<Sha256>(token, message)[..16].to_vec(),
PinUvAuthProtocol::V2 => hmac_256::<Sha256>(token, message).to_vec(),
}
}
/// Verifies the pinUvAuthToken for the given PIN protocol.
pub fn verify_pin_uv_auth_token(
token: &[u8; PIN_TOKEN_LENGTH],
message: &[u8],
signature: &[u8],
pin_uv_auth_protocol: PinUvAuthProtocol,
) -> Result<(), Ctap2StatusCode> {
match pin_uv_auth_protocol {
PinUvAuthProtocol::V1 => verify_v1(token, message, signature),
PinUvAuthProtocol::V2 => verify_v2(token, message, signature),
}
}
pub trait SharedSecret {
/// Returns the encrypted plaintext.
fn encrypt(&self, rng: &mut dyn Rng256, plaintext: &[u8]) -> Result<Vec<u8>, Ctap2StatusCode>;
/// Returns the decrypted ciphertext.
fn decrypt(&self, ciphertext: &[u8]) -> Result<Vec<u8>, Ctap2StatusCode>;
/// Verifies that the signature is a valid MAC for the given message.
fn verify(&self, message: &[u8], signature: &[u8]) -> Result<(), Ctap2StatusCode>;
/// Creates a signature that matches verify.
#[cfg(test)]
fn authenticate(&self, message: &[u8]) -> Vec<u8>;
}
fn verify_v1(key: &[u8], message: &[u8], signature: &[u8]) -> Result<(), Ctap2StatusCode> {
if signature.len() != 16 {
return Err(Ctap2StatusCode::CTAP1_ERR_INVALID_PARAMETER);
}
if verify_hmac_256_first_128bits::<Sha256>(key, message, array_ref![signature, 0, 16]) {
Ok(())
} else {
Err(Ctap2StatusCode::CTAP2_ERR_PIN_AUTH_INVALID)
}
}
fn verify_v2(key: &[u8], message: &[u8], signature: &[u8]) -> Result<(), Ctap2StatusCode> {
if signature.len() != 32 {
return Err(Ctap2StatusCode::CTAP1_ERR_INVALID_PARAMETER);
}
if verify_hmac_256::<Sha256>(key, message, array_ref![signature, 0, 32]) {
Ok(())
} else {
Err(Ctap2StatusCode::CTAP2_ERR_PIN_AUTH_INVALID)
}
}
pub struct SharedSecretV1 {
common_secret: [u8; 32],
aes_enc_key: crypto::aes256::EncryptionKey,
}
impl SharedSecretV1 {
/// Creates a new shared secret from the handshake result.
fn new(handshake: [u8; 32]) -> SharedSecretV1 {
let common_secret = Sha256::hash(&handshake);
let aes_enc_key = crypto::aes256::EncryptionKey::new(&common_secret);
SharedSecretV1 {
common_secret,
aes_enc_key,
}
}
}
impl SharedSecret for SharedSecretV1 {
fn encrypt(&self, rng: &mut dyn Rng256, plaintext: &[u8]) -> Result<Vec<u8>, Ctap2StatusCode> {
aes256_cbc_encrypt(rng, &self.aes_enc_key, plaintext, false)
}
fn decrypt(&self, ciphertext: &[u8]) -> Result<Vec<u8>, Ctap2StatusCode> {
aes256_cbc_decrypt(&self.aes_enc_key, ciphertext, false)
}
fn verify(&self, message: &[u8], signature: &[u8]) -> Result<(), Ctap2StatusCode> {
verify_v1(&self.common_secret, message, signature)
}
#[cfg(test)]
fn authenticate(&self, message: &[u8]) -> Vec<u8> {
hmac_256::<Sha256>(&self.common_secret, message)[..16].to_vec()
}
}
pub struct SharedSecretV2 {
aes_enc_key: crypto::aes256::EncryptionKey,
hmac_key: [u8; 32],
}
impl SharedSecretV2 {
/// Creates a new shared secret from the handshake result.
fn new(handshake: [u8; 32]) -> SharedSecretV2 {
let aes_key = hkdf_empty_salt_256::<Sha256>(&handshake, b"CTAP2 AES key");
SharedSecretV2 {
aes_enc_key: crypto::aes256::EncryptionKey::new(&aes_key),
hmac_key: hkdf_empty_salt_256::<Sha256>(&handshake, b"CTAP2 HMAC key"),
}
}
}
impl SharedSecret for SharedSecretV2 {
fn encrypt(&self, rng: &mut dyn Rng256, plaintext: &[u8]) -> Result<Vec<u8>, Ctap2StatusCode> {
aes256_cbc_encrypt(rng, &self.aes_enc_key, plaintext, true)
}
fn decrypt(&self, ciphertext: &[u8]) -> Result<Vec<u8>, Ctap2StatusCode> {
aes256_cbc_decrypt(&self.aes_enc_key, ciphertext, true)
}
fn verify(&self, message: &[u8], signature: &[u8]) -> Result<(), Ctap2StatusCode> {
verify_v2(&self.hmac_key, message, signature)
}
#[cfg(test)]
fn authenticate(&self, message: &[u8]) -> Vec<u8> {
hmac_256::<Sha256>(&self.hmac_key, message).to_vec()
}
}
#[cfg(test)]
mod test {
use super::*;
use crypto::rng256::ThreadRng256;
#[test]
fn test_pin_protocol_public_key() {
let mut rng = ThreadRng256 {};
let mut pin_protocol = PinProtocol::new(&mut rng);
let public_key = pin_protocol.get_public_key();
pin_protocol.regenerate(&mut rng);
let new_public_key = pin_protocol.get_public_key();
assert_ne!(public_key, new_public_key);
}
#[test]
fn test_pin_protocol_pin_uv_auth_token() {
let mut rng = ThreadRng256 {};
let mut pin_protocol = PinProtocol::new(&mut rng);
let token = *pin_protocol.get_pin_uv_auth_token();
pin_protocol.reset_pin_uv_auth_token(&mut rng);
let new_token = pin_protocol.get_pin_uv_auth_token();
assert_ne!(&token, new_token);
}
#[test]
fn test_shared_secret_v1_encrypt_decrypt() {
let mut rng = ThreadRng256 {};
let shared_secret = SharedSecretV1::new([0x55; 32]);
let plaintext = vec![0xAA; 64];
let ciphertext = shared_secret.encrypt(&mut rng, &plaintext).unwrap();
assert_eq!(shared_secret.decrypt(&ciphertext), Ok(plaintext));
}
#[test]
fn test_shared_secret_v1_authenticate_verify() {
let shared_secret = SharedSecretV1::new([0x55; 32]);
let message = [0xAA; 32];
let signature = shared_secret.authenticate(&message);
assert_eq!(shared_secret.verify(&message, &signature), Ok(()));
}
#[test]
fn test_shared_secret_v1_verify() {
let shared_secret = SharedSecretV1::new([0x55; 32]);
let message = [0xAA];
let signature = [
0x8B, 0x60, 0x15, 0x7D, 0xF3, 0x44, 0x82, 0x2E, 0x54, 0x34, 0x7A, 0x01, 0xFB, 0x02,
0x48, 0xA6,
];
assert_eq!(shared_secret.verify(&message, &signature), Ok(()));
assert_eq!(
shared_secret.verify(&[0xBB], &signature),
Err(Ctap2StatusCode::CTAP2_ERR_PIN_AUTH_INVALID)
);
assert_eq!(
shared_secret.verify(&message, &[0x12; 16]),
Err(Ctap2StatusCode::CTAP2_ERR_PIN_AUTH_INVALID)
);
}
#[test]
fn test_shared_secret_v2_encrypt_decrypt() {
let mut rng = ThreadRng256 {};
let shared_secret = SharedSecretV2::new([0x55; 32]);
let plaintext = vec![0xAA; 64];
let ciphertext = shared_secret.encrypt(&mut rng, &plaintext).unwrap();
assert_eq!(shared_secret.decrypt(&ciphertext), Ok(plaintext));
}
#[test]
fn test_shared_secret_v2_authenticate_verify() {
let shared_secret = SharedSecretV2::new([0x55; 32]);
let message = [0xAA; 32];
let signature = shared_secret.authenticate(&message);
assert_eq!(shared_secret.verify(&message, &signature), Ok(()));
}
#[test]
fn test_shared_secret_v2_verify() {
let shared_secret = SharedSecretV2::new([0x55; 32]);
let message = [0xAA];
let signature = [
0xC0, 0x3F, 0x2A, 0x22, 0x5C, 0xC3, 0x4E, 0x05, 0xC1, 0x0E, 0x72, 0x9C, 0x8D, 0xD5,
0x7D, 0xE5, 0x98, 0x9C, 0x68, 0x15, 0xEC, 0xE2, 0x3A, 0x95, 0xD5, 0x90, 0xE1, 0xE9,
0x3F, 0xF0, 0x1A, 0xAF,
];
assert_eq!(shared_secret.verify(&message, &signature), Ok(()));
assert_eq!(
shared_secret.verify(&[0xBB], &signature),
Err(Ctap2StatusCode::CTAP2_ERR_PIN_AUTH_INVALID)
);
assert_eq!(
shared_secret.verify(&message, &[0x12; 32]),
Err(Ctap2StatusCode::CTAP2_ERR_PIN_AUTH_INVALID)
);
}
#[test]
fn test_decapsulate_symmetric() {
let mut rng = ThreadRng256 {};
let pin_protocol1 = PinProtocol::new(&mut rng);
let pin_protocol2 = PinProtocol::new(&mut rng);
for &protocol in &[PinUvAuthProtocol::V1, PinUvAuthProtocol::V2] {
let shared_secret1 = pin_protocol1
.decapsulate(pin_protocol2.get_public_key(), protocol)
.unwrap();
let shared_secret2 = pin_protocol2
.decapsulate(pin_protocol1.get_public_key(), protocol)
.unwrap();
let plaintext = vec![0xAA; 64];
let ciphertext = shared_secret1.encrypt(&mut rng, &plaintext).unwrap();
assert_eq!(plaintext, shared_secret2.decrypt(&ciphertext).unwrap());
}
}
#[test]
fn test_verify_pin_uv_auth_token_v1() {
let token = [0x91; PIN_TOKEN_LENGTH];
let message = [0xAA];
let signature = [
0x9C, 0x1C, 0xFE, 0x9D, 0xD7, 0x64, 0x6A, 0x06, 0xB9, 0xA8, 0x0F, 0x96, 0xAD, 0x50,
0x49, 0x68,
];
assert_eq!(
verify_pin_uv_auth_token(&token, &message, &signature, PinUvAuthProtocol::V1),
Ok(())
);
assert_eq!(
verify_pin_uv_auth_token(
&[0x12; PIN_TOKEN_LENGTH],
&message,
&signature,
PinUvAuthProtocol::V1
),
Err(Ctap2StatusCode::CTAP2_ERR_PIN_AUTH_INVALID)
);
assert_eq!(
verify_pin_uv_auth_token(&token, &[0xBB], &signature, PinUvAuthProtocol::V1),
Err(Ctap2StatusCode::CTAP2_ERR_PIN_AUTH_INVALID)
);
assert_eq!(
verify_pin_uv_auth_token(&token, &message, &[0x12; 16], PinUvAuthProtocol::V1),
Err(Ctap2StatusCode::CTAP2_ERR_PIN_AUTH_INVALID)
);
}
#[test]
fn test_verify_pin_uv_auth_token_v2() {
let token = [0x91; PIN_TOKEN_LENGTH];
let message = [0xAA];
let signature = [
0x9C, 0x1C, 0xFE, 0x9D, 0xD7, 0x64, 0x6A, 0x06, 0xB9, 0xA8, 0x0F, 0x96, 0xAD, 0x50,
0x49, 0x68, 0x94, 0x90, 0x20, 0x53, 0x0F, 0xA3, 0xD2, 0x7A, 0x9F, 0xFD, 0xFA, 0x62,
0x36, 0x93, 0xF7, 0x84,
];
assert_eq!(
verify_pin_uv_auth_token(&token, &message, &signature, PinUvAuthProtocol::V2),
Ok(())
);
assert_eq!(
verify_pin_uv_auth_token(
&[0x12; PIN_TOKEN_LENGTH],
&message,
&signature,
PinUvAuthProtocol::V2
),
Err(Ctap2StatusCode::CTAP2_ERR_PIN_AUTH_INVALID)
);
assert_eq!(
verify_pin_uv_auth_token(&token, &[0xBB], &signature, PinUvAuthProtocol::V2),
Err(Ctap2StatusCode::CTAP2_ERR_PIN_AUTH_INVALID)
);
assert_eq!(
verify_pin_uv_auth_token(&token, &message, &[0x12; 32], PinUvAuthProtocol::V2),
Err(Ctap2StatusCode::CTAP2_ERR_PIN_AUTH_INVALID)
);
}
}
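For reference, the V2 key schedule above amounts to two HKDF-SHA-256 derivations of the ECDH x-coordinate with a 32-byte all-zero salt and the infos "CTAP2 AES key" and "CTAP2 HMAC key". A hedged sketch of the presumed equivalent using the hkdf and sha2 crates (an assumption about hkdf_empty_salt_256's behavior, not code from this repository):

use hkdf::Hkdf;
use sha2::Sha256;

// Derive the two 32-byte keys that SharedSecretV2 uses from the ECDH shared x-coordinate.
fn derive_v2_keys(handshake: &[u8; 32]) -> ([u8; 32], [u8; 32]) {
    let hk = Hkdf::<Sha256>::new(Some(&[0u8; 32]), handshake);
    let mut aes_key = [0u8; 32];
    let mut hmac_key = [0u8; 32];
    hk.expand(b"CTAP2 AES key", &mut aes_key).unwrap();
    hk.expand(b"CTAP2 HMAC key", &mut hmac_key).unwrap();
    (aes_key, hmac_key)
}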

File diff suppressed because it is too large


@@ -1,4 +1,4 @@
-// Copyright 2019 Google LLC
+// Copyright 2019-2021 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -12,19 +12,16 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
#[cfg(feature = "with_ctap2_1")]
use super::data_formats::{AuthenticatorTransport, PublicKeyCredentialParameter};
use super::data_formats::{ use super::data_formats::{
CoseKey, CredentialProtectionPolicy, PackedAttestationStatement, PublicKeyCredentialDescriptor, AuthenticatorTransport, CoseKey, CredentialProtectionPolicy, PackedAttestationStatement,
PublicKeyCredentialDescriptor, PublicKeyCredentialParameter, PublicKeyCredentialRpEntity,
PublicKeyCredentialUserEntity, PublicKeyCredentialUserEntity,
}; };
use alloc::collections::BTreeMap;
use alloc::string::String; use alloc::string::String;
use alloc::vec::Vec; use alloc::vec::Vec;
use cbor::{cbor_array_vec, cbor_bool, cbor_map_btree, cbor_map_options, cbor_text}; use cbor::{cbor_array_vec, cbor_bool, cbor_int, cbor_map_collection, cbor_map_options, cbor_text};
#[cfg_attr(test, derive(PartialEq))] #[derive(Debug, PartialEq)]
#[cfg_attr(any(test, feature = "debug_ctap"), derive(Debug))]
pub enum ResponseData { pub enum ResponseData {
AuthenticatorMakeCredential(AuthenticatorMakeCredentialResponse), AuthenticatorMakeCredential(AuthenticatorMakeCredentialResponse),
AuthenticatorGetAssertion(AuthenticatorGetAssertionResponse), AuthenticatorGetAssertion(AuthenticatorGetAssertionResponse),
@@ -32,8 +29,11 @@ pub enum ResponseData {
AuthenticatorGetInfo(AuthenticatorGetInfoResponse), AuthenticatorGetInfo(AuthenticatorGetInfoResponse),
AuthenticatorClientPin(Option<AuthenticatorClientPinResponse>), AuthenticatorClientPin(Option<AuthenticatorClientPinResponse>),
AuthenticatorReset, AuthenticatorReset,
#[cfg(feature = "with_ctap2_1")] AuthenticatorCredentialManagement(Option<AuthenticatorCredentialManagementResponse>),
AuthenticatorSelection, AuthenticatorSelection,
AuthenticatorLargeBlobs(Option<AuthenticatorLargeBlobsResponse>),
// TODO(kaczmarczyck) dummy, extend
AuthenticatorConfig,
AuthenticatorVendor(AuthenticatorVendorResponse), AuthenticatorVendor(AuthenticatorVendorResponse),
} }
@@ -44,22 +44,24 @@ impl From<ResponseData> for Option<cbor::Value> {
ResponseData::AuthenticatorGetAssertion(data) => Some(data.into()), ResponseData::AuthenticatorGetAssertion(data) => Some(data.into()),
ResponseData::AuthenticatorGetNextAssertion(data) => Some(data.into()), ResponseData::AuthenticatorGetNextAssertion(data) => Some(data.into()),
ResponseData::AuthenticatorGetInfo(data) => Some(data.into()), ResponseData::AuthenticatorGetInfo(data) => Some(data.into()),
ResponseData::AuthenticatorClientPin(Some(data)) => Some(data.into()), ResponseData::AuthenticatorClientPin(data) => data.map(|d| d.into()),
ResponseData::AuthenticatorClientPin(None) => None,
ResponseData::AuthenticatorReset => None, ResponseData::AuthenticatorReset => None,
#[cfg(feature = "with_ctap2_1")] ResponseData::AuthenticatorCredentialManagement(data) => data.map(|d| d.into()),
ResponseData::AuthenticatorSelection => None, ResponseData::AuthenticatorSelection => None,
ResponseData::AuthenticatorLargeBlobs(data) => data.map(|d| d.into()),
ResponseData::AuthenticatorConfig => None,
ResponseData::AuthenticatorVendor(data) => Some(data.into()), ResponseData::AuthenticatorVendor(data) => Some(data.into()),
} }
} }
} }
#[cfg_attr(test, derive(PartialEq))] #[derive(Debug, PartialEq)]
#[cfg_attr(any(test, feature = "debug_ctap"), derive(Debug))]
pub struct AuthenticatorMakeCredentialResponse { pub struct AuthenticatorMakeCredentialResponse {
pub fmt: String, pub fmt: String,
pub auth_data: Vec<u8>, pub auth_data: Vec<u8>,
pub att_stmt: PackedAttestationStatement, pub att_stmt: PackedAttestationStatement,
pub ep_att: Option<bool>,
pub large_blob_key: Option<Vec<u8>>,
} }
impl From<AuthenticatorMakeCredentialResponse> for cbor::Value { impl From<AuthenticatorMakeCredentialResponse> for cbor::Value {
@@ -68,24 +70,29 @@ impl From<AuthenticatorMakeCredentialResponse> for cbor::Value {
fmt, fmt,
auth_data, auth_data,
att_stmt, att_stmt,
ep_att,
large_blob_key,
} = make_credential_response; } = make_credential_response;
cbor_map_options! { cbor_map_options! {
1 => fmt, 0x01 => fmt,
2 => auth_data, 0x02 => auth_data,
3 => att_stmt, 0x03 => att_stmt,
0x04 => ep_att,
0x05 => large_blob_key,
} }
} }
} }
#[cfg_attr(test, derive(PartialEq))] #[derive(Debug, PartialEq)]
#[cfg_attr(any(test, feature = "debug_ctap"), derive(Debug))]
pub struct AuthenticatorGetAssertionResponse { pub struct AuthenticatorGetAssertionResponse {
pub credential: Option<PublicKeyCredentialDescriptor>, pub credential: Option<PublicKeyCredentialDescriptor>,
pub auth_data: Vec<u8>, pub auth_data: Vec<u8>,
pub signature: Vec<u8>, pub signature: Vec<u8>,
pub user: Option<PublicKeyCredentialUserEntity>, pub user: Option<PublicKeyCredentialUserEntity>,
pub number_of_credentials: Option<u64>, pub number_of_credentials: Option<u64>,
// 0x06: userSelected missing as we don't support displays.
pub large_blob_key: Option<Vec<u8>>,
} }
impl From<AuthenticatorGetAssertionResponse> for cbor::Value { impl From<AuthenticatorGetAssertionResponse> for cbor::Value {
@@ -96,45 +103,49 @@ impl From<AuthenticatorGetAssertionResponse> for cbor::Value {
signature, signature,
user, user,
number_of_credentials, number_of_credentials,
large_blob_key,
} = get_assertion_response; } = get_assertion_response;
cbor_map_options! { cbor_map_options! {
1 => credential, 0x01 => credential,
2 => auth_data, 0x02 => auth_data,
3 => signature, 0x03 => signature,
4 => user, 0x04 => user,
5 => number_of_credentials, 0x05 => number_of_credentials,
0x07 => large_blob_key,
} }
} }
} }
#[cfg_attr(test, derive(PartialEq))] #[derive(Debug, PartialEq)]
#[cfg_attr(any(test, feature = "debug_ctap"), derive(Debug))]
pub struct AuthenticatorGetInfoResponse { pub struct AuthenticatorGetInfoResponse {
// TODO(kaczmarczyck) add maxAuthenticatorConfigLength and defaultCredProtect
pub versions: Vec<String>, pub versions: Vec<String>,
pub extensions: Option<Vec<String>>, pub extensions: Option<Vec<String>>,
pub aaguid: [u8; 16], pub aaguid: [u8; 16],
pub options: Option<BTreeMap<String, bool>>, pub options: Option<Vec<(String, bool)>>,
pub max_msg_size: Option<u64>, pub max_msg_size: Option<u64>,
pub pin_protocols: Option<Vec<u64>>, pub pin_protocols: Option<Vec<u64>>,
#[cfg(feature = "with_ctap2_1")]
pub max_credential_count_in_list: Option<u64>, pub max_credential_count_in_list: Option<u64>,
#[cfg(feature = "with_ctap2_1")]
pub max_credential_id_length: Option<u64>, pub max_credential_id_length: Option<u64>,
#[cfg(feature = "with_ctap2_1")]
pub transports: Option<Vec<AuthenticatorTransport>>, pub transports: Option<Vec<AuthenticatorTransport>>,
#[cfg(feature = "with_ctap2_1")]
pub algorithms: Option<Vec<PublicKeyCredentialParameter>>, pub algorithms: Option<Vec<PublicKeyCredentialParameter>>,
pub default_cred_protect: Option<CredentialProtectionPolicy>, pub max_serialized_large_blob_array: Option<u64>,
#[cfg(feature = "with_ctap2_1")] pub force_pin_change: Option<bool>,
pub min_pin_length: u8, pub min_pin_length: u8,
#[cfg(feature = "with_ctap2_1")]
pub firmware_version: Option<u64>, pub firmware_version: Option<u64>,
pub max_cred_blob_length: Option<u64>,
pub max_rp_ids_for_set_min_pin_length: Option<u64>,
// Missing response fields as they are only relevant for internal UV:
// - 0x11: preferredPlatformUvAttempts
// - 0x12: uvModality
// Add them when your hardware supports any kind of user verification within
// the boundary of the device, e.g. fingerprint or built-in keyboard.
pub certifications: Option<Vec<(String, i64)>>,
pub remaining_discoverable_credentials: Option<u64>,
// - 0x15: vendorPrototypeConfigCommands missing as we don't support it.
} }
impl From<AuthenticatorGetInfoResponse> for cbor::Value { impl From<AuthenticatorGetInfoResponse> for cbor::Value {
#[cfg(feature = "with_ctap2_1")]
fn from(get_info_response: AuthenticatorGetInfoResponse) -> Self { fn from(get_info_response: AuthenticatorGetInfoResponse) -> Self {
let AuthenticatorGetInfoResponse { let AuthenticatorGetInfoResponse {
versions, versions,
@@ -147,17 +158,30 @@ impl From<AuthenticatorGetInfoResponse> for cbor::Value {
max_credential_id_length, max_credential_id_length,
transports, transports,
algorithms, algorithms,
default_cred_protect, max_serialized_large_blob_array,
force_pin_change,
min_pin_length, min_pin_length,
firmware_version, firmware_version,
max_cred_blob_length,
max_rp_ids_for_set_min_pin_length,
certifications,
remaining_discoverable_credentials,
} = get_info_response; } = get_info_response;
let options_cbor: Option<cbor::Value> = options.map(|options| { let options_cbor: Option<cbor::Value> = options.map(|options| {
let option_map: BTreeMap<_, _> = options let options_map: Vec<(_, _)> = options
.into_iter() .into_iter()
.map(|(key, value)| (cbor_text!(key), cbor_bool!(value))) .map(|(key, value)| (cbor_text!(key), cbor_bool!(value)))
.collect(); .collect();
cbor_map_btree!(option_map) cbor_map_collection!(options_map)
});
let certifications_cbor: Option<cbor::Value> = certifications.map(|certifications| {
let certifications_map: Vec<(_, _)> = certifications
.into_iter()
.map(|(key, value)| (cbor_text!(key), cbor_int!(value)))
.collect();
cbor_map_collection!(certifications_map)
}); });
cbor_map_options! { cbor_map_options! {
@@ -171,70 +195,108 @@ impl From<AuthenticatorGetInfoResponse> for cbor::Value {
0x08 => max_credential_id_length, 0x08 => max_credential_id_length,
0x09 => transports.map(|vec| cbor_array_vec!(vec)), 0x09 => transports.map(|vec| cbor_array_vec!(vec)),
0x0A => algorithms.map(|vec| cbor_array_vec!(vec)), 0x0A => algorithms.map(|vec| cbor_array_vec!(vec)),
0x0C => default_cred_protect.map(|p| p as u64), 0x0B => max_serialized_large_blob_array,
0x0C => force_pin_change,
0x0D => min_pin_length as u64, 0x0D => min_pin_length as u64,
0x0E => firmware_version, 0x0E => firmware_version,
} 0x0F => max_cred_blob_length,
} 0x10 => max_rp_ids_for_set_min_pin_length,
0x13 => certifications_cbor,
#[cfg(not(feature = "with_ctap2_1"))] 0x14 => remaining_discoverable_credentials,
fn from(get_info_response: AuthenticatorGetInfoResponse) -> Self {
let AuthenticatorGetInfoResponse {
versions,
extensions,
aaguid,
options,
max_msg_size,
pin_protocols,
default_cred_protect,
} = get_info_response;
let options_cbor: Option<cbor::Value> = options.map(|options| {
let option_map: BTreeMap<_, _> = options
.into_iter()
.map(|(key, value)| (cbor_text!(key), cbor_bool!(value)))
.collect();
cbor_map_btree!(option_map)
});
cbor_map_options! {
0x01 => cbor_array_vec!(versions),
0x02 => extensions.map(|vec| cbor_array_vec!(vec)),
0x03 => &aaguid,
0x04 => options_cbor,
0x05 => max_msg_size,
0x06 => pin_protocols.map(|vec| cbor_array_vec!(vec)),
0x0C => default_cred_protect.map(|p| p as u64),
} }
} }
} }
#[cfg_attr(test, derive(PartialEq))] #[derive(Debug, PartialEq)]
#[cfg_attr(any(test, feature = "debug_ctap"), derive(Debug))]
pub struct AuthenticatorClientPinResponse { pub struct AuthenticatorClientPinResponse {
pub key_agreement: Option<CoseKey>, pub key_agreement: Option<CoseKey>,
pub pin_token: Option<Vec<u8>>, pub pin_uv_auth_token: Option<Vec<u8>>,
pub retries: Option<u64>, pub retries: Option<u64>,
pub power_cycle_state: Option<bool>,
// - 0x05: uvRetries missing as we don't support internal UV.
} }
impl From<AuthenticatorClientPinResponse> for cbor::Value { impl From<AuthenticatorClientPinResponse> for cbor::Value {
fn from(client_pin_response: AuthenticatorClientPinResponse) -> Self { fn from(client_pin_response: AuthenticatorClientPinResponse) -> Self {
let AuthenticatorClientPinResponse { let AuthenticatorClientPinResponse {
key_agreement, key_agreement,
pin_token, pin_uv_auth_token,
retries, retries,
power_cycle_state,
} = client_pin_response; } = client_pin_response;
cbor_map_options! { cbor_map_options! {
1 => key_agreement.map(|cose_key| cbor_map_btree!(cose_key.0)), 0x01 => key_agreement.map(cbor::Value::from),
2 => pin_token, 0x02 => pin_uv_auth_token,
3 => retries, 0x03 => retries,
0x04 => power_cycle_state,
} }
} }
} }
#[cfg_attr(test, derive(PartialEq))] #[derive(Debug, PartialEq)]
#[cfg_attr(any(test, feature = "debug_ctap"), derive(Debug))] pub struct AuthenticatorLargeBlobsResponse {
pub config: Vec<u8>,
}
impl From<AuthenticatorLargeBlobsResponse> for cbor::Value {
fn from(platform_large_blobs_response: AuthenticatorLargeBlobsResponse) -> Self {
let AuthenticatorLargeBlobsResponse { config } = platform_large_blobs_response;
cbor_map_options! {
0x01 => config,
}
}
}
#[derive(Debug, Default, PartialEq)]
pub struct AuthenticatorCredentialManagementResponse {
pub existing_resident_credentials_count: Option<u64>,
pub max_possible_remaining_resident_credentials_count: Option<u64>,
pub rp: Option<PublicKeyCredentialRpEntity>,
pub rp_id_hash: Option<Vec<u8>>,
pub total_rps: Option<u64>,
pub user: Option<PublicKeyCredentialUserEntity>,
pub credential_id: Option<PublicKeyCredentialDescriptor>,
pub public_key: Option<CoseKey>,
pub total_credentials: Option<u64>,
pub cred_protect: Option<CredentialProtectionPolicy>,
pub large_blob_key: Option<Vec<u8>>,
}
impl From<AuthenticatorCredentialManagementResponse> for cbor::Value {
fn from(cred_management_response: AuthenticatorCredentialManagementResponse) -> Self {
let AuthenticatorCredentialManagementResponse {
existing_resident_credentials_count,
max_possible_remaining_resident_credentials_count,
rp,
rp_id_hash,
total_rps,
user,
credential_id,
public_key,
total_credentials,
cred_protect,
large_blob_key,
} = cred_management_response;
cbor_map_options! {
0x01 => existing_resident_credentials_count,
0x02 => max_possible_remaining_resident_credentials_count,
0x03 => rp,
0x04 => rp_id_hash,
0x05 => total_rps,
0x06 => user,
0x07 => credential_id,
0x08 => public_key.map(cbor::Value::from),
0x09 => total_credentials,
0x0A => cred_protect,
0x0B => large_blob_key,
}
}
}
#[derive(Debug, PartialEq)]
pub struct AuthenticatorVendorResponse { pub struct AuthenticatorVendorResponse {
pub cert_programmed: bool, pub cert_programmed: bool,
pub pkey_programmed: bool, pub pkey_programmed: bool,
@@ -248,19 +310,19 @@ impl From<AuthenticatorVendorResponse> for cbor::Value {
} = vendor_response; } = vendor_response;
cbor_map_options! { cbor_map_options! {
1 => cert_programmed, 0x01 => cert_programmed,
2 => pkey_programmed, 0x02 => pkey_programmed,
} }
} }
} }
#[cfg(test)] #[cfg(test)]
mod test { mod test {
use super::super::data_formats::PackedAttestationStatement; use super::super::data_formats::{PackedAttestationStatement, PublicKeyCredentialType};
#[cfg(feature = "with_ctap2_1")]
use super::super::ES256_CRED_PARAM; use super::super::ES256_CRED_PARAM;
use super::*; use super::*;
use cbor::{cbor_bytes, cbor_map}; use cbor::{cbor_array, cbor_bytes, cbor_map};
use crypto::rng256::ThreadRng256;
#[test] #[test]
fn test_make_credential_into_cbor() { fn test_make_credential_into_cbor() {
@@ -274,7 +336,7 @@ mod test {
let cbor_packed_attestation_statement = cbor_map! { let cbor_packed_attestation_statement = cbor_map! {
"alg" => 1, "alg" => 1,
"sig" => vec![0x55, 0x55, 0x55, 0x55], "sig" => vec![0x55, 0x55, 0x55, 0x55],
"x5c" => cbor_array_vec![vec![certificate]], "x5c" => cbor_array![certificate],
"ecdaaKeyId" => vec![0xEC, 0xDA, 0x1D], "ecdaaKeyId" => vec![0xEC, 0xDA, 0x1D],
}; };
@@ -282,31 +344,60 @@ mod test {
fmt: "packed".to_string(), fmt: "packed".to_string(),
auth_data: vec![0xAD], auth_data: vec![0xAD],
att_stmt, att_stmt,
ep_att: Some(true),
large_blob_key: Some(vec![0x1B]),
}; };
let response_cbor: Option<cbor::Value> = let response_cbor: Option<cbor::Value> =
ResponseData::AuthenticatorMakeCredential(make_credential_response).into(); ResponseData::AuthenticatorMakeCredential(make_credential_response).into();
let expected_cbor = cbor_map_options! { let expected_cbor = cbor_map_options! {
1 => "packed", 0x01 => "packed",
2 => vec![0xAD], 0x02 => vec![0xAD],
3 => cbor_packed_attestation_statement, 0x03 => cbor_packed_attestation_statement,
0x04 => true,
0x05 => vec![0x1B],
}; };
assert_eq!(response_cbor, Some(expected_cbor)); assert_eq!(response_cbor, Some(expected_cbor));
} }
#[test] #[test]
fn test_get_assertion_into_cbor() { fn test_get_assertion_into_cbor() {
let pub_key_cred_descriptor = PublicKeyCredentialDescriptor {
key_type: PublicKeyCredentialType::PublicKey,
key_id: vec![0x2D, 0x2D, 0x2D, 0x2D],
transports: Some(vec![AuthenticatorTransport::Usb]),
};
let user = PublicKeyCredentialUserEntity {
user_id: vec![0x1D, 0x1D, 0x1D, 0x1D],
user_name: Some("foo".to_string()),
user_display_name: Some("bar".to_string()),
user_icon: Some("example.com/foo/icon.png".to_string()),
};
let get_assertion_response = AuthenticatorGetAssertionResponse { let get_assertion_response = AuthenticatorGetAssertionResponse {
credential: None, credential: Some(pub_key_cred_descriptor),
auth_data: vec![0xAD], auth_data: vec![0xAD],
signature: vec![0x51], signature: vec![0x51],
user: None, user: Some(user),
number_of_credentials: None, number_of_credentials: Some(2),
large_blob_key: Some(vec![0x1B]),
}; };
let response_cbor: Option<cbor::Value> = let response_cbor: Option<cbor::Value> =
ResponseData::AuthenticatorGetAssertion(get_assertion_response).into(); ResponseData::AuthenticatorGetAssertion(get_assertion_response).into();
let expected_cbor = cbor_map_options! { let expected_cbor = cbor_map_options! {
2 => vec![0xAD], 0x01 => cbor_map! {
3 => vec![0x51], "id" => vec![0x2D, 0x2D, 0x2D, 0x2D],
"type" => "public-key",
"transports" => cbor_array!["usb"],
},
0x02 => vec![0xAD],
0x03 => vec![0x51],
0x04 => cbor_map! {
"id" => vec![0x1D, 0x1D, 0x1D, 0x1D],
"icon" => "example.com/foo/icon.png".to_string(),
"name" => "foo".to_string(),
"displayName" => "bar".to_string(),
},
0x05 => 2,
0x07 => vec![0x1B],
}; };
assert_eq!(response_cbor, Some(expected_cbor)); assert_eq!(response_cbor, Some(expected_cbor));
} }
@@ -321,28 +412,21 @@ mod test {
options: None, options: None,
max_msg_size: None, max_msg_size: None,
pin_protocols: None, pin_protocols: None,
#[cfg(feature = "with_ctap2_1")]
max_credential_count_in_list: None, max_credential_count_in_list: None,
#[cfg(feature = "with_ctap2_1")]
max_credential_id_length: None, max_credential_id_length: None,
#[cfg(feature = "with_ctap2_1")]
transports: None, transports: None,
#[cfg(feature = "with_ctap2_1")]
algorithms: None, algorithms: None,
default_cred_protect: None, max_serialized_large_blob_array: None,
#[cfg(feature = "with_ctap2_1")] force_pin_change: None,
min_pin_length: 4, min_pin_length: 4,
#[cfg(feature = "with_ctap2_1")]
firmware_version: None, firmware_version: None,
max_cred_blob_length: None,
max_rp_ids_for_set_min_pin_length: None,
certifications: None,
remaining_discoverable_credentials: None,
}; };
let response_cbor: Option<cbor::Value> = let response_cbor: Option<cbor::Value> =
ResponseData::AuthenticatorGetInfo(get_info_response).into(); ResponseData::AuthenticatorGetInfo(get_info_response).into();
#[cfg(not(feature = "with_ctap2_1"))]
let expected_cbor = cbor_map_options! {
0x01 => cbor_array_vec![versions],
0x03 => vec![0x00; 16],
};
#[cfg(feature = "with_ctap2_1")]
let expected_cbor = cbor_map_options! { let expected_cbor = cbor_map_options! {
0x01 => cbor_array_vec![versions], 0x01 => cbor_array_vec![versions],
0x03 => vec![0x00; 16], 0x03 => vec![0x00; 16],
@@ -352,56 +436,71 @@ mod test {
} }
#[test] #[test]
#[cfg(feature = "with_ctap2_1")]
fn test_get_info_optionals_into_cbor() { fn test_get_info_optionals_into_cbor() {
let mut options_map = BTreeMap::new();
options_map.insert(String::from("rk"), true);
let get_info_response = AuthenticatorGetInfoResponse { let get_info_response = AuthenticatorGetInfoResponse {
versions: vec!["FIDO_2_0".to_string()], versions: vec!["FIDO_2_0".to_string()],
extensions: Some(vec!["extension".to_string()]), extensions: Some(vec!["extension".to_string()]),
aaguid: [0x00; 16], aaguid: [0x00; 16],
options: Some(options_map), options: Some(vec![(String::from("rk"), true)]),
max_msg_size: Some(1024), max_msg_size: Some(1024),
pin_protocols: Some(vec![1]), pin_protocols: Some(vec![1]),
max_credential_count_in_list: Some(20), max_credential_count_in_list: Some(20),
max_credential_id_length: Some(256), max_credential_id_length: Some(256),
transports: Some(vec![AuthenticatorTransport::Usb]), transports: Some(vec![AuthenticatorTransport::Usb]),
algorithms: Some(vec![ES256_CRED_PARAM]), algorithms: Some(vec![ES256_CRED_PARAM]),
default_cred_protect: Some(CredentialProtectionPolicy::UserVerificationRequired), max_serialized_large_blob_array: Some(1024),
force_pin_change: Some(false),
min_pin_length: 4, min_pin_length: 4,
firmware_version: Some(0), firmware_version: Some(0),
max_cred_blob_length: Some(1024),
max_rp_ids_for_set_min_pin_length: Some(8),
certifications: Some(vec![(String::from("example-cert"), 1)]),
remaining_discoverable_credentials: Some(150),
}; };
let response_cbor: Option<cbor::Value> = let response_cbor: Option<cbor::Value> =
ResponseData::AuthenticatorGetInfo(get_info_response).into(); ResponseData::AuthenticatorGetInfo(get_info_response).into();
let expected_cbor = cbor_map_options! { let expected_cbor = cbor_map_options! {
0x01 => cbor_array_vec![vec!["FIDO_2_0"]], 0x01 => cbor_array!["FIDO_2_0"],
0x02 => cbor_array_vec![vec!["extension"]], 0x02 => cbor_array!["extension"],
0x03 => vec![0x00; 16], 0x03 => vec![0x00; 16],
0x04 => cbor_map! {"rk" => true}, 0x04 => cbor_map! {"rk" => true},
0x05 => 1024, 0x05 => 1024,
0x06 => cbor_array_vec![vec![1]], 0x06 => cbor_array![1],
0x07 => 20, 0x07 => 20,
0x08 => 256, 0x08 => 256,
0x09 => cbor_array_vec![vec!["usb"]], 0x09 => cbor_array!["usb"],
0x0A => cbor_array_vec![vec![ES256_CRED_PARAM]], 0x0A => cbor_array![ES256_CRED_PARAM],
0x0C => CredentialProtectionPolicy::UserVerificationRequired as u64, 0x0B => 1024,
0x0C => false,
0x0D => 4, 0x0D => 4,
0x0E => 0, 0x0E => 0,
0x0F => 1024,
0x10 => 8,
0x13 => cbor_map! {"example-cert" => 1},
0x14 => 150,
}; };
assert_eq!(response_cbor, Some(expected_cbor)); assert_eq!(response_cbor, Some(expected_cbor));
} }
#[test] #[test]
fn test_used_client_pin_into_cbor() { fn test_used_client_pin_into_cbor() {
let mut rng = ThreadRng256 {};
let sk = crypto::ecdh::SecKey::gensk(&mut rng);
let pk = sk.genpk();
let cose_key = CoseKey::from(pk);
let client_pin_response = AuthenticatorClientPinResponse { let client_pin_response = AuthenticatorClientPinResponse {
key_agreement: None, key_agreement: Some(cose_key.clone()),
pin_token: Some(vec![70]), pin_uv_auth_token: Some(vec![70]),
retries: None, retries: Some(8),
power_cycle_state: Some(false),
}; };
let response_cbor: Option<cbor::Value> = let response_cbor: Option<cbor::Value> =
ResponseData::AuthenticatorClientPin(Some(client_pin_response)).into(); ResponseData::AuthenticatorClientPin(Some(client_pin_response)).into();
let expected_cbor = cbor_map_options! { let expected_cbor = cbor_map_options! {
2 => vec![70], 0x01 => cbor::Value::from(cose_key),
0x02 => vec![70],
0x03 => 8,
0x04 => false,
}; };
assert_eq!(response_cbor, Some(expected_cbor)); assert_eq!(response_cbor, Some(expected_cbor));
} }
@@ -418,13 +517,105 @@ mod test {
assert_eq!(response_cbor, None); assert_eq!(response_cbor, None);
} }
#[cfg(feature = "with_ctap2_1")] #[test]
fn test_used_credential_management_into_cbor() {
let cred_management_response = AuthenticatorCredentialManagementResponse::default();
let response_cbor: Option<cbor::Value> =
ResponseData::AuthenticatorCredentialManagement(Some(cred_management_response)).into();
let expected_cbor = cbor_map_options! {};
assert_eq!(response_cbor, Some(expected_cbor));
}
#[test]
fn test_used_credential_management_optionals_into_cbor() {
let mut rng = ThreadRng256 {};
let sk = crypto::ecdh::SecKey::gensk(&mut rng);
let rp = PublicKeyCredentialRpEntity {
rp_id: String::from("example.com"),
rp_name: None,
rp_icon: None,
};
let user = PublicKeyCredentialUserEntity {
user_id: vec![0xFA, 0xB1, 0xA2],
user_name: None,
user_display_name: None,
user_icon: None,
};
let cred_descriptor = PublicKeyCredentialDescriptor {
key_type: PublicKeyCredentialType::PublicKey,
key_id: vec![0x1D; 32],
transports: None,
};
let pk = sk.genpk();
let cose_key = CoseKey::from(pk);
let cred_management_response = AuthenticatorCredentialManagementResponse {
existing_resident_credentials_count: Some(100),
max_possible_remaining_resident_credentials_count: Some(96),
rp: Some(rp.clone()),
rp_id_hash: Some(vec![0x1D; 32]),
total_rps: Some(3),
user: Some(user.clone()),
credential_id: Some(cred_descriptor.clone()),
public_key: Some(cose_key.clone()),
total_credentials: Some(2),
cred_protect: Some(CredentialProtectionPolicy::UserVerificationOptional),
large_blob_key: Some(vec![0xBB; 64]),
};
let response_cbor: Option<cbor::Value> =
ResponseData::AuthenticatorCredentialManagement(Some(cred_management_response)).into();
let expected_cbor = cbor_map_options! {
0x01 => 100,
0x02 => 96,
0x03 => rp,
0x04 => vec![0x1D; 32],
0x05 => 3,
0x06 => user,
0x07 => cred_descriptor,
0x08 => cbor::Value::from(cose_key),
0x09 => 2,
0x0A => 0x01,
0x0B => vec![0xBB; 64],
};
assert_eq!(response_cbor, Some(expected_cbor));
}
#[test]
fn test_empty_credential_management_into_cbor() {
let response_cbor: Option<cbor::Value> =
ResponseData::AuthenticatorCredentialManagement(None).into();
assert_eq!(response_cbor, None);
}
#[test] #[test]
fn test_selection_into_cbor() { fn test_selection_into_cbor() {
let response_cbor: Option<cbor::Value> = ResponseData::AuthenticatorSelection.into(); let response_cbor: Option<cbor::Value> = ResponseData::AuthenticatorSelection.into();
assert_eq!(response_cbor, None); assert_eq!(response_cbor, None);
} }
#[test]
fn test_large_blobs_into_cbor() {
let large_blobs_response = AuthenticatorLargeBlobsResponse { config: vec![0xC0] };
let response_cbor: Option<cbor::Value> =
ResponseData::AuthenticatorLargeBlobs(Some(large_blobs_response)).into();
let expected_cbor = cbor_map_options! {
0x01 => vec![0xC0],
};
assert_eq!(response_cbor, Some(expected_cbor));
}
#[test]
fn test_empty_large_blobs_into_cbor() {
let response_cbor: Option<cbor::Value> = ResponseData::AuthenticatorLargeBlobs(None).into();
assert_eq!(response_cbor, None);
}
#[test]
fn test_config_into_cbor() {
let response_cbor: Option<cbor::Value> = ResponseData::AuthenticatorConfig.into();
assert_eq!(response_cbor, None);
}
#[test] #[test]
fn test_vendor_response_into_cbor() { fn test_vendor_response_into_cbor() {
let response_cbor: Option<cbor::Value> = let response_cbor: Option<cbor::Value> =
@@ -436,8 +627,8 @@ mod test {
assert_eq!( assert_eq!(
response_cbor, response_cbor,
Some(cbor_map_options! { Some(cbor_map_options! {
1 => true, 0x01 => true,
2 => false, 0x02 => false,
}) })
); );
let response_cbor: Option<cbor::Value> = let response_cbor: Option<cbor::Value> =
@@ -449,8 +640,8 @@ mod test {
assert_eq!( assert_eq!(
response_cbor, response_cbor,
Some(cbor_map_options! { Some(cbor_map_options! {
1 => false, 0x01 => false,
2 => true, 0x02 => true,
}) })
); );
} }


@@ -1,4 +1,4 @@
-// Copyright 2019 Google LLC
+// Copyright 2019-2021 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -31,11 +31,8 @@ pub enum Ctap2StatusCode {
     CTAP2_ERR_INVALID_CBOR = 0x12,
     CTAP2_ERR_MISSING_PARAMETER = 0x14,
     CTAP2_ERR_LIMIT_EXCEEDED = 0x15,
-    CTAP2_ERR_UNSUPPORTED_EXTENSION = 0x16,
-    #[cfg(feature = "with_ctap2_1")]
     CTAP2_ERR_FP_DATABASE_FULL = 0x17,
-    #[cfg(feature = "with_ctap2_1")]
-    CTAP2_ERR_PC_STORAGE_FULL = 0x18,
+    CTAP2_ERR_LARGE_BLOB_STORAGE_FULL = 0x18,
     CTAP2_ERR_CREDENTIAL_EXCLUDED = 0x19,
     CTAP2_ERR_PROCESSING = 0x21,
     CTAP2_ERR_INVALID_CREDENTIAL = 0x22,
@@ -57,25 +54,22 @@ pub enum Ctap2StatusCode {
     CTAP2_ERR_PIN_AUTH_INVALID = 0x33,
     CTAP2_ERR_PIN_AUTH_BLOCKED = 0x34,
     CTAP2_ERR_PIN_NOT_SET = 0x35,
-    CTAP2_ERR_PIN_REQUIRED = 0x36,
+    CTAP2_ERR_PUAT_REQUIRED = 0x36,
     CTAP2_ERR_PIN_POLICY_VIOLATION = 0x37,
     CTAP2_ERR_PIN_TOKEN_EXPIRED = 0x38,
     CTAP2_ERR_REQUEST_TOO_LARGE = 0x39,
     CTAP2_ERR_ACTION_TIMEOUT = 0x3A,
     CTAP2_ERR_UP_REQUIRED = 0x3B,
     CTAP2_ERR_UV_BLOCKED = 0x3C,
-    #[cfg(feature = "with_ctap2_1")]
     CTAP2_ERR_INTEGRITY_FAILURE = 0x3D,
-    #[cfg(feature = "with_ctap2_1")]
     CTAP2_ERR_INVALID_SUBCOMMAND = 0x3E,
+    CTAP2_ERR_UV_INVALID = 0x3F,
+    CTAP2_ERR_UNAUTHORIZED_PERMISSION = 0x40,
     CTAP1_ERR_OTHER = 0x7F,
-    CTAP2_ERR_SPEC_LAST = 0xDF,
-    CTAP2_ERR_EXTENSION_FIRST = 0xE0,
-    CTAP2_ERR_EXTENSION_LAST = 0xEF,
-    // CTAP2_ERR_VENDOR_FIRST = 0xF0,
-    CTAP2_ERR_VENDOR_RESPONSE_TOO_LONG = 0xF0,
-    CTAP2_ERR_VENDOR_RESPONSE_CANNOT_WRITE_CBOR = 0xF1,
+    _CTAP2_ERR_SPEC_LAST = 0xDF,
+    _CTAP2_ERR_EXTENSION_FIRST = 0xE0,
+    _CTAP2_ERR_EXTENSION_LAST = 0xEF,
+    _CTAP2_ERR_VENDOR_FIRST = 0xF0,
     /// An internal invariant is broken.
     ///
     /// This type of error is unexpected and the current state is undefined.
@@ -85,6 +79,5 @@ pub enum Ctap2StatusCode {
     ///
     /// It may be possible that some of those errors are actually internal errors.
     CTAP2_ERR_VENDOR_HARDWARE_FAILURE = 0xF3,
-    CTAP2_ERR_VENDOR_LAST = 0xFF,
+    _CTAP2_ERR_VENDOR_LAST = 0xFF,
 }

File diff suppressed because it is too large


@@ -84,21 +84,33 @@ make_partition! {
/// The credentials. /// The credentials.
/// ///
/// Depending on `MAX_SUPPORTED_RESIDENTIAL_KEYS`, only a prefix of those keys is used. Each /// Depending on `MAX_SUPPORTED_RESIDENT_KEYS`, only a prefix of those keys is used. Each
/// board may configure `MAX_SUPPORTED_RESIDENTIAL_KEYS` depending on the storage size. /// board may configure `MAX_SUPPORTED_RESIDENT_KEYS` depending on the storage size.
CREDENTIALS = 1700..2000; CREDENTIALS = 1700..2000;
/// Storage for the serialized large blob array.
///
/// The stored large blob can be too big for one key, so it has to be sharded.
LARGE_BLOB_SHARDS = 2000..2004;
/// If this entry exists and is empty, alwaysUv is enabled.
ALWAYS_UV = 2038;
/// If this entry exists and is empty, enterprise attestation is enabled.
ENTERPRISE_ATTESTATION = 2039;
/// If this entry exists and is empty, the PIN needs to be changed.
FORCE_PIN_CHANGE = 2040;
/// The secret of the CredRandom feature. /// The secret of the CredRandom feature.
CRED_RANDOM_SECRET = 2041; CRED_RANDOM_SECRET = 2041;
/// List of RP IDs allowed to read the minimum PIN length. /// List of RP IDs allowed to read the minimum PIN length.
#[cfg(feature = "with_ctap2_1")] MIN_PIN_LENGTH_RP_IDS = 2042;
_MIN_PIN_LENGTH_RP_IDS = 2042;
/// The minimum PIN length. /// The minimum PIN length.
/// ///
/// If the entry is absent, the minimum PIN length is `DEFAULT_MIN_PIN_LENGTH`. /// If the entry is absent, the minimum PIN length is `DEFAULT_MIN_PIN_LENGTH`.
#[cfg(feature = "with_ctap2_1")]
MIN_PIN_LENGTH = 2043; MIN_PIN_LENGTH = 2043;
/// The number of PIN retries. /// The number of PIN retries.
@@ -106,10 +118,11 @@ make_partition! {
/// If the entry is absent, the number of PIN retries is `MAX_PIN_RETRIES`. /// If the entry is absent, the number of PIN retries is `MAX_PIN_RETRIES`.
PIN_RETRIES = 2044; PIN_RETRIES = 2044;
/// The PIN hash. /// The PIN hash and length.
/// ///
/// If the entry is absent, there is no PIN set. /// If the entry is absent, there is no PIN set. The first byte represents
PIN_HASH = 2045; /// the length, the following are an array with the hash.
PIN_PROPERTIES = 2045;
/// The encryption and hmac keys. /// The encryption and hmac keys.
/// ///
@@ -128,8 +141,8 @@ mod test {
#[test] #[test]
fn enough_credentials() { fn enough_credentials() {
use super::super::MAX_SUPPORTED_RESIDENTIAL_KEYS; use crate::ctap::customization::MAX_SUPPORTED_RESIDENT_KEYS;
assert!(MAX_SUPPORTED_RESIDENTIAL_KEYS <= CREDENTIALS.end - CREDENTIALS.start); assert!(MAX_SUPPORTED_RESIDENT_KEYS <= CREDENTIALS.end - CREDENTIALS.start);
} }
#[test] #[test]
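Two of the new entries describe their encoding directly in the comments: PIN_PROPERTIES stores one length byte followed by the PIN hash, and LARGE_BLOB_SHARDS spreads the serialized large blob array over several consecutive keys. A hedged sketch of both layouts; the helper names, the 16-byte truncated hash, and the shard size are assumptions for illustration, not OpenSK's actual storage API:

```rust
// Illustrative helpers only (not OpenSK's real storage code).

/// PIN_PROPERTIES: first byte is the PIN length, the rest is the PIN hash.
fn encode_pin_properties(pin_length: u8, pin_hash: &[u8]) -> Vec<u8> {
    let mut entry = Vec::with_capacity(1 + pin_hash.len());
    entry.push(pin_length);
    entry.extend_from_slice(pin_hash);
    entry
}

fn decode_pin_properties(entry: &[u8]) -> Option<(u8, &[u8])> {
    let (&pin_length, pin_hash) = entry.split_first()?;
    Some((pin_length, pin_hash))
}

/// LARGE_BLOB_SHARDS: the serialized blob is cut into fixed-size shards, one
/// per key in the 2000..2004 range.
fn shard_large_blob(blob: &[u8], shard_size: usize) -> Vec<Vec<u8>> {
    blob.chunks(shard_size).map(|chunk| chunk.to_vec()).collect()
}

fn main() {
    // Assuming a 16-byte truncated hash of the PIN for the example.
    let entry = encode_pin_properties(6, &[0xAB; 16]);
    let (pin_length, pin_hash) = decode_pin_properties(&entry).unwrap();
    assert_eq!((pin_length, pin_hash.len()), (6, 16));

    // A blob bigger than one key's value spills into the next shard.
    let shards = shard_large_blob(&[0u8; 2500], 1024);
    assert_eq!(shards.len(), 3);
}
```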

View File

@@ -1,4 +1,4 @@
// Copyright 2019 Google LLC // Copyright 2019-2021 Google LLC
// //
// Licensed under the Apache License, Version 2 (the "License"); // Licensed under the Apache License, Version 2 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.

277 src/ctap/token_state.rs Normal file
View File

@@ -0,0 +1,277 @@
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::ctap::client_pin::PinPermission;
use crate::ctap::status_code::Ctap2StatusCode;
use crate::ctap::timed_permission::TimedPermission;
use alloc::string::String;
use crypto::sha256::Sha256;
use crypto::Hash256;
use libtock_drivers::timer::{ClockValue, Duration};
/// Timeout for auth tokens.
///
/// This usage time limit is correct for USB, BLE, and internal.
/// NFC only allows 19.8 seconds.
/// TODO(#15) multiplex over transports, add NFC
const INITIAL_USAGE_TIME_LIMIT: Duration<isize> = Duration::from_ms(30000);
/// Implements pinUvAuthToken state from section 6.5.2.1.
///
/// The userPresent flag is omitted as the only way to set it to true is
/// built-in user verification. Therefore, we never cache user presence.
///
/// This implementation does not use a rolling timer.
pub struct PinUvAuthTokenState {
// Relies on the fact that all permissions are represented by powers of two.
permissions_set: u8,
permissions_rp_id: Option<String>,
usage_timer: TimedPermission,
user_verified: bool,
in_use: bool,
}
impl PinUvAuthTokenState {
/// Creates a pinUvAuthToken state without permissions.
pub fn new() -> PinUvAuthTokenState {
PinUvAuthTokenState {
permissions_set: 0,
permissions_rp_id: None,
usage_timer: TimedPermission::waiting(),
user_verified: false,
in_use: false,
}
}
/// Returns whether the pinUvAuthToken is active.
pub fn is_in_use(&self) -> bool {
self.in_use
}
/// Checks if the permission is granted.
pub fn has_permission(&self, permission: PinPermission) -> Result<(), Ctap2StatusCode> {
if permission as u8 & self.permissions_set != 0 {
Ok(())
} else {
Err(Ctap2StatusCode::CTAP2_ERR_PIN_AUTH_INVALID)
}
}
/// Checks if there is no associated permissions RPID.
pub fn has_no_permissions_rp_id(&self) -> Result<(), Ctap2StatusCode> {
if self.permissions_rp_id.is_some() {
return Err(Ctap2StatusCode::CTAP2_ERR_PIN_AUTH_INVALID);
}
Ok(())
}
/// Checks if the permissions RPID is associated.
pub fn has_permissions_rp_id(&self, rp_id: &str) -> Result<(), Ctap2StatusCode> {
match &self.permissions_rp_id {
Some(p) if rp_id == p => Ok(()),
_ => Err(Ctap2StatusCode::CTAP2_ERR_PIN_AUTH_INVALID),
}
}
/// Checks if the permissions RPID's association matches the hash.
pub fn has_permissions_rp_id_hash(&self, rp_id_hash: &[u8]) -> Result<(), Ctap2StatusCode> {
match &self.permissions_rp_id {
Some(p) if rp_id_hash == Sha256::hash(p.as_bytes()) => Ok(()),
_ => Err(Ctap2StatusCode::CTAP2_ERR_PIN_AUTH_INVALID),
}
}
/// Sets the permissions, represented as bits in a byte.
pub fn set_permissions(&mut self, permissions: u8) {
self.permissions_set = permissions;
}
/// Sets the permissions RPID.
pub fn set_permissions_rp_id(&mut self, permissions_rp_id: Option<String>) {
self.permissions_rp_id = permissions_rp_id;
}
/// Sets the default permissions.
///
/// Allows MakeCredential and GetAssertion, without specifying an RP ID.
pub fn set_default_permissions(&mut self) {
self.set_permissions(0x03);
self.set_permissions_rp_id(None);
}
/// Starts the timer for pinUvAuthToken usage.
pub fn begin_using_pin_uv_auth_token(&mut self, now: ClockValue) {
self.user_verified = true;
self.usage_timer = TimedPermission::granted(now, INITIAL_USAGE_TIME_LIMIT);
self.in_use = true;
}
/// Updates the usage timer, and disables the pinUvAuthToken on timeout.
pub fn pin_uv_auth_token_usage_timer_observer(&mut self, now: ClockValue) {
if !self.in_use {
return;
}
self.usage_timer = self.usage_timer.check_expiration(now);
if !self.usage_timer.is_granted(now) {
self.stop_using_pin_uv_auth_token();
}
}
/// Returns whether the user is verified.
pub fn get_user_verified_flag_value(&self) -> bool {
self.in_use && self.user_verified
}
/// Consumes the user verification.
pub fn clear_user_verified_flag(&mut self) {
self.user_verified = false;
}
/// Clears all permissions except Large Blob Write.
pub fn clear_pin_uv_auth_token_permissions_except_lbw(&mut self) {
self.permissions_set &= PinPermission::LargeBlobWrite as u8;
}
/// Resets to the initial state.
pub fn stop_using_pin_uv_auth_token(&mut self) {
self.permissions_rp_id = None;
self.permissions_set = 0;
self.usage_timer = TimedPermission::waiting();
self.user_verified = false;
self.in_use = false;
}
}
#[cfg(test)]
mod test {
use super::*;
use enum_iterator::IntoEnumIterator;
const CLOCK_FREQUENCY_HZ: usize = 32768;
const START_CLOCK_VALUE: ClockValue = ClockValue::new(0, CLOCK_FREQUENCY_HZ);
const SMALL_DURATION: Duration<isize> = Duration::from_ms(100);
#[test]
fn test_observer() {
let mut token_state = PinUvAuthTokenState::new();
let mut now = START_CLOCK_VALUE;
token_state.begin_using_pin_uv_auth_token(now);
assert!(token_state.is_in_use());
now = now.wrapping_add(SMALL_DURATION);
token_state.pin_uv_auth_token_usage_timer_observer(now);
assert!(token_state.is_in_use());
now = now.wrapping_add(INITIAL_USAGE_TIME_LIMIT);
token_state.pin_uv_auth_token_usage_timer_observer(now);
assert!(!token_state.is_in_use());
}
#[test]
fn test_stop() {
let mut token_state = PinUvAuthTokenState::new();
token_state.begin_using_pin_uv_auth_token(START_CLOCK_VALUE);
assert!(token_state.is_in_use());
token_state.stop_using_pin_uv_auth_token();
assert!(!token_state.is_in_use());
}
#[test]
fn test_permissions() {
let mut token_state = PinUvAuthTokenState::new();
token_state.set_permissions(0xFF);
for permission in PinPermission::into_enum_iter() {
assert_eq!(token_state.has_permission(permission), Ok(()));
}
token_state.clear_pin_uv_auth_token_permissions_except_lbw();
assert_eq!(
token_state.has_permission(PinPermission::CredentialManagement),
Err(Ctap2StatusCode::CTAP2_ERR_PIN_AUTH_INVALID)
);
assert_eq!(
token_state.has_permission(PinPermission::LargeBlobWrite),
Ok(())
);
token_state.stop_using_pin_uv_auth_token();
for permission in PinPermission::into_enum_iter() {
assert_eq!(
token_state.has_permission(permission),
Err(Ctap2StatusCode::CTAP2_ERR_PIN_AUTH_INVALID)
);
}
}
#[test]
fn test_permissions_rp_id_none() {
let mut token_state = PinUvAuthTokenState::new();
let example_hash = Sha256::hash(b"example.com");
token_state.set_permissions_rp_id(None);
assert_eq!(token_state.has_no_permissions_rp_id(), Ok(()));
assert_eq!(
token_state.has_permissions_rp_id("example.com"),
Err(Ctap2StatusCode::CTAP2_ERR_PIN_AUTH_INVALID)
);
assert_eq!(
token_state.has_permissions_rp_id_hash(&example_hash),
Err(Ctap2StatusCode::CTAP2_ERR_PIN_AUTH_INVALID)
);
}
#[test]
fn test_permissions_rp_id_some() {
let mut token_state = PinUvAuthTokenState::new();
let example_hash = Sha256::hash(b"example.com");
token_state.set_permissions_rp_id(Some(String::from("example.com")));
assert_eq!(
token_state.has_no_permissions_rp_id(),
Err(Ctap2StatusCode::CTAP2_ERR_PIN_AUTH_INVALID)
);
assert_eq!(token_state.has_permissions_rp_id("example.com"), Ok(()));
assert_eq!(
token_state.has_permissions_rp_id("another.example.com"),
Err(Ctap2StatusCode::CTAP2_ERR_PIN_AUTH_INVALID)
);
assert_eq!(
token_state.has_permissions_rp_id_hash(&example_hash),
Ok(())
);
assert_eq!(
token_state.has_permissions_rp_id_hash(&[0x1D; 32]),
Err(Ctap2StatusCode::CTAP2_ERR_PIN_AUTH_INVALID)
);
token_state.stop_using_pin_uv_auth_token();
assert_eq!(
token_state.has_permissions_rp_id("example.com"),
Err(Ctap2StatusCode::CTAP2_ERR_PIN_AUTH_INVALID)
);
assert_eq!(
token_state.has_permissions_rp_id_hash(&example_hash),
Err(Ctap2StatusCode::CTAP2_ERR_PIN_AUTH_INVALID)
);
}
#[test]
fn test_user_verified_flag() {
let mut token_state = PinUvAuthTokenState::new();
assert!(!token_state.get_user_verified_flag_value());
token_state.begin_using_pin_uv_auth_token(START_CLOCK_VALUE);
assert!(token_state.get_user_verified_flag_value());
token_state.clear_user_verified_flag();
assert!(!token_state.get_user_verified_flag_value());
token_state.begin_using_pin_uv_auth_token(START_CLOCK_VALUE);
assert!(token_state.get_user_verified_flag_value());
token_state.stop_using_pin_uv_auth_token();
assert!(!token_state.get_user_verified_flag_value());
}
}
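A short usage sketch of the new state machine, assuming the module path `crate::ctap::token_state` and a placeholder clock value; the flow mirrors the unit tests above: grant the token, attach permissions, and let the periodic observer expire it after `INITIAL_USAGE_TIME_LIMIT`.

```rust
// Hedged sketch; not part of the diff. Permission bits and method names are
// taken from this file and its tests, everything else is illustrative.
use crate::ctap::client_pin::PinPermission;
use crate::ctap::token_state::PinUvAuthTokenState;
use alloc::string::String;
use libtock_drivers::timer::ClockValue;

fn example_token_flow(now: ClockValue) {
    let mut token_state = PinUvAuthTokenState::new();

    // After a successful PIN/UV check: start the usage timer, grant all
    // permission bits (as the unit test does) and bind the token to an RP ID.
    token_state.begin_using_pin_uv_auth_token(now);
    token_state.set_permissions(0xFF);
    token_state.set_permissions_rp_id(Some(String::from("example.com")));

    assert!(token_state
        .has_permission(PinPermission::CredentialManagement)
        .is_ok());
    assert!(token_state.has_permissions_rp_id("example.com").is_ok());

    // Called regularly from the main loop; once INITIAL_USAGE_TIME_LIMIT has
    // elapsed the token is stopped and all permissions are cleared.
    token_state.pin_uv_auth_token_usage_timer_observer(now);
}
```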

View File

@@ -120,8 +120,8 @@ fn main() {
} }
// These calls are making sure that even for long inactivity, wrapping clock values // These calls are making sure that even for long inactivity, wrapping clock values
// never randomly wink or grant user presence for U2F. // don't cause problems with timers.
ctap_state.update_command_permission(now); ctap_state.update_timeouts(now);
ctap_hid.wink_permission = ctap_hid.wink_permission.check_expiration(now); ctap_hid.wink_permission = ctap_hid.wink_permission.check_expiration(now);
if has_packet { if has_packet {