From 5673b9148f185931a3a35c576fa7435b3a2b40fa Mon Sep 17 00:00:00 2001 From: Julien Cretin Date: Wed, 11 Nov 2020 12:55:37 +0100 Subject: [PATCH 01/20] Use new persistent store library (and delete old) --- Cargo.toml | 3 +- src/ctap/mod.rs | 2 +- src/ctap/status_code.rs | 12 + src/ctap/storage.rs | 744 +++++++--------- src/ctap/storage/key.rs | 135 +++ src/embedded_flash/buffer.rs | 457 ---------- src/embedded_flash/mod.rs | 10 +- src/embedded_flash/storage.rs | 107 --- src/embedded_flash/store/bitfield.rs | 177 ---- src/embedded_flash/store/format.rs | 565 ------------ src/embedded_flash/store/mod.rs | 1182 -------------------------- src/embedded_flash/syscall.rs | 36 +- 12 files changed, 493 insertions(+), 2937 deletions(-) create mode 100644 src/ctap/storage/key.rs delete mode 100644 src/embedded_flash/buffer.rs delete mode 100644 src/embedded_flash/storage.rs delete mode 100644 src/embedded_flash/store/bitfield.rs delete mode 100644 src/embedded_flash/store/format.rs delete mode 100644 src/embedded_flash/store/mod.rs diff --git a/Cargo.toml b/Cargo.toml index 8faf8dd..5b01795 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,6 +15,7 @@ libtock_drivers = { path = "third_party/libtock-drivers" } lang_items = { path = "third_party/lang-items" } cbor = { path = "libraries/cbor" } crypto = { path = "libraries/crypto" } +persistent_store = { path = "libraries/persistent_store" } byteorder = { version = "1", default-features = false } arrayref = "0.3.6" subtle = { version = "2.2", default-features = false, features = ["nightly"] } @@ -23,7 +24,7 @@ subtle = { version = "2.2", default-features = false, features = ["nightly"] } debug_allocations = ["lang_items/debug_allocations"] debug_ctap = ["crypto/derive_debug", "libtock_drivers/debug_ctap"] panic_console = ["lang_items/panic_console"] -std = ["cbor/std", "crypto/std", "crypto/derive_debug", "lang_items/std"] +std = ["cbor/std", "crypto/std", "crypto/derive_debug", "lang_items/std", "persistent_store/std"] ram_storage = [] verbose = ["debug_ctap", "libtock_drivers/verbose_usb"] with_ctap1 = ["crypto/with_ctap1"] diff --git a/src/ctap/mod.rs b/src/ctap/mod.rs index 1a98ce5..8f46f77 100644 --- a/src/ctap/mod.rs +++ b/src/ctap/mod.rs @@ -500,7 +500,7 @@ where let (signature, x5c) = match self.persistent_store.attestation_private_key()? { Some(attestation_private_key) => { let attestation_key = - crypto::ecdsa::SecKey::from_bytes(attestation_private_key).unwrap(); + crypto::ecdsa::SecKey::from_bytes(&attestation_private_key).unwrap(); let attestation_certificate = self .persistent_store .attestation_certificate()? diff --git a/src/ctap/status_code.rs b/src/ctap/status_code.rs index adb84fd..b4f78fd 100644 --- a/src/ctap/status_code.rs +++ b/src/ctap/status_code.rs @@ -81,5 +81,17 @@ pub enum Ctap2StatusCode { /// This type of error is unexpected and the current state is undefined. CTAP2_ERR_VENDOR_INTERNAL_ERROR = 0xF2, + /// The persistent storage invariant is broken. + /// + /// There can be multiple reasons: + /// - The persistent storage has not been erased before its first usage. + /// - The persistent storage has been tempered by a third party. + /// - The flash is malfunctioning (including the Tock driver). + /// + /// In the first 2 cases the persistent storage should be completely erased. If the error + /// reproduces, it may indicate a software bug or a hardware deficiency. In both cases, the + /// error should be reported. 
+ CTAP2_ERR_VENDOR_INVALID_PERSISTENT_STORAGE = 0xF3, + CTAP2_ERR_VENDOR_LAST = 0xFF, } diff --git a/src/ctap/storage.rs b/src/ctap/storage.rs index 127dc23..d99d62c 100644 --- a/src/ctap/storage.rs +++ b/src/ctap/storage.rs @@ -12,13 +12,15 @@ // See the License for the specific language governing permissions and // limitations under the License. +mod key; + #[cfg(feature = "with_ctap2_1")] use crate::ctap::data_formats::{extract_array, extract_text_string}; use crate::ctap::data_formats::{CredentialProtectionPolicy, PublicKeyCredentialSource}; use crate::ctap::pin_protocol_v1::PIN_AUTH_LENGTH; use crate::ctap::status_code::Ctap2StatusCode; use crate::ctap::{key_material, USE_BATCH_ATTESTATION}; -use crate::embedded_flash::{self, StoreConfig, StoreEntry, StoreError}; +#[cfg(feature = "with_ctap2_1")] use alloc::string::String; #[cfg(any(test, feature = "ram_storage", feature = "with_ctap2_1"))] use alloc::vec; @@ -30,16 +32,16 @@ use core::convert::TryInto; use crypto::rng256::Rng256; #[cfg(any(test, feature = "ram_storage"))] -type Storage = embedded_flash::BufferStorage; +type Storage = persistent_store::BufferStorage; #[cfg(not(any(test, feature = "ram_storage")))] -type Storage = embedded_flash::SyscallStorage; +type Storage = crate::embedded_flash::SyscallStorage; // Those constants may be modified before compilation to tune the behavior of the key. // -// The number of pages should be at least 2 and at most what the flash can hold. There should be no -// reason to put a small number here, except that the latency of flash operations depends on the -// number of pages. This will improve in the future. Currently, using 20 pages gives 65ms per -// operation. The rule of thumb is 3.5ms per additional page. +// The number of pages should be at least 3 and at most what the flash can hold. There should be no +// reason to put a small number here, except that the latency of flash operations is linear in the +// number of pages. This may improve in the future. Currently, using 20 pages gives between 20ms and +// 240ms per operation. The rule of thumb is between 1ms and 12ms per additional page. // // Limiting the number of residential keys permits to ensure a minimum number of counter increments. // Let: @@ -49,32 +51,15 @@ type Storage = embedded_flash::SyscallStorage; // - C the number of erase cycles (10000) // - I the minimum number of counter increments // -// We have: I = ((P - 1) * 4092 - K * S) / 12 * C +// We have: I = (P * 4084 - 5107 - K * S) / 8 * C // -// With P=20 and K=150, we have I > 2M which is enough for 500 increments per day for 10 years. +// With P=20 and K=150, we have I=2M which is enough for 500 increments per day for 10 years. #[cfg(feature = "ram_storage")] -const NUM_PAGES: usize = 2; +const NUM_PAGES: usize = 3; #[cfg(not(feature = "ram_storage"))] const NUM_PAGES: usize = 20; const MAX_SUPPORTED_RESIDENTIAL_KEYS: usize = 150; -// List of tags. They should all be unique. And there should be less than NUM_TAGS. -const TAG_CREDENTIAL: usize = 0; -const GLOBAL_SIGNATURE_COUNTER: usize = 1; -const MASTER_KEYS: usize = 2; -const PIN_HASH: usize = 3; -const PIN_RETRIES: usize = 4; -const ATTESTATION_PRIVATE_KEY: usize = 5; -const ATTESTATION_CERTIFICATE: usize = 6; -const AAGUID: usize = 7; -#[cfg(feature = "with_ctap2_1")] -const MIN_PIN_LENGTH: usize = 8; -#[cfg(feature = "with_ctap2_1")] -const MIN_PIN_LENGTH_RP_IDS: usize = 9; -// Different NUM_TAGS depending on the CTAP version make the storage incompatible, -// so we use the maximum. 
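As a quick sanity check of the increment budget quoted in the comment above the NUM_PAGES constant, here is a minimal sketch of the arithmetic. P, K and C are the values given in that comment; S is not fixed there, so an assumed maximum credential size of 500 bytes is used below.

    // Back-of-the-envelope check of I = (P * 4084 - 5107 - K * S) / 8 * C,
    // with P = 20 pages, K = 150 keys, C = 10_000 erase cycles and an assumed
    // (hypothetical) maximum credential size of S = 500 bytes.
    fn main() {
        let (p, k, s, c): (u64, u64, u64, u64) = (20, 150, 500, 10_000);
        let budget = (p * 4084 - 5107 - k * s) / 8 * c; // 1_960_000 increments
        let needed = 500u64 * 365 * 10; // 500 increments per day for 10 years
        assert!(budget >= needed); // 1_960_000 >= 1_825_000
        println!("budget = {}, needed = {}", budget, needed);
    }

Under that assumption the budget comes out at roughly 2M increments, which is consistent with the I = 2M figure and the 10-year claim in the comment.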
-const NUM_TAGS: usize = 10; - const MAX_PIN_RETRIES: u8 = 8; const ATTESTATION_PRIVATE_KEY_LENGTH: usize = 32; const AAGUID_LENGTH: usize = 16; @@ -88,92 +73,18 @@ const _DEFAULT_MIN_PIN_LENGTH_RP_IDS: Vec = Vec::new(); #[cfg(feature = "with_ctap2_1")] const _MAX_RP_IDS_LENGTH: usize = 8; -#[allow(clippy::enum_variant_names)] -#[derive(PartialEq, Eq, PartialOrd, Ord)] -enum Key { - // TODO(cretin): Test whether this doesn't consume too much memory. Otherwise, we can use less - // keys. Either only a simple enum value for all credentials, or group by rp_id. - Credential { - rp_id: Option, - credential_id: Option>, - user_handle: Option>, - }, - GlobalSignatureCounter, - MasterKeys, - PinHash, - PinRetries, - AttestationPrivateKey, - AttestationCertificate, - Aaguid, - #[cfg(feature = "with_ctap2_1")] - MinPinLength, - #[cfg(feature = "with_ctap2_1")] - MinPinLengthRpIds, -} - +/// Wrapper for master keys. pub struct MasterKeys { + /// Master encryption key. pub encryption: [u8; 32], + + /// Master hmac key. pub hmac: [u8; 32], } -struct Config; - -impl StoreConfig for Config { - type Key = Key; - - fn num_tags(&self) -> usize { - NUM_TAGS - } - - fn keys(&self, entry: StoreEntry, mut add: impl FnMut(Key)) { - match entry.tag { - TAG_CREDENTIAL => { - let credential = match deserialize_credential(entry.data) { - None => { - debug_assert!(false); - return; - } - Some(credential) => credential, - }; - add(Key::Credential { - rp_id: Some(credential.rp_id.clone()), - credential_id: Some(credential.credential_id), - user_handle: None, - }); - add(Key::Credential { - rp_id: Some(credential.rp_id.clone()), - credential_id: None, - user_handle: None, - }); - add(Key::Credential { - rp_id: Some(credential.rp_id), - credential_id: None, - user_handle: Some(credential.user_handle), - }); - add(Key::Credential { - rp_id: None, - credential_id: None, - user_handle: None, - }); - } - GLOBAL_SIGNATURE_COUNTER => add(Key::GlobalSignatureCounter), - MASTER_KEYS => add(Key::MasterKeys), - PIN_HASH => add(Key::PinHash), - PIN_RETRIES => add(Key::PinRetries), - ATTESTATION_PRIVATE_KEY => add(Key::AttestationPrivateKey), - ATTESTATION_CERTIFICATE => add(Key::AttestationCertificate), - AAGUID => add(Key::Aaguid), - #[cfg(feature = "with_ctap2_1")] - MIN_PIN_LENGTH => add(Key::MinPinLength), - #[cfg(feature = "with_ctap2_1")] - MIN_PIN_LENGTH_RP_IDS => add(Key::MinPinLengthRpIds), - _ => debug_assert!(false), - } - } -} - +/// CTAP persistent storage. pub struct PersistentStore { - store: embedded_flash::Store, + store: persistent_store::Store, } impl PersistentStore { @@ -188,17 +99,19 @@ impl PersistentStore { #[cfg(any(test, feature = "ram_storage"))] let storage = PersistentStore::new_test_storage(); let mut store = PersistentStore { - store: embedded_flash::Store::new(storage, Config).unwrap(), + store: persistent_store::Store::new(storage).ok().unwrap(), }; - store.init(rng); + store.init(rng).unwrap(); store } + /// Creates a syscall storage in flash. #[cfg(not(any(test, feature = "ram_storage")))] fn new_prod_storage() -> Storage { Storage::new(NUM_PAGES).unwrap() } + /// Creates a buffer storage in RAM. 
#[cfg(any(test, feature = "ram_storage"))] fn new_test_storage() -> Storage { #[cfg(not(test))] @@ -206,7 +119,7 @@ impl PersistentStore { #[cfg(test)] const PAGE_SIZE: usize = 0x1000; let store = vec![0xff; NUM_PAGES * PAGE_SIZE].into_boxed_slice(); - let options = embedded_flash::BufferOptions { + let options = persistent_store::BufferOptions { word_size: 4, page_size: PAGE_SIZE, max_word_writes: 2, @@ -216,283 +129,265 @@ impl PersistentStore { Storage::new(store, options) } - fn init(&mut self, rng: &mut impl Rng256) { - if self.store.find_one(&Key::MasterKeys).is_none() { + /// Initializes the store by creating missing objects. + fn init(&mut self, rng: &mut impl Rng256) -> Result<(), Ctap2StatusCode> { + // Generate and store the master keys if they are missing. + if self.store.find_handle(key::MASTER_KEYS)?.is_none() { let master_encryption_key = rng.gen_uniform_u8x32(); let master_hmac_key = rng.gen_uniform_u8x32(); let mut master_keys = Vec::with_capacity(64); master_keys.extend_from_slice(&master_encryption_key); master_keys.extend_from_slice(&master_hmac_key); - self.store - .insert(StoreEntry { - tag: MASTER_KEYS, - data: &master_keys, - sensitive: true, - }) - .unwrap(); + self.store.insert(key::MASTER_KEYS, &master_keys)?; } // The following 3 entries are meant to be written by vendor-specific commands. if USE_BATCH_ATTESTATION { - if self.store.find_one(&Key::AttestationPrivateKey).is_none() { - self.set_attestation_private_key(key_material::ATTESTATION_PRIVATE_KEY) - .unwrap(); + if self + .store + .find_handle(key::ATTESTATION_PRIVATE_KEY)? + .is_none() + { + self.set_attestation_private_key(key_material::ATTESTATION_PRIVATE_KEY)?; } - if self.store.find_one(&Key::AttestationCertificate).is_none() { - self.set_attestation_certificate(key_material::ATTESTATION_CERTIFICATE) - .unwrap(); + if self + .store + .find_handle(key::ATTESTATION_CERTIFICATE)? + .is_none() + { + self.set_attestation_certificate(key_material::ATTESTATION_CERTIFICATE)?; } } - if self.store.find_one(&Key::Aaguid).is_none() { - self.set_aaguid(key_material::AAGUID).unwrap(); + if self.store.find_handle(key::AAGUID)?.is_none() { + self.set_aaguid(key_material::AAGUID)?; } + Ok(()) } + /// Returns the first matching credential. + /// + /// Returns `None` if no credentials are matched or if `check_cred_protect` is set and the first + /// matched credential requires user verification. pub fn find_credential( &self, rp_id: &str, credential_id: &[u8], check_cred_protect: bool, ) -> Result, Ctap2StatusCode> { - let key = Key::Credential { - rp_id: Some(rp_id.into()), - credential_id: Some(credential_id.into()), - user_handle: None, - }; - let entry = match self.store.find_one(&key) { - None => return Ok(None), - Some((_, entry)) => entry, - }; - debug_assert_eq!(entry.tag, TAG_CREDENTIAL); - let result = deserialize_credential(entry.data); - debug_assert!(result.is_some()); - let user_verification_required = result.as_ref().map_or(false, |cred| { - cred.cred_protect_policy == Some(CredentialProtectionPolicy::UserVerificationRequired) + let mut iter_result = Ok(()); + let iter = self.iter_credentials(&mut iter_result)?; + // TODO(reviewer): Should we return an error if we find more than one matching credential? + // We did not use to in the previous version (panic in debug mode, nothing in release mode) + // but I don't remember why. Let's document it. 
+ let result = iter.map(|(_, credential)| credential).find(|credential| { + credential.rp_id == rp_id && credential.credential_id == credential_id }); - if check_cred_protect && user_verification_required { - Ok(None) - } else { - Ok(result) + iter_result?; + if let Some(cred) = &result { + let user_verification_required = cred.cred_protect_policy + == Some(CredentialProtectionPolicy::UserVerificationRequired); + if check_cred_protect && user_verification_required { + return Ok(None); + } } + Ok(result) } + /// Stores or updates a credential. + /// + /// If a credential with the same RP id and user handle already exists, it is replaced. pub fn store_credential( &mut self, - credential: PublicKeyCredentialSource, + new_credential: PublicKeyCredentialSource, ) -> Result<(), Ctap2StatusCode> { - let key = Key::Credential { - rp_id: Some(credential.rp_id.clone()), - credential_id: None, - user_handle: Some(credential.user_handle.clone()), - }; - let old_entry = self.store.find_one(&key); - if old_entry.is_none() && self.count_credentials()? >= MAX_SUPPORTED_RESIDENTIAL_KEYS { + // Holds the key of the existing credential if this is an update. + let mut old_key = None; + // Holds the unordered list of used keys. + let mut keys = Vec::new(); + let mut iter_result = Ok(()); + let iter = self.iter_credentials(&mut iter_result)?; + for (key, credential) in iter { + keys.push(key); + if credential.rp_id == new_credential.rp_id + && credential.user_handle == new_credential.user_handle + { + if old_key.is_some() { + return Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INVALID_PERSISTENT_STORAGE); + } + old_key = Some(key); + } + } + iter_result?; + if old_key.is_none() && keys.len() >= MAX_SUPPORTED_RESIDENTIAL_KEYS { return Err(Ctap2StatusCode::CTAP2_ERR_KEY_STORE_FULL); } - let credential = serialize_credential(credential)?; - let new_entry = StoreEntry { - tag: TAG_CREDENTIAL, - data: &credential, - sensitive: true, - }; - match old_entry { - None => self.store.insert(new_entry)?, - Some((index, old_entry)) => { - debug_assert_eq!(old_entry.tag, TAG_CREDENTIAL); - self.store.replace(index, new_entry)? - } + let key = match old_key { + // This is a new credential being added, we need to allocate a free key. We choose the + // first available key. This is quadratic in the number of existing keys. + None => key::CREDENTIALS + .take(MAX_SUPPORTED_RESIDENTIAL_KEYS) + .find(|key| !keys.contains(key)) + .ok_or(Ctap2StatusCode::CTAP2_ERR_VENDOR_INVALID_PERSISTENT_STORAGE)?, + // This is an existing credential being updated, we reuse its key. + Some(x) => x, }; + let value = serialize_credential(new_credential)?; + self.store.insert(key, &value)?; Ok(()) } + /// Returns the list of matching credentials. + /// + /// Does not return credentials that are not discoverable if `check_cred_protect` is set. 
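To make the free-key allocation in store_credential above concrete, here is a small, self-contained illustration with hypothetical key usage; the real code scans key::CREDENTIALS against the keys collected while iterating over the store.

    // Hypothetical example of the first-free-key scan in store_credential:
    // with CREDENTIALS = 1700..2000 and keys 1700, 1701 and 1703 already
    // taken, the first free key is 1702.
    fn main() {
        const MAX_SUPPORTED_RESIDENTIAL_KEYS: usize = 150;
        let used: Vec<usize> = vec![1700, 1701, 1703];
        let free = (1700..2000)
            .take(MAX_SUPPORTED_RESIDENTIAL_KEYS)
            .find(|key| !used.contains(key));
        assert_eq!(free, Some(1702));
    }

Each candidate key is checked against every used key, which is the quadratic behavior the comment mentions; with MAX_SUPPORTED_RESIDENTIAL_KEYS at 150 this stays cheap.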
pub fn filter_credential( &self, rp_id: &str, check_cred_protect: bool, ) -> Result, Ctap2StatusCode> { - Ok(self - .store - .find_all(&Key::Credential { - rp_id: Some(rp_id.into()), - credential_id: None, - user_handle: None, - }) - .filter_map(|(_, entry)| { - debug_assert_eq!(entry.tag, TAG_CREDENTIAL); - let credential = deserialize_credential(entry.data); - debug_assert!(credential.is_some()); - credential + let mut iter_result = Ok(()); + let iter = self.iter_credentials(&mut iter_result)?; + let result = iter + .filter_map(|(_, credential)| { + if credential.rp_id == rp_id { + Some(credential) + } else { + None + } }) .filter(|cred| !check_cred_protect || cred.is_discoverable()) - .collect()) + .collect(); + iter_result?; + Ok(result) } + /// Returns the number of credentials. + #[cfg(test)] pub fn count_credentials(&self) -> Result { - Ok(self - .store - .find_all(&Key::Credential { - rp_id: None, - credential_id: None, - user_handle: None, - }) - .count()) + let mut iter_result = Ok(()); + let iter = self.iter_credentials(&mut iter_result)?; + let result = iter.count(); + iter_result?; + Ok(result) } + /// Iterates through the credentials. + /// + /// If an error is encountered during iteration, it is written to `result`. + fn iter_credentials<'a>( + &'a self, + result: &'a mut Result<(), Ctap2StatusCode>, + ) -> Result, Ctap2StatusCode> { + IterCredentials::new(&self.store, result) + } + + /// Returns the global signature counter. pub fn global_signature_counter(&self) -> Result { - Ok(self - .store - .find_one(&Key::GlobalSignatureCounter) - .map_or(0, |(_, entry)| { - u32::from_ne_bytes(*array_ref!(entry.data, 0, 4)) - })) - } - - pub fn incr_global_signature_counter(&mut self) -> Result<(), Ctap2StatusCode> { - let mut buffer = [0; core::mem::size_of::()]; - match self.store.find_one(&Key::GlobalSignatureCounter) { - None => { - buffer.copy_from_slice(&1u32.to_ne_bytes()); - self.store.insert(StoreEntry { - tag: GLOBAL_SIGNATURE_COUNTER, - data: &buffer, - sensitive: false, - })?; - } - Some((index, entry)) => { - let value = u32::from_ne_bytes(*array_ref!(entry.data, 0, 4)); - // In hopes that servers handle the wrapping gracefully. - buffer.copy_from_slice(&value.wrapping_add(1).to_ne_bytes()); - self.store.replace( - index, - StoreEntry { - tag: GLOBAL_SIGNATURE_COUNTER, - data: &buffer, - sensitive: false, - }, - )?; - } - } - Ok(()) - } - - pub fn master_keys(&self) -> Result { - let (_, entry) = self.store.find_one(&Key::MasterKeys).unwrap(); - if entry.data.len() != 64 { - return Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR); - } - Ok(MasterKeys { - encryption: *array_ref![entry.data, 0, 32], - hmac: *array_ref![entry.data, 32, 32], + Ok(match self.store.find(key::GLOBAL_SIGNATURE_COUNTER)? { + None => 0, + Some(value) => u32::from_ne_bytes(*array_ref!(&value, 0, 4)), }) } - pub fn pin_hash(&self) -> Result, Ctap2StatusCode> { - let data = match self.store.find_one(&Key::PinHash) { - None => return Ok(None), - Some((_, entry)) => entry.data, - }; - if data.len() != PIN_AUTH_LENGTH { - return Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR); - } - Ok(Some(*array_ref![data, 0, PIN_AUTH_LENGTH])) + /// Increments the global signature counter. + pub fn incr_global_signature_counter(&mut self) -> Result<(), Ctap2StatusCode> { + let old_value = self.global_signature_counter()?; + // In hopes that servers handle the wrapping gracefully. 
+ let new_value = old_value.wrapping_add(1); + self.store + .insert(key::GLOBAL_SIGNATURE_COUNTER, &new_value.to_ne_bytes())?; + Ok(()) } + /// Returns the master keys. + pub fn master_keys(&self) -> Result { + let master_keys = self + .store + .find(key::MASTER_KEYS)? + .ok_or(Ctap2StatusCode::CTAP2_ERR_VENDOR_INVALID_PERSISTENT_STORAGE)?; + if master_keys.len() != 64 { + return Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INVALID_PERSISTENT_STORAGE); + } + Ok(MasterKeys { + encryption: *array_ref![master_keys, 0, 32], + hmac: *array_ref![master_keys, 32, 32], + }) + } + + /// Returns the PIN hash if defined. + pub fn pin_hash(&self) -> Result, Ctap2StatusCode> { + let pin_hash = match self.store.find(key::PIN_HASH)? { + None => return Ok(None), + Some(pin_hash) => pin_hash, + }; + if pin_hash.len() != PIN_AUTH_LENGTH { + return Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INVALID_PERSISTENT_STORAGE); + } + Ok(Some(*array_ref![pin_hash, 0, PIN_AUTH_LENGTH])) + } + + /// Sets the PIN hash. + /// + /// If it was already defined, it is updated. pub fn set_pin_hash( &mut self, pin_hash: &[u8; PIN_AUTH_LENGTH], ) -> Result<(), Ctap2StatusCode> { - let entry = StoreEntry { - tag: PIN_HASH, - data: pin_hash, - sensitive: true, - }; - match self.store.find_one(&Key::PinHash) { - None => self.store.insert(entry)?, - Some((index, _)) => self.store.replace(index, entry)?, - } - Ok(()) + Ok(self.store.insert(key::PIN_HASH, pin_hash)?) } + /// Returns the number of remaining PIN retries. pub fn pin_retries(&self) -> Result { - Ok(self - .store - .find_one(&Key::PinRetries) - .map_or(MAX_PIN_RETRIES, |(_, entry)| { - debug_assert_eq!(entry.data.len(), 1); - entry.data[0] - })) + match self.store.find(key::PIN_RETRIES)? { + None => Ok(MAX_PIN_RETRIES), + Some(value) if value.len() == 1 => Ok(value[0]), + _ => Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INVALID_PERSISTENT_STORAGE), + } } + /// Decrements the number of remaining PIN retries. pub fn decr_pin_retries(&mut self) -> Result<(), Ctap2StatusCode> { - match self.store.find_one(&Key::PinRetries) { - None => { - self.store.insert(StoreEntry { - tag: PIN_RETRIES, - data: &[MAX_PIN_RETRIES.saturating_sub(1)], - sensitive: false, - })?; - } - Some((index, entry)) => { - debug_assert_eq!(entry.data.len(), 1); - if entry.data[0] == 0 { - return Ok(()); - } - let new_value = entry.data[0].saturating_sub(1); - self.store.replace( - index, - StoreEntry { - tag: PIN_RETRIES, - data: &[new_value], - sensitive: false, - }, - )?; - } + let old_value = self.pin_retries()?; + let new_value = old_value.saturating_sub(1); + if new_value != old_value { + self.store.insert(key::PIN_RETRIES, &[new_value])?; } Ok(()) } + /// Resets the number of remaining PIN retries. pub fn reset_pin_retries(&mut self) -> Result<(), Ctap2StatusCode> { - if let Some((index, _)) = self.store.find_one(&Key::PinRetries) { - self.store.delete(index)?; - } - Ok(()) + Ok(self.store.remove(key::PIN_RETRIES)?) } + /// Returns the minimum PIN length. #[cfg(feature = "with_ctap2_1")] pub fn min_pin_length(&self) -> Result { - Ok(self - .store - .find_one(&Key::MinPinLength) - .map_or(DEFAULT_MIN_PIN_LENGTH, |(_, entry)| { - debug_assert_eq!(entry.data.len(), 1); - entry.data[0] - })) + match self.store.find(key::MIN_PIN_LENGTH)? { + None => Ok(DEFAULT_MIN_PIN_LENGTH), + Some(value) if value.len() == 1 => Ok(value[0]), + _ => Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INVALID_PERSISTENT_STORAGE), + } } + /// Sets the minimum PIN length. 
#[cfg(feature = "with_ctap2_1")] pub fn set_min_pin_length(&mut self, min_pin_length: u8) -> Result<(), Ctap2StatusCode> { - let entry = StoreEntry { - tag: MIN_PIN_LENGTH, - data: &[min_pin_length], - sensitive: false, - }; - Ok(match self.store.find_one(&Key::MinPinLength) { - None => self.store.insert(entry)?, - Some((index, _)) => self.store.replace(index, entry)?, - }) + Ok(self.store.insert(key::MIN_PIN_LENGTH, &[min_pin_length])?) } + /// TODO: Help from reviewer needed for documentation. #[cfg(feature = "with_ctap2_1")] pub fn _min_pin_length_rp_ids(&self) -> Result, Ctap2StatusCode> { let rp_ids = self .store - .find_one(&Key::MinPinLengthRpIds) - .map_or(Some(_DEFAULT_MIN_PIN_LENGTH_RP_IDS), |(_, entry)| { - _deserialize_min_pin_length_rp_ids(entry.data) + .find(key::_MIN_PIN_LENGTH_RP_IDS)? + .map_or(Some(_DEFAULT_MIN_PIN_LENGTH_RP_IDS), |value| { + _deserialize_min_pin_length_rp_ids(&value) }); debug_assert!(rp_ids.is_some()); Ok(rp_ids.unwrap_or(vec![])) } + /// TODO: Help from reviewer needed for documentation. #[cfg(feature = "with_ctap2_1")] pub fn _set_min_pin_length_rp_ids( &mut self, @@ -507,138 +402,173 @@ impl PersistentStore { if min_pin_length_rp_ids.len() > _MAX_RP_IDS_LENGTH { return Err(Ctap2StatusCode::CTAP2_ERR_KEY_STORE_FULL); } - let entry = StoreEntry { - tag: MIN_PIN_LENGTH_RP_IDS, - data: &_serialize_min_pin_length_rp_ids(min_pin_length_rp_ids)?, - sensitive: false, - }; - match self.store.find_one(&Key::MinPinLengthRpIds) { - None => { - self.store.insert(entry).unwrap(); - } - Some((index, _)) => { - self.store.replace(index, entry).unwrap(); - } - } - Ok(()) + Ok(self.store.insert( + key::_MIN_PIN_LENGTH_RP_IDS, + &_serialize_min_pin_length_rp_ids(min_pin_length_rp_ids)?, + )?) } + /// Returns the attestation private key if defined. pub fn attestation_private_key( &self, - ) -> Result, Ctap2StatusCode> { - let data = match self.store.find_one(&Key::AttestationPrivateKey) { - None => return Ok(None), - Some((_, entry)) => entry.data, - }; - if data.len() != ATTESTATION_PRIVATE_KEY_LENGTH { - return Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR); + ) -> Result, Ctap2StatusCode> { + match self.store.find(key::ATTESTATION_PRIVATE_KEY)? { + None => Ok(None), + Some(key) if key.len() != ATTESTATION_PRIVATE_KEY_LENGTH => { + Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INVALID_PERSISTENT_STORAGE) + } + Some(key) => Ok(Some(*array_ref![key, 0, ATTESTATION_PRIVATE_KEY_LENGTH])), } - Ok(Some(array_ref!(data, 0, ATTESTATION_PRIVATE_KEY_LENGTH))) } + /// Sets the attestation private key. + /// + /// If it is already defined, it is overwritten. pub fn set_attestation_private_key( &mut self, attestation_private_key: &[u8; ATTESTATION_PRIVATE_KEY_LENGTH], ) -> Result<(), Ctap2StatusCode> { - let entry = StoreEntry { - tag: ATTESTATION_PRIVATE_KEY, - data: attestation_private_key, - sensitive: false, - }; - match self.store.find_one(&Key::AttestationPrivateKey) { - None => self.store.insert(entry)?, - Some((index, _)) => self.store.replace(index, entry)?, - } - Ok(()) + Ok(self + .store + .insert(key::ATTESTATION_PRIVATE_KEY, attestation_private_key)?) } + /// Returns the attestation certificate if defined. pub fn attestation_certificate(&self) -> Result>, Ctap2StatusCode> { - let data = match self.store.find_one(&Key::AttestationCertificate) { - None => return Ok(None), - Some((_, entry)) => entry.data, - }; - Ok(Some(data.to_vec())) + Ok(self.store.find(key::ATTESTATION_CERTIFICATE)?) } + /// Sets the attestation certificate. 
+ /// + /// If it is already defined, it is overwritten. pub fn set_attestation_certificate( &mut self, attestation_certificate: &[u8], ) -> Result<(), Ctap2StatusCode> { - let entry = StoreEntry { - tag: ATTESTATION_CERTIFICATE, - data: attestation_certificate, - sensitive: false, - }; - match self.store.find_one(&Key::AttestationCertificate) { - None => self.store.insert(entry)?, - Some((index, _)) => self.store.replace(index, entry)?, - } - Ok(()) - } - - pub fn aaguid(&self) -> Result<[u8; AAGUID_LENGTH], Ctap2StatusCode> { - let (_, entry) = self + Ok(self .store - .find_one(&Key::Aaguid) - .ok_or(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR)?; - let data = entry.data; - if data.len() != AAGUID_LENGTH { - return Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR); - } - Ok(*array_ref![data, 0, AAGUID_LENGTH]) + .insert(key::ATTESTATION_CERTIFICATE, attestation_certificate)?) } + /// Returns the AAGUID. + pub fn aaguid(&self) -> Result<[u8; AAGUID_LENGTH], Ctap2StatusCode> { + let aaguid = self + .store + .find(key::AAGUID)? + .ok_or(Ctap2StatusCode::CTAP2_ERR_VENDOR_INVALID_PERSISTENT_STORAGE)?; + if aaguid.len() != AAGUID_LENGTH { + return Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INVALID_PERSISTENT_STORAGE); + } + Ok(*array_ref![aaguid, 0, AAGUID_LENGTH]) + } + + /// Sets the AAGUID. + /// + /// If it is already defined, it is overwritten. pub fn set_aaguid(&mut self, aaguid: &[u8; AAGUID_LENGTH]) -> Result<(), Ctap2StatusCode> { - let entry = StoreEntry { - tag: AAGUID, - data: aaguid, - sensitive: false, - }; - match self.store.find_one(&Key::Aaguid) { - None => self.store.insert(entry)?, - Some((index, _)) => self.store.replace(index, entry)?, - } - Ok(()) + Ok(self.store.insert(key::AAGUID, aaguid)?) } + /// Resets the store as for a CTAP reset. + /// + /// In particular persistent entries are not reset. pub fn reset(&mut self, rng: &mut impl Rng256) -> Result<(), Ctap2StatusCode> { - loop { - let index = { - let mut iter = self.store.iter().filter(|(_, entry)| should_reset(entry)); - match iter.next() { - None => break, - Some((index, _)) => index, - } - }; - self.store.delete(index)?; - } - self.init(rng); + self.store.clear(key::NUM_PERSISTENT_KEYS)?; + self.init(rng)?; Ok(()) } } -impl From for Ctap2StatusCode { - fn from(error: StoreError) -> Ctap2StatusCode { +impl From for Ctap2StatusCode { + fn from(error: persistent_store::StoreError) -> Ctap2StatusCode { + use persistent_store::StoreError::*; match error { - StoreError::StoreFull => Ctap2StatusCode::CTAP2_ERR_KEY_STORE_FULL, - StoreError::InvalidTag => unreachable!(), - StoreError::InvalidPrecondition => unreachable!(), + // This error is expected. The store is full. + NoCapacity => Ctap2StatusCode::CTAP2_ERR_KEY_STORE_FULL, + // This error is expected. The flash is out of life. + NoLifetime => Ctap2StatusCode::CTAP2_ERR_KEY_STORE_FULL, + // This error is expected if we don't satisfy the store preconditions. For example we + // try to store a credential which is too long. + InvalidArgument => Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR, + // This error is not expected. The storage has been tempered with. We could erase the + // storage. + InvalidStorage => Ctap2StatusCode::CTAP2_ERR_VENDOR_INVALID_PERSISTENT_STORAGE, + // This error is not expected. The kernel is failing our syscalls. 
+ StorageError => Ctap2StatusCode::CTAP1_ERR_OTHER, } } } -fn should_reset(entry: &StoreEntry<'_>) -> bool { - match entry.tag { - ATTESTATION_PRIVATE_KEY | ATTESTATION_CERTIFICATE | AAGUID => false, - _ => true, +/// Iterator for credentials. +struct IterCredentials<'a> { + /// The store being iterated. + store: &'a persistent_store::Store, + + /// The store iterator. + iter: persistent_store::StoreIter<'a, Storage>, + + /// The iteration result. + /// + /// It starts as success and gets written at most once with an error if something fails. The + /// iteration stops as soon as an error is encountered. + result: &'a mut Result<(), Ctap2StatusCode>, +} + +impl<'a> IterCredentials<'a> { + /// Creates a credential iterator. + fn new( + store: &'a persistent_store::Store, + result: &'a mut Result<(), Ctap2StatusCode>, + ) -> Result, Ctap2StatusCode> { + let iter = store.iter()?; + Ok(IterCredentials { + store, + iter, + result, + }) + } + + /// Marks the iteration as failed if the content is absent. + /// + /// For convenience, the function takes and returns ownership instead of taking a shared + /// reference and returning nothing. This permits to use it in both expressions and statements + /// instead of statements only. + fn unwrap(&mut self, x: Option) -> Option { + if x.is_none() { + *self.result = Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INVALID_PERSISTENT_STORAGE); + } + x } } +impl<'a> Iterator for IterCredentials<'a> { + type Item = (usize, PublicKeyCredentialSource); + + fn next(&mut self) -> Option<(usize, PublicKeyCredentialSource)> { + if self.result.is_err() { + return None; + } + while let Some(next) = self.iter.next() { + let handle = self.unwrap(next.ok())?; + let key = handle.get_key(); + if !key::CREDENTIALS.contains(&key) { + continue; + } + let value = self.unwrap(handle.get_value(&self.store).ok())?; + let credential = self.unwrap(deserialize_credential(&value))?; + return Some((key, credential)); + } + None + } +} + +/// Deserializes a credential from storage representation. fn deserialize_credential(data: &[u8]) -> Option { let cbor = cbor::read(data).ok()?; cbor.try_into().ok() } +/// Serializes a credential to storage representation. fn serialize_credential(credential: PublicKeyCredentialSource) -> Result, Ctap2StatusCode> { let mut data = Vec::new(); if cbor::write(credential.into(), &mut data) { @@ -648,6 +578,7 @@ fn serialize_credential(credential: PublicKeyCredentialSource) -> Result } } +/// TODO: Help from reviewer needed for documentation. #[cfg(feature = "with_ctap2_1")] fn _deserialize_min_pin_length_rp_ids(data: &[u8]) -> Option> { let cbor = cbor::read(data).ok()?; @@ -659,6 +590,7 @@ fn _deserialize_min_pin_length_rp_ids(data: &[u8]) -> Option> { .ok() } +/// TODO: Help from reviewer needed for documentation. #[cfg(feature = "with_ctap2_1")] fn _serialize_min_pin_length_rp_ids(rp_ids: Vec) -> Result, Ctap2StatusCode> { let mut data = Vec::new(); @@ -693,28 +625,6 @@ mod test { } } - #[test] - fn format_overhead() { - // nRF52840 NVMC - const WORD_SIZE: usize = 4; - const PAGE_SIZE: usize = 0x1000; - const NUM_PAGES: usize = 100; - let store = vec![0xff; NUM_PAGES * PAGE_SIZE].into_boxed_slice(); - let options = embedded_flash::BufferOptions { - word_size: WORD_SIZE, - page_size: PAGE_SIZE, - max_word_writes: 2, - max_page_erases: 10000, - strict_write: true, - }; - let storage = Storage::new(store, options); - let store = embedded_flash::Store::new(storage, Config).unwrap(); - // We can replace 3 bytes with minimal overhead. 
- assert_eq!(store.replace_len(false, 0), 2 * WORD_SIZE); - assert_eq!(store.replace_len(false, 3), 3 * WORD_SIZE); - assert_eq!(store.replace_len(false, 4), 3 * WORD_SIZE); - } - #[test] fn test_store() { let mut rng = ThreadRng256 {}; @@ -974,21 +884,21 @@ mod test { let mut persistent_store = PersistentStore::new(&mut rng); // The pin retries is initially at the maximum. - assert_eq!(persistent_store.pin_retries().unwrap(), MAX_PIN_RETRIES); + assert_eq!(persistent_store.pin_retries(), Ok(MAX_PIN_RETRIES)); // Decrementing the pin retries decrements the pin retries. for pin_retries in (0..MAX_PIN_RETRIES).rev() { persistent_store.decr_pin_retries().unwrap(); - assert_eq!(persistent_store.pin_retries().unwrap(), pin_retries); + assert_eq!(persistent_store.pin_retries(), Ok(pin_retries)); } // Decrementing the pin retries after zero does not modify the pin retries. persistent_store.decr_pin_retries().unwrap(); - assert_eq!(persistent_store.pin_retries().unwrap(), 0); + assert_eq!(persistent_store.pin_retries(), Ok(0)); // Resetting the pin retries resets the pin retries. persistent_store.reset_pin_retries().unwrap(); - assert_eq!(persistent_store.pin_retries().unwrap(), MAX_PIN_RETRIES); + assert_eq!(persistent_store.pin_retries(), Ok(MAX_PIN_RETRIES)); } #[test] @@ -1018,7 +928,7 @@ mod test { // The persistent keys stay initialized and preserve their value after a reset. persistent_store.reset(&mut rng).unwrap(); assert_eq!( - persistent_store.attestation_private_key().unwrap().unwrap(), + &persistent_store.attestation_private_key().unwrap().unwrap(), key_material::ATTESTATION_PRIVATE_KEY ); assert_eq!( diff --git a/src/ctap/storage/key.rs b/src/ctap/storage/key.rs new file mode 100644 index 0000000..9796d5a --- /dev/null +++ b/src/ctap/storage/key.rs @@ -0,0 +1,135 @@ +// Copyright 2019-2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/// Number of keys that persist the CTAP reset command. +pub const NUM_PERSISTENT_KEYS: usize = 20; + +/// Defines a key given its name and value or range of values. +macro_rules! make_key { + ($(#[$doc: meta])* $name: ident = $key: literal..$end: literal) => { + $(#[$doc])* pub const $name: core::ops::Range = $key..$end; + }; + ($(#[$doc: meta])* $name: ident = $key: literal) => { + $(#[$doc])* pub const $name: usize = $key; + }; +} + +/// Returns the range of values of a key given its value description. +#[cfg(test)] +macro_rules! make_range { + ($key: literal..$end: literal) => { + $key..$end + }; + ($key: literal) => { + $key..$key + 1 + }; +} + +/// Helper to define keys as a partial partition of a range. +macro_rules! make_partition { + ($range: expr, + $( + $(#[$doc: meta])* + $name: ident = $key: literal $(.. $end: literal)?; + )*) => { + $( + make_key!($(#[$doc])* $name = $key $(.. $end)?); + )* + #[cfg(test)] + const KEY_RANGE: core::ops::Range = $range; + #[cfg(test)] + const ALL_KEYS: &[core::ops::Range] = &[$(make_range!($key $(.. $end)?)),*]; + }; + } + +make_partition! 
{ + // We reserve 0 and 2048+ for possible migration purposes. We add persistent entries starting + // from 1 and going up. We add non-persistent entries starting from 2047 and going down. This + // way, we don't commit to a fixed number of persistent keys. + 1..2048, + + // WARNING: Keys should not be deleted but prefixed with `_` to avoid accidentally reusing them. + + /// The attestation private key. + ATTESTATION_PRIVATE_KEY = 1; + + /// The attestation certificate. + ATTESTATION_CERTIFICATE = 2; + + /// The aaguid. + AAGUID = 3; + + // This is the persistent key limit: + // - When adding a (persistent) key above this message, make sure its value is smaller than + // NUM_PERSISTENT_KEYS. + // - When adding a (non-persistent) key below this message, make sure its value is bigger or + // equal than NUM_PERSISTENT_KEYS. + + /// The credentials. + /// + /// Depending on `MAX_SUPPORTED_RESIDENTIAL_KEYS`, only a prefix of those keys is used. Each + /// board may configure `MAX_SUPPORTED_RESIDENTIAL_KEYS` depending on the storage size. + CREDENTIALS = 1700..2000; + + /// TODO: Help from reviewer needed for documentation. + _MIN_PIN_LENGTH_RP_IDS = 2042; + + /// The minimum PIN length. + #[cfg(feature = "with_ctap2_1")] + MIN_PIN_LENGTH = 2043; + + /// The number of PIN retries. + /// + /// If the entry is absent, the number of PIN retries is `MAX_PIN_RETRIES`. + PIN_RETRIES = 2044; + + /// The PIN hash. + /// + /// If the entry is absent, there is no PIN set. + PIN_HASH = 2045; + + /// The encryption and hmac keys. + /// + /// This entry is always present. It is generated at startup if absent. This is not a persistent + /// key because its value should change after a CTAP reset. + MASTER_KEYS = 2046; + + /// The global signature counter. + /// + /// If the entry is absent, the counter is 0. + GLOBAL_SIGNATURE_COUNTER = 2047; +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn enough_credentials() { + use super::super::MAX_SUPPORTED_RESIDENTIAL_KEYS; + assert!(MAX_SUPPORTED_RESIDENTIAL_KEYS <= CREDENTIALS.end - CREDENTIALS.start); + } + + #[test] + fn keys_are_disjoint() { + // Check that keys are in the range. + for keys in ALL_KEYS { + assert!(KEY_RANGE.start <= keys.start && keys.end <= KEY_RANGE.end); + } + // Check that keys are assigned at most once, essentially partitioning the range. + for key in KEY_RANGE { + assert!(ALL_KEYS.iter().filter(|keys| keys.contains(&key)).count() <= 1); + } + } +} diff --git a/src/embedded_flash/buffer.rs b/src/embedded_flash/buffer.rs deleted file mode 100644 index 0e7171a..0000000 --- a/src/embedded_flash/buffer.rs +++ /dev/null @@ -1,457 +0,0 @@ -// Copyright 2019 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
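Returning to the new src/ctap/storage/key.rs above: the make_partition! invocation boils down to plain constants plus the test-only KEY_RANGE and ALL_KEYS bookkeeping. A rough, illustrative expansion of a few entries, assuming usize key values as used by NUM_PERSISTENT_KEYS:

    // Illustrative expansion of some make_partition! entries defined above.
    /// The attestation private key.
    pub const ATTESTATION_PRIVATE_KEY: usize = 1;
    /// The credentials.
    pub const CREDENTIALS: core::ops::Range<usize> = 1700..2000;
    /// The global signature counter.
    pub const GLOBAL_SIGNATURE_COUNTER: usize = 2047;
    // Test-only bookkeeping generated by the same macro:
    #[cfg(test)]
    const KEY_RANGE: core::ops::Range<usize> = 1..2048;
    #[cfg(test)]
    const ALL_KEYS: &[core::ops::Range<usize>] = &[
        1..2, 2..3, 3..4, 1700..2000, 2042..2043, 2043..2044,
        2044..2045, 2045..2046, 2046..2047, 2047..2048,
    ];

Single-valued entries go through make_key! and become usize constants, ranged entries such as CREDENTIALS become core::ops::Range<usize>, and make_range! turns a single key K into K..K + 1 for the test list that the keys_are_disjoint test iterates over.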
- -use super::{Index, Storage, StorageError, StorageResult}; -use alloc::boxed::Box; -use alloc::vec; - -pub struct BufferStorage { - storage: Box<[u8]>, - options: BufferOptions, - word_writes: Box<[usize]>, - page_erases: Box<[usize]>, - snapshot: Snapshot, -} - -#[derive(Copy, Clone, Debug)] -pub struct BufferOptions { - /// Size of a word in bytes. - pub word_size: usize, - - /// Size of a page in bytes. - pub page_size: usize, - - /// How many times a word can be written between page erasures - pub max_word_writes: usize, - - /// How many times a page can be erased. - pub max_page_erases: usize, - - /// Bits cannot be written from 0 to 1. - pub strict_write: bool, -} - -impl BufferStorage { - /// Creates a fake embedded flash using a buffer. - /// - /// This implementation checks that no words are written more than `max_word_writes` between - /// page erasures and than no pages are erased more than `max_page_erases`. If `strict_write` is - /// true, it also checks that no bits are written from 0 to 1. It also permits to take snapshots - /// of the storage during write and erase operations (although words would still be written or - /// erased completely). - /// - /// # Panics - /// - /// The following preconditions must hold: - /// - `options.word_size` must be a power of two. - /// - `options.page_size` must be a power of two. - /// - `options.page_size` must be word-aligned. - /// - `storage.len()` must be page-aligned. - pub fn new(storage: Box<[u8]>, options: BufferOptions) -> BufferStorage { - assert!(options.word_size.is_power_of_two()); - assert!(options.page_size.is_power_of_two()); - let num_words = storage.len() / options.word_size; - let num_pages = storage.len() / options.page_size; - let buffer = BufferStorage { - storage, - options, - word_writes: vec![0; num_words].into_boxed_slice(), - page_erases: vec![0; num_pages].into_boxed_slice(), - snapshot: Snapshot::Ready, - }; - assert!(buffer.is_word_aligned(buffer.options.page_size)); - assert!(buffer.is_page_aligned(buffer.storage.len())); - buffer - } - - /// Takes a snapshot of the storage after a given amount of word operations. - /// - /// Each time a word is written or erased, the delay is decremented if positive. Otherwise, a - /// snapshot is taken before the operation is executed. - /// - /// # Panics - /// - /// Panics if a snapshot has been armed and not examined. - pub fn arm_snapshot(&mut self, delay: usize) { - self.snapshot.arm(delay); - } - - /// Unarms and returns the snapshot or the delay remaining. - /// - /// # Panics - /// - /// Panics if a snapshot was not armed. - pub fn get_snapshot(&mut self) -> Result, usize> { - self.snapshot.get() - } - - /// Takes a snapshot of the storage. - pub fn take_snapshot(&self) -> Box<[u8]> { - self.storage.clone() - } - - /// Returns the storage. - pub fn get_storage(self) -> Box<[u8]> { - self.storage - } - - fn is_word_aligned(&self, x: usize) -> bool { - x & (self.options.word_size - 1) == 0 - } - - fn is_page_aligned(&self, x: usize) -> bool { - x & (self.options.page_size - 1) == 0 - } - - /// Writes a slice to the storage. - /// - /// The slice `value` is written to `index`. The `erase` boolean specifies whether this is an - /// erase operation or a write operation which matters for the checks and updating the shadow - /// storage. This also takes a snapshot of the storage if a snapshot was armed and the delay has - /// elapsed. - /// - /// The following preconditions should hold: - /// - `index` is word-aligned. - /// - `value.len()` is word-aligned. 
- /// - /// The following checks are performed: - /// - The region of length `value.len()` starting at `index` fits in a storage page. - /// - A word is not written more than `max_word_writes`. - /// - A page is not erased more than `max_page_erases`. - /// - The new word only switches 1s to 0s (only if `strict_write` is set). - fn update_storage(&mut self, index: Index, value: &[u8], erase: bool) -> StorageResult<()> { - debug_assert!(self.is_word_aligned(index.byte) && self.is_word_aligned(value.len())); - let dst = index.range(value.len(), self)?.step_by(self.word_size()); - let src = value.chunks(self.word_size()); - // Check and update page shadow. - if erase { - let page = index.page; - assert!(self.page_erases[page] < self.max_page_erases()); - self.page_erases[page] += 1; - } - for (byte, val) in dst.zip(src) { - let range = byte..byte + self.word_size(); - // The driver doesn't write identical words. - if &self.storage[range.clone()] == val { - continue; - } - // Check and update word shadow. - let word = byte / self.word_size(); - if erase { - self.word_writes[word] = 0; - } else { - assert!(self.word_writes[word] < self.max_word_writes()); - self.word_writes[word] += 1; - } - // Check strict write. - if !erase && self.options.strict_write { - for (byte, &val) in range.clone().zip(val) { - assert_eq!(self.storage[byte] & val, val); - } - } - // Take snapshot if armed and delay expired. - self.snapshot.take(&self.storage); - // Write storage - self.storage[range].copy_from_slice(val); - } - Ok(()) - } -} - -impl Storage for BufferStorage { - fn word_size(&self) -> usize { - self.options.word_size - } - - fn page_size(&self) -> usize { - self.options.page_size - } - - fn num_pages(&self) -> usize { - self.storage.len() / self.options.page_size - } - - fn max_word_writes(&self) -> usize { - self.options.max_word_writes - } - - fn max_page_erases(&self) -> usize { - self.options.max_page_erases - } - - fn read_slice(&self, index: Index, length: usize) -> StorageResult<&[u8]> { - Ok(&self.storage[index.range(length, self)?]) - } - - fn write_slice(&mut self, index: Index, value: &[u8]) -> StorageResult<()> { - if !self.is_word_aligned(index.byte) || !self.is_word_aligned(value.len()) { - return Err(StorageError::NotAligned); - } - self.update_storage(index, value, false) - } - - fn erase_page(&mut self, page: usize) -> StorageResult<()> { - let index = Index { page, byte: 0 }; - let value = vec![0xff; self.page_size()]; - self.update_storage(index, &value, true) - } -} - -// Controls when a snapshot of the storage is taken. -// -// This can be used to simulate power-offs while the device is writing to the storage or erasing a -// page in the storage. -enum Snapshot { - // Mutable word operations have normal behavior. - Ready, - // If the delay is positive, mutable word operations decrement it. If the count is zero, mutable - // word operations take a snapshot of the storage. - Armed { delay: usize }, - // Mutable word operations have normal behavior. 
- Taken { storage: Box<[u8]> }, -} - -impl Snapshot { - fn arm(&mut self, delay: usize) { - match self { - Snapshot::Ready => *self = Snapshot::Armed { delay }, - _ => panic!(), - } - } - - fn get(&mut self) -> Result, usize> { - let mut snapshot = Snapshot::Ready; - core::mem::swap(self, &mut snapshot); - match snapshot { - Snapshot::Armed { delay } => Err(delay), - Snapshot::Taken { storage } => Ok(storage), - _ => panic!(), - } - } - - fn take(&mut self, storage: &[u8]) { - if let Snapshot::Armed { delay } = self { - if *delay == 0 { - let storage = storage.to_vec().into_boxed_slice(); - *self = Snapshot::Taken { storage }; - } else { - *delay -= 1; - } - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - const NUM_PAGES: usize = 2; - const OPTIONS: BufferOptions = BufferOptions { - word_size: 4, - page_size: 16, - max_word_writes: 2, - max_page_erases: 3, - strict_write: true, - }; - // Those words are decreasing bit patterns. Bits are only changed from 1 to 0 and at last one - // bit is changed. - const BLANK_WORD: &[u8] = &[0xff, 0xff, 0xff, 0xff]; - const FIRST_WORD: &[u8] = &[0xee, 0xdd, 0xbb, 0x77]; - const SECOND_WORD: &[u8] = &[0xca, 0xc9, 0xa9, 0x65]; - const THIRD_WORD: &[u8] = &[0x88, 0x88, 0x88, 0x44]; - - fn new_storage() -> Box<[u8]> { - vec![0xff; NUM_PAGES * OPTIONS.page_size].into_boxed_slice() - } - - #[test] - fn words_are_decreasing() { - fn assert_is_decreasing(prev: &[u8], next: &[u8]) { - for (&prev, &next) in prev.iter().zip(next.iter()) { - assert_eq!(prev & next, next); - assert!(prev != next); - } - } - assert_is_decreasing(BLANK_WORD, FIRST_WORD); - assert_is_decreasing(FIRST_WORD, SECOND_WORD); - assert_is_decreasing(SECOND_WORD, THIRD_WORD); - } - - #[test] - fn options_ok() { - let buffer = BufferStorage::new(new_storage(), OPTIONS); - assert_eq!(buffer.word_size(), OPTIONS.word_size); - assert_eq!(buffer.page_size(), OPTIONS.page_size); - assert_eq!(buffer.num_pages(), NUM_PAGES); - assert_eq!(buffer.max_word_writes(), OPTIONS.max_word_writes); - assert_eq!(buffer.max_page_erases(), OPTIONS.max_page_erases); - } - - #[test] - fn read_write_ok() { - let mut buffer = BufferStorage::new(new_storage(), OPTIONS); - let index = Index { page: 0, byte: 0 }; - let next_index = Index { page: 0, byte: 4 }; - assert_eq!(buffer.read_slice(index, 4).unwrap(), BLANK_WORD); - buffer.write_slice(index, FIRST_WORD).unwrap(); - assert_eq!(buffer.read_slice(index, 4).unwrap(), FIRST_WORD); - assert_eq!(buffer.read_slice(next_index, 4).unwrap(), BLANK_WORD); - } - - #[test] - fn erase_ok() { - let mut buffer = BufferStorage::new(new_storage(), OPTIONS); - let index = Index { page: 0, byte: 0 }; - let other_index = Index { page: 1, byte: 0 }; - buffer.write_slice(index, FIRST_WORD).unwrap(); - buffer.write_slice(other_index, FIRST_WORD).unwrap(); - assert_eq!(buffer.read_slice(index, 4).unwrap(), FIRST_WORD); - assert_eq!(buffer.read_slice(other_index, 4).unwrap(), FIRST_WORD); - buffer.erase_page(0).unwrap(); - assert_eq!(buffer.read_slice(index, 4).unwrap(), BLANK_WORD); - assert_eq!(buffer.read_slice(other_index, 4).unwrap(), FIRST_WORD); - } - - #[test] - fn invalid_range() { - let mut buffer = BufferStorage::new(new_storage(), OPTIONS); - let index = Index { page: 0, byte: 12 }; - let half_index = Index { page: 0, byte: 14 }; - let over_index = Index { page: 0, byte: 16 }; - let bad_page = Index { page: 2, byte: 0 }; - - // Reading a word in the storage is ok. - assert!(buffer.read_slice(index, 4).is_ok()); - // Reading a half-word in the storage is ok. 
- assert!(buffer.read_slice(half_index, 2).is_ok()); - // Reading even a single byte outside a page is not ok. - assert!(buffer.read_slice(over_index, 1).is_err()); - // But reading an empty slice just after a page is ok. - assert!(buffer.read_slice(over_index, 0).is_ok()); - // Reading even an empty slice outside the storage is not ok. - assert!(buffer.read_slice(bad_page, 0).is_err()); - - // Writing a word in the storage is ok. - assert!(buffer.write_slice(index, FIRST_WORD).is_ok()); - // Writing an unaligned word is not ok. - assert!(buffer.write_slice(half_index, FIRST_WORD).is_err()); - // Writing a word outside a page is not ok. - assert!(buffer.write_slice(over_index, FIRST_WORD).is_err()); - // But writing an empty slice just after a page is ok. - assert!(buffer.write_slice(over_index, &[]).is_ok()); - // Writing even an empty slice outside the storage is not ok. - assert!(buffer.write_slice(bad_page, &[]).is_err()); - - // Only pages in the storage can be erased. - assert!(buffer.erase_page(0).is_ok()); - assert!(buffer.erase_page(2).is_err()); - } - - #[test] - fn write_twice_ok() { - let mut buffer = BufferStorage::new(new_storage(), OPTIONS); - let index = Index { page: 0, byte: 4 }; - assert!(buffer.write_slice(index, FIRST_WORD).is_ok()); - assert!(buffer.write_slice(index, SECOND_WORD).is_ok()); - } - - #[test] - fn write_twice_and_once_ok() { - let mut buffer = BufferStorage::new(new_storage(), OPTIONS); - let index = Index { page: 0, byte: 0 }; - let next_index = Index { page: 0, byte: 4 }; - assert!(buffer.write_slice(index, FIRST_WORD).is_ok()); - assert!(buffer.write_slice(index, SECOND_WORD).is_ok()); - assert!(buffer.write_slice(next_index, THIRD_WORD).is_ok()); - } - - #[test] - #[should_panic] - fn write_three_times_panics() { - let mut buffer = BufferStorage::new(new_storage(), OPTIONS); - let index = Index { page: 0, byte: 4 }; - assert!(buffer.write_slice(index, FIRST_WORD).is_ok()); - assert!(buffer.write_slice(index, SECOND_WORD).is_ok()); - let _ = buffer.write_slice(index, THIRD_WORD); - } - - #[test] - fn write_twice_then_once_ok() { - let mut buffer = BufferStorage::new(new_storage(), OPTIONS); - let index = Index { page: 0, byte: 0 }; - assert!(buffer.write_slice(index, FIRST_WORD).is_ok()); - assert!(buffer.write_slice(index, SECOND_WORD).is_ok()); - assert!(buffer.erase_page(0).is_ok()); - assert!(buffer.write_slice(index, FIRST_WORD).is_ok()); - } - - #[test] - fn erase_three_times_ok() { - let mut buffer = BufferStorage::new(new_storage(), OPTIONS); - assert!(buffer.erase_page(0).is_ok()); - assert!(buffer.erase_page(0).is_ok()); - assert!(buffer.erase_page(0).is_ok()); - } - - #[test] - fn erase_three_times_and_once_ok() { - let mut buffer = BufferStorage::new(new_storage(), OPTIONS); - assert!(buffer.erase_page(0).is_ok()); - assert!(buffer.erase_page(0).is_ok()); - assert!(buffer.erase_page(0).is_ok()); - assert!(buffer.erase_page(1).is_ok()); - } - - #[test] - #[should_panic] - fn erase_four_times_panics() { - let mut buffer = BufferStorage::new(new_storage(), OPTIONS); - assert!(buffer.erase_page(0).is_ok()); - assert!(buffer.erase_page(0).is_ok()); - assert!(buffer.erase_page(0).is_ok()); - let _ = buffer.erase_page(0).is_ok(); - } - - #[test] - #[should_panic] - fn switch_zero_to_one_panics() { - let mut buffer = BufferStorage::new(new_storage(), OPTIONS); - let index = Index { page: 0, byte: 0 }; - assert!(buffer.write_slice(index, SECOND_WORD).is_ok()); - let _ = buffer.write_slice(index, FIRST_WORD); - } - - #[test] - fn get_storage_ok() { 
- let mut buffer = BufferStorage::new(new_storage(), OPTIONS); - let index = Index { page: 0, byte: 4 }; - buffer.write_slice(index, FIRST_WORD).unwrap(); - let storage = buffer.get_storage(); - assert_eq!(&storage[..4], BLANK_WORD); - assert_eq!(&storage[4..8], FIRST_WORD); - } - - #[test] - fn snapshot_ok() { - let mut buffer = BufferStorage::new(new_storage(), OPTIONS); - let index = Index { page: 0, byte: 0 }; - let value = [FIRST_WORD, SECOND_WORD].concat(); - buffer.arm_snapshot(1); - buffer.write_slice(index, &value).unwrap(); - let storage = buffer.get_snapshot().unwrap(); - assert_eq!(&storage[..8], &[FIRST_WORD, BLANK_WORD].concat()[..]); - let storage = buffer.take_snapshot(); - assert_eq!(&storage[..8], &value[..]); - } -} diff --git a/src/embedded_flash/mod.rs b/src/embedded_flash/mod.rs index 05407c0..b16504b 100644 --- a/src/embedded_flash/mod.rs +++ b/src/embedded_flash/mod.rs @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC +// Copyright 2019-2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,16 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -#[cfg(any(test, feature = "ram_storage"))] -mod buffer; -mod storage; -mod store; #[cfg(not(any(test, feature = "ram_storage")))] mod syscall; -#[cfg(any(test, feature = "ram_storage"))] -pub use self::buffer::{BufferOptions, BufferStorage}; -pub use self::storage::{Index, Storage, StorageError, StorageResult}; -pub use self::store::{Store, StoreConfig, StoreEntry, StoreError, StoreIndex}; #[cfg(not(any(test, feature = "ram_storage")))] pub use self::syscall::SyscallStorage; diff --git a/src/embedded_flash/storage.rs b/src/embedded_flash/storage.rs deleted file mode 100644 index fe87ac4..0000000 --- a/src/embedded_flash/storage.rs +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2019 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#[derive(Copy, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "std", derive(Debug))] -pub struct Index { - pub page: usize, - pub byte: usize, -} - -#[derive(Debug)] -pub enum StorageError { - BadFlash, - NotAligned, - OutOfBounds, - KernelError { code: isize }, -} - -pub type StorageResult = Result; - -/// Abstraction for embedded flash storage. -pub trait Storage { - /// Returns the size of a word in bytes. - fn word_size(&self) -> usize; - - /// Returns the size of a page in bytes. - fn page_size(&self) -> usize; - - /// Returns the number of pages in the storage. - fn num_pages(&self) -> usize; - - /// Returns how many times a word can be written between page erasures. - fn max_word_writes(&self) -> usize; - - /// Returns how many times a page can be erased in the lifetime of the flash. - fn max_page_erases(&self) -> usize; - - /// Reads a slice from the storage. - /// - /// The slice does not need to be word-aligned. - /// - /// # Errors - /// - /// The `index` must designate `length` bytes in the storage. 
- fn read_slice(&self, index: Index, length: usize) -> StorageResult<&[u8]>; - - /// Writes a word-aligned slice to the storage. - /// - /// The written words should not have been written too many times since last page erasure. - /// - /// # Errors - /// - /// The following preconditions must hold: - /// - `index` must be word-aligned. - /// - `value.len()` must be a multiple of the word size. - /// - `index` must designate `value.len()` bytes in the storage. - /// - `value` must be in memory until [read-only allow][tock_1274] is resolved. - /// - /// [tock_1274]: https://github.com/tock/tock/issues/1274. - fn write_slice(&mut self, index: Index, value: &[u8]) -> StorageResult<()>; - - /// Erases a page of the storage. - /// - /// # Errors - /// - /// The `page` must be in the storage. - fn erase_page(&mut self, page: usize) -> StorageResult<()>; -} - -impl Index { - /// Returns whether a slice fits in a storage page. - fn is_valid(self, length: usize, storage: &impl Storage) -> bool { - self.page < storage.num_pages() - && storage - .page_size() - .checked_sub(length) - .map(|limit| self.byte <= limit) - .unwrap_or(false) - } - - /// Returns the range of a valid slice. - /// - /// The range starts at `self` with `length` bytes. - pub fn range( - self, - length: usize, - storage: &impl Storage, - ) -> StorageResult> { - if self.is_valid(length, storage) { - let start = self.page * storage.page_size() + self.byte; - Ok(start..start + length) - } else { - Err(StorageError::OutOfBounds) - } - } -} diff --git a/src/embedded_flash/store/bitfield.rs b/src/embedded_flash/store/bitfield.rs deleted file mode 100644 index 797c78b..0000000 --- a/src/embedded_flash/store/bitfield.rs +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright 2019 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/// Defines a consecutive sequence of bits. -#[derive(Copy, Clone)] -pub struct BitRange { - /// The first bit of the sequence. - pub start: usize, - - /// The length in bits of the sequence. - pub length: usize, -} - -impl BitRange { - /// Returns the first bit following a bit range. - pub fn end(self) -> usize { - self.start + self.length - } -} - -/// Defines a consecutive sequence of bytes. -/// -/// The bits in those bytes are ignored which essentially creates a gap in a sequence of bits. The -/// gap is necessarily at byte boundaries. This is used to ignore the user data in an entry -/// essentially providing a view of the entry information (header and footer). -#[derive(Copy, Clone)] -pub struct ByteGap { - pub start: usize, - pub length: usize, -} - -/// Empty gap. All bits count. -pub const NO_GAP: ByteGap = ByteGap { - start: 0, - length: 0, -}; - -impl ByteGap { - /// Translates a bit to skip the gap. - fn shift(self, bit: usize) -> usize { - if bit < 8 * self.start { - bit - } else { - bit + 8 * self.length - } - } - - /// Returns the slice of `data` corresponding to the gap. 
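The deleted `Index::range` above maps a page/byte pair to an absolute byte range, refusing slices that leave the storage or straddle a page boundary. Below is a minimal standalone sketch of the same arithmetic; `Geometry` and its field names are illustrative only, not part of either store implementation.

```rust
/// Illustrative stand-in for the storage geometry used by the deleted `Storage` trait.
struct Geometry {
    page_size: usize,
    num_pages: usize,
}

impl Geometry {
    /// Maps a (page, byte) index and a length to an absolute byte range,
    /// rejecting slices that cross a page boundary or fall outside the storage.
    fn range(&self, page: usize, byte: usize, length: usize) -> Option<core::ops::Range<usize>> {
        let fits_in_page = self
            .page_size
            .checked_sub(length)
            .map_or(false, |limit| byte <= limit);
        if page < self.num_pages && fits_in_page {
            let start = page * self.page_size + byte;
            Some(start..start + length)
        } else {
            None
        }
    }
}

fn main() {
    let geometry = Geometry {
        page_size: 0x1000,
        num_pages: 20,
    };
    // A 4-byte word at byte 8 of page 2 lives at absolute bytes 0x2008..0x200c.
    assert_eq!(geometry.range(2, 8, 4), Some(0x2008..0x200c));
    // A slice that would cross the end of its page is rejected.
    assert_eq!(geometry.range(2, 0xffe, 4), None);
}
```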
- pub fn slice(self, data: &[u8]) -> &[u8] { - &data[self.start..self.start + self.length] - } -} - -/// Returns whether a bit is set in a sequence of bits. -/// -/// The sequence of bits is little-endian (both for bytes and bits) and defined by the bits that -/// are in `data` but not in `gap`. -pub fn is_zero(bit: usize, data: &[u8], gap: ByteGap) -> bool { - let bit = gap.shift(bit); - debug_assert!(bit < 8 * data.len()); - data[bit / 8] & (1 << (bit % 8)) == 0 -} - -/// Sets a bit to zero in a sequence of bits. -/// -/// The sequence of bits is little-endian (both for bytes and bits) and defined by the bits that -/// are in `data` but not in `gap`. -pub fn set_zero(bit: usize, data: &mut [u8], gap: ByteGap) { - let bit = gap.shift(bit); - debug_assert!(bit < 8 * data.len()); - data[bit / 8] &= !(1 << (bit % 8)); -} - -/// Returns a little-endian value in a sequence of bits. -/// -/// The sequence of bits is little-endian (both for bytes and bits) and defined by the bits that -/// are in `data` but not in `gap`. The range of bits where the value is stored in defined by -/// `range`. The value must fit in a `usize`. -pub fn get_range(range: BitRange, data: &[u8], gap: ByteGap) -> usize { - debug_assert!(range.length <= 8 * core::mem::size_of::()); - let mut result = 0; - for i in 0..range.length { - if !is_zero(range.start + i, data, gap) { - result |= 1 << i; - } - } - result -} - -/// Sets a little-endian value in a sequence of bits. -/// -/// The sequence of bits is little-endian (both for bytes and bits) and defined by the bits that -/// are in `data` but not in `gap`. The range of bits where the value is stored in defined by -/// `range`. The bits set to 1 in `value` must also be set to `1` in the sequence of bits. -pub fn set_range(range: BitRange, data: &mut [u8], gap: ByteGap, value: usize) { - debug_assert!(range.length == 8 * core::mem::size_of::() || value < 1 << range.length); - for i in 0..range.length { - if value & 1 << i == 0 { - set_zero(range.start + i, data, gap); - } - } -} - -/// Tests the `is_zero` and `set_zero` pair of functions. -#[test] -fn zero_ok() { - const GAP: ByteGap = ByteGap { - start: 2, - length: 1, - }; - for i in 0..24 { - assert!(!is_zero(i, &[0xffu8, 0xff, 0x00, 0xff] as &[u8], GAP)); - } - // Tests reading and setting a bit. The result should have all bits set to 1 except for the bit - // to test and the gap. - fn test(bit: usize, result: &[u8]) { - assert!(is_zero(bit, result, GAP)); - let mut data = vec![0xff; result.len()]; - // Set the gap bits to 0. - for i in 0..GAP.length { - data[GAP.start + i] = 0x00; - } - set_zero(bit, &mut data, GAP); - assert_eq!(data, result); - } - test(0, &[0xfe, 0xff, 0x00, 0xff]); - test(1, &[0xfd, 0xff, 0x00, 0xff]); - test(2, &[0xfb, 0xff, 0x00, 0xff]); - test(7, &[0x7f, 0xff, 0x00, 0xff]); - test(8, &[0xff, 0xfe, 0x00, 0xff]); - test(15, &[0xff, 0x7f, 0x00, 0xff]); - test(16, &[0xff, 0xff, 0x00, 0xfe]); - test(17, &[0xff, 0xff, 0x00, 0xfd]); - test(23, &[0xff, 0xff, 0x00, 0x7f]); -} - -/// Tests the `get_range` and `set_range` pair of functions. -#[test] -fn range_ok() { - // Tests reading and setting a range. The result should have all bits set to 1 except for the - // range to test and the gap. 
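The bitfield helpers removed here pack little-endian values into spare bits of an entry, and they only ever clear bits, so a value can be stored in an area that still reads as all ones without erasing the page. Below is a self-contained sketch of that encoding, leaving out the `ByteGap` handling; the function names echo the deleted ones but the code is illustrative.

```rust
// Flash-style update: only clear bits (1 -> 0), never set them back to 1.
fn set_range(data: &mut [u8], start: usize, length: usize, value: usize) {
    for i in 0..length {
        if value & (1 << i) == 0 {
            data[(start + i) / 8] &= !(1 << ((start + i) % 8));
        }
    }
}

// Reads the little-endian value stored in `length` bits starting at bit `start`.
fn get_range(data: &[u8], start: usize, length: usize) -> usize {
    let mut value = 0;
    for i in 0..length {
        if data[(start + i) / 8] & (1 << ((start + i) % 8)) != 0 {
            value |= 1 << i;
        }
    }
    value
}

fn main() {
    // An erased flash word reads as all ones.
    let mut word = [0xff; 4];
    // Store the 9-bit value 0x1a3 starting at bit 5, then read it back.
    set_range(&mut word, 5, 9, 0x1a3);
    assert_eq!(get_range(&word, 5, 9), 0x1a3);
}
```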
- fn test(start: usize, length: usize, value: usize, result: &[u8], gap: ByteGap) { - let range = BitRange { start, length }; - assert_eq!(get_range(range, result, gap), value); - let mut data = vec![0xff; result.len()]; - for i in 0..gap.length { - data[gap.start + i] = 0x00; - } - set_range(range, &mut data, gap, value); - assert_eq!(data, result); - } - test(0, 8, 42, &[42], NO_GAP); - test(3, 12, 0b11_0101, &[0b1010_1111, 0b1000_0001], NO_GAP); - test(0, 16, 0x1234, &[0x34, 0x12], NO_GAP); - test(4, 16, 0x1234, &[0x4f, 0x23, 0xf1], NO_GAP); - let mut gap = ByteGap { - start: 1, - length: 1, - }; - test(3, 12, 0b11_0101, &[0b1010_1111, 0x00, 0b1000_0001], gap); - gap.length = 2; - test(0, 16, 0x1234, &[0x34, 0x00, 0x00, 0x12], gap); - gap.start = 2; - gap.length = 1; - test(4, 16, 0x1234, &[0x4f, 0x23, 0x00, 0xf1], gap); -} diff --git a/src/embedded_flash/store/format.rs b/src/embedded_flash/store/format.rs deleted file mode 100644 index 03787cf..0000000 --- a/src/embedded_flash/store/format.rs +++ /dev/null @@ -1,565 +0,0 @@ -// Copyright 2019 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use super::super::{Index, Storage}; -use super::{bitfield, StoreConfig, StoreEntry, StoreError}; -use alloc::vec; -use alloc::vec::Vec; - -/// Whether a user entry is a replace entry. -pub enum IsReplace { - /// This is a replace entry. - Replace, - - /// This is an insert entry. - Insert, -} - -/// Helpers to parse the store format. -/// -/// See the store module-level documentation for information about the format. -pub struct Format { - pub word_size: usize, - pub page_size: usize, - pub num_pages: usize, - pub max_page_erases: usize, - pub num_tags: usize, - - /// Whether an entry is present. - /// - /// - 0 for entries (user entries or internal entries). - /// - 1 for free space until the end of the page. - present_bit: usize, - - /// Whether an entry is deleted. - /// - /// - 0 for deleted entries. - /// - 1 for alive entries. - deleted_bit: usize, - - /// Whether an entry is internal. - /// - /// - 0 for internal entries. - /// - 1 for user entries. - internal_bit: usize, - - /// Whether a user entry is a replace entry. - /// - /// - 0 for replace entries. - /// - 1 for insert entries. - replace_bit: usize, - - /// Whether a user entry has sensitive data. - /// - /// - 0 for sensitive data. - /// - 1 for non-sensitive data. - /// - /// When a user entry with sensitive data is deleted, the data is overwritten with zeroes. This - /// feature is subject to the same guarantees as all other features of the store, in particular - /// deleting a sensitive entry is atomic. See the store module-level documentation for more - /// information. - sensitive_bit: usize, - - /// The data length of a user entry. - length_range: bitfield::BitRange, - - /// The tag of a user entry. - tag_range: bitfield::BitRange, - - /// The page index of a replace entry. - replace_page_range: bitfield::BitRange, - - /// The byte index of a replace entry. 
- replace_byte_range: bitfield::BitRange, - - /// The index of the page to erase. - /// - /// This is only present for internal entries. - old_page_range: bitfield::BitRange, - - /// The current erase count of the page to erase. - /// - /// This is only present for internal entries. - saved_erase_count_range: bitfield::BitRange, - - /// Whether a page is initialized. - /// - /// - 0 for initialized pages. - /// - 1 for uninitialized pages. - initialized_bit: usize, - - /// The erase count of a page. - erase_count_range: bitfield::BitRange, - - /// Whether a page is being compacted. - /// - /// - 0 for pages being compacted. - /// - 1 otherwise. - compacting_bit: usize, - - /// The page index to which a page is being compacted. - new_page_range: bitfield::BitRange, -} - -impl Format { - /// Returns a helper to parse the store format for a given storage and config. - /// - /// # Errors - /// - /// Returns `None` if any of the following conditions does not hold: - /// - The word size must be a power of two. - /// - The page size must be a power of two. - /// - There should be at least 2 pages in the storage. - /// - It should be possible to write a word at least twice. - /// - It should be possible to erase a page at least once. - /// - There should be at least 1 tag. - pub fn new(storage: &S, config: &C) -> Option { - let word_size = storage.word_size(); - let page_size = storage.page_size(); - let num_pages = storage.num_pages(); - let max_word_writes = storage.max_word_writes(); - let max_page_erases = storage.max_page_erases(); - let num_tags = config.num_tags(); - if !(word_size.is_power_of_two() - && page_size.is_power_of_two() - && num_pages > 1 - && max_word_writes >= 2 - && max_page_erases > 0 - && num_tags > 0) - { - return None; - } - // Compute how many bits we need to store the fields. - let page_bits = num_bits(num_pages); - let byte_bits = num_bits(page_size); - let tag_bits = num_bits(num_tags); - let erase_bits = num_bits(max_page_erases + 1); - // Compute the bit position of the fields. - let present_bit = 0; - let deleted_bit = present_bit + 1; - let internal_bit = deleted_bit + 1; - let replace_bit = internal_bit + 1; - let sensitive_bit = replace_bit + 1; - let length_range = bitfield::BitRange { - start: sensitive_bit + 1, - length: byte_bits, - }; - let tag_range = bitfield::BitRange { - start: length_range.end(), - length: tag_bits, - }; - let replace_page_range = bitfield::BitRange { - start: tag_range.end(), - length: page_bits, - }; - let replace_byte_range = bitfield::BitRange { - start: replace_page_range.end(), - length: byte_bits, - }; - let old_page_range = bitfield::BitRange { - start: internal_bit + 1, - length: page_bits, - }; - let saved_erase_count_range = bitfield::BitRange { - start: old_page_range.end(), - length: erase_bits, - }; - let initialized_bit = 0; - let erase_count_range = bitfield::BitRange { - start: initialized_bit + 1, - length: erase_bits, - }; - let compacting_bit = erase_count_range.end(); - let new_page_range = bitfield::BitRange { - start: compacting_bit + 1, - length: page_bits, - }; - let format = Format { - word_size, - page_size, - num_pages, - max_page_erases, - num_tags, - present_bit, - deleted_bit, - internal_bit, - replace_bit, - sensitive_bit, - length_range, - tag_range, - replace_page_range, - replace_byte_range, - old_page_range, - saved_erase_count_range, - initialized_bit, - erase_count_range, - compacting_bit, - new_page_range, - }; - // Make sure all the following conditions hold: - // - The page header is one word. 
- // - The internal entry is one word. - // - The entry header fits in one word (which is equivalent to the entry header size being - // exactly one word for sensitive entries). - if format.page_header_size() != word_size - || format.internal_entry_size() != word_size - || format.header_size(true) != word_size - { - return None; - } - Some(format) - } - - /// Ensures a user entry is valid. - pub fn validate_entry(&self, entry: StoreEntry) -> Result<(), StoreError> { - if entry.tag >= self.num_tags { - return Err(StoreError::InvalidTag); - } - if entry.data.len() >= self.page_size { - return Err(StoreError::StoreFull); - } - Ok(()) - } - - /// Returns the entry header length in bytes. - /// - /// This is the smallest number of bytes necessary to store all fields of the entry info up to - /// and including `length`. For sensitive entries, the result is word-aligned. - pub fn header_size(&self, sensitive: bool) -> usize { - let mut size = self.bits_to_bytes(self.length_range.end()); - if sensitive { - // We need to align to the next word boundary so that wiping the user data will not - // count as a write to the header. - size = self.align_word(size); - } - size - } - - /// Returns the entry header length in bytes. - /// - /// This is a convenience function for `header_size` above. - fn header_offset(&self, entry: &[u8]) -> usize { - self.header_size(self.is_sensitive(entry)) - } - - /// Returns the entry info length in bytes. - /// - /// This is the number of bytes necessary to store all fields of the entry info. This also - /// includes the internal padding to protect the `committed` bit from the `deleted` bit and to - /// protect the entry info from the user data for sensitive entries. - fn info_size(&self, is_replace: IsReplace, sensitive: bool) -> usize { - let suffix_bits = 2; // committed + complete - let info_bits = match is_replace { - IsReplace::Replace => self.replace_byte_range.end() + suffix_bits, - IsReplace::Insert => self.tag_range.end() + suffix_bits, - }; - let mut info_size = self.bits_to_bytes(info_bits); - // If the suffix bits would end up in the header, we need to add one byte for them. - let header_size = self.header_size(sensitive); - if info_size <= header_size { - info_size = header_size + 1; - } - // If the entry is sensitive, we need to align to the next word boundary. - if sensitive { - info_size = self.align_word(info_size); - } - info_size - } - - /// Returns the length in bytes of an entry. - /// - /// This depends on the length of the user data and whether the entry replaces an old entry or - /// is an insertion. This also includes the internal padding to protect the `committed` bit from - /// the `deleted` bit. - pub fn entry_size(&self, is_replace: IsReplace, sensitive: bool, length: usize) -> usize { - let mut entry_size = length + self.info_size(is_replace, sensitive); - let word_size = self.word_size; - entry_size = self.align_word(entry_size); - // The entry must be at least 2 words such that the `committed` and `deleted` bits are on - // different words. - if entry_size == word_size { - entry_size += word_size; - } - entry_size - } - - /// Returns the length in bytes of an internal entry. 
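`entry_size` above adds the entry info to the user data, rounds the total up to a word boundary, and widens one-word entries to two words so that the `committed` and `deleted` bits sit on different words. Below is a small numeric sketch of that rounding, assuming a 4-byte word and a made-up 3-byte info size.

```rust
// Mirrors the rounding helper defined further below in the deleted format code.
fn align_word(value: usize, word_size: usize) -> usize {
    (value + word_size - 1) / word_size * word_size
}

fn main() {
    let word_size = 4;
    // An entry with 5 bytes of data and, say, 3 bytes of entry info needs
    // 8 bytes once rounded up to the next word boundary.
    let entry_size = align_word(5 + 3, word_size);
    assert_eq!(entry_size, 8);
    // A tiny entry still occupies two words so that the `committed` and
    // `deleted` bits end up on different words.
    let mut small = align_word(1 + 2, word_size);
    if small == word_size {
        small += word_size;
    }
    assert_eq!(small, 8);
}
```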
- pub fn internal_entry_size(&self) -> usize { - let length = self.bits_to_bytes(self.saved_erase_count_range.end()); - self.align_word(length) - } - - pub fn is_present(&self, header: &[u8]) -> bool { - bitfield::is_zero(self.present_bit, header, bitfield::NO_GAP) - } - - pub fn set_present(&self, header: &mut [u8]) { - bitfield::set_zero(self.present_bit, header, bitfield::NO_GAP) - } - - pub fn is_deleted(&self, header: &[u8]) -> bool { - bitfield::is_zero(self.deleted_bit, header, bitfield::NO_GAP) - } - - /// Returns whether an entry is present and not deleted. - pub fn is_alive(&self, header: &[u8]) -> bool { - self.is_present(header) && !self.is_deleted(header) - } - - pub fn set_deleted(&self, header: &mut [u8]) { - bitfield::set_zero(self.deleted_bit, header, bitfield::NO_GAP) - } - - pub fn is_internal(&self, header: &[u8]) -> bool { - bitfield::is_zero(self.internal_bit, header, bitfield::NO_GAP) - } - - pub fn set_internal(&self, header: &mut [u8]) { - bitfield::set_zero(self.internal_bit, header, bitfield::NO_GAP) - } - - pub fn is_replace(&self, header: &[u8]) -> IsReplace { - if bitfield::is_zero(self.replace_bit, header, bitfield::NO_GAP) { - IsReplace::Replace - } else { - IsReplace::Insert - } - } - - fn set_replace(&self, header: &mut [u8]) { - bitfield::set_zero(self.replace_bit, header, bitfield::NO_GAP) - } - - pub fn is_sensitive(&self, header: &[u8]) -> bool { - bitfield::is_zero(self.sensitive_bit, header, bitfield::NO_GAP) - } - - pub fn set_sensitive(&self, header: &mut [u8]) { - bitfield::set_zero(self.sensitive_bit, header, bitfield::NO_GAP) - } - - pub fn get_length(&self, header: &[u8]) -> usize { - bitfield::get_range(self.length_range, header, bitfield::NO_GAP) - } - - fn set_length(&self, header: &mut [u8], length: usize) { - bitfield::set_range(self.length_range, header, bitfield::NO_GAP, length) - } - - pub fn get_data<'a>(&self, entry: &'a [u8]) -> &'a [u8] { - &entry[self.header_offset(entry)..][..self.get_length(entry)] - } - - /// Returns the span of user data in an entry. - /// - /// The complement of this gap in the entry is exactly the entry info. The header is before the - /// gap and the footer is after the gap. - pub fn entry_gap(&self, entry: &[u8]) -> bitfield::ByteGap { - let start = self.header_offset(entry); - let mut length = self.get_length(entry); - if self.is_sensitive(entry) { - length = self.align_word(length); - } - bitfield::ByteGap { start, length } - } - - pub fn get_tag(&self, entry: &[u8]) -> usize { - bitfield::get_range(self.tag_range, entry, self.entry_gap(entry)) - } - - fn set_tag(&self, entry: &mut [u8], tag: usize) { - bitfield::set_range(self.tag_range, entry, self.entry_gap(entry), tag) - } - - pub fn get_replace_index(&self, entry: &[u8]) -> Index { - let gap = self.entry_gap(entry); - let page = bitfield::get_range(self.replace_page_range, entry, gap); - let byte = bitfield::get_range(self.replace_byte_range, entry, gap); - Index { page, byte } - } - - fn set_replace_page(&self, entry: &mut [u8], page: usize) { - bitfield::set_range(self.replace_page_range, entry, self.entry_gap(entry), page) - } - - fn set_replace_byte(&self, entry: &mut [u8], byte: usize) { - bitfield::set_range(self.replace_byte_range, entry, self.entry_gap(entry), byte) - } - - /// Returns the bit position of the `committed` bit. - /// - /// This cannot be precomputed like other fields since it depends on the length of the entry. 
- fn committed_bit(&self, entry: &[u8]) -> usize { - 8 * entry.len() - 2 - } - - /// Returns the bit position of the `complete` bit. - /// - /// This cannot be precomputed like other fields since it depends on the length of the entry. - fn complete_bit(&self, entry: &[u8]) -> usize { - 8 * entry.len() - 1 - } - - pub fn is_committed(&self, entry: &[u8]) -> bool { - bitfield::is_zero(self.committed_bit(entry), entry, bitfield::NO_GAP) - } - - pub fn set_committed(&self, entry: &mut [u8]) { - bitfield::set_zero(self.committed_bit(entry), entry, bitfield::NO_GAP) - } - - pub fn is_complete(&self, entry: &[u8]) -> bool { - bitfield::is_zero(self.complete_bit(entry), entry, bitfield::NO_GAP) - } - - fn set_complete(&self, entry: &mut [u8]) { - bitfield::set_zero(self.complete_bit(entry), entry, bitfield::NO_GAP) - } - - pub fn get_old_page(&self, header: &[u8]) -> usize { - bitfield::get_range(self.old_page_range, header, bitfield::NO_GAP) - } - - pub fn set_old_page(&self, header: &mut [u8], old_page: usize) { - bitfield::set_range(self.old_page_range, header, bitfield::NO_GAP, old_page) - } - - pub fn get_saved_erase_count(&self, header: &[u8]) -> usize { - bitfield::get_range(self.saved_erase_count_range, header, bitfield::NO_GAP) - } - - pub fn set_saved_erase_count(&self, header: &mut [u8], erase_count: usize) { - bitfield::set_range( - self.saved_erase_count_range, - header, - bitfield::NO_GAP, - erase_count, - ) - } - - /// Builds an entry for replace or insert operations. - pub fn build_entry(&self, replace: Option, user_entry: StoreEntry) -> Vec { - let StoreEntry { - tag, - data, - sensitive, - } = user_entry; - let is_replace = match replace { - None => IsReplace::Insert, - Some(_) => IsReplace::Replace, - }; - let entry_len = self.entry_size(is_replace, sensitive, data.len()); - let mut entry = Vec::with_capacity(entry_len); - // Build the header. - entry.resize(self.header_size(sensitive), 0xff); - self.set_present(&mut entry[..]); - if sensitive { - self.set_sensitive(&mut entry[..]); - } - self.set_length(&mut entry[..], data.len()); - // Add the data. - entry.extend_from_slice(data); - // Build the footer. - entry.resize(entry_len, 0xff); - self.set_tag(&mut entry[..], tag); - self.set_complete(&mut entry[..]); - match replace { - None => self.set_committed(&mut entry[..]), - Some(Index { page, byte }) => { - self.set_replace(&mut entry[..]); - self.set_replace_page(&mut entry[..], page); - self.set_replace_byte(&mut entry[..], byte); - } - } - entry - } - - /// Builds an entry for replace or insert operations. - pub fn build_erase_entry(&self, old_page: usize, saved_erase_count: usize) -> Vec { - let mut entry = vec![0xff; self.internal_entry_size()]; - self.set_present(&mut entry[..]); - self.set_internal(&mut entry[..]); - self.set_old_page(&mut entry[..], old_page); - self.set_saved_erase_count(&mut entry[..], saved_erase_count); - entry - } - - /// Returns the length in bytes of a page header entry. - /// - /// This includes the word padding. 
- pub fn page_header_size(&self) -> usize { - self.align_word(self.bits_to_bytes(self.erase_count_range.end())) - } - - pub fn is_initialized(&self, header: &[u8]) -> bool { - bitfield::is_zero(self.initialized_bit, header, bitfield::NO_GAP) - } - - pub fn set_initialized(&self, header: &mut [u8]) { - bitfield::set_zero(self.initialized_bit, header, bitfield::NO_GAP) - } - - pub fn get_erase_count(&self, header: &[u8]) -> usize { - bitfield::get_range(self.erase_count_range, header, bitfield::NO_GAP) - } - - pub fn set_erase_count(&self, header: &mut [u8], count: usize) { - bitfield::set_range(self.erase_count_range, header, bitfield::NO_GAP, count) - } - - pub fn is_compacting(&self, header: &[u8]) -> bool { - bitfield::is_zero(self.compacting_bit, header, bitfield::NO_GAP) - } - - pub fn set_compacting(&self, header: &mut [u8]) { - bitfield::set_zero(self.compacting_bit, header, bitfield::NO_GAP) - } - - pub fn get_new_page(&self, header: &[u8]) -> usize { - bitfield::get_range(self.new_page_range, header, bitfield::NO_GAP) - } - - pub fn set_new_page(&self, header: &mut [u8], new_page: usize) { - bitfield::set_range(self.new_page_range, header, bitfield::NO_GAP, new_page) - } - - /// Returns the smallest word boundary greater or equal to a value. - fn align_word(&self, value: usize) -> usize { - let word_size = self.word_size; - (value + word_size - 1) / word_size * word_size - } - - /// Returns the minimum number of bytes to represent a given number of bits. - fn bits_to_bytes(&self, bits: usize) -> usize { - (bits + 7) / 8 - } -} - -/// Returns the number of bits necessary to write numbers smaller than `x`. -fn num_bits(x: usize) -> usize { - x.next_power_of_two().trailing_zeros() as usize -} - -#[test] -fn num_bits_ok() { - assert_eq!(num_bits(0), 0); - assert_eq!(num_bits(1), 0); - assert_eq!(num_bits(2), 1); - assert_eq!(num_bits(3), 2); - assert_eq!(num_bits(4), 2); - assert_eq!(num_bits(5), 3); - assert_eq!(num_bits(8), 3); - assert_eq!(num_bits(9), 4); - assert_eq!(num_bits(16), 4); -} diff --git a/src/embedded_flash/store/mod.rs b/src/embedded_flash/store/mod.rs deleted file mode 100644 index 170fd87..0000000 --- a/src/embedded_flash/store/mod.rs +++ /dev/null @@ -1,1182 +0,0 @@ -// Copyright 2019 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Provides a multi-purpose data-structure. -//! -//! # Description -//! -//! The `Store` data-structure permits to iterate, find, insert, delete, and replace entries in a -//! multi-set. The mutable operations (insert, delete, and replace) are atomic, in the sense that if -//! power is lost during the operation, then the operation might either succeed or fail but the -//! store remains in a coherent state. The data-structure is flash-efficient, in the sense that it -//! tries to minimize the number of times a page is erased. -//! -//! An _entry_ is made of a _tag_, which is a number, and a _data_, which is a slice of bytes. The -//! tag is stored efficiently by using unassigned bits of the entry header and footer. 
For example, -//! it can be used to decide how to deserialize the data. It is not necessary to use tags since a -//! prefix of the data could be used to decide how to deserialize the rest. -//! -//! Entries can also be associated to a set of _keys_. The find operation permits to retrieve all -//! entries associated to a given key. The same key can be associated to multiple entries and the -//! same entry can be associated to multiple keys. -//! -//! # Storage -//! -//! The data-structure is parametric over its storage which must implement the `Storage` trait. -//! There are currently 2 implementations of this trait: -//! - `SyscallStorage` using the `embedded_flash` syscall API for production builds. -//! - `BufferStorage` using a heap-allocated buffer for testing. -//! -//! # Configuration -//! -//! The data-structure can be configured with the `StoreConfig` trait. By implementing this trait, -//! the number of possible tags and the association between keys and entries are defined. -//! -//! # Properties -//! -//! The data-structure provides the following properties: -//! - When an operation returns success, then the represented multi-set is updated accordingly. For -//! example, an inserted entry can be found without alteration until replaced or deleted. -//! - When an operation returns an error, the resulting multi-set state is described in the error -//! documentation. -//! - When power is lost before an operation returns, the operation will either succeed or be -//! rolled-back on the next initialization. So the multi-set would be either left unchanged or -//! updated accordingly. -//! -//! Those properties rely on the following assumptions: -//! - Writing a word to flash is atomic. When power is lost, the word is either fully written or not -//! written at all. -//! - Reading a word from flash is deterministic. When power is lost while writing or erasing a word -//! (erasing a page containing that word), reading that word repeatedly returns the same result -//! (until it is written or its page is erased). -//! - To decide whether a page has been erased, it is enough to test if all its bits are equal to 1. -//! -//! The properties may still hold outside those assumptions but with weaker probabilities as the -//! usage diverges from the assumptions. -//! -//! # Implementation -//! -//! The store is a page-aligned sequence of bits. It matches the following grammar: -//! -//! ```text -//! Store := Page* -//! Page := PageHeader (Entry | InternalEntry)* Padding(page) -//! PageHeader := // must fit in one word -//! initialized:1 -//! erase_count:erase_bits -//! compacting:1 -//! new_page:page_bits -//! Padding(word) -//! Entry := Header Data Footer -//! // Let X be the byte (word-aligned for sensitive queries) following `length` in `Info`. -//! Header := Info[..X] // must fit in one word -//! Footer := Info[X..] // must fit in one word -//! Info := -//! present=0 -//! deleted:1 -//! internal=1 -//! replace:1 -//! sensitive:1 -//! length:byte_bits -//! tag:tag_bits -//! [ // present if `replace` is 0 -//! replace_page:page_bits -//! replace_byte:byte_bits -//! ] -//! [Padding(bit)] // until `complete` is the last bit of a different word than `present` -//! committed:1 -//! complete=0 -//! InternalEntry := -//! present=0 -//! deleted:1 -//! internal=0 -//! old_page:page_bits -//! saved_erase_count:erase_bits -//! Padding(word) -//! Padding(X) := 1* until X-aligned -//! ``` -//! -//! For bit flags, a value of 0 means true and a value of 1 means false. So when erased, bits are -//! false. 
They can be set to true by writing 0. -//! -//! The `Entry` rule is for user entries and the `InternalEntry` rule is for internal entries of the -//! store. Currently, there is only one kind of internal entry: an entry to erase the page being -//! compacted. -//! -//! The `Header` and `Footer` rules are computed from the `Info` rule. An entry could simply be the -//! concatenation of internal metadata and the user data. However, to optimize the size in flash, we -//! splice the user data in the middle of the metadata. The reason is that we can only write twice -//! the same word and for replace entries we need to write the deleted bit and the committed bit -//! independently. Also, this is important for the complete bit to be the last written bit (since -//! slices are written to flash from low to high addresses). Here is the representation of a -//! specific replace entry for a specific configuration: -//! -//! ```text -//! page_bits=6 -//! byte_bits=9 -//! tag_bits=5 -//! -//! byte.bit name -//! 0.0 present -//! 0.1 deleted -//! 0.2 internal -//! 0.3 replace -//! 0.4 sensitive -//! 0.5 length (9 bits) -//! 1.6 tag (least significant 2 bits out of 5) -//! (the header ends at the first byte boundary after `length`) -//! 2.0 (2 bytes in this example) -//! (the footer starts immediately after the user data) -//! 4.0 tag (most significant 3 bits out of 5) -//! 4.3 replace_page (6 bits) -//! 5.1 replace_byte (9 bits) -//! 6.2 padding (make sure the 2 properties below hold) -//! 7.6 committed -//! 7.7 complete (on a different word than `present`) -//! 8.0 (word-aligned) -//! ``` -//! -//! The store should always contain at least one blank page, so that it is always possible to -//! compact. - -// TODO(cretin): We don't need inner padding for insert entries. The store format can be: -// InsertEntry | ReplaceEntry | InternalEntry (maybe rename to EraseEntry) -// InsertEntry padding is until `complete` is the last bit of a word. -// ReplaceEntry padding is until `complete` is the last bit of a different word than `present`. -// TODO(cretin): Add checksum (may play the same role as the completed bit) and recovery strategy? -// TODO(cretin): Add corruption (deterministic but undetermined reads) to fuzzing. -// TODO(cretin): Add more complex transactions? (this does not seem necessary yet) -// TODO(cretin): Add possibility to shred an entry (force compact page after delete)? - -mod bitfield; -mod format; - -use self::format::{Format, IsReplace}; -use super::{Index, Storage}; -#[cfg(any(test, feature = "ram_storage"))] -use crate::embedded_flash::BufferStorage; -#[cfg(any(test, feature = "ram_storage"))] -use alloc::boxed::Box; -use alloc::collections::BTreeMap; -use alloc::vec; -use alloc::vec::Vec; - -/// Configures a store. -pub trait StoreConfig { - /// How entries are keyed. - /// - /// To disable keys, this may be defined to `()` or even better a custom empty enum. - type Key: Ord; - - /// Number of entry tags. - /// - /// All tags must be smaller than this value. - /// - /// To disable tags, this function should return `1`. The only valid tag would then be `0`. - fn num_tags(&self) -> usize; - - /// Specifies the set of keys of an entry. - /// - /// If keys are not used, this function can immediately return. Otherwise, it should call - /// `associate_key` for each key that should be associated to `entry`. - fn keys(&self, entry: StoreEntry, associate_key: impl FnMut(Self::Key)); -} - -/// Errors returned by store operations. 
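`StoreConfig` is the hook a client implements to define its key space and tag count. The sketch below keeps only the shape of that trait so it stays self-contained: `Entry`, `Config`, and `FirstByteConfig` are simplified stand-ins for the deleted types, keying entries by their first data byte the same way the unit tests further below do.

```rust
/// Simplified stand-in for the deleted `StoreEntry`.
#[derive(Copy, Clone)]
struct Entry<'a> {
    tag: usize,
    data: &'a [u8],
}

/// Simplified stand-in for the deleted `StoreConfig` trait.
trait Config {
    type Key: Ord;
    fn num_tags(&self) -> usize;
    fn keys(&self, entry: Entry, associate_key: impl FnMut(Self::Key));
}

/// Keys each entry by its first data byte, if any.
struct FirstByteConfig;

impl Config for FirstByteConfig {
    type Key = u8;

    fn num_tags(&self) -> usize {
        1 // a single tag, so the only valid tag is 0
    }

    fn keys(&self, entry: Entry, mut associate_key: impl FnMut(u8)) {
        if let Some(&first) = entry.data.first() {
            associate_key(first);
        }
    }
}

fn main() {
    let config = FirstByteConfig;
    let entry = Entry { tag: 0, data: &[7, 1, 2] };
    let mut keys = Vec::new();
    config.keys(entry, |k| keys.push(k));
    assert_eq!(keys, vec![7]);
    assert!(entry.tag < config.num_tags());
}
```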
-#[derive(Debug, PartialEq, Eq)] -pub enum StoreError { - /// The operation could not proceed because the store is full. - StoreFull, - - /// The operation could not proceed because the provided tag is invalid. - InvalidTag, - - /// The operation could not proceed because the preconditions do not hold. - InvalidPrecondition, -} - -/// The position of an entry in the store. -#[cfg_attr(feature = "std", derive(Debug))] -#[derive(Copy, Clone)] -pub struct StoreIndex { - /// The index of this entry in the storage. - index: Index, - - /// The generation at which this index is valid. - /// - /// See the documentation of the field with the same name in the `Store` struct. - generation: usize, -} - -/// A user entry. -#[cfg_attr(feature = "std", derive(Debug, PartialEq, Eq))] -#[derive(Copy, Clone)] -pub struct StoreEntry<'a> { - /// The tag of the entry. - /// - /// Must be smaller than the configured number of tags. - pub tag: usize, - - /// The data of the entry. - pub data: &'a [u8], - - /// Whether the data is sensitive. - /// - /// Sensitive data is overwritten with zeroes when the entry is deleted. - pub sensitive: bool, -} - -/// Implements a configurable multi-set on top of any storage. -pub struct Store { - storage: S, - config: C, - format: Format, - - /// The index of the blank page reserved for compaction. - blank_page: usize, - - /// Counts the number of compactions since the store creation. - /// - /// A `StoreIndex` is valid only if they originate from the same generation. This is checked by - /// operations that take a `StoreIndex` as argument. - generation: usize, -} - -impl Store { - /// Creates a new store. - /// - /// Initializes the storage if it is fresh (filled with `0xff`). Rolls-back or completes an - /// operation if the store was powered off in the middle of that operation. In other words, - /// operations are atomic. - /// - /// # Errors - /// - /// Returns `None` if `storage` and/or `config` are not supported. - pub fn new(storage: S, config: C) -> Option> { - let format = Format::new(&storage, &config)?; - let blank_page = format.num_pages; - let mut store = Store { - storage, - config, - format, - blank_page, - generation: 0, - }; - // Finish any ongoing page compaction. - store.recover_compact_page(); - // Finish or roll-back any other entry-level operations. - store.recover_entry_operations(); - // Initialize uninitialized pages. - store.initialize_storage(); - Some(store) - } - - /// Iterates over all entries in the store. - pub fn iter(&self) -> impl Iterator { - Iter::new(self).filter_map(move |(index, entry)| { - if self.format.is_alive(entry) { - Some(( - StoreIndex { - index, - generation: self.generation, - }, - StoreEntry { - tag: self.format.get_tag(entry), - data: self.format.get_data(entry), - sensitive: self.format.is_sensitive(entry), - }, - )) - } else { - None - } - }) - } - - /// Iterates over all entries matching a key in the store. - pub fn find_all<'a>( - &'a self, - key: &'a C::Key, - ) -> impl Iterator + 'a { - self.iter().filter(move |&(_, entry)| { - let mut has_match = false; - self.config.keys(entry, |k| has_match |= key == &k); - has_match - }) - } - - /// Returns the first entry matching a key in the store. - /// - /// This is a convenience function for when at most one entry should match the key. - /// - /// # Panics - /// - /// In debug mode, panics if more than one entry matches the key. 
- pub fn find_one<'a>(&'a self, key: &'a C::Key) -> Option<(StoreIndex, StoreEntry<'a>)> { - let mut iter = self.find_all(key); - let first = iter.next()?; - let has_only_one_element = iter.next().is_none(); - debug_assert!(has_only_one_element); - Some(first) - } - - /// Deletes an entry from the store. - pub fn delete(&mut self, index: StoreIndex) -> Result<(), StoreError> { - if self.generation != index.generation { - return Err(StoreError::InvalidPrecondition); - } - self.delete_index(index.index); - Ok(()) - } - - /// Replaces an entry with another with the same tag in the store. - /// - /// This operation (like others) is atomic. If it returns successfully, then the old entry is - /// deleted and the new is inserted. If it fails, the old entry is not deleted and the new entry - /// is not inserted. If power is lost during the operation, during next startup, the operation - /// is either rolled-back (like in case of failure) or completed (like in case of success). - /// - /// # Errors - /// - /// Returns: - /// - `StoreFull` if the new entry does not fit in the store. - /// - `InvalidTag` if the tag of the new entry is not smaller than the configured number of - /// tags. - pub fn replace(&mut self, old: StoreIndex, new: StoreEntry) -> Result<(), StoreError> { - if self.generation != old.generation { - return Err(StoreError::InvalidPrecondition); - } - self.format.validate_entry(new)?; - let mut old_index = old.index; - // Find a slot. - let entry_len = self.replace_len(new.sensitive, new.data.len()); - let index = self.find_slot_for_write(entry_len, Some(&mut old_index))?; - // Build a new entry replacing the old one. - let entry = self.format.build_entry(Some(old_index), new); - debug_assert_eq!(entry.len(), entry_len); - // Write the new entry. - self.write_entry(index, &entry); - // Commit the new entry, which both deletes the old entry and commits the new one. - self.commit_index(index); - Ok(()) - } - - /// Inserts an entry in the store. - /// - /// # Errors - /// - /// Returns: - /// - `StoreFull` if the new entry does not fit in the store. - /// - `InvalidTag` if the tag of the new entry is not smaller than the configured number of - /// tags. - pub fn insert(&mut self, entry: StoreEntry) -> Result<(), StoreError> { - self.format.validate_entry(entry)?; - // Build entry. - let entry = self.format.build_entry(None, entry); - // Find a slot. - let index = self.find_slot_for_write(entry.len(), None)?; - // Write entry. - self.write_entry(index, &entry); - Ok(()) - } - - /// Returns the byte cost of a replace operation. - /// - /// Computes the length in bytes that would be used in the storage if a replace operation is - /// executed provided the data of the new entry has `length` bytes and whether this data is - /// sensitive. - pub fn replace_len(&self, sensitive: bool, length: usize) -> usize { - self.format - .entry_size(IsReplace::Replace, sensitive, length) - } - - /// Returns the byte cost of an insert operation. - /// - /// Computes the length in bytes that would be used in the storage if an insert operation is - /// executed provided the data of the inserted entry has `length` bytes and whether this data is - /// sensitive. - #[allow(dead_code)] - pub fn insert_len(&self, sensitive: bool, length: usize) -> usize { - self.format.entry_size(IsReplace::Insert, sensitive, length) - } - - /// Returns the erase count of all pages. - /// - /// The value at index `page` of the result is the number of times page `page` was erased. 
This - /// number is an underestimate in case power was lost when this page was erased. - #[allow(dead_code)] - pub fn compaction_info(&self) -> Vec { - let mut info = Vec::with_capacity(self.format.num_pages); - for page in 0..self.format.num_pages { - let (page_header, _) = self.read_page_header(page); - let erase_count = self.format.get_erase_count(page_header); - info.push(erase_count); - } - info - } - - /// Completes any ongoing page compaction. - fn recover_compact_page(&mut self) { - for page in 0..self.format.num_pages { - let (page_header, _) = self.read_page_header(page); - if self.format.is_compacting(page_header) { - let new_page = self.format.get_new_page(page_header); - self.compact_page(page, new_page); - } - } - } - - /// Rolls-back or completes any ongoing operation. - fn recover_entry_operations(&mut self) { - for page in 0..self.format.num_pages { - let (page_header, mut index) = self.read_page_header(page); - if !self.format.is_initialized(page_header) { - // Skip uninitialized pages. - continue; - } - while index.byte < self.format.page_size { - let entry_index = index; - let entry = self.read_entry(index); - index.byte += entry.len(); - if !self.format.is_present(entry) { - // Reached the end of the page. - } else if self.format.is_deleted(entry) { - // Wipe sensitive data if needed. - self.wipe_sensitive_data(entry_index); - } else if self.format.is_internal(entry) { - // Finish page compaction. - self.erase_page(entry_index); - } else if !self.format.is_complete(entry) { - // Roll-back incomplete operations. - self.delete_index(entry_index); - } else if !self.format.is_committed(entry) { - // Finish complete but uncommitted operations. - self.commit_index(entry_index) - } - } - } - } - - /// Initializes uninitialized pages. - fn initialize_storage(&mut self) { - for page in 0..self.format.num_pages { - let (header, index) = self.read_page_header(page); - if self.format.is_initialized(header) { - // Update blank page. - let first_entry = self.read_entry(index); - if !self.format.is_present(first_entry) { - self.blank_page = page; - } - } else { - // We set the erase count to zero the very first time we initialize a page. - self.initialize_page(page, 0); - } - } - debug_assert!(self.blank_page != self.format.num_pages); - } - - /// Marks an entry as deleted. - /// - /// The provided index must point to the beginning of an entry. - fn delete_index(&mut self, index: Index) { - self.update_word(index, |format, word| format.set_deleted(word)); - self.wipe_sensitive_data(index); - } - - /// Wipes the data of a sensitive entry. - /// - /// If the entry at the provided index is sensitive, overwrites the data with zeroes. Otherwise, - /// does nothing. - fn wipe_sensitive_data(&mut self, mut index: Index) { - let entry = self.read_entry(index); - debug_assert!(self.format.is_present(entry)); - debug_assert!(self.format.is_deleted(entry)); - if self.format.is_internal(entry) || !self.format.is_sensitive(entry) { - // No need to wipe the data. - return; - } - let gap = self.format.entry_gap(entry); - let data = gap.slice(entry); - if data.iter().all(|&byte| byte == 0x00) { - // The data is already wiped. - return; - } - index.byte += gap.start; - self.storage - .write_slice(index, &vec![0; gap.length]) - .unwrap(); - } - - /// Finds a page with enough free space. - /// - /// Returns an index to the free space of a page which can hold an entry of `length` bytes. If - /// necessary, pages may be compacted to free space. 
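`recover_entry_operations` above walks every entry at startup and picks one of a handful of repairs. Below is a standalone sketch of that decision ladder; the enum and function are illustrative, and only the branch order mirrors the deleted code.

```rust
#[derive(Debug, PartialEq)]
enum Recovery {
    Stop,        // free space reached: stop scanning the page
    Wipe,        // deleted entry: wipe its data if it was sensitive
    FinishErase, // internal entry: finish erasing the compacted page
    RollBack,    // incomplete entry: roll the operation back
    Commit,      // complete but uncommitted entry: finish committing it
    Keep,        // committed entry: nothing to do
}

fn recover(present: bool, deleted: bool, internal: bool, complete: bool, committed: bool) -> Recovery {
    if !present {
        Recovery::Stop
    } else if deleted {
        Recovery::Wipe
    } else if internal {
        Recovery::FinishErase
    } else if !complete {
        Recovery::RollBack
    } else if !committed {
        Recovery::Commit
    } else {
        Recovery::Keep
    }
}

fn main() {
    // A complete but uncommitted replace entry is committed on reboot.
    assert_eq!(recover(true, false, false, true, false), Recovery::Commit);
    // A partially written entry is rolled back.
    assert_eq!(recover(true, false, false, false, true), Recovery::RollBack);
}
```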
In that case, if provided, the `old_index` - /// is updated according to compaction. - fn find_slot_for_write( - &mut self, - length: usize, - mut old_index: Option<&mut Index>, - ) -> Result { - loop { - if let Some(index) = self.choose_slot_for_write(length) { - return Ok(index); - } - match self.choose_page_for_compact() { - None => return Err(StoreError::StoreFull), - Some(page) => { - let blank_page = self.blank_page; - // Compact the chosen page and update the old index to point to the entry in the - // new page if it happened to be in the old page. This is essentially a way to - // avoid index invalidation due to compaction. - let map = self.compact_page(page, blank_page); - if let Some(old_index) = &mut old_index { - map_index(page, blank_page, &map, old_index); - } - } - } - } - } - - /// Returns whether a page has enough free space. - /// - /// Returns an index to the free space of a page with smallest free space that may hold `length` - /// bytes. - fn choose_slot_for_write(&self, length: usize) -> Option { - Iter::new(self) - .filter(|(index, entry)| { - index.page != self.blank_page - && !self.format.is_present(entry) - && length <= entry.len() - }) - .min_by_key(|(_, entry)| entry.len()) - .map(|(index, _)| index) - } - - /// Returns the page that should be compacted. - fn choose_page_for_compact(&self) -> Option { - // TODO(cretin): This could be optimized by using some cost function depending on: - // - the erase count - // - the length of the free space - // - the length of the alive entries - // We want to minimize this cost. We could also take into account the length of the entry we - // want to write to bound the number of compaction before failing with StoreFull. - // - // We should also make sure that all pages (including if they have no deleted entries and no - // free space) are eventually compacted (ideally to a heavily used page) to benefit from the - // low erase count of those pages. - (0..self.format.num_pages) - .map(|page| (page, self.page_info(page))) - .filter(|&(page, ref info)| { - page != self.blank_page - && info.erase_count < self.format.max_page_erases - && info.deleted_length > self.format.internal_entry_size() - }) - .min_by(|(_, lhs_info), (_, rhs_info)| lhs_info.compare_for_compaction(rhs_info)) - .map(|(page, _)| page) - } - - fn page_info(&self, page: usize) -> PageInfo { - let (page_header, mut index) = self.read_page_header(page); - let mut info = PageInfo { - erase_count: self.format.get_erase_count(page_header), - deleted_length: 0, - free_length: 0, - }; - while index.byte < self.format.page_size { - let entry = self.read_entry(index); - index.byte += entry.len(); - if !self.format.is_present(entry) { - debug_assert_eq!(info.free_length, 0); - info.free_length = entry.len(); - } else if self.format.is_deleted(entry) { - info.deleted_length += entry.len(); - } - } - debug_assert_eq!(index.page, page); - info - } - - fn read_slice(&self, index: Index, length: usize) -> &[u8] { - self.storage.read_slice(index, length).unwrap() - } - - /// Reads an entry (with header and footer) at a given index. - /// - /// If no entry is present, returns the free space up to the end of the page. 
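`choose_page_for_compact` above picks the page with the lowest erase count, breaking ties by the most deleted bytes and then the least free space. Below is a self-contained sketch of that ordering applied with `min_by`; the numbers are made up.

```rust
use std::cmp::Ordering;

struct PageInfo {
    erase_count: usize,
    deleted_length: usize,
    free_length: usize,
}

// Prefer fewer erases, then more deleted bytes, then less free space.
fn compare_for_compaction(lhs: &PageInfo, rhs: &PageInfo) -> Ordering {
    lhs.erase_count
        .cmp(&rhs.erase_count)
        .then(rhs.deleted_length.cmp(&lhs.deleted_length))
        .then(lhs.free_length.cmp(&rhs.free_length))
}

fn main() {
    let pages = vec![
        PageInfo { erase_count: 3, deleted_length: 64, free_length: 0 },
        PageInfo { erase_count: 1, deleted_length: 16, free_length: 32 },
        PageInfo { erase_count: 1, deleted_length: 48, free_length: 8 },
    ];
    // The last page wins: lowest erase count, then the most deleted bytes.
    let best = pages
        .iter()
        .enumerate()
        .min_by(|a, b| compare_for_compaction(a.1, b.1))
        .map(|(i, _)| i);
    assert_eq!(best, Some(2));
}
```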
- fn read_entry(&self, index: Index) -> &[u8] { - let first_byte = self.read_slice(index, 1); - let max_length = self.format.page_size - index.byte; - let mut length = if !self.format.is_present(first_byte) { - max_length - } else if self.format.is_internal(first_byte) { - self.format.internal_entry_size() - } else { - // We don't know if the entry is sensitive or not, but it doesn't matter here. We just - // need to read the replace, sensitive, and length fields. - let header = self.read_slice(index, self.format.header_size(false)); - let replace = self.format.is_replace(header); - let sensitive = self.format.is_sensitive(header); - let length = self.format.get_length(header); - self.format.entry_size(replace, sensitive, length) - }; - // Truncate the length to fit the page. This can only happen in case of corruption or - // partial writes. - length = core::cmp::min(length, max_length); - self.read_slice(index, length) - } - - /// Reads a page header. - /// - /// Also returns the index after the page header. - fn read_page_header(&self, page: usize) -> (&[u8], Index) { - let mut index = Index { page, byte: 0 }; - let page_header = self.read_slice(index, self.format.page_header_size()); - index.byte += page_header.len(); - (page_header, index) - } - - /// Updates a word at a given index. - /// - /// The `update` function is called with the word at `index`. The input value is the current - /// value of the word. The output value is the value that will be written. It should only change - /// bits from 1 to 0. - fn update_word(&mut self, index: Index, update: impl FnOnce(&Format, &mut [u8])) { - let word_size = self.format.word_size; - let mut word = self.read_slice(index, word_size).to_vec(); - update(&self.format, &mut word); - self.storage.write_slice(index, &word).unwrap(); - } - - fn write_entry(&mut self, index: Index, entry: &[u8]) { - self.storage.write_slice(index, entry).unwrap(); - } - - /// Initializes a page by writing the page header. - /// - /// If the page is not erased, it is first erased. - fn initialize_page(&mut self, page: usize, erase_count: usize) { - let index = Index { page, byte: 0 }; - let page = self.read_slice(index, self.format.page_size); - if !page.iter().all(|&byte| byte == 0xff) { - self.storage.erase_page(index.page).unwrap(); - } - self.update_word(index, |format, header| { - format.set_initialized(header); - format.set_erase_count(header, erase_count); - }); - self.blank_page = index.page; - } - - /// Commits a replace entry. - /// - /// Deletes the old entry and commits the new entry. - fn commit_index(&mut self, mut index: Index) { - let entry = self.read_entry(index); - index.byte += entry.len(); - let word_size = self.format.word_size; - debug_assert!(entry.len() >= 2 * word_size); - match self.format.is_replace(entry) { - IsReplace::Replace => { - let delete_index = self.format.get_replace_index(entry); - self.delete_index(delete_index); - } - IsReplace::Insert => debug_assert!(false), - }; - index.byte -= word_size; - self.update_word(index, |format, word| format.set_committed(word)); - } - - /// Compacts a page to an other. - /// - /// Returns the mapping from the alive entries in the old page to their index in the new page. - fn compact_page(&mut self, old_page: usize, new_page: usize) -> BTreeMap { - // Write the old page as being compacted to the new page. 
- let mut erase_count = 0; - self.update_word( - Index { - page: old_page, - byte: 0, - }, - |format, header| { - erase_count = format.get_erase_count(header); - format.set_compacting(header); - format.set_new_page(header, new_page); - }, - ); - // Copy alive entries from the old page to the new page. - let page_header_size = self.format.page_header_size(); - let mut old_index = Index { - page: old_page, - byte: page_header_size, - }; - let mut new_index = Index { - page: new_page, - byte: page_header_size, - }; - let mut map = BTreeMap::new(); - while old_index.byte < self.format.page_size { - let old_entry = self.read_entry(old_index); - let old_entry_index = old_index.byte; - old_index.byte += old_entry.len(); - if !self.format.is_alive(old_entry) { - continue; - } - let previous_mapping = map.insert(old_entry_index, new_index.byte); - debug_assert!(previous_mapping.is_none()); - // We need to copy the old entry because it is in the storage and we are going to write - // to the storage. Rust cannot tell that both entries don't overlap. - let old_entry = old_entry.to_vec(); - self.write_entry(new_index, &old_entry); - new_index.byte += old_entry.len(); - } - // Save the old page index and erase count to the new page. - let erase_index = new_index; - let erase_entry = self.format.build_erase_entry(old_page, erase_count); - self.write_entry(new_index, &erase_entry); - // Erase the page. - self.erase_page(erase_index); - // Increase generation. - self.generation += 1; - map - } - - /// Commits an internal entry. - /// - /// The only kind of internal entry is to erase a page, which first erases the page, then - /// initializes it with the saved erase count, and finally deletes the internal entry. - fn erase_page(&mut self, erase_index: Index) { - let erase_entry = self.read_entry(erase_index); - debug_assert!(self.format.is_present(erase_entry)); - debug_assert!(!self.format.is_deleted(erase_entry)); - debug_assert!(self.format.is_internal(erase_entry)); - let old_page = self.format.get_old_page(erase_entry); - let erase_count = self.format.get_saved_erase_count(erase_entry) + 1; - // Erase the page. - self.storage.erase_page(old_page).unwrap(); - // Initialize the page. - self.initialize_page(old_page, erase_count); - // Delete the internal entry. - self.delete_index(erase_index); - } -} - -// Those functions are not meant for production. -#[cfg(any(test, feature = "ram_storage"))] -impl Store { - /// Takes a snapshot of the storage after a given amount of word operations. - pub fn arm_snapshot(&mut self, delay: usize) { - self.storage.arm_snapshot(delay); - } - - /// Unarms and returns the snapshot or the delay remaining. - pub fn get_snapshot(&mut self) -> Result, usize> { - self.storage.get_snapshot() - } - - /// Takes a snapshot of the storage. - pub fn take_snapshot(&self) -> Box<[u8]> { - self.storage.take_snapshot() - } - - /// Returns the storage. - pub fn get_storage(self) -> Box<[u8]> { - self.storage.get_storage() - } - - /// Erases and initializes a page with a given erase count. - pub fn set_erase_count(&mut self, page: usize, erase_count: usize) { - self.initialize_page(page, erase_count); - } - - /// Returns whether all deleted sensitive entries have been wiped. 
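During `compact_page` above, alive entries are copied in order to the blank page while a map from old byte offsets to new ones is recorded; `map_index` later uses that map to keep a caller's index valid across compaction. Below is a minimal sketch of the remapping with assumed entry offsets and an assumed 4-byte page header.

```rust
use std::collections::BTreeMap;

fn main() {
    // Assumed old byte offsets of alive entries and their sizes in bytes.
    let alive: &[(usize, usize)] = &[(8, 12), (32, 8), (52, 16)];
    let mut map = BTreeMap::new();
    // First free byte after an assumed one-word page header.
    let mut new_byte = 4;
    for &(old_byte, size) in alive {
        map.insert(old_byte, new_byte);
        new_byte += size;
    }
    // An index into the old page is translated through the map.
    assert_eq!(map.get(&32), Some(&16));
}
```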
- pub fn deleted_entries_are_wiped(&self) -> bool { - for (_, entry) in Iter::new(self) { - if !self.format.is_present(entry) - || !self.format.is_deleted(entry) - || self.format.is_internal(entry) - || !self.format.is_sensitive(entry) - { - continue; - } - let gap = self.format.entry_gap(entry); - let data = gap.slice(entry); - if !data.iter().all(|&byte| byte == 0x00) { - return false; - } - } - true - } -} - -/// Maps an index from an old page to a new page if needed. -fn map_index(old_page: usize, new_page: usize, map: &BTreeMap, index: &mut Index) { - if index.page == old_page { - index.page = new_page; - index.byte = *map.get(&index.byte).unwrap(); - } -} - -/// Page information for compaction. -struct PageInfo { - /// How many times the page was erased. - erase_count: usize, - - /// Cumulative length of deleted entries (including header and footer). - deleted_length: usize, - - /// Length of the free space. - free_length: usize, -} - -impl PageInfo { - /// Returns whether a page should be compacted before another. - fn compare_for_compaction(&self, rhs: &PageInfo) -> core::cmp::Ordering { - self.erase_count - .cmp(&rhs.erase_count) - .then(rhs.deleted_length.cmp(&self.deleted_length)) - .then(self.free_length.cmp(&rhs.free_length)) - } -} - -/// Iterates over all entries (including free space) of a store. -struct Iter<'a, S: Storage, C: StoreConfig> { - store: &'a Store, - index: Index, -} - -impl<'a, S: Storage, C: StoreConfig> Iter<'a, S, C> { - fn new(store: &'a Store) -> Iter<'a, S, C> { - let index = Index { - page: 0, - byte: store.format.page_header_size(), - }; - Iter { store, index } - } -} - -impl<'a, S: Storage, C: StoreConfig> Iterator for Iter<'a, S, C> { - type Item = (Index, &'a [u8]); - - fn next(&mut self) -> Option<(Index, &'a [u8])> { - if self.index.byte == self.store.format.page_size { - self.index.page += 1; - self.index.byte = self.store.format.page_header_size(); - } - if self.index.page == self.store.format.num_pages { - return None; - } - let index = self.index; - let entry = self.store.read_entry(self.index); - self.index.byte += entry.len(); - Some((index, entry)) - } -} - -#[cfg(test)] -mod tests { - use super::super::{BufferOptions, BufferStorage}; - use super::*; - - struct Config; - - const WORD_SIZE: usize = 4; - const PAGE_SIZE: usize = 8 * WORD_SIZE; - const NUM_PAGES: usize = 3; - - impl StoreConfig for Config { - type Key = u8; - - fn num_tags(&self) -> usize { - 1 - } - - fn keys(&self, entry: StoreEntry, mut add: impl FnMut(u8)) { - assert_eq!(entry.tag, 0); - if !entry.data.is_empty() { - add(entry.data[0]); - } - } - } - - fn new_buffer(storage: Box<[u8]>) -> BufferStorage { - let options = BufferOptions { - word_size: WORD_SIZE, - page_size: PAGE_SIZE, - max_word_writes: 2, - max_page_erases: 2, - strict_write: true, - }; - BufferStorage::new(storage, options) - } - - fn new_store() -> Store { - let storage = vec![0xff; NUM_PAGES * PAGE_SIZE].into_boxed_slice(); - Store::new(new_buffer(storage), Config).unwrap() - } - - #[test] - fn insert_ok() { - let mut store = new_store(); - assert_eq!(store.iter().count(), 0); - let tag = 0; - let key = 1; - let data = &[key, 2]; - let entry = StoreEntry { - tag, - data, - sensitive: false, - }; - store.insert(entry).unwrap(); - assert_eq!(store.iter().count(), 1); - assert_eq!(store.find_one(&key).unwrap().1, entry); - } - - #[test] - fn insert_sensitive_ok() { - let mut store = new_store(); - let tag = 0; - let key = 1; - let data = &[key, 4]; - let entry = StoreEntry { - tag, - data, - sensitive: 
true, - }; - store.insert(entry).unwrap(); - assert_eq!(store.iter().count(), 1); - assert_eq!(store.find_one(&key).unwrap().1, entry); - } - - #[test] - fn delete_ok() { - let mut store = new_store(); - let tag = 0; - let key = 1; - let entry = StoreEntry { - tag, - data: &[key, 2], - sensitive: false, - }; - store.insert(entry).unwrap(); - assert_eq!(store.find_all(&key).count(), 1); - let (index, _) = store.find_one(&key).unwrap(); - store.delete(index).unwrap(); - assert_eq!(store.find_all(&key).count(), 0); - assert_eq!(store.iter().count(), 0); - } - - #[test] - fn delete_sensitive_ok() { - let mut store = new_store(); - let tag = 0; - let key = 1; - let entry = StoreEntry { - tag, - data: &[key, 2], - sensitive: true, - }; - store.insert(entry).unwrap(); - assert_eq!(store.find_all(&key).count(), 1); - let (index, _) = store.find_one(&key).unwrap(); - store.delete(index).unwrap(); - assert_eq!(store.find_all(&key).count(), 0); - assert_eq!(store.iter().count(), 0); - assert!(store.deleted_entries_are_wiped()); - } - - #[test] - fn insert_until_full() { - let mut store = new_store(); - let tag = 0; - let mut key = 0; - while store - .insert(StoreEntry { - tag, - data: &[key, 0], - sensitive: false, - }) - .is_ok() - { - key += 1; - } - assert!(key > 0); - } - - #[test] - fn compact_ok() { - let mut store = new_store(); - let tag = 0; - let mut key = 0; - while store - .insert(StoreEntry { - tag, - data: &[key, 0], - sensitive: false, - }) - .is_ok() - { - key += 1; - } - let (index, _) = store.find_one(&0).unwrap(); - store.delete(index).unwrap(); - store - .insert(StoreEntry { - tag: 0, - data: &[key, 0], - sensitive: false, - }) - .unwrap(); - for k in 1..=key { - assert_eq!(store.find_all(&k).count(), 1); - } - } - - #[test] - fn reboot_ok() { - let mut store = new_store(); - let tag = 0; - let key = 1; - let data = &[key, 2]; - let entry = StoreEntry { - tag, - data, - sensitive: false, - }; - store.insert(entry).unwrap(); - - // Reboot the store. 
- let store = store.get_storage(); - let store = Store::new(new_buffer(store), Config).unwrap(); - - assert_eq!(store.iter().count(), 1); - assert_eq!(store.find_one(&key).unwrap().1, entry); - } - - #[test] - fn replace_atomic() { - let tag = 0; - let key = 1; - let old_entry = StoreEntry { - tag, - data: &[key, 2, 3, 4, 5, 6], - sensitive: false, - }; - let new_entry = StoreEntry { - tag, - data: &[key, 7, 8, 9], - sensitive: false, - }; - let mut delay = 0; - loop { - let mut store = new_store(); - store.insert(old_entry).unwrap(); - store.arm_snapshot(delay); - let (index, _) = store.find_one(&key).unwrap(); - store.replace(index, new_entry).unwrap(); - let (complete, store) = match store.get_snapshot() { - Err(_) => (true, store.get_storage()), - Ok(store) => (false, store), - }; - let store = Store::new(new_buffer(store), Config).unwrap(); - assert_eq!(store.iter().count(), 1); - assert_eq!(store.find_all(&key).count(), 1); - let (_, cur_entry) = store.find_one(&key).unwrap(); - assert!((cur_entry == old_entry && !complete) || cur_entry == new_entry); - if complete { - break; - } - delay += 1; - } - } - - #[test] - fn compact_atomic() { - let tag = 0; - let mut delay = 0; - loop { - let mut store = new_store(); - let mut key = 0; - while store - .insert(StoreEntry { - tag, - data: &[key, 0], - sensitive: false, - }) - .is_ok() - { - key += 1; - } - let (index, _) = store.find_one(&0).unwrap(); - store.delete(index).unwrap(); - let (index, _) = store.find_one(&1).unwrap(); - store.arm_snapshot(delay); - store - .replace( - index, - StoreEntry { - tag, - data: &[1, 1], - sensitive: false, - }, - ) - .unwrap(); - let (complete, store) = match store.get_snapshot() { - Err(_) => (true, store.get_storage()), - Ok(store) => (false, store), - }; - let store = Store::new(new_buffer(store), Config).unwrap(); - assert_eq!(store.iter().count(), key as usize - 1); - for k in 2..key { - assert_eq!(store.find_all(&k).count(), 1); - assert_eq!( - store.find_one(&k).unwrap().1, - StoreEntry { - tag, - data: &[k, 0], - sensitive: false, - } - ); - } - assert_eq!(store.find_all(&1).count(), 1); - let (_, entry) = store.find_one(&1).unwrap(); - assert_eq!(entry.tag, tag); - assert!((entry.data == [1, 0] && !complete) || entry.data == [1, 1]); - if complete { - break; - } - delay += 1; - } - } - - #[test] - fn invalid_tag() { - let mut store = new_store(); - let entry = StoreEntry { - tag: 1, - data: &[], - sensitive: false, - }; - assert_eq!(store.insert(entry), Err(StoreError::InvalidTag)); - } - - #[test] - fn invalid_length() { - let mut store = new_store(); - let entry = StoreEntry { - tag: 0, - data: &[0; PAGE_SIZE], - sensitive: false, - }; - assert_eq!(store.insert(entry), Err(StoreError::StoreFull)); - } -} diff --git a/src/embedded_flash/syscall.rs b/src/embedded_flash/syscall.rs index 5eae561..e043772 100644 --- a/src/embedded_flash/syscall.rs +++ b/src/embedded_flash/syscall.rs @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC +// Copyright 2019-2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,9 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use super::{Index, Storage, StorageError, StorageResult}; use alloc::vec::Vec; use libtock_core::syscalls; +use persistent_store::{Storage, StorageError, StorageIndex, StorageResult}; const DRIVER_NUMBER: usize = 0x50003; @@ -42,15 +42,13 @@ mod memop_nr { fn get_info(nr: usize, arg: usize) -> StorageResult { let code = syscalls::command(DRIVER_NUMBER, command_nr::GET_INFO, nr, arg); - code.map_err(|e| StorageError::KernelError { - code: e.return_code, - }) + code.map_err(|_| StorageError::CustomError) } fn memop(nr: u32, arg: usize) -> StorageResult { let code = unsafe { syscalls::raw::memop(nr, arg) }; if code < 0 { - Err(StorageError::KernelError { code }) + Err(StorageError::CustomError) } else { Ok(code as usize) } @@ -70,7 +68,7 @@ impl SyscallStorage { /// /// # Errors /// - /// Returns `BadFlash` if any of the following conditions do not hold: + /// Returns `CustomError` if any of the following conditions do not hold: /// - The word size is a power of two. /// - The page size is a power of two. /// - The page size is a multiple of the word size. @@ -90,13 +88,13 @@ impl SyscallStorage { || !syscall.page_size.is_power_of_two() || !syscall.is_word_aligned(syscall.page_size) { - return Err(StorageError::BadFlash); + return Err(StorageError::CustomError); } for i in 0..memop(memop_nr::STORAGE_CNT, 0)? { let storage_ptr = memop(memop_nr::STORAGE_PTR, i)?; let max_storage_len = memop(memop_nr::STORAGE_LEN, i)?; if !syscall.is_page_aligned(storage_ptr) || !syscall.is_page_aligned(max_storage_len) { - return Err(StorageError::BadFlash); + return Err(StorageError::CustomError); } let storage_len = core::cmp::min(num_pages * syscall.page_size, max_storage_len); num_pages -= storage_len / syscall.page_size; @@ -141,12 +139,12 @@ impl Storage for SyscallStorage { self.max_page_erases } - fn read_slice(&self, index: Index, length: usize) -> StorageResult<&[u8]> { + fn read_slice(&self, index: StorageIndex, length: usize) -> StorageResult<&[u8]> { let start = index.range(length, self)?.start; find_slice(&self.storage_locations, start, length) } - fn write_slice(&mut self, index: Index, value: &[u8]) -> StorageResult<()> { + fn write_slice(&mut self, index: StorageIndex, value: &[u8]) -> StorageResult<()> { if !self.is_word_aligned(index.byte) || !self.is_word_aligned(value.len()) { return Err(StorageError::NotAligned); } @@ -163,28 +161,24 @@ impl Storage for SyscallStorage { ) }; if code < 0 { - return Err(StorageError::KernelError { code }); + return Err(StorageError::CustomError); } let code = syscalls::command(DRIVER_NUMBER, command_nr::WRITE_SLICE, ptr, value.len()); - if let Err(e) = code { - return Err(StorageError::KernelError { - code: e.return_code, - }); + if code.is_err() { + return Err(StorageError::CustomError); } Ok(()) } fn erase_page(&mut self, page: usize) -> StorageResult<()> { - let index = Index { page, byte: 0 }; + let index = StorageIndex { page, byte: 0 }; let length = self.page_size(); let ptr = self.read_slice(index, length)?.as_ptr() as usize; let code = syscalls::command(DRIVER_NUMBER, command_nr::ERASE_PAGE, ptr, length); - if let Err(e) = code { - return Err(StorageError::KernelError { - code: e.return_code, - }); + if code.is_err() { + return Err(StorageError::CustomError); } Ok(()) } From 1db73c699be15102323d82a9cc9f26c3d050b8d2 Mon Sep 17 00:00:00 2001 From: Julien Cretin Date: Tue, 1 Dec 2020 11:29:52 +0100 Subject: [PATCH 02/20] Apply review comments --- src/ctap/status_code.rs | 2 +- src/ctap/storage.rs | 29 +++++++++++++++-------------- 
src/ctap/storage/key.rs | 5 ++++- 3 files changed, 20 insertions(+), 16 deletions(-) diff --git a/src/ctap/status_code.rs b/src/ctap/status_code.rs index b4f78fd..638caef 100644 --- a/src/ctap/status_code.rs +++ b/src/ctap/status_code.rs @@ -85,7 +85,7 @@ pub enum Ctap2StatusCode { /// /// There can be multiple reasons: /// - The persistent storage has not been erased before its first usage. - /// - The persistent storage has been tempered by a third party. + /// - The persistent storage has been tempered with by a third party. /// - The flash is malfunctioning (including the Tock driver). /// /// In the first 2 cases the persistent storage should be completely erased. If the error diff --git a/src/ctap/storage.rs b/src/ctap/storage.rs index 158bc4f..054d3c8 100644 --- a/src/ctap/storage.rs +++ b/src/ctap/storage.rs @@ -122,7 +122,7 @@ impl PersistentStore { page_size: PAGE_SIZE, max_word_writes: 2, max_page_erases: 10000, - strict_write: true, + strict_mode: true, }; Storage::new(store, options) } @@ -180,9 +180,8 @@ impl PersistentStore { ) -> Result, Ctap2StatusCode> { let mut iter_result = Ok(()); let iter = self.iter_credentials(&mut iter_result)?; - // TODO(reviewer): Should we return an error if we find more than one matching credential? - // We did not use to in the previous version (panic in debug mode, nothing in release mode) - // but I don't remember why. Let's document it. + // We don't check whether there is more than one matching credential to be able to exit + // early. let result = iter.map(|(_, credential)| credential).find(|credential| { credential.rp_id == rp_id && credential.credential_id == credential_id }); @@ -388,7 +387,7 @@ impl PersistentStore { Ok(self.store.insert(key::MIN_PIN_LENGTH, &[min_pin_length])?) } - /// TODO: Help from reviewer needed for documentation. + /// Returns a list of RP IDs that used to check if reading the minimum PIN length is allowed. #[cfg(feature = "with_ctap2_1")] pub fn _min_pin_length_rp_ids(&self) -> Result, Ctap2StatusCode> { let rp_ids = self @@ -401,7 +400,7 @@ impl PersistentStore { Ok(rp_ids.unwrap_or(vec![])) } - /// TODO: Help from reviewer needed for documentation. + /// Set a list of RP IDs that used to check if reading the minimum PIN length is allowed. #[cfg(feature = "with_ctap2_1")] pub fn _set_min_pin_length_rp_ids( &mut self, @@ -508,20 +507,22 @@ impl PersistentStore { impl From for Ctap2StatusCode { fn from(error: persistent_store::StoreError) -> Ctap2StatusCode { - use persistent_store::StoreError::*; + use persistent_store::StoreError; match error { // This error is expected. The store is full. - NoCapacity => Ctap2StatusCode::CTAP2_ERR_KEY_STORE_FULL, + StoreError::NoCapacity => Ctap2StatusCode::CTAP2_ERR_KEY_STORE_FULL, // This error is expected. The flash is out of life. - NoLifetime => Ctap2StatusCode::CTAP2_ERR_KEY_STORE_FULL, + StoreError::NoLifetime => Ctap2StatusCode::CTAP2_ERR_KEY_STORE_FULL, // This error is expected if we don't satisfy the store preconditions. For example we // try to store a credential which is too long. - InvalidArgument => Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR, + StoreError::InvalidArgument => Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR, // This error is not expected. The storage has been tempered with. We could erase the // storage. - InvalidStorage => Ctap2StatusCode::CTAP2_ERR_VENDOR_INVALID_PERSISTENT_STORAGE, + StoreError::InvalidStorage => { + Ctap2StatusCode::CTAP2_ERR_VENDOR_INVALID_PERSISTENT_STORAGE + } // This error is not expected. 
The kernel is failing our syscalls. - StorageError => Ctap2StatusCode::CTAP1_ERR_OTHER, + StoreError::StorageError => Ctap2StatusCode::CTAP1_ERR_OTHER, } } } @@ -605,7 +606,7 @@ fn serialize_credential(credential: PublicKeyCredentialSource) -> Result } } -/// TODO: Help from reviewer needed for documentation. +/// Deserializes a list of RP IDs from storage representation. #[cfg(feature = "with_ctap2_1")] fn _deserialize_min_pin_length_rp_ids(data: &[u8]) -> Option> { let cbor = cbor::read(data).ok()?; @@ -617,7 +618,7 @@ fn _deserialize_min_pin_length_rp_ids(data: &[u8]) -> Option> { .ok() } -/// TODO: Help from reviewer needed for documentation. +/// Serializes a list of RP IDs to storage representation. #[cfg(feature = "with_ctap2_1")] fn _serialize_min_pin_length_rp_ids(rp_ids: Vec) -> Result, Ctap2StatusCode> { let mut data = Vec::new(); diff --git a/src/ctap/storage/key.rs b/src/ctap/storage/key.rs index 9796d5a..3e86d8b 100644 --- a/src/ctap/storage/key.rs +++ b/src/ctap/storage/key.rs @@ -82,10 +82,13 @@ make_partition! { /// board may configure `MAX_SUPPORTED_RESIDENTIAL_KEYS` depending on the storage size. CREDENTIALS = 1700..2000; - /// TODO: Help from reviewer needed for documentation. + /// List of RP IDs allowed to read the minimum PIN length. + #[cfg(feature = "with_ctap2_1")] _MIN_PIN_LENGTH_RP_IDS = 2042; /// The minimum PIN length. + /// + /// If the entry is absent, the minimum PIN length is `DEFAULT_MIN_PIN_LENGTH`. #[cfg(feature = "with_ctap2_1")] MIN_PIN_LENGTH = 2043; From b55d4320439fe1c59dd88d81a5a946738817e3f5 Mon Sep 17 00:00:00 2001 From: Julien Cretin Date: Tue, 1 Dec 2020 15:39:51 +0100 Subject: [PATCH 03/20] Apply review comments --- src/ctap/storage.rs | 5 +++-- src/ctap/storage/key.rs | 3 +-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/ctap/storage.rs b/src/ctap/storage.rs index 054d3c8..0f1c73f 100644 --- a/src/ctap/storage.rs +++ b/src/ctap/storage.rs @@ -387,7 +387,8 @@ impl PersistentStore { Ok(self.store.insert(key::MIN_PIN_LENGTH, &[min_pin_length])?) } - /// Returns a list of RP IDs that used to check if reading the minimum PIN length is allowed. + /// Returns the list of RP IDs that are used to check if reading the minimum PIN length is + /// allowed. #[cfg(feature = "with_ctap2_1")] pub fn _min_pin_length_rp_ids(&self) -> Result, Ctap2StatusCode> { let rp_ids = self @@ -400,7 +401,7 @@ impl PersistentStore { Ok(rp_ids.unwrap_or(vec![])) } - /// Set a list of RP IDs that used to check if reading the minimum PIN length is allowed. + /// Sets the list of RP IDs that are used to check if reading the minimum PIN length is allowed. #[cfg(feature = "with_ctap2_1")] pub fn _set_min_pin_length_rp_ids( &mut self, diff --git a/src/ctap/storage/key.rs b/src/ctap/storage/key.rs index 3e86d8b..e37e470 100644 --- a/src/ctap/storage/key.rs +++ b/src/ctap/storage/key.rs @@ -104,8 +104,7 @@ make_partition! { /// The encryption and hmac keys. /// - /// This entry is always present. It is generated at startup if absent. This is not a persistent - /// key because its value should change after a CTAP reset. + /// This entry is always present. It is generated at startup if absent. MASTER_KEYS = 2046; /// The global signature counter. 
From 042108e3d9d753fb0d0820733021c5c493f9eb4c Mon Sep 17 00:00:00 2001 From: Julien Cretin Date: Tue, 1 Dec 2020 17:46:28 +0100 Subject: [PATCH 04/20] Reserve 700 additional keys for credential-related stuff --- src/ctap/storage/key.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/ctap/storage/key.rs b/src/ctap/storage/key.rs index e37e470..6dab699 100644 --- a/src/ctap/storage/key.rs +++ b/src/ctap/storage/key.rs @@ -76,6 +76,12 @@ make_partition! { // - When adding a (non-persistent) key below this message, make sure its value is bigger or // equal than NUM_PERSISTENT_KEYS. + /// Reserved for future credential-related objects. + /// + /// In particular, additional credentials could be added there by reducing the lower bound of + /// the credential range below as well as the upper bound of this range in a similar manner. + _RESERVED_CREDENTIALS = 1000..1700; + /// The credentials. /// /// Depending on `MAX_SUPPORTED_RESIDENTIAL_KEYS`, only a prefix of those keys is used. Each From 16c0196b1d3a97abd00e6b52615a78b7ac0ad11c Mon Sep 17 00:00:00 2001 From: Julien Cretin Date: Fri, 4 Dec 2020 14:42:16 +0100 Subject: [PATCH 05/20] Check global counter length --- src/ctap/storage.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/ctap/storage.rs b/src/ctap/storage.rs index 0f1c73f..a10f2eb 100644 --- a/src/ctap/storage.rs +++ b/src/ctap/storage.rs @@ -294,10 +294,11 @@ impl PersistentStore { /// Returns the global signature counter. pub fn global_signature_counter(&self) -> Result { - Ok(match self.store.find(key::GLOBAL_SIGNATURE_COUNTER)? { - None => 0, - Some(value) => u32::from_ne_bytes(*array_ref!(&value, 0, 4)), - }) + match self.store.find(key::GLOBAL_SIGNATURE_COUNTER)? { + None => Ok(0), + Some(value) if value.len() == 4 => Ok(u32::from_ne_bytes(*array_ref!(&value, 0, 4))), + Some(_) => Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INVALID_PERSISTENT_STORAGE), + } } /// Increments the global signature counter. From 90def7dfd3c913d04fe107373b926d1d012655ce Mon Sep 17 00:00:00 2001 From: Fabian Kaczmarczyck Date: Tue, 8 Dec 2020 18:12:48 +0100 Subject: [PATCH 06/20] implicitly generate HMAC-secret --- src/ctap/ctap1.rs | 53 +++++---------- src/ctap/data_formats.rs | 20 +----- src/ctap/mod.rs | 128 ++++++++++-------------------------- src/ctap/pin_protocol_v1.rs | 30 ++------- src/ctap/storage.rs | 59 +++++++++++++++-- 5 files changed, 112 insertions(+), 178 deletions(-) diff --git a/src/ctap/ctap1.rs b/src/ctap/ctap1.rs index e434c36..47d2120 100644 --- a/src/ctap/ctap1.rs +++ b/src/ctap/ctap1.rs @@ -291,7 +291,7 @@ impl Ctap1Command { let sk = crypto::ecdsa::SecKey::gensk(ctap_state.rng); let pk = sk.genpk(); let key_handle = ctap_state - .encrypt_key_handle(sk, &application, None) + .encrypt_key_handle(sk, &application) .map_err(|_| Ctap1StatusCode::SW_COMMAND_ABORTED)?; if key_handle.len() > 0xFF { // This is just being defensive with unreachable code. 
@@ -386,7 +386,7 @@ impl Ctap1Command { #[cfg(test)] mod test { - use super::super::{key_material, CREDENTIAL_ID_BASE_SIZE, USE_SIGNATURE_COUNTER}; + use super::super::{key_material, CREDENTIAL_ID_SIZE, USE_SIGNATURE_COUNTER}; use super::*; use crypto::rng256::ThreadRng256; use crypto::Hash256; @@ -426,12 +426,12 @@ mod test { 0x00, 0x00, 0x00, - 65 + CREDENTIAL_ID_BASE_SIZE as u8, + 65 + CREDENTIAL_ID_SIZE as u8, ]; let challenge = [0x0C; 32]; message.extend(&challenge); message.extend(application); - message.push(CREDENTIAL_ID_BASE_SIZE as u8); + message.push(CREDENTIAL_ID_SIZE as u8); message.extend(key_handle); message } @@ -471,15 +471,12 @@ mod test { let response = Ctap1Command::process_command(&message, &mut ctap_state, START_CLOCK_VALUE).unwrap(); assert_eq!(response[0], Ctap1Command::LEGACY_BYTE); - assert_eq!(response[66], CREDENTIAL_ID_BASE_SIZE as u8); + assert_eq!(response[66], CREDENTIAL_ID_SIZE as u8); assert!(ctap_state - .decrypt_credential_source( - response[67..67 + CREDENTIAL_ID_BASE_SIZE].to_vec(), - &application - ) + .decrypt_credential_source(response[67..67 + CREDENTIAL_ID_SIZE].to_vec(), &application) .unwrap() .is_some()); - const CERT_START: usize = 67 + CREDENTIAL_ID_BASE_SIZE; + const CERT_START: usize = 67 + CREDENTIAL_ID_SIZE; assert_eq!( &response[CERT_START..CERT_START + fake_cert.len()], &fake_cert[..] @@ -528,9 +525,7 @@ mod test { let rp_id = "example.com"; let application = crypto::sha256::Sha256::hash(rp_id.as_bytes()); - let key_handle = ctap_state - .encrypt_key_handle(sk, &application, None) - .unwrap(); + let key_handle = ctap_state.encrypt_key_handle(sk, &application).unwrap(); let message = create_authenticate_message(&application, Ctap1Flags::CheckOnly, &key_handle); let response = Ctap1Command::process_command(&message, &mut ctap_state, START_CLOCK_VALUE); @@ -546,9 +541,7 @@ mod test { let rp_id = "example.com"; let application = crypto::sha256::Sha256::hash(rp_id.as_bytes()); - let key_handle = ctap_state - .encrypt_key_handle(sk, &application, None) - .unwrap(); + let key_handle = ctap_state.encrypt_key_handle(sk, &application).unwrap(); let application = [0x55; 32]; let message = create_authenticate_message(&application, Ctap1Flags::CheckOnly, &key_handle); @@ -565,9 +558,7 @@ mod test { let rp_id = "example.com"; let application = crypto::sha256::Sha256::hash(rp_id.as_bytes()); - let key_handle = ctap_state - .encrypt_key_handle(sk, &application, None) - .unwrap(); + let key_handle = ctap_state.encrypt_key_handle(sk, &application).unwrap(); let mut message = create_authenticate_message(&application, Ctap1Flags::CheckOnly, &key_handle); @@ -591,9 +582,7 @@ mod test { let rp_id = "example.com"; let application = crypto::sha256::Sha256::hash(rp_id.as_bytes()); - let key_handle = ctap_state - .encrypt_key_handle(sk, &application, None) - .unwrap(); + let key_handle = ctap_state.encrypt_key_handle(sk, &application).unwrap(); let mut message = create_authenticate_message(&application, Ctap1Flags::CheckOnly, &key_handle); message[0] = 0xEE; @@ -611,9 +600,7 @@ mod test { let rp_id = "example.com"; let application = crypto::sha256::Sha256::hash(rp_id.as_bytes()); - let key_handle = ctap_state - .encrypt_key_handle(sk, &application, None) - .unwrap(); + let key_handle = ctap_state.encrypt_key_handle(sk, &application).unwrap(); let mut message = create_authenticate_message(&application, Ctap1Flags::CheckOnly, &key_handle); message[1] = 0xEE; @@ -631,9 +618,7 @@ mod test { let rp_id = "example.com"; let application = 
crypto::sha256::Sha256::hash(rp_id.as_bytes()); - let key_handle = ctap_state - .encrypt_key_handle(sk, &application, None) - .unwrap(); + let key_handle = ctap_state.encrypt_key_handle(sk, &application).unwrap(); let mut message = create_authenticate_message(&application, Ctap1Flags::CheckOnly, &key_handle); message[2] = 0xEE; @@ -659,9 +644,7 @@ mod test { let rp_id = "example.com"; let application = crypto::sha256::Sha256::hash(rp_id.as_bytes()); - let key_handle = ctap_state - .encrypt_key_handle(sk, &application, None) - .unwrap(); + let key_handle = ctap_state.encrypt_key_handle(sk, &application).unwrap(); let message = create_authenticate_message(&application, Ctap1Flags::EnforceUpAndSign, &key_handle); @@ -688,9 +671,7 @@ mod test { let rp_id = "example.com"; let application = crypto::sha256::Sha256::hash(rp_id.as_bytes()); - let key_handle = ctap_state - .encrypt_key_handle(sk, &application, None) - .unwrap(); + let key_handle = ctap_state.encrypt_key_handle(sk, &application).unwrap(); let message = create_authenticate_message( &application, Ctap1Flags::DontEnforceUpAndSign, @@ -712,7 +693,7 @@ mod test { #[test] fn test_process_authenticate_bad_key_handle() { let application = [0x0A; 32]; - let key_handle = vec![0x00; CREDENTIAL_ID_BASE_SIZE]; + let key_handle = vec![0x00; CREDENTIAL_ID_SIZE]; let message = create_authenticate_message(&application, Ctap1Flags::EnforceUpAndSign, &key_handle); @@ -729,7 +710,7 @@ mod test { #[test] fn test_process_authenticate_without_up() { let application = [0x0A; 32]; - let key_handle = vec![0x00; CREDENTIAL_ID_BASE_SIZE]; + let key_handle = vec![0x00; CREDENTIAL_ID_SIZE]; let message = create_authenticate_message(&application, Ctap1Flags::EnforceUpAndSign, &key_handle); diff --git a/src/ctap/data_formats.rs b/src/ctap/data_formats.rs index e7419fd..a2b490d 100644 --- a/src/ctap/data_formats.rs +++ b/src/ctap/data_formats.rs @@ -498,7 +498,6 @@ pub struct PublicKeyCredentialSource { pub rp_id: String, pub user_handle: Vec, // not optional, but nullable pub user_display_name: Option, - pub cred_random: Option>, pub cred_protect_policy: Option, pub creation_order: u64, pub user_name: Option, @@ -513,14 +512,14 @@ enum PublicKeyCredentialSourceField { RpId = 2, UserHandle = 3, UserDisplayName = 4, - CredRandom = 5, CredProtectPolicy = 6, CreationOrder = 7, UserName = 8, UserIcon = 9, // When a field is removed, its tag should be reserved and not used for new fields. We document // those reserved tags below. - // Reserved tags: none. 
+ // Reserved tags: + // - CredRandom = 5, } impl From for cbor::KeyType { @@ -539,7 +538,6 @@ impl From for cbor::Value { PublicKeyCredentialSourceField::RpId => Some(credential.rp_id), PublicKeyCredentialSourceField::UserHandle => Some(credential.user_handle), PublicKeyCredentialSourceField::UserDisplayName => credential.user_display_name, - PublicKeyCredentialSourceField::CredRandom => credential.cred_random, PublicKeyCredentialSourceField::CredProtectPolicy => credential.cred_protect_policy, PublicKeyCredentialSourceField::CreationOrder => credential.creation_order, PublicKeyCredentialSourceField::UserName => credential.user_name, @@ -559,7 +557,6 @@ impl TryFrom for PublicKeyCredentialSource { PublicKeyCredentialSourceField::RpId => rp_id, PublicKeyCredentialSourceField::UserHandle => user_handle, PublicKeyCredentialSourceField::UserDisplayName => user_display_name, - PublicKeyCredentialSourceField::CredRandom => cred_random, PublicKeyCredentialSourceField::CredProtectPolicy => cred_protect_policy, PublicKeyCredentialSourceField::CreationOrder => creation_order, PublicKeyCredentialSourceField::UserName => user_name, @@ -577,7 +574,6 @@ impl TryFrom for PublicKeyCredentialSource { let rp_id = extract_text_string(ok_or_missing(rp_id)?)?; let user_handle = extract_byte_string(ok_or_missing(user_handle)?)?; let user_display_name = user_display_name.map(extract_text_string).transpose()?; - let cred_random = cred_random.map(extract_byte_string).transpose()?; let cred_protect_policy = cred_protect_policy .map(CredentialProtectionPolicy::try_from) .transpose()?; @@ -601,7 +597,6 @@ impl TryFrom for PublicKeyCredentialSource { rp_id, user_handle, user_display_name, - cred_random, cred_protect_policy, creation_order, user_name, @@ -1373,7 +1368,6 @@ mod test { rp_id: "example.com".to_string(), user_handle: b"foo".to_vec(), user_display_name: None, - cred_random: None, cred_protect_policy: None, creation_order: 0, user_name: None, @@ -1395,16 +1389,6 @@ mod test { Ok(credential.clone()) ); - let credential = PublicKeyCredentialSource { - cred_random: Some(vec![0x00; 32]), - ..credential - }; - - assert_eq!( - PublicKeyCredentialSource::try_from(cbor::Value::from(credential.clone())), - Ok(credential.clone()) - ); - let credential = PublicKeyCredentialSource { cred_protect_policy: Some(CredentialProtectionPolicy::UserVerificationOptional), ..credential diff --git a/src/ctap/mod.rs b/src/ctap/mod.rs index ea42caf..67a0e27 100644 --- a/src/ctap/mod.rs +++ b/src/ctap/mod.rs @@ -86,10 +86,8 @@ pub const INITIAL_SIGNATURE_COUNTER: u32 = 1; // - 16 byte initialization vector for AES-256, // - 32 byte ECDSA private key for the credential, // - 32 byte relying party ID hashed with SHA256, -// - (optional) 32 byte for HMAC-secret, // - 32 byte HMAC-SHA256 over everything else. -pub const CREDENTIAL_ID_BASE_SIZE: usize = 112; -pub const CREDENTIAL_ID_MAX_SIZE: usize = CREDENTIAL_ID_BASE_SIZE + 32; +pub const CREDENTIAL_ID_SIZE: usize = 112; // Set this bit when checking user presence. const UP_FLAG: u8 = 0x01; // Set this bit when checking user verification. 
@@ -142,6 +140,7 @@ struct AssertionInput { client_data_hash: Vec, auth_data: Vec, hmac_secret_input: Option, + has_uv: bool, } struct AssertionState { @@ -231,7 +230,6 @@ where &mut self, private_key: crypto::ecdsa::SecKey, application: &[u8; 32], - cred_random: Option<&[u8; 32]>, ) -> Result, Ctap2StatusCode> { let master_keys = self.persistent_store.master_keys()?; let aes_enc_key = crypto::aes256::EncryptionKey::new(&master_keys.encryption); @@ -240,16 +238,12 @@ where let mut iv = [0; 16]; iv.copy_from_slice(&self.rng.gen_uniform_u8x32()[..16]); - let block_len = if cred_random.is_some() { 6 } else { 4 }; + let block_len = 4; let mut blocks = vec![[0u8; 16]; block_len]; blocks[0].copy_from_slice(&sk_bytes[..16]); blocks[1].copy_from_slice(&sk_bytes[16..]); blocks[2].copy_from_slice(&application[..16]); blocks[3].copy_from_slice(&application[16..]); - if let Some(cred_random) = cred_random { - blocks[4].copy_from_slice(&cred_random[..16]); - blocks[5].copy_from_slice(&cred_random[16..]); - } cbc_encrypt(&aes_enc_key, iv, &mut blocks); let mut encrypted_id = Vec::with_capacity(16 * (block_len + 3)); @@ -270,11 +264,9 @@ where credential_id: Vec, rp_id_hash: &[u8], ) -> Result, Ctap2StatusCode> { - let has_cred_random = match credential_id.len() { - CREDENTIAL_ID_BASE_SIZE => false, - CREDENTIAL_ID_MAX_SIZE => true, - _ => return Ok(None), - }; + if credential_id.len() != CREDENTIAL_ID_SIZE { + return Ok(None); + } let master_keys = self.persistent_store.master_keys()?; let payload_size = credential_id.len() - 32; if !verify_hmac_256::( @@ -288,7 +280,7 @@ where let aes_dec_key = crypto::aes256::DecryptionKey::new(&aes_enc_key); let mut iv = [0; 16]; iv.copy_from_slice(&credential_id[..16]); - let block_len = if has_cred_random { 6 } else { 4 }; + let block_len = 4; let mut blocks = vec![[0u8; 16]; block_len]; for i in 0..block_len { blocks[i].copy_from_slice(&credential_id[16 * (i + 1)..16 * (i + 2)]); @@ -301,15 +293,6 @@ where decrypted_sk[16..].clone_from_slice(&blocks[1]); decrypted_rp_id_hash[..16].clone_from_slice(&blocks[2]); decrypted_rp_id_hash[16..].clone_from_slice(&blocks[3]); - let cred_random = if has_cred_random { - let mut decrypted_cred_random = [0; 32]; - decrypted_cred_random[..16].clone_from_slice(&blocks[4]); - decrypted_cred_random[16..].clone_from_slice(&blocks[5]); - Some(decrypted_cred_random.to_vec()) - } else { - None - }; - if rp_id_hash != decrypted_rp_id_hash { return Ok(None); } @@ -322,7 +305,6 @@ where rp_id: String::from(""), user_handle: vec![], user_display_name: None, - cred_random, cred_protect_policy: None, creation_order: 0, user_name: None, @@ -464,11 +446,6 @@ where (false, DEFAULT_CRED_PROTECT) }; - let cred_random = if use_hmac_extension { - Some(self.rng.gen_uniform_u8x32()) - } else { - None - }; let has_extension_output = use_hmac_extension || cred_protect_policy.is_some(); let rp_id = rp.rp_id; @@ -543,7 +520,6 @@ where user_display_name: user .user_display_name .map(|s| truncate_to_char_boundary(&s, 64).to_string()), - cred_random: cred_random.map(|c| c.to_vec()), cred_protect_policy, creation_order: self.persistent_store.new_creation_order()?, user_name: user @@ -556,7 +532,7 @@ where self.persistent_store.store_credential(credential_source)?; random_id } else { - self.encrypt_key_handle(sk.clone(), &rp_id_hash, cred_random.as_ref())? + self.encrypt_key_handle(sk.clone(), &rp_id_hash)? 
}; let mut auth_data = self.generate_auth_data(&rp_id_hash, flags)?; @@ -622,10 +598,25 @@ where )) } + // Generates a different per-credential secret for each UV mode. + // The computation is deterministic, and private_key expected to be unique. + fn generate_cred_random( + &mut self, + private_key: &crypto::ecdsa::SecKey, + has_uv: bool, + ) -> Result<[u8; 32], Ctap2StatusCode> { + let mut private_key_bytes = [0u8; 32]; + private_key.to_bytes(&mut private_key_bytes); + let salt = crypto::sha256::Sha256::hash(&private_key_bytes); + // TODO(kaczmarczyck) KDF? hash salt together with rp_id_hash? + let key = self.persistent_store.cred_random_secret(has_uv)?; + Ok(hmac_256::(&key, &salt[..])) + } + // Processes the input of a get_assertion operation for a given credential // and returns the correct Get(Next)Assertion response. fn assertion_response( - &self, + &mut self, credential: PublicKeyCredentialSource, assertion_input: AssertionInput, number_of_credentials: Option, @@ -634,13 +625,15 @@ where client_data_hash, mut auth_data, hmac_secret_input, + has_uv, } = assertion_input; // Process extensions. if let Some(hmac_secret_input) = hmac_secret_input { + let cred_random = self.generate_cred_random(&credential.private_key, has_uv)?; let encrypted_output = self .pin_protocol_v1 - .process_hmac_secret(hmac_secret_input, &credential.cred_random)?; + .process_hmac_secret(hmac_secret_input, &cred_random)?; let extensions_output = cbor_map! { "hmac-secret" => encrypted_output, }; @@ -807,6 +800,7 @@ where client_data_hash, auth_data: self.generate_auth_data(&rp_id_hash, flags)?, hmac_secret_input, + has_uv, }; let number_of_credentials = if applicable_credentials.is_empty() { None @@ -872,7 +866,7 @@ where max_credential_count_in_list: MAX_CREDENTIAL_COUNT_IN_LIST.map(|c| c as u64), // #TODO(106) update with version 2.1 of HMAC-secret #[cfg(feature = "with_ctap2_1")] - max_credential_id_length: Some(CREDENTIAL_ID_BASE_SIZE as u64 + 32), + max_credential_id_length: Some(CREDENTIAL_ID_SIZE as u64), #[cfg(feature = "with_ctap2_1")] transports: Some(vec![AuthenticatorTransport::Usb]), #[cfg(feature = "with_ctap2_1")] @@ -1010,7 +1004,7 @@ mod test { #[cfg(feature = "with_ctap2_1")] expected_response.extend( [ - 0x08, 0x18, 0x90, 0x09, 0x81, 0x63, 0x75, 0x73, 0x62, 0x0A, 0x81, 0xA2, 0x63, 0x61, + 0x08, 0x18, 0x70, 0x09, 0x81, 0x63, 0x75, 0x73, 0x62, 0x0A, 0x81, 0xA2, 0x63, 0x61, 0x6C, 0x67, 0x26, 0x64, 0x74, 0x79, 0x70, 0x65, 0x6A, 0x70, 0x75, 0x62, 0x6C, 0x69, 0x63, 0x2D, 0x6B, 0x65, 0x79, 0x0D, 0x04, ] @@ -1141,7 +1135,7 @@ mod test { ]; expected_auth_data.push(INITIAL_SIGNATURE_COUNTER as u8); expected_auth_data.extend(&ctap_state.persistent_store.aaguid().unwrap()); - expected_auth_data.extend(&[0x00, CREDENTIAL_ID_BASE_SIZE as u8]); + expected_auth_data.extend(&[0x00, CREDENTIAL_ID_SIZE as u8]); assert_eq!( auth_data[0..expected_auth_data.len()], expected_auth_data[..] @@ -1186,7 +1180,6 @@ mod test { rp_id: String::from("example.com"), user_handle: vec![], user_display_name: None, - cred_random: None, cred_protect_policy: None, creation_order: 0, user_name: None, @@ -1291,7 +1284,7 @@ mod test { ]; expected_auth_data.push(INITIAL_SIGNATURE_COUNTER as u8); expected_auth_data.extend(&ctap_state.persistent_store.aaguid().unwrap()); - expected_auth_data.extend(&[0x00, CREDENTIAL_ID_MAX_SIZE as u8]); + expected_auth_data.extend(&[0x00, CREDENTIAL_ID_SIZE as u8]); assert_eq!( auth_data[0..expected_auth_data.len()], expected_auth_data[..] 
@@ -1488,8 +1481,8 @@ mod test { let auth_data = make_credential_response.auth_data; let offset = 37 + ctap_state.persistent_store.aaguid().unwrap().len(); assert_eq!(auth_data[offset], 0x00); - assert_eq!(auth_data[offset + 1] as usize, CREDENTIAL_ID_MAX_SIZE); - auth_data[offset + 2..offset + 2 + CREDENTIAL_ID_MAX_SIZE].to_vec() + assert_eq!(auth_data[offset + 1] as usize, CREDENTIAL_ID_SIZE); + auth_data[offset + 2..offset + 2 + CREDENTIAL_ID_SIZE].to_vec() } _ => panic!("Invalid response type"), }; @@ -1604,7 +1597,6 @@ mod test { rp_id: String::from("example.com"), user_handle: vec![0x1D], user_display_name: None, - cred_random: None, cred_protect_policy: Some( CredentialProtectionPolicy::UserVerificationOptionalWithCredentialIdList, ), @@ -1669,7 +1661,6 @@ mod test { rp_id: String::from("example.com"), user_handle: vec![0x1D], user_display_name: None, - cred_random: None, cred_protect_policy: Some(CredentialProtectionPolicy::UserVerificationRequired), creation_order: 0, user_name: None, @@ -1942,7 +1933,6 @@ mod test { rp_id: String::from("example.com"), user_handle: vec![], user_display_name: None, - cred_random: None, cred_protect_policy: None, creation_order: 0, user_name: None, @@ -2013,7 +2003,7 @@ mod test { // We are not testing the correctness of our SHA256 here, only if it is checked. let rp_id_hash = [0x55; 32]; let encrypted_id = ctap_state - .encrypt_key_handle(private_key.clone(), &rp_id_hash, None) + .encrypt_key_handle(private_key.clone(), &rp_id_hash) .unwrap(); let decrypted_source = ctap_state .decrypt_credential_source(encrypted_id, &rp_id_hash) @@ -2023,29 +2013,6 @@ mod test { assert_eq!(private_key, decrypted_source.private_key); } - #[test] - fn test_encrypt_decrypt_credential_with_cred_random() { - let mut rng = ThreadRng256 {}; - let user_immediately_present = |_| Ok(()); - let private_key = crypto::ecdsa::SecKey::gensk(&mut rng); - let mut ctap_state = CtapState::new(&mut rng, user_immediately_present, DUMMY_CLOCK_VALUE); - - // Usually, the relying party ID or its hash is provided by the client. - // We are not testing the correctness of our SHA256 here, only if it is checked. - let rp_id_hash = [0x55; 32]; - let cred_random = [0xC9; 32]; - let encrypted_id = ctap_state - .encrypt_key_handle(private_key.clone(), &rp_id_hash, Some(&cred_random)) - .unwrap(); - let decrypted_source = ctap_state - .decrypt_credential_source(encrypted_id, &rp_id_hash) - .unwrap() - .unwrap(); - - assert_eq!(private_key, decrypted_source.private_key); - assert_eq!(Some(cred_random.to_vec()), decrypted_source.cred_random); - } - #[test] fn test_encrypt_decrypt_bad_hmac() { let mut rng = ThreadRng256 {}; @@ -2056,30 +2023,7 @@ mod test { // Same as above. let rp_id_hash = [0x55; 32]; let encrypted_id = ctap_state - .encrypt_key_handle(private_key, &rp_id_hash, None) - .unwrap(); - for i in 0..encrypted_id.len() { - let mut modified_id = encrypted_id.clone(); - modified_id[i] ^= 0x01; - assert!(ctap_state - .decrypt_credential_source(modified_id, &rp_id_hash) - .unwrap() - .is_none()); - } - } - - #[test] - fn test_encrypt_decrypt_bad_hmac_with_cred_random() { - let mut rng = ThreadRng256 {}; - let user_immediately_present = |_| Ok(()); - let private_key = crypto::ecdsa::SecKey::gensk(&mut rng); - let mut ctap_state = CtapState::new(&mut rng, user_immediately_present, DUMMY_CLOCK_VALUE); - - // Same as above. 
- let rp_id_hash = [0x55; 32]; - let cred_random = [0xC9; 32]; - let encrypted_id = ctap_state - .encrypt_key_handle(private_key, &rp_id_hash, Some(&cred_random)) + .encrypt_key_handle(private_key, &rp_id_hash) .unwrap(); for i in 0..encrypted_id.len() { let mut modified_id = encrypted_id.clone(); diff --git a/src/ctap/pin_protocol_v1.rs b/src/ctap/pin_protocol_v1.rs index 44aad71..410dac7 100644 --- a/src/ctap/pin_protocol_v1.rs +++ b/src/ctap/pin_protocol_v1.rs @@ -56,23 +56,16 @@ fn verify_pin_auth(hmac_key: &[u8], hmac_contents: &[u8], pin_auth: &[u8]) -> bo fn encrypt_hmac_secret_output( shared_secret: &[u8; 32], salt_enc: &[u8], - cred_random: &[u8], + cred_random: &[u8; 32], ) -> Result, Ctap2StatusCode> { if salt_enc.len() != 32 && salt_enc.len() != 64 { return Err(Ctap2StatusCode::CTAP2_ERR_UNSUPPORTED_EXTENSION); } - if cred_random.len() != 32 { - // We are strict here. We need at least 32 byte, but expect exactly 32. - return Err(Ctap2StatusCode::CTAP2_ERR_UNSUPPORTED_EXTENSION); - } let aes_enc_key = crypto::aes256::EncryptionKey::new(shared_secret); let aes_dec_key = crypto::aes256::DecryptionKey::new(&aes_enc_key); // The specification specifically asks for a zero IV. let iv = [0u8; 16]; - let mut cred_random_secret = [0u8; 32]; - cred_random_secret.copy_from_slice(cred_random); - // With the if clause restriction above, block_len can only be 2 or 4. let block_len = salt_enc.len() / 16; let mut blocks = vec![[0u8; 16]; block_len]; @@ -84,7 +77,7 @@ fn encrypt_hmac_secret_output( let mut decrypted_salt1 = [0u8; 32]; decrypted_salt1[..16].copy_from_slice(&blocks[0]); decrypted_salt1[16..].copy_from_slice(&blocks[1]); - let output1 = hmac_256::(&cred_random_secret, &decrypted_salt1[..]); + let output1 = hmac_256::(&cred_random[..], &decrypted_salt1[..]); for i in 0..2 { blocks[i].copy_from_slice(&output1[16 * i..16 * (i + 1)]); } @@ -93,7 +86,7 @@ fn encrypt_hmac_secret_output( let mut decrypted_salt2 = [0u8; 32]; decrypted_salt2[..16].copy_from_slice(&blocks[2]); decrypted_salt2[16..].copy_from_slice(&blocks[3]); - let output2 = hmac_256::(&cred_random_secret, &decrypted_salt2[..]); + let output2 = hmac_256::(&cred_random[..], &decrypted_salt2[..]); for i in 0..2 { blocks[i + 2].copy_from_slice(&output2[16 * i..16 * (i + 1)]); } @@ -588,7 +581,7 @@ impl PinProtocolV1 { pub fn process_hmac_secret( &self, hmac_secret_input: GetAssertionHmacSecretInput, - cred_random: &Option>, + cred_random: &[u8; 32], ) -> Result, Ctap2StatusCode> { let GetAssertionHmacSecretInput { key_agreement, @@ -602,12 +595,7 @@ impl PinProtocolV1 { // Hard to tell what the correct error code here is. return Err(Ctap2StatusCode::CTAP2_ERR_UNSUPPORTED_EXTENSION); } - - match cred_random { - Some(cr) => encrypt_hmac_secret_output(&shared_secret, &salt_enc[..], cr), - // This is the case if the credential was not created with HMAC-secret. 
- None => Err(Ctap2StatusCode::CTAP2_ERR_UNSUPPORTED_EXTENSION), - } + encrypt_hmac_secret_output(&shared_secret, &salt_enc[..], cred_random) } #[cfg(feature = "with_ctap2_1")] @@ -1195,14 +1183,6 @@ mod test { let output = encrypt_hmac_secret_output(&shared_secret, &salt_enc, &cred_random); assert_eq!(output.unwrap().len(), 64); - let salt_enc = [0x5E; 32]; - let cred_random = [0xC9; 33]; - let output = encrypt_hmac_secret_output(&shared_secret, &salt_enc, &cred_random); - assert_eq!( - output, - Err(Ctap2StatusCode::CTAP2_ERR_UNSUPPORTED_EXTENSION) - ); - let mut salt_enc = [0x00; 32]; let cred_random = [0xC9; 32]; diff --git a/src/ctap/storage.rs b/src/ctap/storage.rs index 911bdd7..ae38219 100644 --- a/src/ctap/storage.rs +++ b/src/ctap/storage.rs @@ -72,9 +72,10 @@ const AAGUID: usize = 7; const MIN_PIN_LENGTH: usize = 8; #[cfg(feature = "with_ctap2_1")] const MIN_PIN_LENGTH_RP_IDS: usize = 9; +const CRED_RANDOM_SECRET: usize = 10; // Different NUM_TAGS depending on the CTAP version make the storage incompatible, // so we use the maximum. -const NUM_TAGS: usize = 10; +const NUM_TAGS: usize = 11; const MAX_PIN_RETRIES: u8 = 8; #[cfg(feature = "with_ctap2_1")] @@ -108,6 +109,7 @@ enum Key { MinPinLength, #[cfg(feature = "with_ctap2_1")] MinPinLengthRpIds, + CredRandomSecret, } pub struct MasterKeys { @@ -166,6 +168,7 @@ impl StoreConfig for Config { MIN_PIN_LENGTH => add(Key::MinPinLength), #[cfg(feature = "with_ctap2_1")] MIN_PIN_LENGTH_RP_IDS => add(Key::MinPinLengthRpIds), + CRED_RANDOM_SECRET => add(Key::CredRandomSecret), _ => debug_assert!(false), } } @@ -230,6 +233,22 @@ impl PersistentStore { }) .unwrap(); } + + if self.store.find_one(&Key::CredRandomSecret).is_none() { + let cred_random_with_uv = rng.gen_uniform_u8x32(); + let cred_random_without_uv = rng.gen_uniform_u8x32(); + let mut cred_random = Vec::with_capacity(64); + cred_random.extend_from_slice(&cred_random_without_uv); + cred_random.extend_from_slice(&cred_random_with_uv); + self.store + .insert(StoreEntry { + tag: CRED_RANDOM_SECRET, + data: &cred_random, + sensitive: true, + }) + .unwrap(); + } + // TODO(jmichel): remove this when vendor command is in place #[cfg(not(test))] self.load_attestation_data_from_firmware(); @@ -411,6 +430,15 @@ impl PersistentStore { }) } + pub fn cred_random_secret(&self, has_uv: bool) -> Result<[u8; 32], Ctap2StatusCode> { + let (_, entry) = self.store.find_one(&Key::CredRandomSecret).unwrap(); + if entry.data.len() != 64 { + return Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR); + } + let offset = if has_uv { 32 } else { 0 }; + Ok(*array_ref![entry.data, offset, 32]) + } + pub fn pin_hash(&self) -> Result, Ctap2StatusCode> { let data = match self.store.find_one(&Key::PinHash) { None => return Ok(None), @@ -721,7 +749,6 @@ mod test { rp_id: String::from(rp_id), user_handle, user_display_name: None, - cred_random: None, cred_protect_policy: None, creation_order: 0, user_name: None, @@ -900,7 +927,6 @@ mod test { rp_id: String::from("example.com"), user_handle: vec![0x00], user_display_name: None, - cred_random: None, cred_protect_policy: Some( CredentialProtectionPolicy::UserVerificationOptionalWithCredentialIdList, ), @@ -946,7 +972,6 @@ mod test { rp_id: String::from("example.com"), user_handle: vec![0x00], user_display_name: None, - cred_random: None, cred_protect_policy: None, creation_order: 0, user_name: None, @@ -968,7 +993,6 @@ mod test { rp_id: String::from("example.com"), user_handle: vec![0x00], user_display_name: None, - cred_random: None, cred_protect_policy: 
Some(CredentialProtectionPolicy::UserVerificationRequired), creation_order: 0, user_name: None, @@ -987,7 +1011,7 @@ mod test { let mut rng = ThreadRng256 {}; let mut persistent_store = PersistentStore::new(&mut rng); - // Master keys stay the same between resets. + // Master keys stay the same between calls. let master_keys_1 = persistent_store.master_keys().unwrap(); let master_keys_2 = persistent_store.master_keys().unwrap(); assert_eq!(master_keys_2.encryption, master_keys_1.encryption); @@ -1003,6 +1027,28 @@ mod test { assert!(master_keys_3.hmac != master_hmac_key.as_slice()); } + #[test] + fn test_cred_random_secret() { + let mut rng = ThreadRng256 {}; + let mut persistent_store = PersistentStore::new(&mut rng); + + // Master keys stay the same between calls. + let cred_random_with_uv_1 = persistent_store.cred_random_secret(true).unwrap(); + let cred_random_without_uv_1 = persistent_store.cred_random_secret(false).unwrap(); + let cred_random_with_uv_2 = persistent_store.cred_random_secret(true).unwrap(); + let cred_random_without_uv_2 = persistent_store.cred_random_secret(false).unwrap(); + assert_eq!(cred_random_with_uv_1, cred_random_with_uv_2); + assert_eq!(cred_random_without_uv_1, cred_random_without_uv_2); + + // Master keys change after reset. This test may fail if the random generator produces the + // same keys. + persistent_store.reset(&mut rng).unwrap(); + let cred_random_with_uv_3 = persistent_store.cred_random_secret(true).unwrap(); + let cred_random_without_uv_3 = persistent_store.cred_random_secret(false).unwrap(); + assert!(cred_random_with_uv_1 != cred_random_with_uv_3); + assert!(cred_random_without_uv_1 != cred_random_without_uv_3); + } + #[test] fn test_pin_hash() { let mut rng = ThreadRng256 {}; @@ -1170,7 +1216,6 @@ mod test { rp_id: String::from("example.com"), user_handle: vec![0x00], user_display_name: None, - cred_random: None, cred_protect_policy: None, creation_order: 0, user_name: None, From fcbaf1e973f396eb2911bb9f53f37ff650f1b0ce Mon Sep 17 00:00:00 2001 From: Fabian Kaczmarczyck Date: Tue, 8 Dec 2020 19:31:56 +0100 Subject: [PATCH 07/20] fixes comments --- src/ctap/storage.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/ctap/storage.rs b/src/ctap/storage.rs index ae38219..7be33f1 100644 --- a/src/ctap/storage.rs +++ b/src/ctap/storage.rs @@ -1011,7 +1011,7 @@ mod test { let mut rng = ThreadRng256 {}; let mut persistent_store = PersistentStore::new(&mut rng); - // Master keys stay the same between calls. + // Master keys stay the same within the same CTAP reset cycle. let master_keys_1 = persistent_store.master_keys().unwrap(); let master_keys_2 = persistent_store.master_keys().unwrap(); assert_eq!(master_keys_2.encryption, master_keys_1.encryption); @@ -1032,7 +1032,7 @@ mod test { let mut rng = ThreadRng256 {}; let mut persistent_store = PersistentStore::new(&mut rng); - // Master keys stay the same between calls. + // CredRandom secrets stay the same within the same CTAP reset cycle. let cred_random_with_uv_1 = persistent_store.cred_random_secret(true).unwrap(); let cred_random_without_uv_1 = persistent_store.cred_random_secret(false).unwrap(); let cred_random_with_uv_2 = persistent_store.cred_random_secret(true).unwrap(); @@ -1040,7 +1040,7 @@ mod test { assert_eq!(cred_random_with_uv_1, cred_random_with_uv_2); assert_eq!(cred_random_without_uv_1, cred_random_without_uv_2); - // Master keys change after reset. This test may fail if the random generator produces the + // CredRandom secrets change after reset. 
This test may fail if the random generator produces the // same keys. persistent_store.reset(&mut rng).unwrap(); let cred_random_with_uv_3 = persistent_store.cred_random_secret(true).unwrap(); From 8965c6c8fb2d3879dcb84ec9efdd2af8899947c0 Mon Sep 17 00:00:00 2001 From: Julien Cretin Date: Tue, 8 Dec 2020 20:45:27 +0100 Subject: [PATCH 08/20] Rename and use HARDWARE_FAILURE error --- src/ctap/status_code.rs | 13 +++---------- src/ctap/storage.rs | 28 +++++++++++++--------------- 2 files changed, 16 insertions(+), 25 deletions(-) diff --git a/src/ctap/status_code.rs b/src/ctap/status_code.rs index 638caef..097d7ec 100644 --- a/src/ctap/status_code.rs +++ b/src/ctap/status_code.rs @@ -81,17 +81,10 @@ pub enum Ctap2StatusCode { /// This type of error is unexpected and the current state is undefined. CTAP2_ERR_VENDOR_INTERNAL_ERROR = 0xF2, - /// The persistent storage invariant is broken. + /// The hardware is malfunctioning. /// - /// There can be multiple reasons: - /// - The persistent storage has not been erased before its first usage. - /// - The persistent storage has been tempered with by a third party. - /// - The flash is malfunctioning (including the Tock driver). - /// - /// In the first 2 cases the persistent storage should be completely erased. If the error - /// reproduces, it may indicate a software bug or a hardware deficiency. In both cases, the - /// error should be reported. - CTAP2_ERR_VENDOR_INVALID_PERSISTENT_STORAGE = 0xF3, + /// It may be possible that some of those errors are actually internal errors. + CTAP2_ERR_VENDOR_HARDWARE_FAILURE = 0xF3, CTAP2_ERR_VENDOR_LAST = 0xFF, } diff --git a/src/ctap/storage.rs b/src/ctap/storage.rs index 40ec89a..0660b6c 100644 --- a/src/ctap/storage.rs +++ b/src/ctap/storage.rs @@ -216,7 +216,7 @@ impl PersistentStore { && credential.user_handle == new_credential.user_handle { if old_key.is_some() { - return Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INVALID_PERSISTENT_STORAGE); + return Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR); } old_key = Some(key); } @@ -231,7 +231,7 @@ impl PersistentStore { None => key::CREDENTIALS .take(MAX_SUPPORTED_RESIDENTIAL_KEYS) .find(|key| !keys.contains(key)) - .ok_or(Ctap2StatusCode::CTAP2_ERR_VENDOR_INVALID_PERSISTENT_STORAGE)?, + .ok_or(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR)?, // This is an existing credential being updated, we reuse its key. Some(x) => x, }; @@ -298,7 +298,7 @@ impl PersistentStore { match self.store.find(key::GLOBAL_SIGNATURE_COUNTER)? { None => Ok(INITIAL_SIGNATURE_COUNTER), Some(value) if value.len() == 4 => Ok(u32::from_ne_bytes(*array_ref!(&value, 0, 4))), - Some(_) => Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INVALID_PERSISTENT_STORAGE), + Some(_) => Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR), } } @@ -317,9 +317,9 @@ impl PersistentStore { let master_keys = self .store .find(key::MASTER_KEYS)? 
- .ok_or(Ctap2StatusCode::CTAP2_ERR_VENDOR_INVALID_PERSISTENT_STORAGE)?; + .ok_or(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR)?; if master_keys.len() != 64 { - return Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INVALID_PERSISTENT_STORAGE); + return Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR); } Ok(MasterKeys { encryption: *array_ref![master_keys, 0, 32], @@ -334,7 +334,7 @@ impl PersistentStore { Some(pin_hash) => pin_hash, }; if pin_hash.len() != PIN_AUTH_LENGTH { - return Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INVALID_PERSISTENT_STORAGE); + return Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR); } Ok(Some(*array_ref![pin_hash, 0, PIN_AUTH_LENGTH])) } @@ -354,7 +354,7 @@ impl PersistentStore { match self.store.find(key::PIN_RETRIES)? { None => Ok(MAX_PIN_RETRIES), Some(value) if value.len() == 1 => Ok(value[0]), - _ => Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INVALID_PERSISTENT_STORAGE), + _ => Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR), } } @@ -379,7 +379,7 @@ impl PersistentStore { match self.store.find(key::MIN_PIN_LENGTH)? { None => Ok(DEFAULT_MIN_PIN_LENGTH), Some(value) if value.len() == 1 => Ok(value[0]), - _ => Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INVALID_PERSISTENT_STORAGE), + _ => Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR), } } @@ -437,7 +437,7 @@ impl PersistentStore { key_material::ATTESTATION_PRIVATE_KEY_LENGTH ])) } - Some(_) => Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INVALID_PERSISTENT_STORAGE), + Some(_) => Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR), } } @@ -481,9 +481,9 @@ impl PersistentStore { let aaguid = self .store .find(key::AAGUID)? - .ok_or(Ctap2StatusCode::CTAP2_ERR_VENDOR_INVALID_PERSISTENT_STORAGE)?; + .ok_or(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR)?; if aaguid.len() != key_material::AAGUID_LENGTH { - return Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INVALID_PERSISTENT_STORAGE); + return Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR); } Ok(*array_ref![aaguid, 0, key_material::AAGUID_LENGTH]) } @@ -521,9 +521,7 @@ impl From for Ctap2StatusCode { StoreError::InvalidArgument => Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR, // This error is not expected. The storage has been tempered with. We could erase the // storage. - StoreError::InvalidStorage => { - Ctap2StatusCode::CTAP2_ERR_VENDOR_INVALID_PERSISTENT_STORAGE - } + StoreError::InvalidStorage => Ctap2StatusCode::CTAP2_ERR_VENDOR_HARDWARE_FAILURE, // This error is not expected. The kernel is failing our syscalls. StoreError::StorageError => Ctap2StatusCode::CTAP1_ERR_OTHER, } @@ -566,7 +564,7 @@ impl<'a> IterCredentials<'a> { /// instead of statements only. 
fn unwrap(&mut self, x: Option) -> Option { if x.is_none() { - *self.result = Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INVALID_PERSISTENT_STORAGE); + *self.result = Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR); } x } From 776093a68b54bf82c249c54777184f5e2990ed35 Mon Sep 17 00:00:00 2001 From: Julien Cretin Date: Wed, 9 Dec 2020 10:52:51 +0100 Subject: [PATCH 09/20] Find the next free key in a linear way --- src/ctap/storage.rs | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/src/ctap/storage.rs b/src/ctap/storage.rs index 0660b6c..f48b70a 100644 --- a/src/ctap/storage.rs +++ b/src/ctap/storage.rs @@ -23,7 +23,6 @@ use crate::ctap::status_code::Ctap2StatusCode; use crate::ctap::INITIAL_SIGNATURE_COUNTER; #[cfg(feature = "with_ctap2_1")] use alloc::string::String; -#[cfg(any(test, feature = "ram_storage", feature = "with_ctap2_1"))] use alloc::vec; use alloc::vec::Vec; use arrayref::array_ref; @@ -206,12 +205,19 @@ impl PersistentStore { ) -> Result<(), Ctap2StatusCode> { // Holds the key of the existing credential if this is an update. let mut old_key = None; - // Holds the unordered list of used keys. - let mut keys = Vec::new(); + let min_key = key::CREDENTIALS.start; + // Holds whether a key is used (indices are shifted by min_key). + let mut keys = vec![false; MAX_SUPPORTED_RESIDENTIAL_KEYS]; let mut iter_result = Ok(()); let iter = self.iter_credentials(&mut iter_result)?; for (key, credential) in iter { - keys.push(key); + if key < min_key + || key - min_key >= MAX_SUPPORTED_RESIDENTIAL_KEYS + || keys[key - min_key] + { + return Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR); + } + keys[key - min_key] = true; if credential.rp_id == new_credential.rp_id && credential.user_handle == new_credential.user_handle { @@ -222,15 +228,17 @@ impl PersistentStore { } } iter_result?; - if old_key.is_none() && keys.len() >= MAX_SUPPORTED_RESIDENTIAL_KEYS { + if old_key.is_none() + && keys.iter().filter(|&&x| x).count() >= MAX_SUPPORTED_RESIDENTIAL_KEYS + { return Err(Ctap2StatusCode::CTAP2_ERR_KEY_STORE_FULL); } let key = match old_key { // This is a new credential being added, we need to allocate a free key. We choose the - // first available key. This is quadratic in the number of existing keys. + // first available key. None => key::CREDENTIALS .take(MAX_SUPPORTED_RESIDENTIAL_KEYS) - .find(|key| !keys.contains(key)) + .find(|key| !keys[key - min_key]) .ok_or(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR)?, // This is an existing credential being updated, we reuse its key. Some(x) => x, From 62dd088cd0ba55e0299dffe47fb20521c8874dee Mon Sep 17 00:00:00 2001 From: Jean-Michel Picod Date: Wed, 9 Dec 2020 18:55:08 +0100 Subject: [PATCH 10/20] Add missing license header. --- src/ctap/apdu.rs | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/src/ctap/apdu.rs b/src/ctap/apdu.rs index 949151e..bb94a91 100644 --- a/src/ctap/apdu.rs +++ b/src/ctap/apdu.rs @@ -1,3 +1,17 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + use alloc::vec::Vec; use byteorder::{BigEndian, ByteOrder}; use core::convert::TryFrom; From 863bf521deabf4fe02ac2ac665a907181c942c87 Mon Sep 17 00:00:00 2001 From: Fabian Kaczmarczyck Date: Wed, 9 Dec 2020 19:05:03 +0100 Subject: [PATCH 11/20] removes extra sha256 --- src/ctap/mod.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/ctap/mod.rs b/src/ctap/mod.rs index 67a0e27..55494a0 100644 --- a/src/ctap/mod.rs +++ b/src/ctap/mod.rs @@ -607,10 +607,8 @@ where ) -> Result<[u8; 32], Ctap2StatusCode> { let mut private_key_bytes = [0u8; 32]; private_key.to_bytes(&mut private_key_bytes); - let salt = crypto::sha256::Sha256::hash(&private_key_bytes); - // TODO(kaczmarczyck) KDF? hash salt together with rp_id_hash? let key = self.persistent_store.cred_random_secret(has_uv)?; - Ok(hmac_256::(&key, &salt[..])) + Ok(hmac_256::(&key, &private_key_bytes[..])) } // Processes the input of a get_assertion operation for a given credential From d942f0173f5cb88d395d96ca9d83a65835bd4916 Mon Sep 17 00:00:00 2001 From: Fabian Kaczmarczyck Date: Wed, 9 Dec 2020 20:09:49 +0100 Subject: [PATCH 12/20] reverts block_len to a fixed number --- src/ctap/mod.rs | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/src/ctap/mod.rs b/src/ctap/mod.rs index 55494a0..a00fecf 100644 --- a/src/ctap/mod.rs +++ b/src/ctap/mod.rs @@ -238,15 +238,14 @@ where let mut iv = [0; 16]; iv.copy_from_slice(&self.rng.gen_uniform_u8x32()[..16]); - let block_len = 4; - let mut blocks = vec![[0u8; 16]; block_len]; + let mut blocks = [[0u8; 16]; 4]; blocks[0].copy_from_slice(&sk_bytes[..16]); blocks[1].copy_from_slice(&sk_bytes[16..]); blocks[2].copy_from_slice(&application[..16]); blocks[3].copy_from_slice(&application[16..]); cbc_encrypt(&aes_enc_key, iv, &mut blocks); - let mut encrypted_id = Vec::with_capacity(16 * (block_len + 3)); + let mut encrypted_id = Vec::with_capacity(0x70); encrypted_id.extend(&iv); for b in &blocks { encrypted_id.extend(b); @@ -280,9 +279,8 @@ where let aes_dec_key = crypto::aes256::DecryptionKey::new(&aes_enc_key); let mut iv = [0; 16]; iv.copy_from_slice(&credential_id[..16]); - let block_len = 4; - let mut blocks = vec![[0u8; 16]; block_len]; - for i in 0..block_len { + let mut blocks = [[0u8; 16]; 4]; + for i in 0..4 { blocks[i].copy_from_slice(&credential_id[16 * (i + 1)..16 * (i + 2)]); } @@ -608,7 +606,7 @@ where let mut private_key_bytes = [0u8; 32]; private_key.to_bytes(&mut private_key_bytes); let key = self.persistent_store.cred_random_secret(has_uv)?; - Ok(hmac_256::(&key, &private_key_bytes[..])) + Ok(hmac_256::(&key, &private_key_bytes)) } // Processes the input of a get_assertion operation for a given credential From c2de3e7ed9da29e43a9c6fee8dd085bbc4b3c52c Mon Sep 17 00:00:00 2001 From: Jean-Michel Picod Date: Thu, 10 Dec 2020 10:02:48 +0100 Subject: [PATCH 13/20] Use a vscode workspace instead of local settings. 
--- .vscode/extensions.json | 7 ----- .vscode/settings.json | 27 ------------------- OpenSK.code-workspace | 57 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 57 insertions(+), 34 deletions(-) delete mode 100644 .vscode/extensions.json delete mode 100644 .vscode/settings.json create mode 100644 OpenSK.code-workspace diff --git a/.vscode/extensions.json b/.vscode/extensions.json deleted file mode 100644 index cce0ec5..0000000 --- a/.vscode/extensions.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "recommendations": [ - "davidanson.vscode-markdownlint", - "rust-lang.rust", - "ms-python.python" - ] -} diff --git a/.vscode/settings.json b/.vscode/settings.json deleted file mode 100644 index 138097a..0000000 --- a/.vscode/settings.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "clang-format.fallbackStyle": "google", - "editor.detectIndentation": true, - "editor.formatOnPaste": false, - "editor.formatOnSave": true, - "editor.formatOnType": true, - "editor.insertSpaces": true, - "editor.tabSize": 4, - "files.insertFinalNewline": true, - "files.trimTrailingWhitespace": true, - "rust-client.channel": "nightly", - // The toolchain is updated from time to time so let's make sure that RLS is updated too - "rust-client.updateOnStartup": true, - "rust.clippy_preference": "on", - // Try to make VSCode formating as close as possible to the Google style. - "python.formatting.provider": "yapf", - "python.formatting.yapfArgs": [ - "--style=yapf" - ], - "python.linting.enabled": true, - "python.linting.lintOnSave": true, - "python.linting.pylintEnabled": true, - "python.linting.pylintPath": "pylint", - "[python]": { - "editor.tabSize": 2 - }, -} diff --git a/OpenSK.code-workspace b/OpenSK.code-workspace new file mode 100644 index 0000000..c367c89 --- /dev/null +++ b/OpenSK.code-workspace @@ -0,0 +1,57 @@ +{ + "folders": [ + { + "name": "OpenSK", + "path": "." + }, + { + "name": "tock", + "path": "third_party/tock" + }, + { + "name": "libtock-rs", + "path": "third_party/libtock-rs" + }, + { + "name": "libtock-drivers", + "path": "third_party/libtock-drivers" + } + ], + "settings": { + "clang-format.fallbackStyle": "google", + "editor.detectIndentation": true, + "editor.formatOnPaste": false, + "editor.formatOnSave": true, + "editor.formatOnType": true, + "editor.insertSpaces": true, + "editor.tabSize": 4, + "files.insertFinalNewline": true, + "files.trimTrailingWhitespace": true, + // Ensure we use the toolchain we set in rust-toolchain file + "rust-client.channel": "default", + // The toolchain is updated from time to time so let's make sure that RLS is updated too + "rust-client.updateOnStartup": true, + "rust.clippy_preference": "on", + "rust.target": "thumbv7em-none-eabi", + "rust.all_targets": false, + // Try to make VSCode formating as close as possible to the Google style. + "python.formatting.provider": "yapf", + "python.formatting.yapfArgs": [ + "--style=yapf" + ], + "python.linting.enabled": true, + "python.linting.lintOnSave": true, + "python.linting.pylintEnabled": true, + "python.linting.pylintPath": "pylint", + "[python]": { + "editor.tabSize": 2 + }, + }, + "extensions": { + "recommendations": [ + "davidanson.vscode-markdownlint", + "rust-lang.rust", + "ms-python.python" + ] + } +} From 4253854cf145722839fe9c53331ae8d418e7a171 Mon Sep 17 00:00:00 2001 From: Julien Cretin Date: Thu, 10 Dec 2020 13:06:05 +0100 Subject: [PATCH 14/20] Remove ram_storage feature We don't need to build a production key without persistent storage. 
Tests and fuzzing continue to use the std feature to use the RAM implementation (that does sanity checks). --- .github/workflows/cargo_check.yml | 6 ------ Cargo.toml | 1 - deploy.py | 8 -------- fuzz/fuzz_helper/Cargo.toml | 2 +- libraries/persistent_store/src/lib.rs | 2 ++ run_desktop_tests.sh | 1 - src/ctap/storage.rs | 18 ++++++------------ src/embedded_flash/mod.rs | 4 ++-- 8 files changed, 11 insertions(+), 31 deletions(-) diff --git a/.github/workflows/cargo_check.yml b/.github/workflows/cargo_check.yml index 6676e16..fd39614 100644 --- a/.github/workflows/cargo_check.yml +++ b/.github/workflows/cargo_check.yml @@ -66,12 +66,6 @@ jobs: command: check args: --target thumbv7em-none-eabi --release --features debug_allocations - - name: Check OpenSK ram_storage - uses: actions-rs/cargo@v1 - with: - command: check - args: --target thumbv7em-none-eabi --release --features ram_storage - - name: Check OpenSK verbose uses: actions-rs/cargo@v1 with: diff --git a/Cargo.toml b/Cargo.toml index 5b01795..ed293cc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -25,7 +25,6 @@ debug_allocations = ["lang_items/debug_allocations"] debug_ctap = ["crypto/derive_debug", "libtock_drivers/debug_ctap"] panic_console = ["lang_items/panic_console"] std = ["cbor/std", "crypto/std", "crypto/derive_debug", "lang_items/std", "persistent_store/std"] -ram_storage = [] verbose = ["debug_ctap", "libtock_drivers/verbose_usb"] with_ctap1 = ["crypto/with_ctap1"] with_ctap2_1 = [] diff --git a/deploy.py b/deploy.py index e1ec38f..b28e645 100755 --- a/deploy.py +++ b/deploy.py @@ -863,14 +863,6 @@ if __name__ == "__main__": "This is useful to allow flashing multiple OpenSK authenticators " "in a row without them being considered clones."), ) - main_parser.add_argument( - "--no-persistent-storage", - action="append_const", - const="ram_storage", - dest="features", - help=("Compiles and installs the OpenSK application without persistent " - "storage (i.e. 
unplugging the key will reset the key)."), - ) main_parser.add_argument( "--elf2tab-output", diff --git a/fuzz/fuzz_helper/Cargo.toml b/fuzz/fuzz_helper/Cargo.toml index 3b70f43..2f2a5c1 100644 --- a/fuzz/fuzz_helper/Cargo.toml +++ b/fuzz/fuzz_helper/Cargo.toml @@ -10,5 +10,5 @@ arrayref = "0.3.6" libtock_drivers = { path = "../../third_party/libtock-drivers" } crypto = { path = "../../libraries/crypto", features = ['std'] } cbor = { path = "../../libraries/cbor", features = ['std'] } -ctap2 = { path = "../..", features = ['std', 'ram_storage'] } +ctap2 = { path = "../..", features = ['std'] } lang_items = { path = "../../third_party/lang-items", features = ['std'] } diff --git a/libraries/persistent_store/src/lib.rs b/libraries/persistent_store/src/lib.rs index 9b87bd4..c8be44b 100644 --- a/libraries/persistent_store/src/lib.rs +++ b/libraries/persistent_store/src/lib.rs @@ -348,6 +348,7 @@ #[macro_use] extern crate alloc; +#[cfg(feature = "std")] mod buffer; #[cfg(feature = "std")] mod driver; @@ -357,6 +358,7 @@ mod model; mod storage; mod store; +#[cfg(feature = "std")] pub use self::buffer::{BufferCorruptFunction, BufferOptions, BufferStorage}; #[cfg(feature = "std")] pub use self::driver::{ diff --git a/run_desktop_tests.sh b/run_desktop_tests.sh index 7f3b8f3..2e80b3d 100755 --- a/run_desktop_tests.sh +++ b/run_desktop_tests.sh @@ -48,7 +48,6 @@ cargo check --release --target=thumbv7em-none-eabi --features with_ctap2_1 cargo check --release --target=thumbv7em-none-eabi --features debug_ctap cargo check --release --target=thumbv7em-none-eabi --features panic_console cargo check --release --target=thumbv7em-none-eabi --features debug_allocations -cargo check --release --target=thumbv7em-none-eabi --features ram_storage cargo check --release --target=thumbv7em-none-eabi --features verbose cargo check --release --target=thumbv7em-none-eabi --features debug_ctap,with_ctap1 cargo check --release --target=thumbv7em-none-eabi --features debug_ctap,with_ctap1,panic_console,debug_allocations,verbose diff --git a/src/ctap/storage.rs b/src/ctap/storage.rs index 87c4c6c..5e42ec7 100644 --- a/src/ctap/storage.rs +++ b/src/ctap/storage.rs @@ -31,9 +31,9 @@ use cbor::cbor_array_vec; use core::convert::TryInto; use crypto::rng256::Rng256; -#[cfg(any(test, feature = "ram_storage"))] +#[cfg(feature = "std")] type Storage = persistent_store::BufferStorage; -#[cfg(not(any(test, feature = "ram_storage")))] +#[cfg(not(feature = "std"))] type Storage = crate::embedded_flash::SyscallStorage; // Those constants may be modified before compilation to tune the behavior of the key. @@ -54,9 +54,6 @@ type Storage = crate::embedded_flash::SyscallStorage; // We have: I = (P * 4084 - 5107 - K * S) / 8 * C // // With P=20 and K=150, we have I=2M which is enough for 500 increments per day for 10 years. -#[cfg(feature = "ram_storage")] -const NUM_PAGES: usize = 3; -#[cfg(not(feature = "ram_storage"))] const NUM_PAGES: usize = 20; const MAX_SUPPORTED_RESIDENTIAL_KEYS: usize = 150; @@ -92,9 +89,9 @@ impl PersistentStore { /// /// This should be at most one instance of persistent store per program lifetime. 
pub fn new(rng: &mut impl Rng256) -> PersistentStore { - #[cfg(not(any(test, feature = "ram_storage")))] + #[cfg(not(feature = "std"))] let storage = PersistentStore::new_prod_storage(); - #[cfg(any(test, feature = "ram_storage"))] + #[cfg(feature = "std")] let storage = PersistentStore::new_test_storage(); let mut store = PersistentStore { store: persistent_store::Store::new(storage).ok().unwrap(), @@ -104,17 +101,14 @@ impl PersistentStore { } /// Creates a syscall storage in flash. - #[cfg(not(any(test, feature = "ram_storage")))] + #[cfg(not(feature = "std"))] fn new_prod_storage() -> Storage { Storage::new(NUM_PAGES).unwrap() } /// Creates a buffer storage in RAM. - #[cfg(any(test, feature = "ram_storage"))] + #[cfg(feature = "std")] fn new_test_storage() -> Storage { - #[cfg(not(test))] - const PAGE_SIZE: usize = 0x100; - #[cfg(test)] const PAGE_SIZE: usize = 0x1000; let store = vec![0xff; NUM_PAGES * PAGE_SIZE].into_boxed_slice(); let options = persistent_store::BufferOptions { diff --git a/src/embedded_flash/mod.rs b/src/embedded_flash/mod.rs index b16504b..862b37e 100644 --- a/src/embedded_flash/mod.rs +++ b/src/embedded_flash/mod.rs @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -#[cfg(not(any(test, feature = "ram_storage")))] +#[cfg(not(feature = "std"))] mod syscall; -#[cfg(not(any(test, feature = "ram_storage")))] +#[cfg(not(feature = "std"))] pub use self::syscall::SyscallStorage; From ae08221cdb887ed4ffc831b833c17c97201743eb Mon Sep 17 00:00:00 2001 From: Julien Cretin Date: Thu, 10 Dec 2020 13:31:25 +0100 Subject: [PATCH 15/20] Add latency example --- deploy.py | 6 ++ examples/store_latency.rs | 126 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 132 insertions(+) create mode 100644 examples/store_latency.rs diff --git a/deploy.py b/deploy.py index e1ec38f..42f3e44 100755 --- a/deploy.py +++ b/deploy.py @@ -907,6 +907,12 @@ if __name__ == "__main__": const="crypto_bench", help=("Compiles and installs the crypto_bench example that benchmarks " "the performance of the cryptographic algorithms on the board.")) + apps_group.add_argument( + "--store_latency", + dest="application", + action="store_const", + const="store_latency", + help=("Compiles and installs the store_latency example.")) apps_group.add_argument( "--panic_test", dest="application", diff --git a/examples/store_latency.rs b/examples/store_latency.rs new file mode 100644 index 0000000..f85d14d --- /dev/null +++ b/examples/store_latency.rs @@ -0,0 +1,126 @@ +// Copyright 2019-2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#![no_std] + +extern crate alloc; +extern crate lang_items; + +use alloc::vec; +use core::fmt::Write; +use ctap2::embedded_flash::SyscallStorage; +use libtock_drivers::console::Console; +use libtock_drivers::timer::{self, Duration, Timer, Timestamp}; +use persistent_store::{Storage, Store}; + +fn timestamp(timer: &Timer) -> Timestamp { + Timestamp::::from_clock_value(timer.get_current_clock().ok().unwrap()) +} + +fn measure(timer: &Timer, operation: impl FnOnce() -> T) -> (T, Duration) { + let before = timestamp(timer); + let result = operation(); + let after = timestamp(timer); + (result, after - before) +} + +// Only use one store at a time. +unsafe fn boot_store(num_pages: usize, erase: bool) -> Store { + let mut storage = SyscallStorage::new(num_pages).unwrap(); + if erase { + for page in 0..storage.num_pages() { + storage.erase_page(page).unwrap(); + } + } + Store::new(storage).ok().unwrap() +} + +fn compute_latency(timer: &Timer, num_pages: usize, key_increment: usize, word_length: usize) { + let mut console = Console::new(); + writeln!( + console, + "\nLatency for num_pages={} key_increment={} word_length={}.", + num_pages, key_increment, word_length + ) + .unwrap(); + + let mut store = unsafe { boot_store(num_pages, true) }; + let total_capacity = store.capacity().unwrap().total(); + + // Burn N words to align the end of the user capacity with the virtual capacity. + store.insert(0, &vec![0; 4 * (num_pages - 1)]).unwrap(); + store.remove(0).unwrap(); + + // Insert entries until there is space for one more. + let count = total_capacity / (1 + word_length) - 1; + let ((), time) = measure(timer, || { + for i in 0..count { + let key = 1 + key_increment * i; + // For some reason the kernel sometimes fails. + while store.insert(key, &vec![0; 4 * word_length]).is_err() { + // We never enter this loop in practice, but we still need it for the kernel. + writeln!(console, "Retry insert.").unwrap(); + } + } + }); + writeln!(console, "Setup: {:.1}ms for {} entries.", time.ms(), count).unwrap(); + + // Measure latency of insert. + let key = 1 + key_increment * count; + let ((), time) = measure(&timer, || { + store.insert(key, &vec![0; 4 * word_length]).unwrap() + }); + writeln!(console, "Insert: {:.1}ms.", time.ms()).unwrap(); + + // Measure latency of boot. + let (mut store, time) = measure(&timer, || unsafe { boot_store(num_pages, false) }); + writeln!(console, "Boot: {:.1}ms.", time.ms()).unwrap(); + + // Measure latency of remove. + let ((), time) = measure(&timer, || store.remove(key).unwrap()); + writeln!(console, "Remove: {:.1}ms.", time.ms()).unwrap(); + + // Measure latency of compaction. + let length = total_capacity + num_pages - store.lifetime().unwrap().used(); + if length > 0 { + // Fill the store such that compaction is needed for one word. + store.insert(0, &vec![0; 4 * (length - 1)]).unwrap(); + store.remove(0).unwrap(); + } + let ((), time) = measure(timer, || store.prepare(1).unwrap()); + writeln!(console, "Compaction: {:.1}ms.", time.ms()).unwrap(); +} + +fn main() { + let mut with_callback = timer::with_callback(|_, _| {}); + let timer = with_callback.init().ok().unwrap(); + + writeln!(Console::new(), "\nRunning 4 tests...").unwrap(); + // Those non-overwritten 50 words entries simulate credentials. + compute_latency(&timer, 3, 1, 50); + compute_latency(&timer, 20, 1, 50); + // Those overwritten 1 word entries simulate counters. 
+ compute_latency(&timer, 3, 0, 1); + compute_latency(&timer, 6, 0, 1); + writeln!(Console::new(), "\nDone.").unwrap(); + + // Results on nrf52840dk: + // + // | Pages | Overwrite | Length | Boot | Compaction | Insert | Remove | + // | ----- | --------- | --------- | ------- | ---------- | ------ | ------- | + // | 3 | no | 50 words | 2.0 ms | 132.5 ms | 4.8 ms | 1.2 ms | + // | 20 | no | 50 words | 7.4 ms | 135.5 ms | 10.2 ms | 3.9 ms | + // | 3 | yes | 1 word | 21.9 ms | 94.5 ms | 12.4 ms | 5.9 ms | + // | 6 | yes | 1 word | 55.2 ms | 100.8 ms | 24.8 ms | 12.1 ms | +} From 19ebacec15d808d4fd05e30b0ba93e214ffd526b Mon Sep 17 00:00:00 2001 From: Julien Cretin Date: Thu, 10 Dec 2020 13:36:33 +0100 Subject: [PATCH 16/20] Do not use delay_map anymore This permits to avoid copies. Before we used to do one copy per storage operation. Now we do one copy per store operation. --- libraries/persistent_store/fuzz/src/store.rs | 162 +++---------------- libraries/persistent_store/src/driver.rs | 81 ++-------- 2 files changed, 38 insertions(+), 205 deletions(-) diff --git a/libraries/persistent_store/fuzz/src/store.rs b/libraries/persistent_store/fuzz/src/store.rs index c18eb51..3113f77 100644 --- a/libraries/persistent_store/fuzz/src/store.rs +++ b/libraries/persistent_store/fuzz/src/store.rs @@ -26,6 +26,9 @@ use std::convert::TryInto; // NOTE: We should be able to improve coverage by only checking the last operation. Because // operations before the last could be checked with a shorter entropy. +// NOTE: Maybe we should split the fuzz target in smaller parts (like one per init). We should also +// name the fuzz targets with action names. + /// Checks the store against a sequence of manipulations. /// /// The entropy to generate the sequence of manipulation should be provided in `data`. Debugging @@ -181,7 +184,7 @@ impl<'a> Fuzzer<'a> { println!("Power on the store."); } self.increment(StatKey::PowerOnCount); - let interruption = self.interruption(driver.delay_map()); + let interruption = self.interruption(driver.count_operations()); match driver.partial_power_on(interruption) { Err((storage, _)) if self.init.is_dirty() => { self.entropy.consume_all(); @@ -198,7 +201,7 @@ impl<'a> Fuzzer<'a> { if self.debug { println!("{:?}", operation); } - let interruption = self.interruption(driver.delay_map(&operation)); + let interruption = self.interruption(driver.count_operations(&operation)); match driver.partial_apply(operation, interruption) { Err((store, _)) if self.init.is_dirty() => { self.entropy.consume_all(); @@ -334,59 +337,48 @@ impl<'a> Fuzzer<'a> { /// Generates an interruption. /// - /// The `delay_map` describes the number of modified bits by the upcoming sequence of store - /// operations. - // TODO(ia0): We use too much CPU to compute the delay map. We should be able to just count the - // number of storage operations by checking the remaining delay. We can then use the entropy - // directly from the corruption function because it's called at most once. - fn interruption( - &mut self, - delay_map: Result, (usize, BufferStorage)>, - ) -> StoreInterruption { + /// The `max_delay` describes the number of storage operations. + fn interruption(&mut self, max_delay: Option) -> StoreInterruption { if self.init.is_dirty() { // We only test that the store can power on without crashing. If it would get // interrupted then it's like powering up with a different initial state, which would be // tested with another fuzzing input. 
return StoreInterruption::none(); } - let delay_map = match delay_map { - Ok(x) => x, - Err((delay, storage)) => { - print!("{}", storage); - panic!("delay={}", delay); - } + let max_delay = match max_delay { + Some(x) => x, + None => return StoreInterruption::none(), }; - let delay = self.entropy.read_range(0, delay_map.len() - 1); - let mut complete_bits = BitStack::default(); - for _ in 0..delay_map[delay] { - complete_bits.push(self.entropy.read_bit()); - } + let delay = self.entropy.read_range(0, max_delay); if self.debug { - if delay == delay_map.len() - 1 { - assert!(complete_bits.is_empty()); + if delay == max_delay { println!("Do not interrupt."); } else { - println!( - "Interrupt after {} operations with complete mask {}.", - delay, complete_bits - ); + println!("Interrupt after {} operations.", delay); } } - if delay < delay_map.len() - 1 { + if delay < max_delay { self.increment(StatKey::InterruptionCount); } let corrupt = Box::new(move |old: &mut [u8], new: &[u8]| { + let mut count = 0; + let mut total = 0; for (old, new) in old.iter_mut().zip(new.iter()) { for bit in 0..8 { let mask = 1 << bit; if *old & mask == *new & mask { continue; } - if complete_bits.pop().unwrap() { + total += 1; + if self.entropy.read_bit() { + count += 1; *old ^= mask; } } } + if self.debug { + println!("Flip {} bits out of {}.", count, total); + } }); StoreInterruption { delay, corrupt } } @@ -432,113 +424,3 @@ impl Init { } } } - -/// Compact stack of bits. -// NOTE: This would probably go away once the delay map is simplified. -#[derive(Default, Clone, Debug)] -struct BitStack { - /// Bits stored in little-endian (for bytes and bits). - /// - /// The last byte only contains `len` bits. - data: Vec, - - /// Number of bits stored in the last byte. - /// - /// It is 0 if the last byte is full, not 8. - len: usize, -} - -impl BitStack { - /// Returns whether the stack is empty. - fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// Returns the length of the stack. - fn len(&self) -> usize { - if self.len == 0 { - 8 * self.data.len() - } else { - 8 * (self.data.len() - 1) + self.len - } - } - - /// Pushes a bit to the stack. - fn push(&mut self, value: bool) { - if self.len == 0 { - self.data.push(0); - } - if value { - *self.data.last_mut().unwrap() |= 1 << self.len; - } - self.len += 1; - if self.len == 8 { - self.len = 0; - } - } - - /// Pops a bit from the stack. 
- fn pop(&mut self) -> Option { - if self.len == 0 { - if self.data.is_empty() { - return None; - } - self.len = 8; - } - self.len -= 1; - let result = self.data.last().unwrap() & 1 << self.len; - if self.len == 0 { - self.data.pop().unwrap(); - } - Some(result != 0) - } -} - -impl std::fmt::Display for BitStack { - fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> { - let mut bits = self.clone(); - while let Some(bit) = bits.pop() { - write!(f, "{}", bit as usize)?; - } - write!(f, " ({} bits)", self.len())?; - Ok(()) - } -} - -#[test] -fn bit_stack_ok() { - let mut bits = BitStack::default(); - - assert_eq!(bits.pop(), None); - - bits.push(true); - assert_eq!(bits.pop(), Some(true)); - assert_eq!(bits.pop(), None); - - bits.push(false); - assert_eq!(bits.pop(), Some(false)); - assert_eq!(bits.pop(), None); - - bits.push(true); - bits.push(false); - assert_eq!(bits.pop(), Some(false)); - assert_eq!(bits.pop(), Some(true)); - assert_eq!(bits.pop(), None); - - bits.push(false); - bits.push(true); - assert_eq!(bits.pop(), Some(true)); - assert_eq!(bits.pop(), Some(false)); - assert_eq!(bits.pop(), None); - - let n = 27; - for i in 0..n { - assert_eq!(bits.len(), i); - bits.push(true); - } - for i in (0..n).rev() { - assert_eq!(bits.pop(), Some(true)); - assert_eq!(bits.len(), i); - } - assert_eq!(bits.pop(), None); -} diff --git a/libraries/persistent_store/src/driver.rs b/libraries/persistent_store/src/driver.rs index e529274..f17f576 100644 --- a/libraries/persistent_store/src/driver.rs +++ b/libraries/persistent_store/src/driver.rs @@ -311,31 +311,15 @@ impl StoreDriverOff { }) } - /// Returns a mapping from delay time to number of modified bits. + /// Returns the number of storage operations to power on. /// - /// For example if the `i`-th value is `n`, it means that the `i`-th operation modifies `n` bits - /// in the storage. For convenience, the vector always ends with `0` for one past the last - /// operation. This permits to choose a random index in the vector and then a random set of bit - /// positions among the number of modified bits to simulate any possible corruption (including - /// no corruption with the last index). - pub fn delay_map(&self) -> Result, (usize, BufferStorage)> { - let mut result = Vec::new(); - loop { - let delay = result.len(); - let mut storage = self.storage.clone(); - storage.arm_interruption(delay); - match Store::new(storage) { - Err((StoreError::StorageError, x)) => storage = x, - Err((StoreError::InvalidStorage, mut storage)) => { - storage.reset_interruption(); - return Err((delay, storage)); - } - Ok(_) | Err(_) => break, - } - result.push(count_modified_bits(&mut storage)); - } - result.push(0); - Ok(result) + /// Returns `None` if the store cannot power on successfully. + pub fn count_operations(&self) -> Option { + let initial_delay = usize::MAX; + let mut storage = self.storage.clone(); + storage.arm_interruption(initial_delay); + let mut store = Store::new(storage).ok()?; + Some(initial_delay - store.storage_mut().disarm_interruption()) } } @@ -422,29 +406,15 @@ impl StoreDriverOn { }) } - /// Returns a mapping from delay time to number of modified bits. + /// Returns the number of storage operations to apply a store operation. /// - /// See the documentation of [`StoreDriverOff::delay_map`] for details. 
- /// - /// [`StoreDriverOff::delay_map`]: struct.StoreDriverOff.html#method.delay_map - pub fn delay_map( - &self, - operation: &StoreOperation, - ) -> Result, (usize, BufferStorage)> { - let mut result = Vec::new(); - loop { - let delay = result.len(); - let mut store = self.store.clone(); - store.storage_mut().arm_interruption(delay); - match store.apply(operation).1 { - Err(StoreError::StorageError) => (), - Err(StoreError::InvalidStorage) => return Err((delay, store.extract_storage())), - Ok(()) | Err(_) => break, - } - result.push(count_modified_bits(store.storage_mut())); - } - result.push(0); - Ok(result) + /// Returns `None` if the store cannot apply the operation successfully. + pub fn count_operations(&self, operation: &StoreOperation) -> Option { + let initial_delay = usize::MAX; + let mut store = self.store.clone(); + store.storage_mut().arm_interruption(initial_delay); + store.apply(operation).1.ok()?; + Some(initial_delay - store.storage_mut().disarm_interruption()) } /// Powers off the store. @@ -629,22 +599,3 @@ impl<'a> StoreInterruption<'a> { } } } - -/// Counts the number of bits modified by an interrupted operation. -/// -/// # Panics -/// -/// Panics if an interruption did not trigger. -fn count_modified_bits(storage: &mut BufferStorage) -> usize { - let mut modified_bits = 0; - storage.corrupt_operation(Box::new(|before, after| { - modified_bits = before - .iter() - .zip(after.iter()) - .map(|(x, y)| (x ^ y).count_ones() as usize) - .sum(); - })); - // We should never write the same slice or erase an erased page. - assert!(modified_bits > 0); - modified_bits -} From d4b20a5acc65d10b5b09b67b9a26b9cf8b6cd552 Mon Sep 17 00:00:00 2001 From: Julien Cretin Date: Thu, 10 Dec 2020 16:14:17 +0100 Subject: [PATCH 17/20] Fix linked_list_allocator version to fix build They released version 0.8.7 today which breaks our assumption that Heap::empty is callable in const context. --- third_party/lang-items/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/third_party/lang-items/Cargo.toml b/third_party/lang-items/Cargo.toml index 39ffbf0..d4b3e33 100644 --- a/third_party/lang-items/Cargo.toml +++ b/third_party/lang-items/Cargo.toml @@ -11,7 +11,7 @@ edition = "2018" [dependencies] libtock_core = { path = "../../third_party/libtock-rs/core", default-features = false, features = ["alloc_init", "custom_panic_handler", "custom_alloc_error_handler"] } libtock_drivers = { path = "../libtock-drivers" } -linked_list_allocator = { version = "0.8.1", default-features = false } +linked_list_allocator = { version = "=0.8.1", default-features = false } [features] debug_allocations = [] From 7a641d639199c108007149eb01cedd26e4e0e655 Mon Sep 17 00:00:00 2001 From: Julien Cretin Date: Thu, 10 Dec 2020 16:20:26 +0100 Subject: [PATCH 18/20] Use the new const_mut_refs default feature of linked_list_allocator This is necessary for Heap::empty() to be const. 
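For background, the constructor has to be `const` because the heap is declared as a static, and a static initializer is evaluated in a const context. A minimal sketch of that pattern, assuming a bare `Heap` static with made-up names (`HEAP`, `init_heap`, `start`, `size`); the actual lang-items code may wire this up differently:

    use linked_list_allocator::Heap;

    // A `static` initializer is a const context, so this line only compiles
    // if `Heap::empty()` is a `const fn` (hence the `const_mut_refs` feature
    // enabled below).
    static mut HEAP: Heap = Heap::empty();

    /// Hands the allocator its memory region at runtime (called once).
    unsafe fn init_heap(start: usize, size: usize) {
        HEAP.init(start, size);
    }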
--- third_party/lang-items/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/third_party/lang-items/Cargo.toml b/third_party/lang-items/Cargo.toml index d4b3e33..5d109f3 100644 --- a/third_party/lang-items/Cargo.toml +++ b/third_party/lang-items/Cargo.toml @@ -11,7 +11,7 @@ edition = "2018" [dependencies] libtock_core = { path = "../../third_party/libtock-rs/core", default-features = false, features = ["alloc_init", "custom_panic_handler", "custom_alloc_error_handler"] } libtock_drivers = { path = "../libtock-drivers" } -linked_list_allocator = { version = "=0.8.1", default-features = false } +linked_list_allocator = { version = "0.8.7", default-features = false, features = ["const_mut_refs"] } [features] debug_allocations = [] From 869e93234952fd09c5e85acd61430c5d079cb1fa Mon Sep 17 00:00:00 2001 From: Julien Cretin Date: Thu, 10 Dec 2020 16:44:00 +0100 Subject: [PATCH 19/20] Add asserts to make sure we compact --- examples/store_latency.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/examples/store_latency.rs b/examples/store_latency.rs index f85d14d..e664964 100644 --- a/examples/store_latency.rs +++ b/examples/store_latency.rs @@ -57,10 +57,14 @@ fn compute_latency(timer: &Timer, num_pages: usize, key_increment: usize, word_l let mut store = unsafe { boot_store(num_pages, true) }; let total_capacity = store.capacity().unwrap().total(); + assert_eq!(store.capacity().unwrap().used(), 0); + assert_eq!(store.lifetime().unwrap().used(), 0); // Burn N words to align the end of the user capacity with the virtual capacity. store.insert(0, &vec![0; 4 * (num_pages - 1)]).unwrap(); store.remove(0).unwrap(); + assert_eq!(store.capacity().unwrap().used(), 0); + assert_eq!(store.lifetime().unwrap().used(), num_pages); // Insert entries until there is space for one more. let count = total_capacity / (1 + word_length) - 1; @@ -82,6 +86,10 @@ fn compute_latency(timer: &Timer, num_pages: usize, key_increment: usize, word_l store.insert(key, &vec![0; 4 * word_length]).unwrap() }); writeln!(console, "Insert: {:.1}ms.", time.ms()).unwrap(); + assert_eq!( + store.lifetime().unwrap().used(), + num_pages + (1 + count) * (1 + word_length) + ); // Measure latency of boot. let (mut store, time) = measure(&timer, || unsafe { boot_store(num_pages, false) }); @@ -98,8 +106,11 @@ fn compute_latency(timer: &Timer, num_pages: usize, key_increment: usize, word_l store.insert(0, &vec![0; 4 * (length - 1)]).unwrap(); store.remove(0).unwrap(); } + assert!(store.capacity().unwrap().remaining() > 0); + assert_eq!(store.lifetime().unwrap().used(), num_pages + total_capacity); let ((), time) = measure(timer, || store.prepare(1).unwrap()); writeln!(console, "Compaction: {:.1}ms.", time.ms()).unwrap(); + assert!(store.lifetime().unwrap().used() > total_capacity + num_pages); } fn main() { From 371b8af22425689dc5af6ea4a8745a4cc86a1d8c Mon Sep 17 00:00:00 2001 From: Julien Cretin Date: Thu, 10 Dec 2020 18:04:25 +0100 Subject: [PATCH 20/20] Move choice between prod and test storage to embedded_flash module This way all users of storage can share the logic to choose between flash or RAM storage depending on the "std" feature. This is needed because the store_latency example assumes flash storage but is built when running `cargo test --features=std`. 
--- examples/store_latency.rs | 11 ++++++----- src/ctap/storage.rs | 32 ++------------------------------ src/embedded_flash/mod.rs | 33 +++++++++++++++++++++++++++++++++ 3 files changed, 41 insertions(+), 35 deletions(-) diff --git a/examples/store_latency.rs b/examples/store_latency.rs index e664964..c3f3e71 100644 --- a/examples/store_latency.rs +++ b/examples/store_latency.rs @@ -19,10 +19,10 @@ extern crate lang_items; use alloc::vec; use core::fmt::Write; -use ctap2::embedded_flash::SyscallStorage; +use ctap2::embedded_flash::{new_storage, Storage}; use libtock_drivers::console::Console; use libtock_drivers::timer::{self, Duration, Timer, Timestamp}; -use persistent_store::{Storage, Store}; +use persistent_store::Store; fn timestamp(timer: &Timer) -> Timestamp { Timestamp::::from_clock_value(timer.get_current_clock().ok().unwrap()) @@ -36,10 +36,11 @@ fn measure(timer: &Timer, operation: impl FnOnce() -> T) -> (T, Duration } // Only use one store at a time. -unsafe fn boot_store(num_pages: usize, erase: bool) -> Store { - let mut storage = SyscallStorage::new(num_pages).unwrap(); +unsafe fn boot_store(num_pages: usize, erase: bool) -> Store { + let mut storage = new_storage(num_pages); if erase { - for page in 0..storage.num_pages() { + for page in 0..num_pages { + use persistent_store::Storage; storage.erase_page(page).unwrap(); } } diff --git a/src/ctap/storage.rs b/src/ctap/storage.rs index 5e42ec7..5f17052 100644 --- a/src/ctap/storage.rs +++ b/src/ctap/storage.rs @@ -21,6 +21,7 @@ use crate::ctap::key_material; use crate::ctap::pin_protocol_v1::PIN_AUTH_LENGTH; use crate::ctap::status_code::Ctap2StatusCode; use crate::ctap::INITIAL_SIGNATURE_COUNTER; +use crate::embedded_flash::{new_storage, Storage}; #[cfg(feature = "with_ctap2_1")] use alloc::string::String; use alloc::vec; @@ -31,11 +32,6 @@ use cbor::cbor_array_vec; use core::convert::TryInto; use crypto::rng256::Rng256; -#[cfg(feature = "std")] -type Storage = persistent_store::BufferStorage; -#[cfg(not(feature = "std"))] -type Storage = crate::embedded_flash::SyscallStorage; - // Those constants may be modified before compilation to tune the behavior of the key. // // The number of pages should be at least 3 and at most what the flash can hold. There should be no @@ -89,10 +85,7 @@ impl PersistentStore { /// /// This should be at most one instance of persistent store per program lifetime. pub fn new(rng: &mut impl Rng256) -> PersistentStore { - #[cfg(not(feature = "std"))] - let storage = PersistentStore::new_prod_storage(); - #[cfg(feature = "std")] - let storage = PersistentStore::new_test_storage(); + let storage = new_storage(NUM_PAGES); let mut store = PersistentStore { store: persistent_store::Store::new(storage).ok().unwrap(), }; @@ -100,27 +93,6 @@ impl PersistentStore { store } - /// Creates a syscall storage in flash. - #[cfg(not(feature = "std"))] - fn new_prod_storage() -> Storage { - Storage::new(NUM_PAGES).unwrap() - } - - /// Creates a buffer storage in RAM. - #[cfg(feature = "std")] - fn new_test_storage() -> Storage { - const PAGE_SIZE: usize = 0x1000; - let store = vec![0xff; NUM_PAGES * PAGE_SIZE].into_boxed_slice(); - let options = persistent_store::BufferOptions { - word_size: 4, - page_size: PAGE_SIZE, - max_word_writes: 2, - max_page_erases: 10000, - strict_mode: true, - }; - Storage::new(store, options) - } - /// Initializes the store by creating missing objects. fn init(&mut self, rng: &mut impl Rng256) -> Result<(), Ctap2StatusCode> { // Generate and store the master keys if they are missing. 
diff --git a/src/embedded_flash/mod.rs b/src/embedded_flash/mod.rs index 862b37e..e307a55 100644 --- a/src/embedded_flash/mod.rs +++ b/src/embedded_flash/mod.rs @@ -17,3 +17,36 @@ mod syscall; #[cfg(not(feature = "std"))] pub use self::syscall::SyscallStorage; + +/// Storage definition for production. +#[cfg(not(feature = "std"))] +mod prod { + pub type Storage = super::SyscallStorage; + + pub fn new_storage(num_pages: usize) -> Storage { + Storage::new(num_pages).unwrap() + } +} +#[cfg(not(feature = "std"))] +pub use self::prod::{new_storage, Storage}; + +/// Storage definition for testing. +#[cfg(feature = "std")] +mod test { + pub type Storage = persistent_store::BufferStorage; + + pub fn new_storage(num_pages: usize) -> Storage { + const PAGE_SIZE: usize = 0x1000; + let store = vec![0xff; num_pages * PAGE_SIZE].into_boxed_slice(); + let options = persistent_store::BufferOptions { + word_size: 4, + page_size: PAGE_SIZE, + max_word_writes: 2, + max_page_erases: 10000, + strict_mode: true, + }; + Storage::new(store, options) + } +} +#[cfg(feature = "std")] +pub use self::test::{new_storage, Storage};
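With this split, every consumer of the storage (the CTAP persistent store, the store_latency example, tests) is written against the same two names and no longer needs its own cfg logic. Roughly, a call site reduces to the following sketch; `make_store` is a placeholder name for illustration, while `new_storage`, `Storage` and the `Store::new(...).ok().unwrap()` pattern come from the diffs above:

    use ctap2::embedded_flash::{new_storage, Storage};
    use persistent_store::Store;

    const NUM_PAGES: usize = 20;

    fn make_store() -> Store<Storage> {
        // Under the `std` feature this is the RAM-backed BufferStorage (with
        // strict checks); on the device it is the syscall-backed flash. The
        // caller does not need to know which one it got.
        let storage = new_storage(NUM_PAGES);
        Store::new(storage).ok().unwrap()
    }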