diff --git a/Cargo.toml b/Cargo.toml index 8faf8dd..5b01795 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,6 +15,7 @@ libtock_drivers = { path = "third_party/libtock-drivers" } lang_items = { path = "third_party/lang-items" } cbor = { path = "libraries/cbor" } crypto = { path = "libraries/crypto" } +persistent_store = { path = "libraries/persistent_store" } byteorder = { version = "1", default-features = false } arrayref = "0.3.6" subtle = { version = "2.2", default-features = false, features = ["nightly"] } @@ -23,7 +24,7 @@ subtle = { version = "2.2", default-features = false, features = ["nightly"] } debug_allocations = ["lang_items/debug_allocations"] debug_ctap = ["crypto/derive_debug", "libtock_drivers/debug_ctap"] panic_console = ["lang_items/panic_console"] -std = ["cbor/std", "crypto/std", "crypto/derive_debug", "lang_items/std"] +std = ["cbor/std", "crypto/std", "crypto/derive_debug", "lang_items/std", "persistent_store/std"] ram_storage = [] verbose = ["debug_ctap", "libtock_drivers/verbose_usb"] with_ctap1 = ["crypto/with_ctap1"] diff --git a/src/ctap/ctap1.rs b/src/ctap/ctap1.rs index 47d2120..22b4b90 100644 --- a/src/ctap/ctap1.rs +++ b/src/ctap/ctap1.rs @@ -325,7 +325,7 @@ impl Ctap1Command { signature_data.extend(key_handle); signature_data.extend_from_slice(&user_pk); - let attestation_key = crypto::ecdsa::SecKey::from_bytes(private_key).unwrap(); + let attestation_key = crypto::ecdsa::SecKey::from_bytes(&private_key).unwrap(); let signature = attestation_key.sign_rfc6979::(&signature_data); response.extend(signature.to_asn1_der()); diff --git a/src/ctap/mod.rs b/src/ctap/mod.rs index a00fecf..4c5687a 100644 --- a/src/ctap/mod.rs +++ b/src/ctap/mod.rs @@ -566,7 +566,7 @@ where .attestation_private_key()? 
.ok_or(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR)?; let attestation_key = - crypto::ecdsa::SecKey::from_bytes(attestation_private_key).unwrap(); + crypto::ecdsa::SecKey::from_bytes(&attestation_private_key).unwrap(); let attestation_certificate = self .persistent_store .attestation_certificate()? diff --git a/src/ctap/status_code.rs b/src/ctap/status_code.rs index adb84fd..097d7ec 100644 --- a/src/ctap/status_code.rs +++ b/src/ctap/status_code.rs @@ -81,5 +81,10 @@ pub enum Ctap2StatusCode { /// This type of error is unexpected and the current state is undefined. CTAP2_ERR_VENDOR_INTERNAL_ERROR = 0xF2, + /// The hardware is malfunctioning. + /// + /// It may be possible that some of those errors are actually internal errors. + CTAP2_ERR_VENDOR_HARDWARE_FAILURE = 0xF3, + CTAP2_ERR_VENDOR_LAST = 0xFF, } diff --git a/src/ctap/storage.rs b/src/ctap/storage.rs index 7be33f1..87c4c6c 100644 --- a/src/ctap/storage.rs +++ b/src/ctap/storage.rs @@ -12,6 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+mod key; + #[cfg(feature = "with_ctap2_1")] use crate::ctap::data_formats::{extract_array, extract_text_string}; use crate::ctap::data_formats::{CredentialProtectionPolicy, PublicKeyCredentialSource}; @@ -19,9 +21,8 @@ use crate::ctap::key_material; use crate::ctap::pin_protocol_v1::PIN_AUTH_LENGTH; use crate::ctap::status_code::Ctap2StatusCode; use crate::ctap::INITIAL_SIGNATURE_COUNTER; -use crate::embedded_flash::{self, StoreConfig, StoreEntry, StoreError}; +#[cfg(feature = "with_ctap2_1")] use alloc::string::String; -#[cfg(any(test, feature = "ram_storage", feature = "with_ctap2_1"))] use alloc::vec; use alloc::vec::Vec; use arrayref::array_ref; @@ -31,16 +32,16 @@ use core::convert::TryInto; use crypto::rng256::Rng256; #[cfg(any(test, feature = "ram_storage"))] -type Storage = embedded_flash::BufferStorage; +type Storage = persistent_store::BufferStorage; #[cfg(not(any(test, feature = "ram_storage")))] -type Storage = embedded_flash::SyscallStorage; +type Storage = crate::embedded_flash::SyscallStorage; // Those constants may be modified before compilation to tune the behavior of the key. // -// The number of pages should be at least 2 and at most what the flash can hold. There should be no -// reason to put a small number here, except that the latency of flash operations depends on the -// number of pages. This will improve in the future. Currently, using 20 pages gives 65ms per -// operation. The rule of thumb is 3.5ms per additional page. +// The number of pages should be at least 3 and at most what the flash can hold. There should be no +// reason to put a small number here, except that the latency of flash operations is linear in the +// number of pages. This may improve in the future. Currently, using 20 pages gives between 20ms and +// 240ms per operation. The rule of thumb is between 1ms and 12ms per additional page. // // Limiting the number of residential keys permits to ensure a minimum number of counter increments. 
// Let: @@ -50,33 +51,15 @@ type Storage = embedded_flash::SyscallStorage; // - C the number of erase cycles (10000) // - I the minimum number of counter increments // -// We have: I = ((P - 1) * 4092 - K * S) / 12 * C +// We have: I = (P * 4084 - 5107 - K * S) / 8 * C // -// With P=20 and K=150, we have I > 2M which is enough for 500 increments per day for 10 years. +// With P=20 and K=150, we have I=2M which is enough for 500 increments per day for 10 years. #[cfg(feature = "ram_storage")] -const NUM_PAGES: usize = 2; +const NUM_PAGES: usize = 3; #[cfg(not(feature = "ram_storage"))] const NUM_PAGES: usize = 20; const MAX_SUPPORTED_RESIDENTIAL_KEYS: usize = 150; -// List of tags. They should all be unique. And there should be less than NUM_TAGS. -const TAG_CREDENTIAL: usize = 0; -const GLOBAL_SIGNATURE_COUNTER: usize = 1; -const MASTER_KEYS: usize = 2; -const PIN_HASH: usize = 3; -const PIN_RETRIES: usize = 4; -const ATTESTATION_PRIVATE_KEY: usize = 5; -const ATTESTATION_CERTIFICATE: usize = 6; -const AAGUID: usize = 7; -#[cfg(feature = "with_ctap2_1")] -const MIN_PIN_LENGTH: usize = 8; -#[cfg(feature = "with_ctap2_1")] -const MIN_PIN_LENGTH_RP_IDS: usize = 9; -const CRED_RANDOM_SECRET: usize = 10; -// Different NUM_TAGS depending on the CTAP version make the storage incompatible, -// so we use the maximum. -const NUM_TAGS: usize = 11; - const MAX_PIN_RETRIES: u8 = 8; #[cfg(feature = "with_ctap2_1")] const DEFAULT_MIN_PIN_LENGTH: u8 = 4; @@ -88,94 +71,18 @@ const _DEFAULT_MIN_PIN_LENGTH_RP_IDS: Vec = Vec::new(); #[cfg(feature = "with_ctap2_1")] const _MAX_RP_IDS_LENGTH: usize = 8; -#[allow(clippy::enum_variant_names)] -#[derive(PartialEq, Eq, PartialOrd, Ord)] -enum Key { - // TODO(cretin): Test whether this doesn't consume too much memory. Otherwise, we can use less - // keys. Either only a simple enum value for all credentials, or group by rp_id. 
- Credential { - rp_id: Option, - credential_id: Option>, - user_handle: Option>, - }, - GlobalSignatureCounter, - MasterKeys, - PinHash, - PinRetries, - AttestationPrivateKey, - AttestationCertificate, - Aaguid, - #[cfg(feature = "with_ctap2_1")] - MinPinLength, - #[cfg(feature = "with_ctap2_1")] - MinPinLengthRpIds, - CredRandomSecret, -} - +/// Wrapper for master keys. pub struct MasterKeys { + /// Master encryption key. pub encryption: [u8; 32], + + /// Master hmac key. pub hmac: [u8; 32], } -struct Config; - -impl StoreConfig for Config { - type Key = Key; - - fn num_tags(&self) -> usize { - NUM_TAGS - } - - fn keys(&self, entry: StoreEntry, mut add: impl FnMut(Key)) { - match entry.tag { - TAG_CREDENTIAL => { - let credential = match deserialize_credential(entry.data) { - None => { - debug_assert!(false); - return; - } - Some(credential) => credential, - }; - add(Key::Credential { - rp_id: Some(credential.rp_id.clone()), - credential_id: Some(credential.credential_id), - user_handle: None, - }); - add(Key::Credential { - rp_id: Some(credential.rp_id.clone()), - credential_id: None, - user_handle: None, - }); - add(Key::Credential { - rp_id: Some(credential.rp_id), - credential_id: None, - user_handle: Some(credential.user_handle), - }); - add(Key::Credential { - rp_id: None, - credential_id: None, - user_handle: None, - }); - } - GLOBAL_SIGNATURE_COUNTER => add(Key::GlobalSignatureCounter), - MASTER_KEYS => add(Key::MasterKeys), - PIN_HASH => add(Key::PinHash), - PIN_RETRIES => add(Key::PinRetries), - ATTESTATION_PRIVATE_KEY => add(Key::AttestationPrivateKey), - ATTESTATION_CERTIFICATE => add(Key::AttestationCertificate), - AAGUID => add(Key::Aaguid), - #[cfg(feature = "with_ctap2_1")] - MIN_PIN_LENGTH => add(Key::MinPinLength), - #[cfg(feature = "with_ctap2_1")] - MIN_PIN_LENGTH_RP_IDS => add(Key::MinPinLengthRpIds), - CRED_RANDOM_SECRET => add(Key::CredRandomSecret), - _ => debug_assert!(false), - } - } -} - +/// CTAP persistent storage. 
pub struct PersistentStore { - store: embedded_flash::Store, + store: persistent_store::Store, } impl PersistentStore { @@ -190,17 +97,19 @@ impl PersistentStore { #[cfg(any(test, feature = "ram_storage"))] let storage = PersistentStore::new_test_storage(); let mut store = PersistentStore { - store: embedded_flash::Store::new(storage, Config).unwrap(), + store: persistent_store::Store::new(storage).ok().unwrap(), }; - store.init(rng); + store.init(rng).unwrap(); store } + /// Creates a syscall storage in flash. #[cfg(not(any(test, feature = "ram_storage")))] fn new_prod_storage() -> Storage { Storage::new(NUM_PAGES).unwrap() } + /// Creates a buffer storage in RAM. #[cfg(any(test, feature = "ram_storage"))] fn new_test_storage() -> Storage { #[cfg(not(test))] @@ -208,345 +117,325 @@ impl PersistentStore { #[cfg(test)] const PAGE_SIZE: usize = 0x1000; let store = vec![0xff; NUM_PAGES * PAGE_SIZE].into_boxed_slice(); - let options = embedded_flash::BufferOptions { + let options = persistent_store::BufferOptions { word_size: 4, page_size: PAGE_SIZE, max_word_writes: 2, max_page_erases: 10000, - strict_write: true, + strict_mode: true, }; Storage::new(store, options) } - fn init(&mut self, rng: &mut impl Rng256) { - if self.store.find_one(&Key::MasterKeys).is_none() { + /// Initializes the store by creating missing objects. + fn init(&mut self, rng: &mut impl Rng256) -> Result<(), Ctap2StatusCode> { + // Generate and store the master keys if they are missing. 
+ if self.store.find_handle(key::MASTER_KEYS)?.is_none() { let master_encryption_key = rng.gen_uniform_u8x32(); let master_hmac_key = rng.gen_uniform_u8x32(); let mut master_keys = Vec::with_capacity(64); master_keys.extend_from_slice(&master_encryption_key); master_keys.extend_from_slice(&master_hmac_key); - self.store - .insert(StoreEntry { - tag: MASTER_KEYS, - data: &master_keys, - sensitive: true, - }) - .unwrap(); + self.store.insert(key::MASTER_KEYS, &master_keys)?; } - if self.store.find_one(&Key::CredRandomSecret).is_none() { + // Generate and store the CredRandom secrets if they are missing. + if self.store.find_handle(key::CRED_RANDOM_SECRET)?.is_none() { let cred_random_with_uv = rng.gen_uniform_u8x32(); let cred_random_without_uv = rng.gen_uniform_u8x32(); let mut cred_random = Vec::with_capacity(64); cred_random.extend_from_slice(&cred_random_without_uv); cred_random.extend_from_slice(&cred_random_with_uv); - self.store - .insert(StoreEntry { - tag: CRED_RANDOM_SECRET, - data: &cred_random, - sensitive: true, - }) - .unwrap(); + self.store.insert(key::CRED_RANDOM_SECRET, &cred_random)?; } // TODO(jmichel): remove this when vendor command is in place #[cfg(not(test))] - self.load_attestation_data_from_firmware(); - - if self.store.find_one(&Key::Aaguid).is_none() { - self.set_aaguid(key_material::AAGUID).unwrap(); + self.load_attestation_data_from_firmware()?; + if self.store.find_handle(key::AAGUID)?.is_none() { + self.set_aaguid(key_material::AAGUID)?; } + Ok(()) } // TODO(jmichel): remove this function when vendor command is in place. #[cfg(not(test))] - fn load_attestation_data_from_firmware(&mut self) { + fn load_attestation_data_from_firmware(&mut self) -> Result<(), Ctap2StatusCode> { // The following 2 entries are meant to be written by vendor-specific commands. 
- if self.store.find_one(&Key::AttestationPrivateKey).is_none() { - self.set_attestation_private_key(key_material::ATTESTATION_PRIVATE_KEY) - .unwrap(); + if self + .store + .find_handle(key::ATTESTATION_PRIVATE_KEY)? + .is_none() + { + self.set_attestation_private_key(key_material::ATTESTATION_PRIVATE_KEY)?; } - if self.store.find_one(&Key::AttestationCertificate).is_none() { - self.set_attestation_certificate(key_material::ATTESTATION_CERTIFICATE) - .unwrap(); + if self + .store + .find_handle(key::ATTESTATION_CERTIFICATE)? + .is_none() + { + self.set_attestation_certificate(key_material::ATTESTATION_CERTIFICATE)?; } + Ok(()) } + /// Returns the first matching credential. + /// + /// Returns `None` if no credentials are matched or if `check_cred_protect` is set and the first + /// matched credential requires user verification. pub fn find_credential( &self, rp_id: &str, credential_id: &[u8], check_cred_protect: bool, ) -> Result, Ctap2StatusCode> { - let key = Key::Credential { - rp_id: Some(rp_id.into()), - credential_id: Some(credential_id.into()), - user_handle: None, - }; - let entry = match self.store.find_one(&key) { - None => return Ok(None), - Some((_, entry)) => entry, - }; - debug_assert_eq!(entry.tag, TAG_CREDENTIAL); - let result = deserialize_credential(entry.data); - debug_assert!(result.is_some()); - let user_verification_required = result.as_ref().map_or(false, |cred| { - cred.cred_protect_policy == Some(CredentialProtectionPolicy::UserVerificationRequired) + let mut iter_result = Ok(()); + let iter = self.iter_credentials(&mut iter_result)?; + // We don't check whether there is more than one matching credential to be able to exit + // early. 
+ let result = iter.map(|(_, credential)| credential).find(|credential| { + credential.rp_id == rp_id && credential.credential_id == credential_id }); - if check_cred_protect && user_verification_required { - Ok(None) - } else { - Ok(result) + iter_result?; + if let Some(cred) = &result { + let user_verification_required = cred.cred_protect_policy + == Some(CredentialProtectionPolicy::UserVerificationRequired); + if check_cred_protect && user_verification_required { + return Ok(None); + } } + Ok(result) } + /// Stores or updates a credential. + /// + /// If a credential with the same RP id and user handle already exists, it is replaced. pub fn store_credential( &mut self, - credential: PublicKeyCredentialSource, + new_credential: PublicKeyCredentialSource, ) -> Result<(), Ctap2StatusCode> { - let key = Key::Credential { - rp_id: Some(credential.rp_id.clone()), - credential_id: None, - user_handle: Some(credential.user_handle.clone()), - }; - let old_entry = self.store.find_one(&key); - if old_entry.is_none() && self.count_credentials()? >= MAX_SUPPORTED_RESIDENTIAL_KEYS { + // Holds the key of the existing credential if this is an update. + let mut old_key = None; + let min_key = key::CREDENTIALS.start; + // Holds whether a key is used (indices are shifted by min_key). 
+ let mut keys = vec![false; MAX_SUPPORTED_RESIDENTIAL_KEYS]; + let mut iter_result = Ok(()); + let iter = self.iter_credentials(&mut iter_result)?; + for (key, credential) in iter { + if key < min_key + || key - min_key >= MAX_SUPPORTED_RESIDENTIAL_KEYS + || keys[key - min_key] + { + return Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR); + } + keys[key - min_key] = true; + if credential.rp_id == new_credential.rp_id + && credential.user_handle == new_credential.user_handle + { + if old_key.is_some() { + return Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR); + } + old_key = Some(key); + } + } + iter_result?; + if old_key.is_none() + && keys.iter().filter(|&&x| x).count() >= MAX_SUPPORTED_RESIDENTIAL_KEYS + { return Err(Ctap2StatusCode::CTAP2_ERR_KEY_STORE_FULL); } - let credential = serialize_credential(credential)?; - let new_entry = StoreEntry { - tag: TAG_CREDENTIAL, - data: &credential, - sensitive: true, - }; - match old_entry { - None => self.store.insert(new_entry)?, - Some((index, old_entry)) => { - debug_assert_eq!(old_entry.tag, TAG_CREDENTIAL); - self.store.replace(index, new_entry)? - } + let key = match old_key { + // This is a new credential being added, we need to allocate a free key. We choose the + // first available key. + None => key::CREDENTIALS + .take(MAX_SUPPORTED_RESIDENTIAL_KEYS) + .find(|key| !keys[key - min_key]) + .ok_or(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR)?, + // This is an existing credential being updated, we reuse its key. + Some(x) => x, }; + let value = serialize_credential(new_credential)?; + self.store.insert(key, &value)?; Ok(()) } + /// Returns the list of matching credentials. + /// + /// Does not return credentials that are not discoverable if `check_cred_protect` is set. 
pub fn filter_credential( &self, rp_id: &str, check_cred_protect: bool, ) -> Result, Ctap2StatusCode> { - Ok(self - .store - .find_all(&Key::Credential { - rp_id: Some(rp_id.into()), - credential_id: None, - user_handle: None, - }) - .filter_map(|(_, entry)| { - debug_assert_eq!(entry.tag, TAG_CREDENTIAL); - let credential = deserialize_credential(entry.data); - debug_assert!(credential.is_some()); - credential + let mut iter_result = Ok(()); + let iter = self.iter_credentials(&mut iter_result)?; + let result = iter + .filter_map(|(_, credential)| { + if credential.rp_id == rp_id { + Some(credential) + } else { + None + } }) .filter(|cred| !check_cred_protect || cred.is_discoverable()) - .collect()) + .collect(); + iter_result?; + Ok(result) } + /// Returns the number of credentials. + #[cfg(test)] pub fn count_credentials(&self) -> Result { - Ok(self - .store - .find_all(&Key::Credential { - rp_id: None, - credential_id: None, - user_handle: None, - }) - .count()) + let mut iter_result = Ok(()); + let iter = self.iter_credentials(&mut iter_result)?; + let result = iter.count(); + iter_result?; + Ok(result) } + /// Iterates through the credentials. + /// + /// If an error is encountered during iteration, it is written to `result`. + fn iter_credentials<'a>( + &'a self, + result: &'a mut Result<(), Ctap2StatusCode>, + ) -> Result, Ctap2StatusCode> { + IterCredentials::new(&self.store, result) + } + + /// Returns the next creation order. 
pub fn new_creation_order(&self) -> Result { - Ok(self - .store - .find_all(&Key::Credential { - rp_id: None, - credential_id: None, - user_handle: None, - }) - .filter_map(|(_, entry)| { - debug_assert_eq!(entry.tag, TAG_CREDENTIAL); - let credential = deserialize_credential(entry.data); - debug_assert!(credential.is_some()); - credential - }) - .map(|c| c.creation_order) - .max() - .unwrap_or(0) - .wrapping_add(1)) + let mut iter_result = Ok(()); + let iter = self.iter_credentials(&mut iter_result)?; + let max = iter.map(|(_, credential)| credential.creation_order).max(); + iter_result?; + Ok(max.unwrap_or(0).wrapping_add(1)) } + /// Returns the global signature counter. pub fn global_signature_counter(&self) -> Result { - Ok(self - .store - .find_one(&Key::GlobalSignatureCounter) - .map_or(INITIAL_SIGNATURE_COUNTER, |(_, entry)| { - u32::from_ne_bytes(*array_ref!(entry.data, 0, 4)) - })) + match self.store.find(key::GLOBAL_SIGNATURE_COUNTER)? { + None => Ok(INITIAL_SIGNATURE_COUNTER), + Some(value) if value.len() == 4 => Ok(u32::from_ne_bytes(*array_ref!(&value, 0, 4))), + Some(_) => Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR), + } } + /// Increments the global signature counter. pub fn incr_global_signature_counter(&mut self, increment: u32) -> Result<(), Ctap2StatusCode> { - let mut buffer = [0; core::mem::size_of::()]; - match self.store.find_one(&Key::GlobalSignatureCounter) { - None => { - buffer.copy_from_slice(&(increment + INITIAL_SIGNATURE_COUNTER).to_ne_bytes()); - self.store.insert(StoreEntry { - tag: GLOBAL_SIGNATURE_COUNTER, - data: &buffer, - sensitive: false, - })?; - } - Some((index, entry)) => { - let value = u32::from_ne_bytes(*array_ref!(entry.data, 0, 4)); - // In hopes that servers handle the wrapping gracefully. 
- buffer.copy_from_slice(&value.wrapping_add(increment).to_ne_bytes()); - self.store.replace( - index, - StoreEntry { - tag: GLOBAL_SIGNATURE_COUNTER, - data: &buffer, - sensitive: false, - }, - )?; - } - } + let old_value = self.global_signature_counter()?; + // In hopes that servers handle the wrapping gracefully. + let new_value = old_value.wrapping_add(increment); + self.store + .insert(key::GLOBAL_SIGNATURE_COUNTER, &new_value.to_ne_bytes())?; Ok(()) } + /// Returns the master keys. pub fn master_keys(&self) -> Result { - let (_, entry) = self.store.find_one(&Key::MasterKeys).unwrap(); - if entry.data.len() != 64 { + let master_keys = self + .store + .find(key::MASTER_KEYS)? + .ok_or(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR)?; + if master_keys.len() != 64 { return Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR); } Ok(MasterKeys { - encryption: *array_ref![entry.data, 0, 32], - hmac: *array_ref![entry.data, 32, 32], + encryption: *array_ref![master_keys, 0, 32], + hmac: *array_ref![master_keys, 32, 32], }) } + /// Returns the CredRandom secret. pub fn cred_random_secret(&self, has_uv: bool) -> Result<[u8; 32], Ctap2StatusCode> { - let (_, entry) = self.store.find_one(&Key::CredRandomSecret).unwrap(); - if entry.data.len() != 64 { + let cred_random_secret = self + .store + .find(key::CRED_RANDOM_SECRET)? + .ok_or(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR)?; + if cred_random_secret.len() != 64 { return Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR); } let offset = if has_uv { 32 } else { 0 }; - Ok(*array_ref![entry.data, offset, 32]) + Ok(*array_ref![cred_random_secret, offset, 32]) } + /// Returns the PIN hash if defined. pub fn pin_hash(&self) -> Result, Ctap2StatusCode> { - let data = match self.store.find_one(&Key::PinHash) { + let pin_hash = match self.store.find(key::PIN_HASH)? 
{ None => return Ok(None), - Some((_, entry)) => entry.data, + Some(pin_hash) => pin_hash, }; - if data.len() != PIN_AUTH_LENGTH { + if pin_hash.len() != PIN_AUTH_LENGTH { return Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR); } - Ok(Some(*array_ref![data, 0, PIN_AUTH_LENGTH])) + Ok(Some(*array_ref![pin_hash, 0, PIN_AUTH_LENGTH])) } + /// Sets the PIN hash. + /// + /// If it was already defined, it is updated. pub fn set_pin_hash( &mut self, pin_hash: &[u8; PIN_AUTH_LENGTH], ) -> Result<(), Ctap2StatusCode> { - let entry = StoreEntry { - tag: PIN_HASH, - data: pin_hash, - sensitive: true, - }; - match self.store.find_one(&Key::PinHash) { - None => self.store.insert(entry)?, - Some((index, _)) => self.store.replace(index, entry)?, - } - Ok(()) + Ok(self.store.insert(key::PIN_HASH, pin_hash)?) } + /// Returns the number of remaining PIN retries. pub fn pin_retries(&self) -> Result { - Ok(self - .store - .find_one(&Key::PinRetries) - .map_or(MAX_PIN_RETRIES, |(_, entry)| { - debug_assert_eq!(entry.data.len(), 1); - entry.data[0] - })) + match self.store.find(key::PIN_RETRIES)? { + None => Ok(MAX_PIN_RETRIES), + Some(value) if value.len() == 1 => Ok(value[0]), + _ => Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR), + } } + /// Decrements the number of remaining PIN retries. 
pub fn decr_pin_retries(&mut self) -> Result<(), Ctap2StatusCode> { - match self.store.find_one(&Key::PinRetries) { - None => { - self.store.insert(StoreEntry { - tag: PIN_RETRIES, - data: &[MAX_PIN_RETRIES.saturating_sub(1)], - sensitive: false, - })?; - } - Some((index, entry)) => { - debug_assert_eq!(entry.data.len(), 1); - if entry.data[0] == 0 { - return Ok(()); - } - let new_value = entry.data[0].saturating_sub(1); - self.store.replace( - index, - StoreEntry { - tag: PIN_RETRIES, - data: &[new_value], - sensitive: false, - }, - )?; - } + let old_value = self.pin_retries()?; + let new_value = old_value.saturating_sub(1); + if new_value != old_value { + self.store.insert(key::PIN_RETRIES, &[new_value])?; } Ok(()) } + /// Resets the number of remaining PIN retries. pub fn reset_pin_retries(&mut self) -> Result<(), Ctap2StatusCode> { - if let Some((index, _)) = self.store.find_one(&Key::PinRetries) { - self.store.delete(index)?; - } - Ok(()) + Ok(self.store.remove(key::PIN_RETRIES)?) } + /// Returns the minimum PIN length. #[cfg(feature = "with_ctap2_1")] pub fn min_pin_length(&self) -> Result { - Ok(self - .store - .find_one(&Key::MinPinLength) - .map_or(DEFAULT_MIN_PIN_LENGTH, |(_, entry)| { - debug_assert_eq!(entry.data.len(), 1); - entry.data[0] - })) + match self.store.find(key::MIN_PIN_LENGTH)? { + None => Ok(DEFAULT_MIN_PIN_LENGTH), + Some(value) if value.len() == 1 => Ok(value[0]), + _ => Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR), + } } + /// Sets the minimum PIN length. #[cfg(feature = "with_ctap2_1")] pub fn set_min_pin_length(&mut self, min_pin_length: u8) -> Result<(), Ctap2StatusCode> { - let entry = StoreEntry { - tag: MIN_PIN_LENGTH, - data: &[min_pin_length], - sensitive: false, - }; - Ok(match self.store.find_one(&Key::MinPinLength) { - None => self.store.insert(entry)?, - Some((index, _)) => self.store.replace(index, entry)?, - }) + Ok(self.store.insert(key::MIN_PIN_LENGTH, &[min_pin_length])?) 
} + /// Returns the list of RP IDs that are used to check if reading the minimum PIN length is + /// allowed. #[cfg(feature = "with_ctap2_1")] pub fn _min_pin_length_rp_ids(&self) -> Result, Ctap2StatusCode> { let rp_ids = self .store - .find_one(&Key::MinPinLengthRpIds) - .map_or(Some(_DEFAULT_MIN_PIN_LENGTH_RP_IDS), |(_, entry)| { - _deserialize_min_pin_length_rp_ids(entry.data) + .find(key::_MIN_PIN_LENGTH_RP_IDS)? + .map_or(Some(_DEFAULT_MIN_PIN_LENGTH_RP_IDS), |value| { + _deserialize_min_pin_length_rp_ids(&value) }); debug_assert!(rp_ids.is_some()); Ok(rp_ids.unwrap_or(vec![])) } + /// Sets the list of RP IDs that are used to check if reading the minimum PIN length is allowed. #[cfg(feature = "with_ctap2_1")] pub fn _set_min_pin_length_rp_ids( &mut self, @@ -561,145 +450,186 @@ impl PersistentStore { if min_pin_length_rp_ids.len() > _MAX_RP_IDS_LENGTH { return Err(Ctap2StatusCode::CTAP2_ERR_KEY_STORE_FULL); } - let entry = StoreEntry { - tag: MIN_PIN_LENGTH_RP_IDS, - data: &_serialize_min_pin_length_rp_ids(min_pin_length_rp_ids)?, - sensitive: false, - }; - match self.store.find_one(&Key::MinPinLengthRpIds) { - None => { - self.store.insert(entry).unwrap(); - } - Some((index, _)) => { - self.store.replace(index, entry).unwrap(); - } - } - Ok(()) + Ok(self.store.insert( + key::_MIN_PIN_LENGTH_RP_IDS, + &_serialize_min_pin_length_rp_ids(min_pin_length_rp_ids)?, + )?) } + /// Returns the attestation private key if defined. pub fn attestation_private_key( &self, - ) -> Result, Ctap2StatusCode> { - let data = match self.store.find_one(&Key::AttestationPrivateKey) { - None => return Ok(None), - Some((_, entry)) => entry.data, - }; - if data.len() != key_material::ATTESTATION_PRIVATE_KEY_LENGTH { - return Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR); + ) -> Result, Ctap2StatusCode> { + match self.store.find(key::ATTESTATION_PRIVATE_KEY)? 
{ + None => Ok(None), + Some(key) if key.len() == key_material::ATTESTATION_PRIVATE_KEY_LENGTH => { + Ok(Some(*array_ref![ + key, + 0, + key_material::ATTESTATION_PRIVATE_KEY_LENGTH + ])) + } + Some(_) => Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR), } - Ok(Some(array_ref!( - data, - 0, - key_material::ATTESTATION_PRIVATE_KEY_LENGTH - ))) } + /// Sets the attestation private key. + /// + /// If it is already defined, it is overwritten. pub fn set_attestation_private_key( &mut self, attestation_private_key: &[u8; key_material::ATTESTATION_PRIVATE_KEY_LENGTH], ) -> Result<(), Ctap2StatusCode> { - let entry = StoreEntry { - tag: ATTESTATION_PRIVATE_KEY, - data: attestation_private_key, - sensitive: true, - }; - match self.store.find_one(&Key::AttestationPrivateKey) { - None => self.store.insert(entry)?, - _ => return Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR), + match self.store.find(key::ATTESTATION_PRIVATE_KEY)? { + None => Ok(self + .store + .insert(key::ATTESTATION_PRIVATE_KEY, attestation_private_key)?), + Some(_) => Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR), } - Ok(()) } + /// Returns the attestation certificate if defined. pub fn attestation_certificate(&self) -> Result>, Ctap2StatusCode> { - let data = match self.store.find_one(&Key::AttestationCertificate) { - None => return Ok(None), - Some((_, entry)) => entry.data, - }; - Ok(Some(data.to_vec())) + Ok(self.store.find(key::ATTESTATION_CERTIFICATE)?) } + /// Sets the attestation certificate. + /// + /// If it is already defined, it is overwritten. 
pub fn set_attestation_certificate( &mut self, attestation_certificate: &[u8], ) -> Result<(), Ctap2StatusCode> { - let entry = StoreEntry { - tag: ATTESTATION_CERTIFICATE, - data: attestation_certificate, - sensitive: false, - }; - match self.store.find_one(&Key::AttestationCertificate) { - None => self.store.insert(entry)?, - _ => return Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR), + match self.store.find(key::ATTESTATION_CERTIFICATE)? { + None => Ok(self + .store + .insert(key::ATTESTATION_CERTIFICATE, attestation_certificate)?), + Some(_) => Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR), } - Ok(()) } + /// Returns the AAGUID. pub fn aaguid(&self) -> Result<[u8; key_material::AAGUID_LENGTH], Ctap2StatusCode> { - let (_, entry) = self + let aaguid = self .store - .find_one(&Key::Aaguid) + .find(key::AAGUID)? .ok_or(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR)?; - let data = entry.data; - if data.len() != key_material::AAGUID_LENGTH { + if aaguid.len() != key_material::AAGUID_LENGTH { return Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR); } - Ok(*array_ref![data, 0, key_material::AAGUID_LENGTH]) + Ok(*array_ref![aaguid, 0, key_material::AAGUID_LENGTH]) } + /// Sets the AAGUID. + /// + /// If it is already defined, it is overwritten. pub fn set_aaguid( &mut self, aaguid: &[u8; key_material::AAGUID_LENGTH], ) -> Result<(), Ctap2StatusCode> { - let entry = StoreEntry { - tag: AAGUID, - data: aaguid, - sensitive: false, - }; - match self.store.find_one(&Key::Aaguid) { - None => self.store.insert(entry)?, - Some((index, _)) => self.store.replace(index, entry)?, - } - Ok(()) + Ok(self.store.insert(key::AAGUID, aaguid)?) } + /// Resets the store as for a CTAP reset. + /// + /// In particular persistent entries are not reset. 
 pub fn reset(&mut self, rng: &mut impl Rng256) -> Result<(), Ctap2StatusCode> { - loop { - let index = { - let mut iter = self.store.iter().filter(|(_, entry)| should_reset(entry)); - match iter.next() { - None => break, - Some((index, _)) => index, - } - }; - self.store.delete(index)?; - } - self.init(rng); + self.store.clear(key::NUM_PERSISTENT_KEYS)?; + self.init(rng)?; Ok(()) } } -impl From for Ctap2StatusCode { - fn from(error: StoreError) -> Ctap2StatusCode { +impl From for Ctap2StatusCode { + fn from(error: persistent_store::StoreError) -> Ctap2StatusCode { + use persistent_store::StoreError; match error { - StoreError::StoreFull => Ctap2StatusCode::CTAP2_ERR_KEY_STORE_FULL, - StoreError::InvalidTag => unreachable!(), - StoreError::InvalidPrecondition => unreachable!(), + // This error is expected. The store is full. + StoreError::NoCapacity => Ctap2StatusCode::CTAP2_ERR_KEY_STORE_FULL, + // This error is expected. The flash is out of life. + StoreError::NoLifetime => Ctap2StatusCode::CTAP2_ERR_KEY_STORE_FULL, + // This error is expected if we don't satisfy the store preconditions. For example we + // try to store a credential which is too long. + StoreError::InvalidArgument => Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR, + // This error is not expected. The storage has been tampered with. We could erase the + // storage. + StoreError::InvalidStorage => Ctap2StatusCode::CTAP2_ERR_VENDOR_HARDWARE_FAILURE, + // This error is not expected. The kernel is failing our syscalls. + StoreError::StorageError => Ctap2StatusCode::CTAP1_ERR_OTHER, } } } -fn should_reset(entry: &StoreEntry<'_>) -> bool { - match entry.tag { - ATTESTATION_PRIVATE_KEY | ATTESTATION_CERTIFICATE | AAGUID => false, - _ => true, +/// Iterator for credentials. +struct IterCredentials<'a> { + /// The store being iterated. + store: &'a persistent_store::Store, + + /// The store iterator. + iter: persistent_store::StoreIter<'a, Storage>, + + /// The iteration result. 
+ /// + /// It starts as success and gets written at most once with an error if something fails. The + /// iteration stops as soon as an error is encountered. + result: &'a mut Result<(), Ctap2StatusCode>, +} + +impl<'a> IterCredentials<'a> { + /// Creates a credential iterator. + fn new( + store: &'a persistent_store::Store, + result: &'a mut Result<(), Ctap2StatusCode>, + ) -> Result, Ctap2StatusCode> { + let iter = store.iter()?; + Ok(IterCredentials { + store, + iter, + result, + }) + } + + /// Marks the iteration as failed if the content is absent. + /// + /// For convenience, the function takes and returns ownership instead of taking a shared + /// reference and returning nothing. This permits to use it in both expressions and statements + /// instead of statements only. + fn unwrap(&mut self, x: Option) -> Option { + if x.is_none() { + *self.result = Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR); + } + x } } +impl<'a> Iterator for IterCredentials<'a> { + type Item = (usize, PublicKeyCredentialSource); + + fn next(&mut self) -> Option<(usize, PublicKeyCredentialSource)> { + if self.result.is_err() { + return None; + } + while let Some(next) = self.iter.next() { + let handle = self.unwrap(next.ok())?; + let key = handle.get_key(); + if !key::CREDENTIALS.contains(&key) { + continue; + } + let value = self.unwrap(handle.get_value(&self.store).ok())?; + let credential = self.unwrap(deserialize_credential(&value))?; + return Some((key, credential)); + } + None + } +} + +/// Deserializes a credential from storage representation. fn deserialize_credential(data: &[u8]) -> Option { let cbor = cbor::read(data).ok()?; cbor.try_into().ok() } +/// Serializes a credential to storage representation. 
fn serialize_credential(credential: PublicKeyCredentialSource) -> Result<Vec<u8>, Ctap2StatusCode> { let mut data = Vec::new(); if cbor::write(credential.into(), &mut data) { @@ -709,6 +639,7 @@ fn serialize_credential(credential: PublicKeyCredentialSource) -> Result } } +/// Deserializes a list of RP IDs from storage representation. #[cfg(feature = "with_ctap2_1")] fn _deserialize_min_pin_length_rp_ids(data: &[u8]) -> Option<Vec<String>> { let cbor = cbor::read(data).ok()?; @@ -720,6 +651,7 @@ fn _deserialize_min_pin_length_rp_ids(data: &[u8]) -> Option<Vec<String>> { .ok() } +/// Serializes a list of RP IDs to storage representation. #[cfg(feature = "with_ctap2_1")] fn _serialize_min_pin_length_rp_ids(rp_ids: Vec<String>) -> Result<Vec<u8>, Ctap2StatusCode> { let mut data = Vec::new(); @@ -756,28 +688,6 @@ mod test { } } - #[test] - fn format_overhead() { - // nRF52840 NVMC - const WORD_SIZE: usize = 4; - const PAGE_SIZE: usize = 0x1000; - const NUM_PAGES: usize = 100; - let store = vec![0xff; NUM_PAGES * PAGE_SIZE].into_boxed_slice(); - let options = embedded_flash::BufferOptions { - word_size: WORD_SIZE, - page_size: PAGE_SIZE, - max_word_writes: 2, - max_page_erases: 10000, - strict_write: true, - }; - let storage = Storage::new(store, options); - let store = embedded_flash::Store::new(storage, Config).unwrap(); - // We can replace 3 bytes with minimal overhead. - assert_eq!(store.replace_len(false, 0), 2 * WORD_SIZE); - assert_eq!(store.replace_len(false, 3), 3 * WORD_SIZE); - assert_eq!(store.replace_len(false, 4), 3 * WORD_SIZE); - } - #[test] fn test_store() { let mut rng = ThreadRng256 {}; @@ -1080,21 +990,21 @@ mod test { let mut persistent_store = PersistentStore::new(&mut rng); // The pin retries is initially at the maximum. - assert_eq!(persistent_store.pin_retries().unwrap(), MAX_PIN_RETRIES); + assert_eq!(persistent_store.pin_retries(), Ok(MAX_PIN_RETRIES)); // Decrementing the pin retries decrements the pin retries.
for pin_retries in (0..MAX_PIN_RETRIES).rev() { persistent_store.decr_pin_retries().unwrap(); - assert_eq!(persistent_store.pin_retries().unwrap(), pin_retries); + assert_eq!(persistent_store.pin_retries(), Ok(pin_retries)); } // Decrementing the pin retries after zero does not modify the pin retries. persistent_store.decr_pin_retries().unwrap(); - assert_eq!(persistent_store.pin_retries().unwrap(), 0); + assert_eq!(persistent_store.pin_retries(), Ok(0)); // Resetting the pin retries resets the pin retries. persistent_store.reset_pin_retries().unwrap(); - assert_eq!(persistent_store.pin_retries().unwrap(), MAX_PIN_RETRIES); + assert_eq!(persistent_store.pin_retries(), Ok(MAX_PIN_RETRIES)); } #[test] @@ -1124,7 +1034,7 @@ mod test { // The persistent keys stay initialized and preserve their value after a reset. persistent_store.reset(&mut rng).unwrap(); assert_eq!( - persistent_store.attestation_private_key().unwrap().unwrap(), + &persistent_store.attestation_private_key().unwrap().unwrap(), key_material::ATTESTATION_PRIVATE_KEY ); assert_eq!( diff --git a/src/ctap/storage/key.rs b/src/ctap/storage/key.rs new file mode 100644 index 0000000..5c5b20e --- /dev/null +++ b/src/ctap/storage/key.rs @@ -0,0 +1,146 @@ +// Copyright 2019-2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/// Number of keys that persist the CTAP reset command. +pub const NUM_PERSISTENT_KEYS: usize = 20; + +/// Defines a key given its name and value or range of values. 
+macro_rules! make_key { + ($(#[$doc: meta])* $name: ident = $key: literal..$end: literal) => { + $(#[$doc])* pub const $name: core::ops::Range<usize> = $key..$end; + }; + ($(#[$doc: meta])* $name: ident = $key: literal) => { + $(#[$doc])* pub const $name: usize = $key; + }; +} + +/// Returns the range of values of a key given its value description. +#[cfg(test)] +macro_rules! make_range { + ($key: literal..$end: literal) => { + $key..$end + }; + ($key: literal) => { + $key..$key + 1 + }; +} + +/// Helper to define keys as a partial partition of a range. +macro_rules! make_partition { + ($range: expr, + $( + $(#[$doc: meta])* + $name: ident = $key: literal $(.. $end: literal)?; + )*) => { + $( + make_key!($(#[$doc])* $name = $key $(.. $end)?); + )* + #[cfg(test)] + const KEY_RANGE: core::ops::Range<usize> = $range; + #[cfg(test)] + const ALL_KEYS: &[core::ops::Range<usize>] = &[$(make_range!($key $(.. $end)?)),*]; + }; + } + +make_partition! { + // We reserve 0 and 2048+ for possible migration purposes. We add persistent entries starting + // from 1 and going up. We add non-persistent entries starting from 2047 and going down. This + // way, we don't commit to a fixed number of persistent keys. + 1..2048, + + // WARNING: Keys should not be deleted but prefixed with `_` to avoid accidentally reusing them. + + /// The attestation private key. + ATTESTATION_PRIVATE_KEY = 1; + + /// The attestation certificate. + ATTESTATION_CERTIFICATE = 2; + + /// The aaguid. + AAGUID = 3; + + // This is the persistent key limit: + // - When adding a (persistent) key above this message, make sure its value is smaller than + // NUM_PERSISTENT_KEYS. + // - When adding a (non-persistent) key below this message, make sure its value is bigger or + // equal than NUM_PERSISTENT_KEYS. + + /// Reserved for future credential-related objects.
+ /// + /// In particular, additional credentials could be added there by reducing the lower bound of + /// the credential range below as well as the upper bound of this range in a similar manner. + _RESERVED_CREDENTIALS = 1000..1700; + + /// The credentials. + /// + /// Depending on `MAX_SUPPORTED_RESIDENTIAL_KEYS`, only a prefix of those keys is used. Each + /// board may configure `MAX_SUPPORTED_RESIDENTIAL_KEYS` depending on the storage size. + CREDENTIALS = 1700..2000; + + /// The secret of the CredRandom feature. + CRED_RANDOM_SECRET = 2041; + + /// List of RP IDs allowed to read the minimum PIN length. + #[cfg(feature = "with_ctap2_1")] + _MIN_PIN_LENGTH_RP_IDS = 2042; + + /// The minimum PIN length. + /// + /// If the entry is absent, the minimum PIN length is `DEFAULT_MIN_PIN_LENGTH`. + #[cfg(feature = "with_ctap2_1")] + MIN_PIN_LENGTH = 2043; + + /// The number of PIN retries. + /// + /// If the entry is absent, the number of PIN retries is `MAX_PIN_RETRIES`. + PIN_RETRIES = 2044; + + /// The PIN hash. + /// + /// If the entry is absent, there is no PIN set. + PIN_HASH = 2045; + + /// The encryption and hmac keys. + /// + /// This entry is always present. It is generated at startup if absent. + MASTER_KEYS = 2046; + + /// The global signature counter. + /// + /// If the entry is absent, the counter is 0. + GLOBAL_SIGNATURE_COUNTER = 2047; +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn enough_credentials() { + use super::super::MAX_SUPPORTED_RESIDENTIAL_KEYS; + assert!(MAX_SUPPORTED_RESIDENTIAL_KEYS <= CREDENTIALS.end - CREDENTIALS.start); + } + + #[test] + fn keys_are_disjoint() { + // Check that keys are in the range. + for keys in ALL_KEYS { + assert!(KEY_RANGE.start <= keys.start && keys.end <= KEY_RANGE.end); + } + // Check that keys are assigned at most once, essentially partitioning the range. 
+ for key in KEY_RANGE { + assert!(ALL_KEYS.iter().filter(|keys| keys.contains(&key)).count() <= 1); + } + } +} diff --git a/src/embedded_flash/buffer.rs b/src/embedded_flash/buffer.rs deleted file mode 100644 index 0e7171a..0000000 --- a/src/embedded_flash/buffer.rs +++ /dev/null @@ -1,457 +0,0 @@ -// Copyright 2019 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use super::{Index, Storage, StorageError, StorageResult}; -use alloc::boxed::Box; -use alloc::vec; - -pub struct BufferStorage { - storage: Box<[u8]>, - options: BufferOptions, - word_writes: Box<[usize]>, - page_erases: Box<[usize]>, - snapshot: Snapshot, -} - -#[derive(Copy, Clone, Debug)] -pub struct BufferOptions { - /// Size of a word in bytes. - pub word_size: usize, - - /// Size of a page in bytes. - pub page_size: usize, - - /// How many times a word can be written between page erasures - pub max_word_writes: usize, - - /// How many times a page can be erased. - pub max_page_erases: usize, - - /// Bits cannot be written from 0 to 1. - pub strict_write: bool, -} - -impl BufferStorage { - /// Creates a fake embedded flash using a buffer. - /// - /// This implementation checks that no words are written more than `max_word_writes` between - /// page erasures and than no pages are erased more than `max_page_erases`. If `strict_write` is - /// true, it also checks that no bits are written from 0 to 1. 
It also permits to take snapshots - /// of the storage during write and erase operations (although words would still be written or - /// erased completely). - /// - /// # Panics - /// - /// The following preconditions must hold: - /// - `options.word_size` must be a power of two. - /// - `options.page_size` must be a power of two. - /// - `options.page_size` must be word-aligned. - /// - `storage.len()` must be page-aligned. - pub fn new(storage: Box<[u8]>, options: BufferOptions) -> BufferStorage { - assert!(options.word_size.is_power_of_two()); - assert!(options.page_size.is_power_of_two()); - let num_words = storage.len() / options.word_size; - let num_pages = storage.len() / options.page_size; - let buffer = BufferStorage { - storage, - options, - word_writes: vec![0; num_words].into_boxed_slice(), - page_erases: vec![0; num_pages].into_boxed_slice(), - snapshot: Snapshot::Ready, - }; - assert!(buffer.is_word_aligned(buffer.options.page_size)); - assert!(buffer.is_page_aligned(buffer.storage.len())); - buffer - } - - /// Takes a snapshot of the storage after a given amount of word operations. - /// - /// Each time a word is written or erased, the delay is decremented if positive. Otherwise, a - /// snapshot is taken before the operation is executed. - /// - /// # Panics - /// - /// Panics if a snapshot has been armed and not examined. - pub fn arm_snapshot(&mut self, delay: usize) { - self.snapshot.arm(delay); - } - - /// Unarms and returns the snapshot or the delay remaining. - /// - /// # Panics - /// - /// Panics if a snapshot was not armed. - pub fn get_snapshot(&mut self) -> Result, usize> { - self.snapshot.get() - } - - /// Takes a snapshot of the storage. - pub fn take_snapshot(&self) -> Box<[u8]> { - self.storage.clone() - } - - /// Returns the storage. 
- pub fn get_storage(self) -> Box<[u8]> { - self.storage - } - - fn is_word_aligned(&self, x: usize) -> bool { - x & (self.options.word_size - 1) == 0 - } - - fn is_page_aligned(&self, x: usize) -> bool { - x & (self.options.page_size - 1) == 0 - } - - /// Writes a slice to the storage. - /// - /// The slice `value` is written to `index`. The `erase` boolean specifies whether this is an - /// erase operation or a write operation which matters for the checks and updating the shadow - /// storage. This also takes a snapshot of the storage if a snapshot was armed and the delay has - /// elapsed. - /// - /// The following preconditions should hold: - /// - `index` is word-aligned. - /// - `value.len()` is word-aligned. - /// - /// The following checks are performed: - /// - The region of length `value.len()` starting at `index` fits in a storage page. - /// - A word is not written more than `max_word_writes`. - /// - A page is not erased more than `max_page_erases`. - /// - The new word only switches 1s to 0s (only if `strict_write` is set). - fn update_storage(&mut self, index: Index, value: &[u8], erase: bool) -> StorageResult<()> { - debug_assert!(self.is_word_aligned(index.byte) && self.is_word_aligned(value.len())); - let dst = index.range(value.len(), self)?.step_by(self.word_size()); - let src = value.chunks(self.word_size()); - // Check and update page shadow. - if erase { - let page = index.page; - assert!(self.page_erases[page] < self.max_page_erases()); - self.page_erases[page] += 1; - } - for (byte, val) in dst.zip(src) { - let range = byte..byte + self.word_size(); - // The driver doesn't write identical words. - if &self.storage[range.clone()] == val { - continue; - } - // Check and update word shadow. - let word = byte / self.word_size(); - if erase { - self.word_writes[word] = 0; - } else { - assert!(self.word_writes[word] < self.max_word_writes()); - self.word_writes[word] += 1; - } - // Check strict write. 
- if !erase && self.options.strict_write { - for (byte, &val) in range.clone().zip(val) { - assert_eq!(self.storage[byte] & val, val); - } - } - // Take snapshot if armed and delay expired. - self.snapshot.take(&self.storage); - // Write storage - self.storage[range].copy_from_slice(val); - } - Ok(()) - } -} - -impl Storage for BufferStorage { - fn word_size(&self) -> usize { - self.options.word_size - } - - fn page_size(&self) -> usize { - self.options.page_size - } - - fn num_pages(&self) -> usize { - self.storage.len() / self.options.page_size - } - - fn max_word_writes(&self) -> usize { - self.options.max_word_writes - } - - fn max_page_erases(&self) -> usize { - self.options.max_page_erases - } - - fn read_slice(&self, index: Index, length: usize) -> StorageResult<&[u8]> { - Ok(&self.storage[index.range(length, self)?]) - } - - fn write_slice(&mut self, index: Index, value: &[u8]) -> StorageResult<()> { - if !self.is_word_aligned(index.byte) || !self.is_word_aligned(value.len()) { - return Err(StorageError::NotAligned); - } - self.update_storage(index, value, false) - } - - fn erase_page(&mut self, page: usize) -> StorageResult<()> { - let index = Index { page, byte: 0 }; - let value = vec![0xff; self.page_size()]; - self.update_storage(index, &value, true) - } -} - -// Controls when a snapshot of the storage is taken. -// -// This can be used to simulate power-offs while the device is writing to the storage or erasing a -// page in the storage. -enum Snapshot { - // Mutable word operations have normal behavior. - Ready, - // If the delay is positive, mutable word operations decrement it. If the count is zero, mutable - // word operations take a snapshot of the storage. - Armed { delay: usize }, - // Mutable word operations have normal behavior. 
- Taken { storage: Box<[u8]> }, -} - -impl Snapshot { - fn arm(&mut self, delay: usize) { - match self { - Snapshot::Ready => *self = Snapshot::Armed { delay }, - _ => panic!(), - } - } - - fn get(&mut self) -> Result, usize> { - let mut snapshot = Snapshot::Ready; - core::mem::swap(self, &mut snapshot); - match snapshot { - Snapshot::Armed { delay } => Err(delay), - Snapshot::Taken { storage } => Ok(storage), - _ => panic!(), - } - } - - fn take(&mut self, storage: &[u8]) { - if let Snapshot::Armed { delay } = self { - if *delay == 0 { - let storage = storage.to_vec().into_boxed_slice(); - *self = Snapshot::Taken { storage }; - } else { - *delay -= 1; - } - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - const NUM_PAGES: usize = 2; - const OPTIONS: BufferOptions = BufferOptions { - word_size: 4, - page_size: 16, - max_word_writes: 2, - max_page_erases: 3, - strict_write: true, - }; - // Those words are decreasing bit patterns. Bits are only changed from 1 to 0 and at last one - // bit is changed. 
- const BLANK_WORD: &[u8] = &[0xff, 0xff, 0xff, 0xff]; - const FIRST_WORD: &[u8] = &[0xee, 0xdd, 0xbb, 0x77]; - const SECOND_WORD: &[u8] = &[0xca, 0xc9, 0xa9, 0x65]; - const THIRD_WORD: &[u8] = &[0x88, 0x88, 0x88, 0x44]; - - fn new_storage() -> Box<[u8]> { - vec![0xff; NUM_PAGES * OPTIONS.page_size].into_boxed_slice() - } - - #[test] - fn words_are_decreasing() { - fn assert_is_decreasing(prev: &[u8], next: &[u8]) { - for (&prev, &next) in prev.iter().zip(next.iter()) { - assert_eq!(prev & next, next); - assert!(prev != next); - } - } - assert_is_decreasing(BLANK_WORD, FIRST_WORD); - assert_is_decreasing(FIRST_WORD, SECOND_WORD); - assert_is_decreasing(SECOND_WORD, THIRD_WORD); - } - - #[test] - fn options_ok() { - let buffer = BufferStorage::new(new_storage(), OPTIONS); - assert_eq!(buffer.word_size(), OPTIONS.word_size); - assert_eq!(buffer.page_size(), OPTIONS.page_size); - assert_eq!(buffer.num_pages(), NUM_PAGES); - assert_eq!(buffer.max_word_writes(), OPTIONS.max_word_writes); - assert_eq!(buffer.max_page_erases(), OPTIONS.max_page_erases); - } - - #[test] - fn read_write_ok() { - let mut buffer = BufferStorage::new(new_storage(), OPTIONS); - let index = Index { page: 0, byte: 0 }; - let next_index = Index { page: 0, byte: 4 }; - assert_eq!(buffer.read_slice(index, 4).unwrap(), BLANK_WORD); - buffer.write_slice(index, FIRST_WORD).unwrap(); - assert_eq!(buffer.read_slice(index, 4).unwrap(), FIRST_WORD); - assert_eq!(buffer.read_slice(next_index, 4).unwrap(), BLANK_WORD); - } - - #[test] - fn erase_ok() { - let mut buffer = BufferStorage::new(new_storage(), OPTIONS); - let index = Index { page: 0, byte: 0 }; - let other_index = Index { page: 1, byte: 0 }; - buffer.write_slice(index, FIRST_WORD).unwrap(); - buffer.write_slice(other_index, FIRST_WORD).unwrap(); - assert_eq!(buffer.read_slice(index, 4).unwrap(), FIRST_WORD); - assert_eq!(buffer.read_slice(other_index, 4).unwrap(), FIRST_WORD); - buffer.erase_page(0).unwrap(); - assert_eq!(buffer.read_slice(index, 
4).unwrap(), BLANK_WORD); - assert_eq!(buffer.read_slice(other_index, 4).unwrap(), FIRST_WORD); - } - - #[test] - fn invalid_range() { - let mut buffer = BufferStorage::new(new_storage(), OPTIONS); - let index = Index { page: 0, byte: 12 }; - let half_index = Index { page: 0, byte: 14 }; - let over_index = Index { page: 0, byte: 16 }; - let bad_page = Index { page: 2, byte: 0 }; - - // Reading a word in the storage is ok. - assert!(buffer.read_slice(index, 4).is_ok()); - // Reading a half-word in the storage is ok. - assert!(buffer.read_slice(half_index, 2).is_ok()); - // Reading even a single byte outside a page is not ok. - assert!(buffer.read_slice(over_index, 1).is_err()); - // But reading an empty slice just after a page is ok. - assert!(buffer.read_slice(over_index, 0).is_ok()); - // Reading even an empty slice outside the storage is not ok. - assert!(buffer.read_slice(bad_page, 0).is_err()); - - // Writing a word in the storage is ok. - assert!(buffer.write_slice(index, FIRST_WORD).is_ok()); - // Writing an unaligned word is not ok. - assert!(buffer.write_slice(half_index, FIRST_WORD).is_err()); - // Writing a word outside a page is not ok. - assert!(buffer.write_slice(over_index, FIRST_WORD).is_err()); - // But writing an empty slice just after a page is ok. - assert!(buffer.write_slice(over_index, &[]).is_ok()); - // Writing even an empty slice outside the storage is not ok. - assert!(buffer.write_slice(bad_page, &[]).is_err()); - - // Only pages in the storage can be erased. 
- assert!(buffer.erase_page(0).is_ok()); - assert!(buffer.erase_page(2).is_err()); - } - - #[test] - fn write_twice_ok() { - let mut buffer = BufferStorage::new(new_storage(), OPTIONS); - let index = Index { page: 0, byte: 4 }; - assert!(buffer.write_slice(index, FIRST_WORD).is_ok()); - assert!(buffer.write_slice(index, SECOND_WORD).is_ok()); - } - - #[test] - fn write_twice_and_once_ok() { - let mut buffer = BufferStorage::new(new_storage(), OPTIONS); - let index = Index { page: 0, byte: 0 }; - let next_index = Index { page: 0, byte: 4 }; - assert!(buffer.write_slice(index, FIRST_WORD).is_ok()); - assert!(buffer.write_slice(index, SECOND_WORD).is_ok()); - assert!(buffer.write_slice(next_index, THIRD_WORD).is_ok()); - } - - #[test] - #[should_panic] - fn write_three_times_panics() { - let mut buffer = BufferStorage::new(new_storage(), OPTIONS); - let index = Index { page: 0, byte: 4 }; - assert!(buffer.write_slice(index, FIRST_WORD).is_ok()); - assert!(buffer.write_slice(index, SECOND_WORD).is_ok()); - let _ = buffer.write_slice(index, THIRD_WORD); - } - - #[test] - fn write_twice_then_once_ok() { - let mut buffer = BufferStorage::new(new_storage(), OPTIONS); - let index = Index { page: 0, byte: 0 }; - assert!(buffer.write_slice(index, FIRST_WORD).is_ok()); - assert!(buffer.write_slice(index, SECOND_WORD).is_ok()); - assert!(buffer.erase_page(0).is_ok()); - assert!(buffer.write_slice(index, FIRST_WORD).is_ok()); - } - - #[test] - fn erase_three_times_ok() { - let mut buffer = BufferStorage::new(new_storage(), OPTIONS); - assert!(buffer.erase_page(0).is_ok()); - assert!(buffer.erase_page(0).is_ok()); - assert!(buffer.erase_page(0).is_ok()); - } - - #[test] - fn erase_three_times_and_once_ok() { - let mut buffer = BufferStorage::new(new_storage(), OPTIONS); - assert!(buffer.erase_page(0).is_ok()); - assert!(buffer.erase_page(0).is_ok()); - assert!(buffer.erase_page(0).is_ok()); - assert!(buffer.erase_page(1).is_ok()); - } - - #[test] - #[should_panic] - fn 
erase_four_times_panics() { - let mut buffer = BufferStorage::new(new_storage(), OPTIONS); - assert!(buffer.erase_page(0).is_ok()); - assert!(buffer.erase_page(0).is_ok()); - assert!(buffer.erase_page(0).is_ok()); - let _ = buffer.erase_page(0).is_ok(); - } - - #[test] - #[should_panic] - fn switch_zero_to_one_panics() { - let mut buffer = BufferStorage::new(new_storage(), OPTIONS); - let index = Index { page: 0, byte: 0 }; - assert!(buffer.write_slice(index, SECOND_WORD).is_ok()); - let _ = buffer.write_slice(index, FIRST_WORD); - } - - #[test] - fn get_storage_ok() { - let mut buffer = BufferStorage::new(new_storage(), OPTIONS); - let index = Index { page: 0, byte: 4 }; - buffer.write_slice(index, FIRST_WORD).unwrap(); - let storage = buffer.get_storage(); - assert_eq!(&storage[..4], BLANK_WORD); - assert_eq!(&storage[4..8], FIRST_WORD); - } - - #[test] - fn snapshot_ok() { - let mut buffer = BufferStorage::new(new_storage(), OPTIONS); - let index = Index { page: 0, byte: 0 }; - let value = [FIRST_WORD, SECOND_WORD].concat(); - buffer.arm_snapshot(1); - buffer.write_slice(index, &value).unwrap(); - let storage = buffer.get_snapshot().unwrap(); - assert_eq!(&storage[..8], &[FIRST_WORD, BLANK_WORD].concat()[..]); - let storage = buffer.take_snapshot(); - assert_eq!(&storage[..8], &value[..]); - } -} diff --git a/src/embedded_flash/mod.rs b/src/embedded_flash/mod.rs index 05407c0..b16504b 100644 --- a/src/embedded_flash/mod.rs +++ b/src/embedded_flash/mod.rs @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC +// Copyright 2019-2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,16 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#[cfg(any(test, feature = "ram_storage"))] -mod buffer; -mod storage; -mod store; #[cfg(not(any(test, feature = "ram_storage")))] mod syscall; -#[cfg(any(test, feature = "ram_storage"))] -pub use self::buffer::{BufferOptions, BufferStorage}; -pub use self::storage::{Index, Storage, StorageError, StorageResult}; -pub use self::store::{Store, StoreConfig, StoreEntry, StoreError, StoreIndex}; #[cfg(not(any(test, feature = "ram_storage")))] pub use self::syscall::SyscallStorage; diff --git a/src/embedded_flash/storage.rs b/src/embedded_flash/storage.rs deleted file mode 100644 index fe87ac4..0000000 --- a/src/embedded_flash/storage.rs +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2019 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#[derive(Copy, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "std", derive(Debug))] -pub struct Index { - pub page: usize, - pub byte: usize, -} - -#[derive(Debug)] -pub enum StorageError { - BadFlash, - NotAligned, - OutOfBounds, - KernelError { code: isize }, -} - -pub type StorageResult = Result; - -/// Abstraction for embedded flash storage. -pub trait Storage { - /// Returns the size of a word in bytes. - fn word_size(&self) -> usize; - - /// Returns the size of a page in bytes. - fn page_size(&self) -> usize; - - /// Returns the number of pages in the storage. - fn num_pages(&self) -> usize; - - /// Returns how many times a word can be written between page erasures. 
- fn max_word_writes(&self) -> usize; - - /// Returns how many times a page can be erased in the lifetime of the flash. - fn max_page_erases(&self) -> usize; - - /// Reads a slice from the storage. - /// - /// The slice does not need to be word-aligned. - /// - /// # Errors - /// - /// The `index` must designate `length` bytes in the storage. - fn read_slice(&self, index: Index, length: usize) -> StorageResult<&[u8]>; - - /// Writes a word-aligned slice to the storage. - /// - /// The written words should not have been written too many times since last page erasure. - /// - /// # Errors - /// - /// The following preconditions must hold: - /// - `index` must be word-aligned. - /// - `value.len()` must be a multiple of the word size. - /// - `index` must designate `value.len()` bytes in the storage. - /// - `value` must be in memory until [read-only allow][tock_1274] is resolved. - /// - /// [tock_1274]: https://github.com/tock/tock/issues/1274. - fn write_slice(&mut self, index: Index, value: &[u8]) -> StorageResult<()>; - - /// Erases a page of the storage. - /// - /// # Errors - /// - /// The `page` must be in the storage. - fn erase_page(&mut self, page: usize) -> StorageResult<()>; -} - -impl Index { - /// Returns whether a slice fits in a storage page. - fn is_valid(self, length: usize, storage: &impl Storage) -> bool { - self.page < storage.num_pages() - && storage - .page_size() - .checked_sub(length) - .map(|limit| self.byte <= limit) - .unwrap_or(false) - } - - /// Returns the range of a valid slice. - /// - /// The range starts at `self` with `length` bytes. 
- pub fn range( - self, - length: usize, - storage: &impl Storage, - ) -> StorageResult> { - if self.is_valid(length, storage) { - let start = self.page * storage.page_size() + self.byte; - Ok(start..start + length) - } else { - Err(StorageError::OutOfBounds) - } - } -} diff --git a/src/embedded_flash/store/bitfield.rs b/src/embedded_flash/store/bitfield.rs deleted file mode 100644 index 797c78b..0000000 --- a/src/embedded_flash/store/bitfield.rs +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright 2019 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/// Defines a consecutive sequence of bits. -#[derive(Copy, Clone)] -pub struct BitRange { - /// The first bit of the sequence. - pub start: usize, - - /// The length in bits of the sequence. - pub length: usize, -} - -impl BitRange { - /// Returns the first bit following a bit range. - pub fn end(self) -> usize { - self.start + self.length - } -} - -/// Defines a consecutive sequence of bytes. -/// -/// The bits in those bytes are ignored which essentially creates a gap in a sequence of bits. The -/// gap is necessarily at byte boundaries. This is used to ignore the user data in an entry -/// essentially providing a view of the entry information (header and footer). -#[derive(Copy, Clone)] -pub struct ByteGap { - pub start: usize, - pub length: usize, -} - -/// Empty gap. All bits count. 
-pub const NO_GAP: ByteGap = ByteGap { - start: 0, - length: 0, -}; - -impl ByteGap { - /// Translates a bit to skip the gap. - fn shift(self, bit: usize) -> usize { - if bit < 8 * self.start { - bit - } else { - bit + 8 * self.length - } - } - - /// Returns the slice of `data` corresponding to the gap. - pub fn slice(self, data: &[u8]) -> &[u8] { - &data[self.start..self.start + self.length] - } -} - -/// Returns whether a bit is set in a sequence of bits. -/// -/// The sequence of bits is little-endian (both for bytes and bits) and defined by the bits that -/// are in `data` but not in `gap`. -pub fn is_zero(bit: usize, data: &[u8], gap: ByteGap) -> bool { - let bit = gap.shift(bit); - debug_assert!(bit < 8 * data.len()); - data[bit / 8] & (1 << (bit % 8)) == 0 -} - -/// Sets a bit to zero in a sequence of bits. -/// -/// The sequence of bits is little-endian (both for bytes and bits) and defined by the bits that -/// are in `data` but not in `gap`. -pub fn set_zero(bit: usize, data: &mut [u8], gap: ByteGap) { - let bit = gap.shift(bit); - debug_assert!(bit < 8 * data.len()); - data[bit / 8] &= !(1 << (bit % 8)); -} - -/// Returns a little-endian value in a sequence of bits. -/// -/// The sequence of bits is little-endian (both for bytes and bits) and defined by the bits that -/// are in `data` but not in `gap`. The range of bits where the value is stored in defined by -/// `range`. The value must fit in a `usize`. -pub fn get_range(range: BitRange, data: &[u8], gap: ByteGap) -> usize { - debug_assert!(range.length <= 8 * core::mem::size_of::()); - let mut result = 0; - for i in 0..range.length { - if !is_zero(range.start + i, data, gap) { - result |= 1 << i; - } - } - result -} - -/// Sets a little-endian value in a sequence of bits. -/// -/// The sequence of bits is little-endian (both for bytes and bits) and defined by the bits that -/// are in `data` but not in `gap`. The range of bits where the value is stored in defined by -/// `range`. 
The bits set to 1 in `value` must also be set to `1` in the sequence of bits. -pub fn set_range(range: BitRange, data: &mut [u8], gap: ByteGap, value: usize) { - debug_assert!(range.length == 8 * core::mem::size_of::() || value < 1 << range.length); - for i in 0..range.length { - if value & 1 << i == 0 { - set_zero(range.start + i, data, gap); - } - } -} - -/// Tests the `is_zero` and `set_zero` pair of functions. -#[test] -fn zero_ok() { - const GAP: ByteGap = ByteGap { - start: 2, - length: 1, - }; - for i in 0..24 { - assert!(!is_zero(i, &[0xffu8, 0xff, 0x00, 0xff] as &[u8], GAP)); - } - // Tests reading and setting a bit. The result should have all bits set to 1 except for the bit - // to test and the gap. - fn test(bit: usize, result: &[u8]) { - assert!(is_zero(bit, result, GAP)); - let mut data = vec![0xff; result.len()]; - // Set the gap bits to 0. - for i in 0..GAP.length { - data[GAP.start + i] = 0x00; - } - set_zero(bit, &mut data, GAP); - assert_eq!(data, result); - } - test(0, &[0xfe, 0xff, 0x00, 0xff]); - test(1, &[0xfd, 0xff, 0x00, 0xff]); - test(2, &[0xfb, 0xff, 0x00, 0xff]); - test(7, &[0x7f, 0xff, 0x00, 0xff]); - test(8, &[0xff, 0xfe, 0x00, 0xff]); - test(15, &[0xff, 0x7f, 0x00, 0xff]); - test(16, &[0xff, 0xff, 0x00, 0xfe]); - test(17, &[0xff, 0xff, 0x00, 0xfd]); - test(23, &[0xff, 0xff, 0x00, 0x7f]); -} - -/// Tests the `get_range` and `set_range` pair of functions. -#[test] -fn range_ok() { - // Tests reading and setting a range. The result should have all bits set to 1 except for the - // range to test and the gap. 
- fn test(start: usize, length: usize, value: usize, result: &[u8], gap: ByteGap) { - let range = BitRange { start, length }; - assert_eq!(get_range(range, result, gap), value); - let mut data = vec![0xff; result.len()]; - for i in 0..gap.length { - data[gap.start + i] = 0x00; - } - set_range(range, &mut data, gap, value); - assert_eq!(data, result); - } - test(0, 8, 42, &[42], NO_GAP); - test(3, 12, 0b11_0101, &[0b1010_1111, 0b1000_0001], NO_GAP); - test(0, 16, 0x1234, &[0x34, 0x12], NO_GAP); - test(4, 16, 0x1234, &[0x4f, 0x23, 0xf1], NO_GAP); - let mut gap = ByteGap { - start: 1, - length: 1, - }; - test(3, 12, 0b11_0101, &[0b1010_1111, 0x00, 0b1000_0001], gap); - gap.length = 2; - test(0, 16, 0x1234, &[0x34, 0x00, 0x00, 0x12], gap); - gap.start = 2; - gap.length = 1; - test(4, 16, 0x1234, &[0x4f, 0x23, 0x00, 0xf1], gap); -} diff --git a/src/embedded_flash/store/format.rs b/src/embedded_flash/store/format.rs deleted file mode 100644 index 03787cf..0000000 --- a/src/embedded_flash/store/format.rs +++ /dev/null @@ -1,565 +0,0 @@ -// Copyright 2019 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use super::super::{Index, Storage}; -use super::{bitfield, StoreConfig, StoreEntry, StoreError}; -use alloc::vec; -use alloc::vec::Vec; - -/// Whether a user entry is a replace entry. -pub enum IsReplace { - /// This is a replace entry. - Replace, - - /// This is an insert entry. - Insert, -} - -/// Helpers to parse the store format. 
-/// -/// See the store module-level documentation for information about the format. -pub struct Format { - pub word_size: usize, - pub page_size: usize, - pub num_pages: usize, - pub max_page_erases: usize, - pub num_tags: usize, - - /// Whether an entry is present. - /// - /// - 0 for entries (user entries or internal entries). - /// - 1 for free space until the end of the page. - present_bit: usize, - - /// Whether an entry is deleted. - /// - /// - 0 for deleted entries. - /// - 1 for alive entries. - deleted_bit: usize, - - /// Whether an entry is internal. - /// - /// - 0 for internal entries. - /// - 1 for user entries. - internal_bit: usize, - - /// Whether a user entry is a replace entry. - /// - /// - 0 for replace entries. - /// - 1 for insert entries. - replace_bit: usize, - - /// Whether a user entry has sensitive data. - /// - /// - 0 for sensitive data. - /// - 1 for non-sensitive data. - /// - /// When a user entry with sensitive data is deleted, the data is overwritten with zeroes. This - /// feature is subject to the same guarantees as all other features of the store, in particular - /// deleting a sensitive entry is atomic. See the store module-level documentation for more - /// information. - sensitive_bit: usize, - - /// The data length of a user entry. - length_range: bitfield::BitRange, - - /// The tag of a user entry. - tag_range: bitfield::BitRange, - - /// The page index of a replace entry. - replace_page_range: bitfield::BitRange, - - /// The byte index of a replace entry. - replace_byte_range: bitfield::BitRange, - - /// The index of the page to erase. - /// - /// This is only present for internal entries. - old_page_range: bitfield::BitRange, - - /// The current erase count of the page to erase. - /// - /// This is only present for internal entries. - saved_erase_count_range: bitfield::BitRange, - - /// Whether a page is initialized. - /// - /// - 0 for initialized pages. - /// - 1 for uninitialized pages. 
- initialized_bit: usize, - - /// The erase count of a page. - erase_count_range: bitfield::BitRange, - - /// Whether a page is being compacted. - /// - /// - 0 for pages being compacted. - /// - 1 otherwise. - compacting_bit: usize, - - /// The page index to which a page is being compacted. - new_page_range: bitfield::BitRange, -} - -impl Format { - /// Returns a helper to parse the store format for a given storage and config. - /// - /// # Errors - /// - /// Returns `None` if any of the following conditions does not hold: - /// - The word size must be a power of two. - /// - The page size must be a power of two. - /// - There should be at least 2 pages in the storage. - /// - It should be possible to write a word at least twice. - /// - It should be possible to erase a page at least once. - /// - There should be at least 1 tag. - pub fn new(storage: &S, config: &C) -> Option { - let word_size = storage.word_size(); - let page_size = storage.page_size(); - let num_pages = storage.num_pages(); - let max_word_writes = storage.max_word_writes(); - let max_page_erases = storage.max_page_erases(); - let num_tags = config.num_tags(); - if !(word_size.is_power_of_two() - && page_size.is_power_of_two() - && num_pages > 1 - && max_word_writes >= 2 - && max_page_erases > 0 - && num_tags > 0) - { - return None; - } - // Compute how many bits we need to store the fields. - let page_bits = num_bits(num_pages); - let byte_bits = num_bits(page_size); - let tag_bits = num_bits(num_tags); - let erase_bits = num_bits(max_page_erases + 1); - // Compute the bit position of the fields. 
- let present_bit = 0; - let deleted_bit = present_bit + 1; - let internal_bit = deleted_bit + 1; - let replace_bit = internal_bit + 1; - let sensitive_bit = replace_bit + 1; - let length_range = bitfield::BitRange { - start: sensitive_bit + 1, - length: byte_bits, - }; - let tag_range = bitfield::BitRange { - start: length_range.end(), - length: tag_bits, - }; - let replace_page_range = bitfield::BitRange { - start: tag_range.end(), - length: page_bits, - }; - let replace_byte_range = bitfield::BitRange { - start: replace_page_range.end(), - length: byte_bits, - }; - let old_page_range = bitfield::BitRange { - start: internal_bit + 1, - length: page_bits, - }; - let saved_erase_count_range = bitfield::BitRange { - start: old_page_range.end(), - length: erase_bits, - }; - let initialized_bit = 0; - let erase_count_range = bitfield::BitRange { - start: initialized_bit + 1, - length: erase_bits, - }; - let compacting_bit = erase_count_range.end(); - let new_page_range = bitfield::BitRange { - start: compacting_bit + 1, - length: page_bits, - }; - let format = Format { - word_size, - page_size, - num_pages, - max_page_erases, - num_tags, - present_bit, - deleted_bit, - internal_bit, - replace_bit, - sensitive_bit, - length_range, - tag_range, - replace_page_range, - replace_byte_range, - old_page_range, - saved_erase_count_range, - initialized_bit, - erase_count_range, - compacting_bit, - new_page_range, - }; - // Make sure all the following conditions hold: - // - The page header is one word. - // - The internal entry is one word. - // - The entry header fits in one word (which is equivalent to the entry header size being - // exactly one word for sensitive entries). - if format.page_header_size() != word_size - || format.internal_entry_size() != word_size - || format.header_size(true) != word_size - { - return None; - } - Some(format) - } - - /// Ensures a user entry is valid. 
- pub fn validate_entry(&self, entry: StoreEntry) -> Result<(), StoreError> { - if entry.tag >= self.num_tags { - return Err(StoreError::InvalidTag); - } - if entry.data.len() >= self.page_size { - return Err(StoreError::StoreFull); - } - Ok(()) - } - - /// Returns the entry header length in bytes. - /// - /// This is the smallest number of bytes necessary to store all fields of the entry info up to - /// and including `length`. For sensitive entries, the result is word-aligned. - pub fn header_size(&self, sensitive: bool) -> usize { - let mut size = self.bits_to_bytes(self.length_range.end()); - if sensitive { - // We need to align to the next word boundary so that wiping the user data will not - // count as a write to the header. - size = self.align_word(size); - } - size - } - - /// Returns the entry header length in bytes. - /// - /// This is a convenience function for `header_size` above. - fn header_offset(&self, entry: &[u8]) -> usize { - self.header_size(self.is_sensitive(entry)) - } - - /// Returns the entry info length in bytes. - /// - /// This is the number of bytes necessary to store all fields of the entry info. This also - /// includes the internal padding to protect the `committed` bit from the `deleted` bit and to - /// protect the entry info from the user data for sensitive entries. - fn info_size(&self, is_replace: IsReplace, sensitive: bool) -> usize { - let suffix_bits = 2; // committed + complete - let info_bits = match is_replace { - IsReplace::Replace => self.replace_byte_range.end() + suffix_bits, - IsReplace::Insert => self.tag_range.end() + suffix_bits, - }; - let mut info_size = self.bits_to_bytes(info_bits); - // If the suffix bits would end up in the header, we need to add one byte for them. - let header_size = self.header_size(sensitive); - if info_size <= header_size { - info_size = header_size + 1; - } - // If the entry is sensitive, we need to align to the next word boundary. 
- if sensitive { - info_size = self.align_word(info_size); - } - info_size - } - - /// Returns the length in bytes of an entry. - /// - /// This depends on the length of the user data and whether the entry replaces an old entry or - /// is an insertion. This also includes the internal padding to protect the `committed` bit from - /// the `deleted` bit. - pub fn entry_size(&self, is_replace: IsReplace, sensitive: bool, length: usize) -> usize { - let mut entry_size = length + self.info_size(is_replace, sensitive); - let word_size = self.word_size; - entry_size = self.align_word(entry_size); - // The entry must be at least 2 words such that the `committed` and `deleted` bits are on - // different words. - if entry_size == word_size { - entry_size += word_size; - } - entry_size - } - - /// Returns the length in bytes of an internal entry. - pub fn internal_entry_size(&self) -> usize { - let length = self.bits_to_bytes(self.saved_erase_count_range.end()); - self.align_word(length) - } - - pub fn is_present(&self, header: &[u8]) -> bool { - bitfield::is_zero(self.present_bit, header, bitfield::NO_GAP) - } - - pub fn set_present(&self, header: &mut [u8]) { - bitfield::set_zero(self.present_bit, header, bitfield::NO_GAP) - } - - pub fn is_deleted(&self, header: &[u8]) -> bool { - bitfield::is_zero(self.deleted_bit, header, bitfield::NO_GAP) - } - - /// Returns whether an entry is present and not deleted. 
- pub fn is_alive(&self, header: &[u8]) -> bool { - self.is_present(header) && !self.is_deleted(header) - } - - pub fn set_deleted(&self, header: &mut [u8]) { - bitfield::set_zero(self.deleted_bit, header, bitfield::NO_GAP) - } - - pub fn is_internal(&self, header: &[u8]) -> bool { - bitfield::is_zero(self.internal_bit, header, bitfield::NO_GAP) - } - - pub fn set_internal(&self, header: &mut [u8]) { - bitfield::set_zero(self.internal_bit, header, bitfield::NO_GAP) - } - - pub fn is_replace(&self, header: &[u8]) -> IsReplace { - if bitfield::is_zero(self.replace_bit, header, bitfield::NO_GAP) { - IsReplace::Replace - } else { - IsReplace::Insert - } - } - - fn set_replace(&self, header: &mut [u8]) { - bitfield::set_zero(self.replace_bit, header, bitfield::NO_GAP) - } - - pub fn is_sensitive(&self, header: &[u8]) -> bool { - bitfield::is_zero(self.sensitive_bit, header, bitfield::NO_GAP) - } - - pub fn set_sensitive(&self, header: &mut [u8]) { - bitfield::set_zero(self.sensitive_bit, header, bitfield::NO_GAP) - } - - pub fn get_length(&self, header: &[u8]) -> usize { - bitfield::get_range(self.length_range, header, bitfield::NO_GAP) - } - - fn set_length(&self, header: &mut [u8], length: usize) { - bitfield::set_range(self.length_range, header, bitfield::NO_GAP, length) - } - - pub fn get_data<'a>(&self, entry: &'a [u8]) -> &'a [u8] { - &entry[self.header_offset(entry)..][..self.get_length(entry)] - } - - /// Returns the span of user data in an entry. - /// - /// The complement of this gap in the entry is exactly the entry info. The header is before the - /// gap and the footer is after the gap. 
- pub fn entry_gap(&self, entry: &[u8]) -> bitfield::ByteGap { - let start = self.header_offset(entry); - let mut length = self.get_length(entry); - if self.is_sensitive(entry) { - length = self.align_word(length); - } - bitfield::ByteGap { start, length } - } - - pub fn get_tag(&self, entry: &[u8]) -> usize { - bitfield::get_range(self.tag_range, entry, self.entry_gap(entry)) - } - - fn set_tag(&self, entry: &mut [u8], tag: usize) { - bitfield::set_range(self.tag_range, entry, self.entry_gap(entry), tag) - } - - pub fn get_replace_index(&self, entry: &[u8]) -> Index { - let gap = self.entry_gap(entry); - let page = bitfield::get_range(self.replace_page_range, entry, gap); - let byte = bitfield::get_range(self.replace_byte_range, entry, gap); - Index { page, byte } - } - - fn set_replace_page(&self, entry: &mut [u8], page: usize) { - bitfield::set_range(self.replace_page_range, entry, self.entry_gap(entry), page) - } - - fn set_replace_byte(&self, entry: &mut [u8], byte: usize) { - bitfield::set_range(self.replace_byte_range, entry, self.entry_gap(entry), byte) - } - - /// Returns the bit position of the `committed` bit. - /// - /// This cannot be precomputed like other fields since it depends on the length of the entry. - fn committed_bit(&self, entry: &[u8]) -> usize { - 8 * entry.len() - 2 - } - - /// Returns the bit position of the `complete` bit. - /// - /// This cannot be precomputed like other fields since it depends on the length of the entry. 
- fn complete_bit(&self, entry: &[u8]) -> usize { - 8 * entry.len() - 1 - } - - pub fn is_committed(&self, entry: &[u8]) -> bool { - bitfield::is_zero(self.committed_bit(entry), entry, bitfield::NO_GAP) - } - - pub fn set_committed(&self, entry: &mut [u8]) { - bitfield::set_zero(self.committed_bit(entry), entry, bitfield::NO_GAP) - } - - pub fn is_complete(&self, entry: &[u8]) -> bool { - bitfield::is_zero(self.complete_bit(entry), entry, bitfield::NO_GAP) - } - - fn set_complete(&self, entry: &mut [u8]) { - bitfield::set_zero(self.complete_bit(entry), entry, bitfield::NO_GAP) - } - - pub fn get_old_page(&self, header: &[u8]) -> usize { - bitfield::get_range(self.old_page_range, header, bitfield::NO_GAP) - } - - pub fn set_old_page(&self, header: &mut [u8], old_page: usize) { - bitfield::set_range(self.old_page_range, header, bitfield::NO_GAP, old_page) - } - - pub fn get_saved_erase_count(&self, header: &[u8]) -> usize { - bitfield::get_range(self.saved_erase_count_range, header, bitfield::NO_GAP) - } - - pub fn set_saved_erase_count(&self, header: &mut [u8], erase_count: usize) { - bitfield::set_range( - self.saved_erase_count_range, - header, - bitfield::NO_GAP, - erase_count, - ) - } - - /// Builds an entry for replace or insert operations. - pub fn build_entry(&self, replace: Option, user_entry: StoreEntry) -> Vec { - let StoreEntry { - tag, - data, - sensitive, - } = user_entry; - let is_replace = match replace { - None => IsReplace::Insert, - Some(_) => IsReplace::Replace, - }; - let entry_len = self.entry_size(is_replace, sensitive, data.len()); - let mut entry = Vec::with_capacity(entry_len); - // Build the header. - entry.resize(self.header_size(sensitive), 0xff); - self.set_present(&mut entry[..]); - if sensitive { - self.set_sensitive(&mut entry[..]); - } - self.set_length(&mut entry[..], data.len()); - // Add the data. - entry.extend_from_slice(data); - // Build the footer. 
- entry.resize(entry_len, 0xff); - self.set_tag(&mut entry[..], tag); - self.set_complete(&mut entry[..]); - match replace { - None => self.set_committed(&mut entry[..]), - Some(Index { page, byte }) => { - self.set_replace(&mut entry[..]); - self.set_replace_page(&mut entry[..], page); - self.set_replace_byte(&mut entry[..], byte); - } - } - entry - } - - /// Builds an entry for replace or insert operations. - pub fn build_erase_entry(&self, old_page: usize, saved_erase_count: usize) -> Vec { - let mut entry = vec![0xff; self.internal_entry_size()]; - self.set_present(&mut entry[..]); - self.set_internal(&mut entry[..]); - self.set_old_page(&mut entry[..], old_page); - self.set_saved_erase_count(&mut entry[..], saved_erase_count); - entry - } - - /// Returns the length in bytes of a page header entry. - /// - /// This includes the word padding. - pub fn page_header_size(&self) -> usize { - self.align_word(self.bits_to_bytes(self.erase_count_range.end())) - } - - pub fn is_initialized(&self, header: &[u8]) -> bool { - bitfield::is_zero(self.initialized_bit, header, bitfield::NO_GAP) - } - - pub fn set_initialized(&self, header: &mut [u8]) { - bitfield::set_zero(self.initialized_bit, header, bitfield::NO_GAP) - } - - pub fn get_erase_count(&self, header: &[u8]) -> usize { - bitfield::get_range(self.erase_count_range, header, bitfield::NO_GAP) - } - - pub fn set_erase_count(&self, header: &mut [u8], count: usize) { - bitfield::set_range(self.erase_count_range, header, bitfield::NO_GAP, count) - } - - pub fn is_compacting(&self, header: &[u8]) -> bool { - bitfield::is_zero(self.compacting_bit, header, bitfield::NO_GAP) - } - - pub fn set_compacting(&self, header: &mut [u8]) { - bitfield::set_zero(self.compacting_bit, header, bitfield::NO_GAP) - } - - pub fn get_new_page(&self, header: &[u8]) -> usize { - bitfield::get_range(self.new_page_range, header, bitfield::NO_GAP) - } - - pub fn set_new_page(&self, header: &mut [u8], new_page: usize) { - 
bitfield::set_range(self.new_page_range, header, bitfield::NO_GAP, new_page) - } - - /// Returns the smallest word boundary greater or equal to a value. - fn align_word(&self, value: usize) -> usize { - let word_size = self.word_size; - (value + word_size - 1) / word_size * word_size - } - - /// Returns the minimum number of bytes to represent a given number of bits. - fn bits_to_bytes(&self, bits: usize) -> usize { - (bits + 7) / 8 - } -} - -/// Returns the number of bits necessary to write numbers smaller than `x`. -fn num_bits(x: usize) -> usize { - x.next_power_of_two().trailing_zeros() as usize -} - -#[test] -fn num_bits_ok() { - assert_eq!(num_bits(0), 0); - assert_eq!(num_bits(1), 0); - assert_eq!(num_bits(2), 1); - assert_eq!(num_bits(3), 2); - assert_eq!(num_bits(4), 2); - assert_eq!(num_bits(5), 3); - assert_eq!(num_bits(8), 3); - assert_eq!(num_bits(9), 4); - assert_eq!(num_bits(16), 4); -} diff --git a/src/embedded_flash/store/mod.rs b/src/embedded_flash/store/mod.rs deleted file mode 100644 index 170fd87..0000000 --- a/src/embedded_flash/store/mod.rs +++ /dev/null @@ -1,1182 +0,0 @@ -// Copyright 2019 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Provides a multi-purpose data-structure. -//! -//! # Description -//! -//! The `Store` data-structure permits to iterate, find, insert, delete, and replace entries in a -//! multi-set. The mutable operations (insert, delete, and replace) are atomic, in the sense that if -//! 
power is lost during the operation, then the operation might either succeed or fail but the -//! store remains in a coherent state. The data-structure is flash-efficient, in the sense that it -//! tries to minimize the number of times a page is erased. -//! -//! An _entry_ is made of a _tag_, which is a number, and a _data_, which is a slice of bytes. The -//! tag is stored efficiently by using unassigned bits of the entry header and footer. For example, -//! it can be used to decide how to deserialize the data. It is not necessary to use tags since a -//! prefix of the data could be used to decide how to deserialize the rest. -//! -//! Entries can also be associated to a set of _keys_. The find operation permits to retrieve all -//! entries associated to a given key. The same key can be associated to multiple entries and the -//! same entry can be associated to multiple keys. -//! -//! # Storage -//! -//! The data-structure is parametric over its storage which must implement the `Storage` trait. -//! There are currently 2 implementations of this trait: -//! - `SyscallStorage` using the `embedded_flash` syscall API for production builds. -//! - `BufferStorage` using a heap-allocated buffer for testing. -//! -//! # Configuration -//! -//! The data-structure can be configured with the `StoreConfig` trait. By implementing this trait, -//! the number of possible tags and the association between keys and entries are defined. -//! -//! # Properties -//! -//! The data-structure provides the following properties: -//! - When an operation returns success, then the represented multi-set is updated accordingly. For -//! example, an inserted entry can be found without alteration until replaced or deleted. -//! - When an operation returns an error, the resulting multi-set state is described in the error -//! documentation. -//! - When power is lost before an operation returns, the operation will either succeed or be -//! rolled-back on the next initialization. 
So the multi-set would be either left unchanged or -//! updated accordingly. -//! -//! Those properties rely on the following assumptions: -//! - Writing a word to flash is atomic. When power is lost, the word is either fully written or not -//! written at all. -//! - Reading a word from flash is deterministic. When power is lost while writing or erasing a word -//! (erasing a page containing that word), reading that word repeatedly returns the same result -//! (until it is written or its page is erased). -//! - To decide whether a page has been erased, it is enough to test if all its bits are equal to 1. -//! -//! The properties may still hold outside those assumptions but with weaker probabilities as the -//! usage diverges from the assumptions. -//! -//! # Implementation -//! -//! The store is a page-aligned sequence of bits. It matches the following grammar: -//! -//! ```text -//! Store := Page* -//! Page := PageHeader (Entry | InternalEntry)* Padding(page) -//! PageHeader := // must fit in one word -//! initialized:1 -//! erase_count:erase_bits -//! compacting:1 -//! new_page:page_bits -//! Padding(word) -//! Entry := Header Data Footer -//! // Let X be the byte (word-aligned for sensitive queries) following `length` in `Info`. -//! Header := Info[..X] // must fit in one word -//! Footer := Info[X..] // must fit in one word -//! Info := -//! present=0 -//! deleted:1 -//! internal=1 -//! replace:1 -//! sensitive:1 -//! length:byte_bits -//! tag:tag_bits -//! [ // present if `replace` is 0 -//! replace_page:page_bits -//! replace_byte:byte_bits -//! ] -//! [Padding(bit)] // until `complete` is the last bit of a different word than `present` -//! committed:1 -//! complete=0 -//! InternalEntry := -//! present=0 -//! deleted:1 -//! internal=0 -//! old_page:page_bits -//! saved_erase_count:erase_bits -//! Padding(word) -//! Padding(X) := 1* until X-aligned -//! ``` -//! -//! For bit flags, a value of 0 means true and a value of 1 means false. 
So when erased, bits are -//! false. They can be set to true by writing 0. -//! -//! The `Entry` rule is for user entries and the `InternalEntry` rule is for internal entries of the -//! store. Currently, there is only one kind of internal entry: an entry to erase the page being -//! compacted. -//! -//! The `Header` and `Footer` rules are computed from the `Info` rule. An entry could simply be the -//! concatenation of internal metadata and the user data. However, to optimize the size in flash, we -//! splice the user data in the middle of the metadata. The reason is that we can only write twice -//! the same word and for replace entries we need to write the deleted bit and the committed bit -//! independently. Also, this is important for the complete bit to be the last written bit (since -//! slices are written to flash from low to high addresses). Here is the representation of a -//! specific replace entry for a specific configuration: -//! -//! ```text -//! page_bits=6 -//! byte_bits=9 -//! tag_bits=5 -//! -//! byte.bit name -//! 0.0 present -//! 0.1 deleted -//! 0.2 internal -//! 0.3 replace -//! 0.4 sensitive -//! 0.5 length (9 bits) -//! 1.6 tag (least significant 2 bits out of 5) -//! (the header ends at the first byte boundary after `length`) -//! 2.0 (2 bytes in this example) -//! (the footer starts immediately after the user data) -//! 4.0 tag (most significant 3 bits out of 5) -//! 4.3 replace_page (6 bits) -//! 5.1 replace_byte (9 bits) -//! 6.2 padding (make sure the 2 properties below hold) -//! 7.6 committed -//! 7.7 complete (on a different word than `present`) -//! 8.0 (word-aligned) -//! ``` -//! -//! The store should always contain at least one blank page, so that it is always possible to -//! compact. - -// TODO(cretin): We don't need inner padding for insert entries. The store format can be: -// InsertEntry | ReplaceEntry | InternalEntry (maybe rename to EraseEntry) -// InsertEntry padding is until `complete` is the last bit of a word. 
-// ReplaceEntry padding is until `complete` is the last bit of a different word than `present`. -// TODO(cretin): Add checksum (may play the same role as the completed bit) and recovery strategy? -// TODO(cretin): Add corruption (deterministic but undetermined reads) to fuzzing. -// TODO(cretin): Add more complex transactions? (this does not seem necessary yet) -// TODO(cretin): Add possibility to shred an entry (force compact page after delete)? - -mod bitfield; -mod format; - -use self::format::{Format, IsReplace}; -use super::{Index, Storage}; -#[cfg(any(test, feature = "ram_storage"))] -use crate::embedded_flash::BufferStorage; -#[cfg(any(test, feature = "ram_storage"))] -use alloc::boxed::Box; -use alloc::collections::BTreeMap; -use alloc::vec; -use alloc::vec::Vec; - -/// Configures a store. -pub trait StoreConfig { - /// How entries are keyed. - /// - /// To disable keys, this may be defined to `()` or even better a custom empty enum. - type Key: Ord; - - /// Number of entry tags. - /// - /// All tags must be smaller than this value. - /// - /// To disable tags, this function should return `1`. The only valid tag would then be `0`. - fn num_tags(&self) -> usize; - - /// Specifies the set of keys of an entry. - /// - /// If keys are not used, this function can immediately return. Otherwise, it should call - /// `associate_key` for each key that should be associated to `entry`. - fn keys(&self, entry: StoreEntry, associate_key: impl FnMut(Self::Key)); -} - -/// Errors returned by store operations. -#[derive(Debug, PartialEq, Eq)] -pub enum StoreError { - /// The operation could not proceed because the store is full. - StoreFull, - - /// The operation could not proceed because the provided tag is invalid. - InvalidTag, - - /// The operation could not proceed because the preconditions do not hold. - InvalidPrecondition, -} - -/// The position of an entry in the store. 
-#[cfg_attr(feature = "std", derive(Debug))] -#[derive(Copy, Clone)] -pub struct StoreIndex { - /// The index of this entry in the storage. - index: Index, - - /// The generation at which this index is valid. - /// - /// See the documentation of the field with the same name in the `Store` struct. - generation: usize, -} - -/// A user entry. -#[cfg_attr(feature = "std", derive(Debug, PartialEq, Eq))] -#[derive(Copy, Clone)] -pub struct StoreEntry<'a> { - /// The tag of the entry. - /// - /// Must be smaller than the configured number of tags. - pub tag: usize, - - /// The data of the entry. - pub data: &'a [u8], - - /// Whether the data is sensitive. - /// - /// Sensitive data is overwritten with zeroes when the entry is deleted. - pub sensitive: bool, -} - -/// Implements a configurable multi-set on top of any storage. -pub struct Store { - storage: S, - config: C, - format: Format, - - /// The index of the blank page reserved for compaction. - blank_page: usize, - - /// Counts the number of compactions since the store creation. - /// - /// A `StoreIndex` is valid only if they originate from the same generation. This is checked by - /// operations that take a `StoreIndex` as argument. - generation: usize, -} - -impl Store { - /// Creates a new store. - /// - /// Initializes the storage if it is fresh (filled with `0xff`). Rolls-back or completes an - /// operation if the store was powered off in the middle of that operation. In other words, - /// operations are atomic. - /// - /// # Errors - /// - /// Returns `None` if `storage` and/or `config` are not supported. - pub fn new(storage: S, config: C) -> Option> { - let format = Format::new(&storage, &config)?; - let blank_page = format.num_pages; - let mut store = Store { - storage, - config, - format, - blank_page, - generation: 0, - }; - // Finish any ongoing page compaction. - store.recover_compact_page(); - // Finish or roll-back any other entry-level operations. 
- store.recover_entry_operations(); - // Initialize uninitialized pages. - store.initialize_storage(); - Some(store) - } - - /// Iterates over all entries in the store. - pub fn iter(&self) -> impl Iterator { - Iter::new(self).filter_map(move |(index, entry)| { - if self.format.is_alive(entry) { - Some(( - StoreIndex { - index, - generation: self.generation, - }, - StoreEntry { - tag: self.format.get_tag(entry), - data: self.format.get_data(entry), - sensitive: self.format.is_sensitive(entry), - }, - )) - } else { - None - } - }) - } - - /// Iterates over all entries matching a key in the store. - pub fn find_all<'a>( - &'a self, - key: &'a C::Key, - ) -> impl Iterator + 'a { - self.iter().filter(move |&(_, entry)| { - let mut has_match = false; - self.config.keys(entry, |k| has_match |= key == &k); - has_match - }) - } - - /// Returns the first entry matching a key in the store. - /// - /// This is a convenience function for when at most one entry should match the key. - /// - /// # Panics - /// - /// In debug mode, panics if more than one entry matches the key. - pub fn find_one<'a>(&'a self, key: &'a C::Key) -> Option<(StoreIndex, StoreEntry<'a>)> { - let mut iter = self.find_all(key); - let first = iter.next()?; - let has_only_one_element = iter.next().is_none(); - debug_assert!(has_only_one_element); - Some(first) - } - - /// Deletes an entry from the store. - pub fn delete(&mut self, index: StoreIndex) -> Result<(), StoreError> { - if self.generation != index.generation { - return Err(StoreError::InvalidPrecondition); - } - self.delete_index(index.index); - Ok(()) - } - - /// Replaces an entry with another with the same tag in the store. - /// - /// This operation (like others) is atomic. If it returns successfully, then the old entry is - /// deleted and the new is inserted. If it fails, the old entry is not deleted and the new entry - /// is not inserted. 
If power is lost during the operation, during next startup, the operation - /// is either rolled-back (like in case of failure) or completed (like in case of success). - /// - /// # Errors - /// - /// Returns: - /// - `StoreFull` if the new entry does not fit in the store. - /// - `InvalidTag` if the tag of the new entry is not smaller than the configured number of - /// tags. - pub fn replace(&mut self, old: StoreIndex, new: StoreEntry) -> Result<(), StoreError> { - if self.generation != old.generation { - return Err(StoreError::InvalidPrecondition); - } - self.format.validate_entry(new)?; - let mut old_index = old.index; - // Find a slot. - let entry_len = self.replace_len(new.sensitive, new.data.len()); - let index = self.find_slot_for_write(entry_len, Some(&mut old_index))?; - // Build a new entry replacing the old one. - let entry = self.format.build_entry(Some(old_index), new); - debug_assert_eq!(entry.len(), entry_len); - // Write the new entry. - self.write_entry(index, &entry); - // Commit the new entry, which both deletes the old entry and commits the new one. - self.commit_index(index); - Ok(()) - } - - /// Inserts an entry in the store. - /// - /// # Errors - /// - /// Returns: - /// - `StoreFull` if the new entry does not fit in the store. - /// - `InvalidTag` if the tag of the new entry is not smaller than the configured number of - /// tags. - pub fn insert(&mut self, entry: StoreEntry) -> Result<(), StoreError> { - self.format.validate_entry(entry)?; - // Build entry. - let entry = self.format.build_entry(None, entry); - // Find a slot. - let index = self.find_slot_for_write(entry.len(), None)?; - // Write entry. - self.write_entry(index, &entry); - Ok(()) - } - - /// Returns the byte cost of a replace operation. - /// - /// Computes the length in bytes that would be used in the storage if a replace operation is - /// executed provided the data of the new entry has `length` bytes and whether this data is - /// sensitive. 
- pub fn replace_len(&self, sensitive: bool, length: usize) -> usize { - self.format - .entry_size(IsReplace::Replace, sensitive, length) - } - - /// Returns the byte cost of an insert operation. - /// - /// Computes the length in bytes that would be used in the storage if an insert operation is - /// executed provided the data of the inserted entry has `length` bytes and whether this data is - /// sensitive. - #[allow(dead_code)] - pub fn insert_len(&self, sensitive: bool, length: usize) -> usize { - self.format.entry_size(IsReplace::Insert, sensitive, length) - } - - /// Returns the erase count of all pages. - /// - /// The value at index `page` of the result is the number of times page `page` was erased. This - /// number is an underestimate in case power was lost when this page was erased. - #[allow(dead_code)] - pub fn compaction_info(&self) -> Vec { - let mut info = Vec::with_capacity(self.format.num_pages); - for page in 0..self.format.num_pages { - let (page_header, _) = self.read_page_header(page); - let erase_count = self.format.get_erase_count(page_header); - info.push(erase_count); - } - info - } - - /// Completes any ongoing page compaction. - fn recover_compact_page(&mut self) { - for page in 0..self.format.num_pages { - let (page_header, _) = self.read_page_header(page); - if self.format.is_compacting(page_header) { - let new_page = self.format.get_new_page(page_header); - self.compact_page(page, new_page); - } - } - } - - /// Rolls-back or completes any ongoing operation. - fn recover_entry_operations(&mut self) { - for page in 0..self.format.num_pages { - let (page_header, mut index) = self.read_page_header(page); - if !self.format.is_initialized(page_header) { - // Skip uninitialized pages. - continue; - } - while index.byte < self.format.page_size { - let entry_index = index; - let entry = self.read_entry(index); - index.byte += entry.len(); - if !self.format.is_present(entry) { - // Reached the end of the page. 
- } else if self.format.is_deleted(entry) { - // Wipe sensitive data if needed. - self.wipe_sensitive_data(entry_index); - } else if self.format.is_internal(entry) { - // Finish page compaction. - self.erase_page(entry_index); - } else if !self.format.is_complete(entry) { - // Roll-back incomplete operations. - self.delete_index(entry_index); - } else if !self.format.is_committed(entry) { - // Finish complete but uncommitted operations. - self.commit_index(entry_index) - } - } - } - } - - /// Initializes uninitialized pages. - fn initialize_storage(&mut self) { - for page in 0..self.format.num_pages { - let (header, index) = self.read_page_header(page); - if self.format.is_initialized(header) { - // Update blank page. - let first_entry = self.read_entry(index); - if !self.format.is_present(first_entry) { - self.blank_page = page; - } - } else { - // We set the erase count to zero the very first time we initialize a page. - self.initialize_page(page, 0); - } - } - debug_assert!(self.blank_page != self.format.num_pages); - } - - /// Marks an entry as deleted. - /// - /// The provided index must point to the beginning of an entry. - fn delete_index(&mut self, index: Index) { - self.update_word(index, |format, word| format.set_deleted(word)); - self.wipe_sensitive_data(index); - } - - /// Wipes the data of a sensitive entry. - /// - /// If the entry at the provided index is sensitive, overwrites the data with zeroes. Otherwise, - /// does nothing. - fn wipe_sensitive_data(&mut self, mut index: Index) { - let entry = self.read_entry(index); - debug_assert!(self.format.is_present(entry)); - debug_assert!(self.format.is_deleted(entry)); - if self.format.is_internal(entry) || !self.format.is_sensitive(entry) { - // No need to wipe the data. - return; - } - let gap = self.format.entry_gap(entry); - let data = gap.slice(entry); - if data.iter().all(|&byte| byte == 0x00) { - // The data is already wiped. 
- return; - } - index.byte += gap.start; - self.storage - .write_slice(index, &vec![0; gap.length]) - .unwrap(); - } - - /// Finds a page with enough free space. - /// - /// Returns an index to the free space of a page which can hold an entry of `length` bytes. If - /// necessary, pages may be compacted to free space. In that case, if provided, the `old_index` - /// is updated according to compaction. - fn find_slot_for_write( - &mut self, - length: usize, - mut old_index: Option<&mut Index>, - ) -> Result { - loop { - if let Some(index) = self.choose_slot_for_write(length) { - return Ok(index); - } - match self.choose_page_for_compact() { - None => return Err(StoreError::StoreFull), - Some(page) => { - let blank_page = self.blank_page; - // Compact the chosen page and update the old index to point to the entry in the - // new page if it happened to be in the old page. This is essentially a way to - // avoid index invalidation due to compaction. - let map = self.compact_page(page, blank_page); - if let Some(old_index) = &mut old_index { - map_index(page, blank_page, &map, old_index); - } - } - } - } - } - - /// Returns whether a page has enough free space. - /// - /// Returns an index to the free space of a page with smallest free space that may hold `length` - /// bytes. - fn choose_slot_for_write(&self, length: usize) -> Option { - Iter::new(self) - .filter(|(index, entry)| { - index.page != self.blank_page - && !self.format.is_present(entry) - && length <= entry.len() - }) - .min_by_key(|(_, entry)| entry.len()) - .map(|(index, _)| index) - } - - /// Returns the page that should be compacted. - fn choose_page_for_compact(&self) -> Option { - // TODO(cretin): This could be optimized by using some cost function depending on: - // - the erase count - // - the length of the free space - // - the length of the alive entries - // We want to minimize this cost. 
We could also take into account the length of the entry we - // want to write to bound the number of compaction before failing with StoreFull. - // - // We should also make sure that all pages (including if they have no deleted entries and no - // free space) are eventually compacted (ideally to a heavily used page) to benefit from the - // low erase count of those pages. - (0..self.format.num_pages) - .map(|page| (page, self.page_info(page))) - .filter(|&(page, ref info)| { - page != self.blank_page - && info.erase_count < self.format.max_page_erases - && info.deleted_length > self.format.internal_entry_size() - }) - .min_by(|(_, lhs_info), (_, rhs_info)| lhs_info.compare_for_compaction(rhs_info)) - .map(|(page, _)| page) - } - - fn page_info(&self, page: usize) -> PageInfo { - let (page_header, mut index) = self.read_page_header(page); - let mut info = PageInfo { - erase_count: self.format.get_erase_count(page_header), - deleted_length: 0, - free_length: 0, - }; - while index.byte < self.format.page_size { - let entry = self.read_entry(index); - index.byte += entry.len(); - if !self.format.is_present(entry) { - debug_assert_eq!(info.free_length, 0); - info.free_length = entry.len(); - } else if self.format.is_deleted(entry) { - info.deleted_length += entry.len(); - } - } - debug_assert_eq!(index.page, page); - info - } - - fn read_slice(&self, index: Index, length: usize) -> &[u8] { - self.storage.read_slice(index, length).unwrap() - } - - /// Reads an entry (with header and footer) at a given index. - /// - /// If no entry is present, returns the free space up to the end of the page. 
- fn read_entry(&self, index: Index) -> &[u8] { - let first_byte = self.read_slice(index, 1); - let max_length = self.format.page_size - index.byte; - let mut length = if !self.format.is_present(first_byte) { - max_length - } else if self.format.is_internal(first_byte) { - self.format.internal_entry_size() - } else { - // We don't know if the entry is sensitive or not, but it doesn't matter here. We just - // need to read the replace, sensitive, and length fields. - let header = self.read_slice(index, self.format.header_size(false)); - let replace = self.format.is_replace(header); - let sensitive = self.format.is_sensitive(header); - let length = self.format.get_length(header); - self.format.entry_size(replace, sensitive, length) - }; - // Truncate the length to fit the page. This can only happen in case of corruption or - // partial writes. - length = core::cmp::min(length, max_length); - self.read_slice(index, length) - } - - /// Reads a page header. - /// - /// Also returns the index after the page header. - fn read_page_header(&self, page: usize) -> (&[u8], Index) { - let mut index = Index { page, byte: 0 }; - let page_header = self.read_slice(index, self.format.page_header_size()); - index.byte += page_header.len(); - (page_header, index) - } - - /// Updates a word at a given index. - /// - /// The `update` function is called with the word at `index`. The input value is the current - /// value of the word. The output value is the value that will be written. It should only change - /// bits from 1 to 0. - fn update_word(&mut self, index: Index, update: impl FnOnce(&Format, &mut [u8])) { - let word_size = self.format.word_size; - let mut word = self.read_slice(index, word_size).to_vec(); - update(&self.format, &mut word); - self.storage.write_slice(index, &word).unwrap(); - } - - fn write_entry(&mut self, index: Index, entry: &[u8]) { - self.storage.write_slice(index, entry).unwrap(); - } - - /// Initializes a page by writing the page header. 
- /// - /// If the page is not erased, it is first erased. - fn initialize_page(&mut self, page: usize, erase_count: usize) { - let index = Index { page, byte: 0 }; - let page = self.read_slice(index, self.format.page_size); - if !page.iter().all(|&byte| byte == 0xff) { - self.storage.erase_page(index.page).unwrap(); - } - self.update_word(index, |format, header| { - format.set_initialized(header); - format.set_erase_count(header, erase_count); - }); - self.blank_page = index.page; - } - - /// Commits a replace entry. - /// - /// Deletes the old entry and commits the new entry. - fn commit_index(&mut self, mut index: Index) { - let entry = self.read_entry(index); - index.byte += entry.len(); - let word_size = self.format.word_size; - debug_assert!(entry.len() >= 2 * word_size); - match self.format.is_replace(entry) { - IsReplace::Replace => { - let delete_index = self.format.get_replace_index(entry); - self.delete_index(delete_index); - } - IsReplace::Insert => debug_assert!(false), - }; - index.byte -= word_size; - self.update_word(index, |format, word| format.set_committed(word)); - } - - /// Compacts a page to an other. - /// - /// Returns the mapping from the alive entries in the old page to their index in the new page. - fn compact_page(&mut self, old_page: usize, new_page: usize) -> BTreeMap { - // Write the old page as being compacted to the new page. - let mut erase_count = 0; - self.update_word( - Index { - page: old_page, - byte: 0, - }, - |format, header| { - erase_count = format.get_erase_count(header); - format.set_compacting(header); - format.set_new_page(header, new_page); - }, - ); - // Copy alive entries from the old page to the new page. 
- let page_header_size = self.format.page_header_size(); - let mut old_index = Index { - page: old_page, - byte: page_header_size, - }; - let mut new_index = Index { - page: new_page, - byte: page_header_size, - }; - let mut map = BTreeMap::new(); - while old_index.byte < self.format.page_size { - let old_entry = self.read_entry(old_index); - let old_entry_index = old_index.byte; - old_index.byte += old_entry.len(); - if !self.format.is_alive(old_entry) { - continue; - } - let previous_mapping = map.insert(old_entry_index, new_index.byte); - debug_assert!(previous_mapping.is_none()); - // We need to copy the old entry because it is in the storage and we are going to write - // to the storage. Rust cannot tell that both entries don't overlap. - let old_entry = old_entry.to_vec(); - self.write_entry(new_index, &old_entry); - new_index.byte += old_entry.len(); - } - // Save the old page index and erase count to the new page. - let erase_index = new_index; - let erase_entry = self.format.build_erase_entry(old_page, erase_count); - self.write_entry(new_index, &erase_entry); - // Erase the page. - self.erase_page(erase_index); - // Increase generation. - self.generation += 1; - map - } - - /// Commits an internal entry. - /// - /// The only kind of internal entry is to erase a page, which first erases the page, then - /// initializes it with the saved erase count, and finally deletes the internal entry. - fn erase_page(&mut self, erase_index: Index) { - let erase_entry = self.read_entry(erase_index); - debug_assert!(self.format.is_present(erase_entry)); - debug_assert!(!self.format.is_deleted(erase_entry)); - debug_assert!(self.format.is_internal(erase_entry)); - let old_page = self.format.get_old_page(erase_entry); - let erase_count = self.format.get_saved_erase_count(erase_entry) + 1; - // Erase the page. - self.storage.erase_page(old_page).unwrap(); - // Initialize the page. - self.initialize_page(old_page, erase_count); - // Delete the internal entry. 
- self.delete_index(erase_index); - } -} - -// Those functions are not meant for production. -#[cfg(any(test, feature = "ram_storage"))] -impl Store { - /// Takes a snapshot of the storage after a given amount of word operations. - pub fn arm_snapshot(&mut self, delay: usize) { - self.storage.arm_snapshot(delay); - } - - /// Unarms and returns the snapshot or the delay remaining. - pub fn get_snapshot(&mut self) -> Result, usize> { - self.storage.get_snapshot() - } - - /// Takes a snapshot of the storage. - pub fn take_snapshot(&self) -> Box<[u8]> { - self.storage.take_snapshot() - } - - /// Returns the storage. - pub fn get_storage(self) -> Box<[u8]> { - self.storage.get_storage() - } - - /// Erases and initializes a page with a given erase count. - pub fn set_erase_count(&mut self, page: usize, erase_count: usize) { - self.initialize_page(page, erase_count); - } - - /// Returns whether all deleted sensitive entries have been wiped. - pub fn deleted_entries_are_wiped(&self) -> bool { - for (_, entry) in Iter::new(self) { - if !self.format.is_present(entry) - || !self.format.is_deleted(entry) - || self.format.is_internal(entry) - || !self.format.is_sensitive(entry) - { - continue; - } - let gap = self.format.entry_gap(entry); - let data = gap.slice(entry); - if !data.iter().all(|&byte| byte == 0x00) { - return false; - } - } - true - } -} - -/// Maps an index from an old page to a new page if needed. -fn map_index(old_page: usize, new_page: usize, map: &BTreeMap, index: &mut Index) { - if index.page == old_page { - index.page = new_page; - index.byte = *map.get(&index.byte).unwrap(); - } -} - -/// Page information for compaction. -struct PageInfo { - /// How many times the page was erased. - erase_count: usize, - - /// Cumulative length of deleted entries (including header and footer). - deleted_length: usize, - - /// Length of the free space. - free_length: usize, -} - -impl PageInfo { - /// Returns whether a page should be compacted before another. 
- fn compare_for_compaction(&self, rhs: &PageInfo) -> core::cmp::Ordering { - self.erase_count - .cmp(&rhs.erase_count) - .then(rhs.deleted_length.cmp(&self.deleted_length)) - .then(self.free_length.cmp(&rhs.free_length)) - } -} - -/// Iterates over all entries (including free space) of a store. -struct Iter<'a, S: Storage, C: StoreConfig> { - store: &'a Store, - index: Index, -} - -impl<'a, S: Storage, C: StoreConfig> Iter<'a, S, C> { - fn new(store: &'a Store) -> Iter<'a, S, C> { - let index = Index { - page: 0, - byte: store.format.page_header_size(), - }; - Iter { store, index } - } -} - -impl<'a, S: Storage, C: StoreConfig> Iterator for Iter<'a, S, C> { - type Item = (Index, &'a [u8]); - - fn next(&mut self) -> Option<(Index, &'a [u8])> { - if self.index.byte == self.store.format.page_size { - self.index.page += 1; - self.index.byte = self.store.format.page_header_size(); - } - if self.index.page == self.store.format.num_pages { - return None; - } - let index = self.index; - let entry = self.store.read_entry(self.index); - self.index.byte += entry.len(); - Some((index, entry)) - } -} - -#[cfg(test)] -mod tests { - use super::super::{BufferOptions, BufferStorage}; - use super::*; - - struct Config; - - const WORD_SIZE: usize = 4; - const PAGE_SIZE: usize = 8 * WORD_SIZE; - const NUM_PAGES: usize = 3; - - impl StoreConfig for Config { - type Key = u8; - - fn num_tags(&self) -> usize { - 1 - } - - fn keys(&self, entry: StoreEntry, mut add: impl FnMut(u8)) { - assert_eq!(entry.tag, 0); - if !entry.data.is_empty() { - add(entry.data[0]); - } - } - } - - fn new_buffer(storage: Box<[u8]>) -> BufferStorage { - let options = BufferOptions { - word_size: WORD_SIZE, - page_size: PAGE_SIZE, - max_word_writes: 2, - max_page_erases: 2, - strict_write: true, - }; - BufferStorage::new(storage, options) - } - - fn new_store() -> Store { - let storage = vec![0xff; NUM_PAGES * PAGE_SIZE].into_boxed_slice(); - Store::new(new_buffer(storage), Config).unwrap() - } - - #[test] - fn 
insert_ok() { - let mut store = new_store(); - assert_eq!(store.iter().count(), 0); - let tag = 0; - let key = 1; - let data = &[key, 2]; - let entry = StoreEntry { - tag, - data, - sensitive: false, - }; - store.insert(entry).unwrap(); - assert_eq!(store.iter().count(), 1); - assert_eq!(store.find_one(&key).unwrap().1, entry); - } - - #[test] - fn insert_sensitive_ok() { - let mut store = new_store(); - let tag = 0; - let key = 1; - let data = &[key, 4]; - let entry = StoreEntry { - tag, - data, - sensitive: true, - }; - store.insert(entry).unwrap(); - assert_eq!(store.iter().count(), 1); - assert_eq!(store.find_one(&key).unwrap().1, entry); - } - - #[test] - fn delete_ok() { - let mut store = new_store(); - let tag = 0; - let key = 1; - let entry = StoreEntry { - tag, - data: &[key, 2], - sensitive: false, - }; - store.insert(entry).unwrap(); - assert_eq!(store.find_all(&key).count(), 1); - let (index, _) = store.find_one(&key).unwrap(); - store.delete(index).unwrap(); - assert_eq!(store.find_all(&key).count(), 0); - assert_eq!(store.iter().count(), 0); - } - - #[test] - fn delete_sensitive_ok() { - let mut store = new_store(); - let tag = 0; - let key = 1; - let entry = StoreEntry { - tag, - data: &[key, 2], - sensitive: true, - }; - store.insert(entry).unwrap(); - assert_eq!(store.find_all(&key).count(), 1); - let (index, _) = store.find_one(&key).unwrap(); - store.delete(index).unwrap(); - assert_eq!(store.find_all(&key).count(), 0); - assert_eq!(store.iter().count(), 0); - assert!(store.deleted_entries_are_wiped()); - } - - #[test] - fn insert_until_full() { - let mut store = new_store(); - let tag = 0; - let mut key = 0; - while store - .insert(StoreEntry { - tag, - data: &[key, 0], - sensitive: false, - }) - .is_ok() - { - key += 1; - } - assert!(key > 0); - } - - #[test] - fn compact_ok() { - let mut store = new_store(); - let tag = 0; - let mut key = 0; - while store - .insert(StoreEntry { - tag, - data: &[key, 0], - sensitive: false, - }) - .is_ok() - { 
- key += 1; - } - let (index, _) = store.find_one(&0).unwrap(); - store.delete(index).unwrap(); - store - .insert(StoreEntry { - tag: 0, - data: &[key, 0], - sensitive: false, - }) - .unwrap(); - for k in 1..=key { - assert_eq!(store.find_all(&k).count(), 1); - } - } - - #[test] - fn reboot_ok() { - let mut store = new_store(); - let tag = 0; - let key = 1; - let data = &[key, 2]; - let entry = StoreEntry { - tag, - data, - sensitive: false, - }; - store.insert(entry).unwrap(); - - // Reboot the store. - let store = store.get_storage(); - let store = Store::new(new_buffer(store), Config).unwrap(); - - assert_eq!(store.iter().count(), 1); - assert_eq!(store.find_one(&key).unwrap().1, entry); - } - - #[test] - fn replace_atomic() { - let tag = 0; - let key = 1; - let old_entry = StoreEntry { - tag, - data: &[key, 2, 3, 4, 5, 6], - sensitive: false, - }; - let new_entry = StoreEntry { - tag, - data: &[key, 7, 8, 9], - sensitive: false, - }; - let mut delay = 0; - loop { - let mut store = new_store(); - store.insert(old_entry).unwrap(); - store.arm_snapshot(delay); - let (index, _) = store.find_one(&key).unwrap(); - store.replace(index, new_entry).unwrap(); - let (complete, store) = match store.get_snapshot() { - Err(_) => (true, store.get_storage()), - Ok(store) => (false, store), - }; - let store = Store::new(new_buffer(store), Config).unwrap(); - assert_eq!(store.iter().count(), 1); - assert_eq!(store.find_all(&key).count(), 1); - let (_, cur_entry) = store.find_one(&key).unwrap(); - assert!((cur_entry == old_entry && !complete) || cur_entry == new_entry); - if complete { - break; - } - delay += 1; - } - } - - #[test] - fn compact_atomic() { - let tag = 0; - let mut delay = 0; - loop { - let mut store = new_store(); - let mut key = 0; - while store - .insert(StoreEntry { - tag, - data: &[key, 0], - sensitive: false, - }) - .is_ok() - { - key += 1; - } - let (index, _) = store.find_one(&0).unwrap(); - store.delete(index).unwrap(); - let (index, _) = 
store.find_one(&1).unwrap(); - store.arm_snapshot(delay); - store - .replace( - index, - StoreEntry { - tag, - data: &[1, 1], - sensitive: false, - }, - ) - .unwrap(); - let (complete, store) = match store.get_snapshot() { - Err(_) => (true, store.get_storage()), - Ok(store) => (false, store), - }; - let store = Store::new(new_buffer(store), Config).unwrap(); - assert_eq!(store.iter().count(), key as usize - 1); - for k in 2..key { - assert_eq!(store.find_all(&k).count(), 1); - assert_eq!( - store.find_one(&k).unwrap().1, - StoreEntry { - tag, - data: &[k, 0], - sensitive: false, - } - ); - } - assert_eq!(store.find_all(&1).count(), 1); - let (_, entry) = store.find_one(&1).unwrap(); - assert_eq!(entry.tag, tag); - assert!((entry.data == [1, 0] && !complete) || entry.data == [1, 1]); - if complete { - break; - } - delay += 1; - } - } - - #[test] - fn invalid_tag() { - let mut store = new_store(); - let entry = StoreEntry { - tag: 1, - data: &[], - sensitive: false, - }; - assert_eq!(store.insert(entry), Err(StoreError::InvalidTag)); - } - - #[test] - fn invalid_length() { - let mut store = new_store(); - let entry = StoreEntry { - tag: 0, - data: &[0; PAGE_SIZE], - sensitive: false, - }; - assert_eq!(store.insert(entry), Err(StoreError::StoreFull)); - } -} diff --git a/src/embedded_flash/syscall.rs b/src/embedded_flash/syscall.rs index 5eae561..e043772 100644 --- a/src/embedded_flash/syscall.rs +++ b/src/embedded_flash/syscall.rs @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC +// Copyright 2019-2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,9 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use super::{Index, Storage, StorageError, StorageResult}; use alloc::vec::Vec; use libtock_core::syscalls; +use persistent_store::{Storage, StorageError, StorageIndex, StorageResult}; const DRIVER_NUMBER: usize = 0x50003; @@ -42,15 +42,13 @@ mod memop_nr { fn get_info(nr: usize, arg: usize) -> StorageResult { let code = syscalls::command(DRIVER_NUMBER, command_nr::GET_INFO, nr, arg); - code.map_err(|e| StorageError::KernelError { - code: e.return_code, - }) + code.map_err(|_| StorageError::CustomError) } fn memop(nr: u32, arg: usize) -> StorageResult { let code = unsafe { syscalls::raw::memop(nr, arg) }; if code < 0 { - Err(StorageError::KernelError { code }) + Err(StorageError::CustomError) } else { Ok(code as usize) } @@ -70,7 +68,7 @@ impl SyscallStorage { /// /// # Errors /// - /// Returns `BadFlash` if any of the following conditions do not hold: + /// Returns `CustomError` if any of the following conditions do not hold: /// - The word size is a power of two. /// - The page size is a power of two. /// - The page size is a multiple of the word size. @@ -90,13 +88,13 @@ impl SyscallStorage { || !syscall.page_size.is_power_of_two() || !syscall.is_word_aligned(syscall.page_size) { - return Err(StorageError::BadFlash); + return Err(StorageError::CustomError); } for i in 0..memop(memop_nr::STORAGE_CNT, 0)? 
{ let storage_ptr = memop(memop_nr::STORAGE_PTR, i)?; let max_storage_len = memop(memop_nr::STORAGE_LEN, i)?; if !syscall.is_page_aligned(storage_ptr) || !syscall.is_page_aligned(max_storage_len) { - return Err(StorageError::BadFlash); + return Err(StorageError::CustomError); } let storage_len = core::cmp::min(num_pages * syscall.page_size, max_storage_len); num_pages -= storage_len / syscall.page_size; @@ -141,12 +139,12 @@ impl Storage for SyscallStorage { self.max_page_erases } - fn read_slice(&self, index: Index, length: usize) -> StorageResult<&[u8]> { + fn read_slice(&self, index: StorageIndex, length: usize) -> StorageResult<&[u8]> { let start = index.range(length, self)?.start; find_slice(&self.storage_locations, start, length) } - fn write_slice(&mut self, index: Index, value: &[u8]) -> StorageResult<()> { + fn write_slice(&mut self, index: StorageIndex, value: &[u8]) -> StorageResult<()> { if !self.is_word_aligned(index.byte) || !self.is_word_aligned(value.len()) { return Err(StorageError::NotAligned); } @@ -163,28 +161,24 @@ impl Storage for SyscallStorage { ) }; if code < 0 { - return Err(StorageError::KernelError { code }); + return Err(StorageError::CustomError); } let code = syscalls::command(DRIVER_NUMBER, command_nr::WRITE_SLICE, ptr, value.len()); - if let Err(e) = code { - return Err(StorageError::KernelError { - code: e.return_code, - }); + if code.is_err() { + return Err(StorageError::CustomError); } Ok(()) } fn erase_page(&mut self, page: usize) -> StorageResult<()> { - let index = Index { page, byte: 0 }; + let index = StorageIndex { page, byte: 0 }; let length = self.page_size(); let ptr = self.read_slice(index, length)?.as_ptr() as usize; let code = syscalls::command(DRIVER_NUMBER, command_nr::ERASE_PAGE, ptr, length); - if let Err(e) = code { - return Err(StorageError::KernelError { - code: e.return_code, - }); + if code.is_err() { + return Err(StorageError::CustomError); } Ok(()) }