Use new persistent store library (and delete old)
@@ -15,6 +15,7 @@ libtock_drivers = { path = "third_party/libtock-drivers" }
 lang_items = { path = "third_party/lang-items" }
 cbor = { path = "libraries/cbor" }
 crypto = { path = "libraries/crypto" }
+persistent_store = { path = "libraries/persistent_store" }
 byteorder = { version = "1", default-features = false }
 arrayref = "0.3.6"
 subtle = { version = "2.2", default-features = false, features = ["nightly"] }
@@ -23,7 +24,7 @@ subtle = { version = "2.2", default-features = false, features = ["nightly"] }
 debug_allocations = ["lang_items/debug_allocations"]
 debug_ctap = ["crypto/derive_debug", "libtock_drivers/debug_ctap"]
 panic_console = ["lang_items/panic_console"]
-std = ["cbor/std", "crypto/std", "crypto/derive_debug", "lang_items/std"]
+std = ["cbor/std", "crypto/std", "crypto/derive_debug", "lang_items/std", "persistent_store/std"]
 ram_storage = []
 verbose = ["debug_ctap", "libtock_drivers/verbose_usb"]
 with_ctap1 = ["crypto/with_ctap1"]

@@ -500,7 +500,7 @@ where
         let (signature, x5c) = match self.persistent_store.attestation_private_key()? {
             Some(attestation_private_key) => {
                 let attestation_key =
-                    crypto::ecdsa::SecKey::from_bytes(attestation_private_key).unwrap();
+                    crypto::ecdsa::SecKey::from_bytes(&attestation_private_key).unwrap();
                 let attestation_certificate = self
                     .persistent_store
                     .attestation_certificate()?

@@ -81,5 +81,17 @@ pub enum Ctap2StatusCode
     /// This type of error is unexpected and the current state is undefined.
     CTAP2_ERR_VENDOR_INTERNAL_ERROR = 0xF2,

+    /// The persistent storage invariant is broken.
+    ///
+    /// There can be multiple reasons:
+    /// - The persistent storage has not been erased before its first usage.
+    /// - The persistent storage has been tampered with by a third party.
+    /// - The flash is malfunctioning (including the Tock driver).
+    ///
+    /// In the first 2 cases the persistent storage should be completely erased. If the error
+    /// recurs, it may indicate a software bug or a hardware deficiency. In both cases, the
+    /// error should be reported.
+    CTAP2_ERR_VENDOR_INVALID_PERSISTENT_STORAGE = 0xF3,
+
     CTAP2_ERR_VENDOR_LAST = 0xFF,
 }

@@ -12,13 +12,15 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+mod key;
+
 #[cfg(feature = "with_ctap2_1")]
 use crate::ctap::data_formats::{extract_array, extract_text_string};
 use crate::ctap::data_formats::{CredentialProtectionPolicy, PublicKeyCredentialSource};
 use crate::ctap::pin_protocol_v1::PIN_AUTH_LENGTH;
 use crate::ctap::status_code::Ctap2StatusCode;
 use crate::ctap::{key_material, USE_BATCH_ATTESTATION};
-use crate::embedded_flash::{self, StoreConfig, StoreEntry, StoreError};
 #[cfg(feature = "with_ctap2_1")]
 use alloc::string::String;
 #[cfg(any(test, feature = "ram_storage", feature = "with_ctap2_1"))]
 use alloc::vec;
@@ -30,16 +32,16 @@ use core::convert::TryInto;
 use crypto::rng256::Rng256;

 #[cfg(any(test, feature = "ram_storage"))]
-type Storage = embedded_flash::BufferStorage;
+type Storage = persistent_store::BufferStorage;
 #[cfg(not(any(test, feature = "ram_storage")))]
-type Storage = embedded_flash::SyscallStorage;
+type Storage = crate::embedded_flash::SyscallStorage;

 // Those constants may be modified before compilation to tune the behavior of the key.
 //
-// The number of pages should be at least 2 and at most what the flash can hold. There should be no
-// reason to put a small number here, except that the latency of flash operations depends on the
-// number of pages. This will improve in the future. Currently, using 20 pages gives 65ms per
-// operation. The rule of thumb is 3.5ms per additional page.
+// The number of pages should be at least 3 and at most what the flash can hold. There should be no
+// reason to put a small number here, except that the latency of flash operations is linear in the
+// number of pages. This may improve in the future. Currently, using 20 pages gives between 20ms and
+// 240ms per operation. The rule of thumb is between 1ms and 12ms per additional page.
 //
 // Limiting the number of residential keys ensures a minimum number of counter increments.
 // Let:
@@ -49,32 +51,15 @@ type Storage = embedded_flash::SyscallStorage;
 // - C the number of erase cycles (10000)
 // - I the minimum number of counter increments
 //
-// We have: I = ((P - 1) * 4092 - K * S) / 12 * C
+// We have: I = (P * 4084 - 5107 - K * S) / 8 * C
 //
-// With P=20 and K=150, we have I > 2M which is enough for 500 increments per day for 10 years.
+// With P=20 and K=150, we have I=2M which is enough for 500 increments per day for 10 years.
 #[cfg(feature = "ram_storage")]
-const NUM_PAGES: usize = 2;
+const NUM_PAGES: usize = 3;
 #[cfg(not(feature = "ram_storage"))]
 const NUM_PAGES: usize = 20;
 const MAX_SUPPORTED_RESIDENTIAL_KEYS: usize = 150;
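To sanity-check the quoted bound, here is a minimal sketch that evaluates the new formula with the values from the comment above; the per-credential size S is defined above this hunk and is assumed here to be roughly 500 bytes.

// Sketch only: evaluates I = (P * 4084 - 5107 - K * S) / 8 * C with the values
// quoted in the comment above. S is an assumption (the real constant is not
// visible in this hunk).
const P: u64 = 20; // number of pages
const K: u64 = 150; // maximum number of resident credentials
const S: u64 = 500; // assumed size of a serialized credential, in bytes
const C: u64 = 10_000; // erase cycles per page

fn main() {
    let increments = (P * 4084 - 5107 - K * S) / 8 * C;
    let ten_years_at_500_per_day = 500 * 365 * 10;
    println!("I = {increments}"); // about 2M with these values
    assert!(increments >= ten_years_at_500_per_day);
}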
// List of tags. They should all be unique. And there should be less than NUM_TAGS.
|
||||
const TAG_CREDENTIAL: usize = 0;
|
||||
const GLOBAL_SIGNATURE_COUNTER: usize = 1;
|
||||
const MASTER_KEYS: usize = 2;
|
||||
const PIN_HASH: usize = 3;
|
||||
const PIN_RETRIES: usize = 4;
|
||||
const ATTESTATION_PRIVATE_KEY: usize = 5;
|
||||
const ATTESTATION_CERTIFICATE: usize = 6;
|
||||
const AAGUID: usize = 7;
|
||||
#[cfg(feature = "with_ctap2_1")]
|
||||
const MIN_PIN_LENGTH: usize = 8;
|
||||
#[cfg(feature = "with_ctap2_1")]
|
||||
const MIN_PIN_LENGTH_RP_IDS: usize = 9;
|
||||
// Different NUM_TAGS depending on the CTAP version make the storage incompatible,
|
||||
// so we use the maximum.
|
||||
const NUM_TAGS: usize = 10;
|
||||
|
||||
const MAX_PIN_RETRIES: u8 = 8;
|
||||
const ATTESTATION_PRIVATE_KEY_LENGTH: usize = 32;
|
||||
const AAGUID_LENGTH: usize = 16;
|
||||
@@ -88,92 +73,18 @@ const _DEFAULT_MIN_PIN_LENGTH_RP_IDS: Vec<String> = Vec::new();
|
||||
#[cfg(feature = "with_ctap2_1")]
|
||||
const _MAX_RP_IDS_LENGTH: usize = 8;
|
||||
|
||||
#[allow(clippy::enum_variant_names)]
|
||||
#[derive(PartialEq, Eq, PartialOrd, Ord)]
|
||||
enum Key {
|
||||
// TODO(cretin): Test whether this doesn't consume too much memory. Otherwise, we can use less
|
||||
// keys. Either only a simple enum value for all credentials, or group by rp_id.
|
||||
Credential {
|
||||
rp_id: Option<String>,
|
||||
credential_id: Option<Vec<u8>>,
|
||||
user_handle: Option<Vec<u8>>,
|
||||
},
|
||||
GlobalSignatureCounter,
|
||||
MasterKeys,
|
||||
PinHash,
|
||||
PinRetries,
|
||||
AttestationPrivateKey,
|
||||
AttestationCertificate,
|
||||
Aaguid,
|
||||
#[cfg(feature = "with_ctap2_1")]
|
||||
MinPinLength,
|
||||
#[cfg(feature = "with_ctap2_1")]
|
||||
MinPinLengthRpIds,
|
||||
}
|
||||
|
||||
/// Wrapper for master keys.
|
||||
pub struct MasterKeys {
|
||||
/// Master encryption key.
|
||||
pub encryption: [u8; 32],
|
||||
|
||||
/// Master hmac key.
|
||||
pub hmac: [u8; 32],
|
||||
}
|
||||
|
||||
struct Config;
|
||||
|
||||
impl StoreConfig for Config {
|
||||
type Key = Key;
|
||||
|
||||
fn num_tags(&self) -> usize {
|
||||
NUM_TAGS
|
||||
}
|
||||
|
||||
fn keys(&self, entry: StoreEntry, mut add: impl FnMut(Key)) {
|
||||
match entry.tag {
|
||||
TAG_CREDENTIAL => {
|
||||
let credential = match deserialize_credential(entry.data) {
|
||||
None => {
|
||||
debug_assert!(false);
|
||||
return;
|
||||
}
|
||||
Some(credential) => credential,
|
||||
};
|
||||
add(Key::Credential {
|
||||
rp_id: Some(credential.rp_id.clone()),
|
||||
credential_id: Some(credential.credential_id),
|
||||
user_handle: None,
|
||||
});
|
||||
add(Key::Credential {
|
||||
rp_id: Some(credential.rp_id.clone()),
|
||||
credential_id: None,
|
||||
user_handle: None,
|
||||
});
|
||||
add(Key::Credential {
|
||||
rp_id: Some(credential.rp_id),
|
||||
credential_id: None,
|
||||
user_handle: Some(credential.user_handle),
|
||||
});
|
||||
add(Key::Credential {
|
||||
rp_id: None,
|
||||
credential_id: None,
|
||||
user_handle: None,
|
||||
});
|
||||
}
|
||||
GLOBAL_SIGNATURE_COUNTER => add(Key::GlobalSignatureCounter),
|
||||
MASTER_KEYS => add(Key::MasterKeys),
|
||||
PIN_HASH => add(Key::PinHash),
|
||||
PIN_RETRIES => add(Key::PinRetries),
|
||||
ATTESTATION_PRIVATE_KEY => add(Key::AttestationPrivateKey),
|
||||
ATTESTATION_CERTIFICATE => add(Key::AttestationCertificate),
|
||||
AAGUID => add(Key::Aaguid),
|
||||
#[cfg(feature = "with_ctap2_1")]
|
||||
MIN_PIN_LENGTH => add(Key::MinPinLength),
|
||||
#[cfg(feature = "with_ctap2_1")]
|
||||
MIN_PIN_LENGTH_RP_IDS => add(Key::MinPinLengthRpIds),
|
||||
_ => debug_assert!(false),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// CTAP persistent storage.
|
||||
pub struct PersistentStore {
|
||||
store: embedded_flash::Store<Storage, Config>,
|
||||
store: persistent_store::Store<Storage>,
|
||||
}
|
||||
|
||||
impl PersistentStore {
|
||||
@@ -188,17 +99,19 @@ impl PersistentStore {
|
||||
#[cfg(any(test, feature = "ram_storage"))]
|
||||
let storage = PersistentStore::new_test_storage();
|
||||
let mut store = PersistentStore {
|
||||
store: embedded_flash::Store::new(storage, Config).unwrap(),
|
||||
store: persistent_store::Store::new(storage).ok().unwrap(),
|
||||
};
|
||||
store.init(rng);
|
||||
store.init(rng).unwrap();
|
||||
store
|
||||
}
|
||||
|
||||
/// Creates a syscall storage in flash.
|
||||
#[cfg(not(any(test, feature = "ram_storage")))]
|
||||
fn new_prod_storage() -> Storage {
|
||||
Storage::new(NUM_PAGES).unwrap()
|
||||
}
|
||||
|
||||
/// Creates a buffer storage in RAM.
|
||||
#[cfg(any(test, feature = "ram_storage"))]
|
||||
fn new_test_storage() -> Storage {
|
||||
#[cfg(not(test))]
|
||||
@@ -206,7 +119,7 @@ impl PersistentStore {
|
||||
#[cfg(test)]
|
||||
const PAGE_SIZE: usize = 0x1000;
|
||||
let store = vec![0xff; NUM_PAGES * PAGE_SIZE].into_boxed_slice();
|
||||
let options = embedded_flash::BufferOptions {
|
||||
let options = persistent_store::BufferOptions {
|
||||
word_size: 4,
|
||||
page_size: PAGE_SIZE,
|
||||
max_word_writes: 2,
|
||||
@@ -216,283 +129,265 @@ impl PersistentStore {
|
||||
Storage::new(store, options)
|
||||
}
|
||||
|
||||
fn init(&mut self, rng: &mut impl Rng256) {
|
||||
if self.store.find_one(&Key::MasterKeys).is_none() {
|
||||
/// Initializes the store by creating missing objects.
|
||||
fn init(&mut self, rng: &mut impl Rng256) -> Result<(), Ctap2StatusCode> {
|
||||
// Generate and store the master keys if they are missing.
|
||||
if self.store.find_handle(key::MASTER_KEYS)?.is_none() {
|
||||
let master_encryption_key = rng.gen_uniform_u8x32();
|
||||
let master_hmac_key = rng.gen_uniform_u8x32();
|
||||
let mut master_keys = Vec::with_capacity(64);
|
||||
master_keys.extend_from_slice(&master_encryption_key);
|
||||
master_keys.extend_from_slice(&master_hmac_key);
|
||||
self.store
|
||||
.insert(StoreEntry {
|
||||
tag: MASTER_KEYS,
|
||||
data: &master_keys,
|
||||
sensitive: true,
|
||||
})
|
||||
.unwrap();
|
||||
self.store.insert(key::MASTER_KEYS, &master_keys)?;
|
||||
}
|
||||
// The following 3 entries are meant to be written by vendor-specific commands.
|
||||
if USE_BATCH_ATTESTATION {
|
||||
if self.store.find_one(&Key::AttestationPrivateKey).is_none() {
|
||||
self.set_attestation_private_key(key_material::ATTESTATION_PRIVATE_KEY)
|
||||
.unwrap();
|
||||
if self
|
||||
.store
|
||||
.find_handle(key::ATTESTATION_PRIVATE_KEY)?
|
||||
.is_none()
|
||||
{
|
||||
self.set_attestation_private_key(key_material::ATTESTATION_PRIVATE_KEY)?;
|
||||
}
|
||||
if self.store.find_one(&Key::AttestationCertificate).is_none() {
|
||||
self.set_attestation_certificate(key_material::ATTESTATION_CERTIFICATE)
|
||||
.unwrap();
|
||||
if self
|
||||
.store
|
||||
.find_handle(key::ATTESTATION_CERTIFICATE)?
|
||||
.is_none()
|
||||
{
|
||||
self.set_attestation_certificate(key_material::ATTESTATION_CERTIFICATE)?;
|
||||
}
|
||||
}
|
||||
if self.store.find_one(&Key::Aaguid).is_none() {
|
||||
self.set_aaguid(key_material::AAGUID).unwrap();
|
||||
if self.store.find_handle(key::AAGUID)?.is_none() {
|
||||
self.set_aaguid(key_material::AAGUID)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Returns the first matching credential.
|
||||
///
|
||||
/// Returns `None` if no credentials are matched or if `check_cred_protect` is set and the first
|
||||
/// matched credential requires user verification.
|
||||
pub fn find_credential(
|
||||
&self,
|
||||
rp_id: &str,
|
||||
credential_id: &[u8],
|
||||
check_cred_protect: bool,
|
||||
) -> Result<Option<PublicKeyCredentialSource>, Ctap2StatusCode> {
|
||||
let key = Key::Credential {
|
||||
rp_id: Some(rp_id.into()),
|
||||
credential_id: Some(credential_id.into()),
|
||||
user_handle: None,
|
||||
};
|
||||
let entry = match self.store.find_one(&key) {
|
||||
None => return Ok(None),
|
||||
Some((_, entry)) => entry,
|
||||
};
|
||||
debug_assert_eq!(entry.tag, TAG_CREDENTIAL);
|
||||
let result = deserialize_credential(entry.data);
|
||||
debug_assert!(result.is_some());
|
||||
let user_verification_required = result.as_ref().map_or(false, |cred| {
|
||||
cred.cred_protect_policy == Some(CredentialProtectionPolicy::UserVerificationRequired)
|
||||
let mut iter_result = Ok(());
|
||||
let iter = self.iter_credentials(&mut iter_result)?;
|
||||
// TODO(reviewer): Should we return an error if we find more than one matching credential?
|
||||
// We did not return an error in the previous version (panic in debug mode, nothing in release mode)
|
||||
// but I don't remember why. Let's document it.
|
||||
let result = iter.map(|(_, credential)| credential).find(|credential| {
|
||||
credential.rp_id == rp_id && credential.credential_id == credential_id
|
||||
});
|
||||
iter_result?;
|
||||
if let Some(cred) = &result {
|
||||
let user_verification_required = cred.cred_protect_policy
|
||||
== Some(CredentialProtectionPolicy::UserVerificationRequired);
|
||||
if check_cred_protect && user_verification_required {
|
||||
Ok(None)
|
||||
} else {
|
||||
return Ok(None);
|
||||
}
|
||||
}
|
||||
Ok(result)
|
||||
}
|
||||
}
|
||||
|
||||
/// Stores or updates a credential.
|
||||
///
|
||||
/// If a credential with the same RP id and user handle already exists, it is replaced.
|
||||
pub fn store_credential(
|
||||
&mut self,
|
||||
credential: PublicKeyCredentialSource,
|
||||
new_credential: PublicKeyCredentialSource,
|
||||
) -> Result<(), Ctap2StatusCode> {
|
||||
let key = Key::Credential {
|
||||
rp_id: Some(credential.rp_id.clone()),
|
||||
credential_id: None,
|
||||
user_handle: Some(credential.user_handle.clone()),
|
||||
};
|
||||
let old_entry = self.store.find_one(&key);
|
||||
if old_entry.is_none() && self.count_credentials()? >= MAX_SUPPORTED_RESIDENTIAL_KEYS {
|
||||
// Holds the key of the existing credential if this is an update.
|
||||
let mut old_key = None;
|
||||
// Holds the unordered list of used keys.
|
||||
let mut keys = Vec::new();
|
||||
let mut iter_result = Ok(());
|
||||
let iter = self.iter_credentials(&mut iter_result)?;
|
||||
for (key, credential) in iter {
|
||||
keys.push(key);
|
||||
if credential.rp_id == new_credential.rp_id
|
||||
&& credential.user_handle == new_credential.user_handle
|
||||
{
|
||||
if old_key.is_some() {
|
||||
return Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INVALID_PERSISTENT_STORAGE);
|
||||
}
|
||||
old_key = Some(key);
|
||||
}
|
||||
}
|
||||
iter_result?;
|
||||
if old_key.is_none() && keys.len() >= MAX_SUPPORTED_RESIDENTIAL_KEYS {
|
||||
return Err(Ctap2StatusCode::CTAP2_ERR_KEY_STORE_FULL);
|
||||
}
|
||||
let credential = serialize_credential(credential)?;
|
||||
let new_entry = StoreEntry {
|
||||
tag: TAG_CREDENTIAL,
|
||||
data: &credential,
|
||||
sensitive: true,
|
||||
};
|
||||
match old_entry {
|
||||
None => self.store.insert(new_entry)?,
|
||||
Some((index, old_entry)) => {
|
||||
debug_assert_eq!(old_entry.tag, TAG_CREDENTIAL);
|
||||
self.store.replace(index, new_entry)?
|
||||
}
|
||||
let key = match old_key {
|
||||
// This is a new credential being added, so we need to allocate a free key. We choose the
|
||||
// first available key. This is quadratic in the number of existing keys.
|
||||
None => key::CREDENTIALS
|
||||
.take(MAX_SUPPORTED_RESIDENTIAL_KEYS)
|
||||
.find(|key| !keys.contains(key))
|
||||
.ok_or(Ctap2StatusCode::CTAP2_ERR_VENDOR_INVALID_PERSISTENT_STORAGE)?,
|
||||
// This is an existing credential being updated, we reuse its key.
|
||||
Some(x) => x,
|
||||
};
|
||||
let value = serialize_credential(new_credential)?;
|
||||
self.store.insert(key, &value)?;
|
||||
Ok(())
|
||||
}
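The interesting part above is the free-key allocation. Below is a standalone sketch of the same first-unused-key strategy; the helper name and the 1700..2000 bounds mirror key::CREDENTIALS and MAX_SUPPORTED_RESIDENTIAL_KEYS but are illustrative only.

/// Picks the first key in `credentials_range` that is not already used.
/// Mirrors the allocation in `store_credential`; quadratic in the number of
/// existing keys, which is fine for a small bound.
fn allocate_key(
    credentials_range: core::ops::Range<usize>,
    max_keys: usize,
    used: &[usize],
) -> Option<usize> {
    credentials_range
        .take(max_keys)
        .find(|key| !used.contains(key))
}

#[test]
fn allocates_first_free_key() {
    assert_eq!(allocate_key(1700..2000, 150, &[1700, 1702]), Some(1701));
    // All allowed slots taken: no key available.
    let full: Vec<usize> = (1700..1703).collect();
    assert_eq!(allocate_key(1700..2000, 3, &full), None);
}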
|
||||
|
||||
/// Returns the list of matching credentials.
|
||||
///
|
||||
/// Does not return credentials that are not discoverable if `check_cred_protect` is set.
|
||||
pub fn filter_credential(
|
||||
&self,
|
||||
rp_id: &str,
|
||||
check_cred_protect: bool,
|
||||
) -> Result<Vec<PublicKeyCredentialSource>, Ctap2StatusCode> {
|
||||
Ok(self
|
||||
.store
|
||||
.find_all(&Key::Credential {
|
||||
rp_id: Some(rp_id.into()),
|
||||
credential_id: None,
|
||||
user_handle: None,
|
||||
})
|
||||
.filter_map(|(_, entry)| {
|
||||
debug_assert_eq!(entry.tag, TAG_CREDENTIAL);
|
||||
let credential = deserialize_credential(entry.data);
|
||||
debug_assert!(credential.is_some());
|
||||
credential
|
||||
let mut iter_result = Ok(());
|
||||
let iter = self.iter_credentials(&mut iter_result)?;
|
||||
let result = iter
|
||||
.filter_map(|(_, credential)| {
|
||||
if credential.rp_id == rp_id {
|
||||
Some(credential)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.filter(|cred| !check_cred_protect || cred.is_discoverable())
|
||||
.collect())
|
||||
.collect();
|
||||
iter_result?;
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
/// Returns the number of credentials.
|
||||
#[cfg(test)]
|
||||
pub fn count_credentials(&self) -> Result<usize, Ctap2StatusCode> {
|
||||
Ok(self
|
||||
.store
|
||||
.find_all(&Key::Credential {
|
||||
rp_id: None,
|
||||
credential_id: None,
|
||||
user_handle: None,
|
||||
})
|
||||
.count())
|
||||
let mut iter_result = Ok(());
|
||||
let iter = self.iter_credentials(&mut iter_result)?;
|
||||
let result = iter.count();
|
||||
iter_result?;
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
/// Iterates through the credentials.
|
||||
///
|
||||
/// If an error is encountered during iteration, it is written to `result`.
|
||||
fn iter_credentials<'a>(
|
||||
&'a self,
|
||||
result: &'a mut Result<(), Ctap2StatusCode>,
|
||||
) -> Result<IterCredentials<'a>, Ctap2StatusCode> {
|
||||
IterCredentials::new(&self.store, result)
|
||||
}
|
||||
|
||||
/// Returns the global signature counter.
|
||||
pub fn global_signature_counter(&self) -> Result<u32, Ctap2StatusCode> {
|
||||
Ok(self
|
||||
.store
|
||||
.find_one(&Key::GlobalSignatureCounter)
|
||||
.map_or(0, |(_, entry)| {
|
||||
u32::from_ne_bytes(*array_ref!(entry.data, 0, 4))
|
||||
}))
|
||||
Ok(match self.store.find(key::GLOBAL_SIGNATURE_COUNTER)? {
|
||||
None => 0,
|
||||
Some(value) => u32::from_ne_bytes(*array_ref!(&value, 0, 4)),
|
||||
})
|
||||
}
|
||||
|
||||
/// Increments the global signature counter.
|
||||
pub fn incr_global_signature_counter(&mut self) -> Result<(), Ctap2StatusCode> {
|
||||
let mut buffer = [0; core::mem::size_of::<u32>()];
|
||||
match self.store.find_one(&Key::GlobalSignatureCounter) {
|
||||
None => {
|
||||
buffer.copy_from_slice(&1u32.to_ne_bytes());
|
||||
self.store.insert(StoreEntry {
|
||||
tag: GLOBAL_SIGNATURE_COUNTER,
|
||||
data: &buffer,
|
||||
sensitive: false,
|
||||
})?;
|
||||
}
|
||||
Some((index, entry)) => {
|
||||
let value = u32::from_ne_bytes(*array_ref!(entry.data, 0, 4));
|
||||
let old_value = self.global_signature_counter()?;
|
||||
// In hopes that servers handle the wrapping gracefully.
|
||||
buffer.copy_from_slice(&value.wrapping_add(1).to_ne_bytes());
|
||||
self.store.replace(
|
||||
index,
|
||||
StoreEntry {
|
||||
tag: GLOBAL_SIGNATURE_COUNTER,
|
||||
data: &buffer,
|
||||
sensitive: false,
|
||||
},
|
||||
)?;
|
||||
}
|
||||
}
|
||||
let new_value = old_value.wrapping_add(1);
|
||||
self.store
|
||||
.insert(key::GLOBAL_SIGNATURE_COUNTER, &new_value.to_ne_bytes())?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Returns the master keys.
|
||||
pub fn master_keys(&self) -> Result<MasterKeys, Ctap2StatusCode> {
|
||||
let (_, entry) = self.store.find_one(&Key::MasterKeys).unwrap();
|
||||
if entry.data.len() != 64 {
|
||||
return Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR);
|
||||
let master_keys = self
|
||||
.store
|
||||
.find(key::MASTER_KEYS)?
|
||||
.ok_or(Ctap2StatusCode::CTAP2_ERR_VENDOR_INVALID_PERSISTENT_STORAGE)?;
|
||||
if master_keys.len() != 64 {
|
||||
return Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INVALID_PERSISTENT_STORAGE);
|
||||
}
|
||||
Ok(MasterKeys {
|
||||
encryption: *array_ref![entry.data, 0, 32],
|
||||
hmac: *array_ref![entry.data, 32, 32],
|
||||
encryption: *array_ref![master_keys, 0, 32],
|
||||
hmac: *array_ref![master_keys, 32, 32],
|
||||
})
|
||||
}
|
||||
|
||||
/// Returns the PIN hash if defined.
|
||||
pub fn pin_hash(&self) -> Result<Option<[u8; PIN_AUTH_LENGTH]>, Ctap2StatusCode> {
|
||||
let data = match self.store.find_one(&Key::PinHash) {
|
||||
let pin_hash = match self.store.find(key::PIN_HASH)? {
|
||||
None => return Ok(None),
|
||||
Some((_, entry)) => entry.data,
|
||||
Some(pin_hash) => pin_hash,
|
||||
};
|
||||
if data.len() != PIN_AUTH_LENGTH {
|
||||
return Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR);
|
||||
if pin_hash.len() != PIN_AUTH_LENGTH {
|
||||
return Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INVALID_PERSISTENT_STORAGE);
|
||||
}
|
||||
Ok(Some(*array_ref![data, 0, PIN_AUTH_LENGTH]))
|
||||
Ok(Some(*array_ref![pin_hash, 0, PIN_AUTH_LENGTH]))
|
||||
}
|
||||
|
||||
/// Sets the PIN hash.
|
||||
///
|
||||
/// If it was already defined, it is updated.
|
||||
pub fn set_pin_hash(
|
||||
&mut self,
|
||||
pin_hash: &[u8; PIN_AUTH_LENGTH],
|
||||
) -> Result<(), Ctap2StatusCode> {
|
||||
let entry = StoreEntry {
|
||||
tag: PIN_HASH,
|
||||
data: pin_hash,
|
||||
sensitive: true,
|
||||
};
|
||||
match self.store.find_one(&Key::PinHash) {
|
||||
None => self.store.insert(entry)?,
|
||||
Some((index, _)) => self.store.replace(index, entry)?,
|
||||
}
|
||||
Ok(())
|
||||
Ok(self.store.insert(key::PIN_HASH, pin_hash)?)
|
||||
}
|
||||
|
||||
/// Returns the number of remaining PIN retries.
|
||||
pub fn pin_retries(&self) -> Result<u8, Ctap2StatusCode> {
|
||||
Ok(self
|
||||
.store
|
||||
.find_one(&Key::PinRetries)
|
||||
.map_or(MAX_PIN_RETRIES, |(_, entry)| {
|
||||
debug_assert_eq!(entry.data.len(), 1);
|
||||
entry.data[0]
|
||||
}))
|
||||
match self.store.find(key::PIN_RETRIES)? {
|
||||
None => Ok(MAX_PIN_RETRIES),
|
||||
Some(value) if value.len() == 1 => Ok(value[0]),
|
||||
_ => Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INVALID_PERSISTENT_STORAGE),
|
||||
}
|
||||
}
|
||||
|
||||
/// Decrements the number of remaining PIN retries.
|
||||
pub fn decr_pin_retries(&mut self) -> Result<(), Ctap2StatusCode> {
|
||||
match self.store.find_one(&Key::PinRetries) {
|
||||
None => {
|
||||
self.store.insert(StoreEntry {
|
||||
tag: PIN_RETRIES,
|
||||
data: &[MAX_PIN_RETRIES.saturating_sub(1)],
|
||||
sensitive: false,
|
||||
})?;
|
||||
}
|
||||
Some((index, entry)) => {
|
||||
debug_assert_eq!(entry.data.len(), 1);
|
||||
if entry.data[0] == 0 {
|
||||
return Ok(());
|
||||
}
|
||||
let new_value = entry.data[0].saturating_sub(1);
|
||||
self.store.replace(
|
||||
index,
|
||||
StoreEntry {
|
||||
tag: PIN_RETRIES,
|
||||
data: &[new_value],
|
||||
sensitive: false,
|
||||
},
|
||||
)?;
|
||||
}
|
||||
let old_value = self.pin_retries()?;
|
||||
let new_value = old_value.saturating_sub(1);
|
||||
if new_value != old_value {
|
||||
self.store.insert(key::PIN_RETRIES, &[new_value])?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Resets the number of remaining PIN retries.
|
||||
pub fn reset_pin_retries(&mut self) -> Result<(), Ctap2StatusCode> {
|
||||
if let Some((index, _)) = self.store.find_one(&Key::PinRetries) {
|
||||
self.store.delete(index)?;
|
||||
}
|
||||
Ok(())
|
||||
Ok(self.store.remove(key::PIN_RETRIES)?)
|
||||
}
|
||||
|
||||
/// Returns the minimum PIN length.
|
||||
#[cfg(feature = "with_ctap2_1")]
|
||||
pub fn min_pin_length(&self) -> Result<u8, Ctap2StatusCode> {
|
||||
Ok(self
|
||||
.store
|
||||
.find_one(&Key::MinPinLength)
|
||||
.map_or(DEFAULT_MIN_PIN_LENGTH, |(_, entry)| {
|
||||
debug_assert_eq!(entry.data.len(), 1);
|
||||
entry.data[0]
|
||||
}))
|
||||
match self.store.find(key::MIN_PIN_LENGTH)? {
|
||||
None => Ok(DEFAULT_MIN_PIN_LENGTH),
|
||||
Some(value) if value.len() == 1 => Ok(value[0]),
|
||||
_ => Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INVALID_PERSISTENT_STORAGE),
|
||||
}
|
||||
}
|
||||
|
||||
/// Sets the minimum PIN length.
|
||||
#[cfg(feature = "with_ctap2_1")]
|
||||
pub fn set_min_pin_length(&mut self, min_pin_length: u8) -> Result<(), Ctap2StatusCode> {
|
||||
let entry = StoreEntry {
|
||||
tag: MIN_PIN_LENGTH,
|
||||
data: &[min_pin_length],
|
||||
sensitive: false,
|
||||
};
|
||||
Ok(match self.store.find_one(&Key::MinPinLength) {
|
||||
None => self.store.insert(entry)?,
|
||||
Some((index, _)) => self.store.replace(index, entry)?,
|
||||
})
|
||||
Ok(self.store.insert(key::MIN_PIN_LENGTH, &[min_pin_length])?)
|
||||
}
|
||||
|
||||
/// TODO: Help from reviewer needed for documentation.
|
||||
#[cfg(feature = "with_ctap2_1")]
|
||||
pub fn _min_pin_length_rp_ids(&self) -> Result<Vec<String>, Ctap2StatusCode> {
|
||||
let rp_ids = self
|
||||
.store
|
||||
.find_one(&Key::MinPinLengthRpIds)
|
||||
.map_or(Some(_DEFAULT_MIN_PIN_LENGTH_RP_IDS), |(_, entry)| {
|
||||
_deserialize_min_pin_length_rp_ids(entry.data)
|
||||
.find(key::_MIN_PIN_LENGTH_RP_IDS)?
|
||||
.map_or(Some(_DEFAULT_MIN_PIN_LENGTH_RP_IDS), |value| {
|
||||
_deserialize_min_pin_length_rp_ids(&value)
|
||||
});
|
||||
debug_assert!(rp_ids.is_some());
|
||||
Ok(rp_ids.unwrap_or(vec![]))
|
||||
}
|
||||
|
||||
/// TODO: Help from reviewer needed for documentation.
|
||||
#[cfg(feature = "with_ctap2_1")]
|
||||
pub fn _set_min_pin_length_rp_ids(
|
||||
&mut self,
|
||||
@@ -507,138 +402,173 @@ impl PersistentStore {
|
||||
if min_pin_length_rp_ids.len() > _MAX_RP_IDS_LENGTH {
|
||||
return Err(Ctap2StatusCode::CTAP2_ERR_KEY_STORE_FULL);
|
||||
}
|
||||
let entry = StoreEntry {
|
||||
tag: MIN_PIN_LENGTH_RP_IDS,
|
||||
data: &_serialize_min_pin_length_rp_ids(min_pin_length_rp_ids)?,
|
||||
sensitive: false,
|
||||
};
|
||||
match self.store.find_one(&Key::MinPinLengthRpIds) {
|
||||
None => {
|
||||
self.store.insert(entry).unwrap();
|
||||
}
|
||||
Some((index, _)) => {
|
||||
self.store.replace(index, entry).unwrap();
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
Ok(self.store.insert(
|
||||
key::_MIN_PIN_LENGTH_RP_IDS,
|
||||
&_serialize_min_pin_length_rp_ids(min_pin_length_rp_ids)?,
|
||||
)?)
|
||||
}
|
||||
|
||||
/// Returns the attestation private key if defined.
|
||||
pub fn attestation_private_key(
|
||||
&self,
|
||||
) -> Result<Option<&[u8; ATTESTATION_PRIVATE_KEY_LENGTH]>, Ctap2StatusCode> {
|
||||
let data = match self.store.find_one(&Key::AttestationPrivateKey) {
|
||||
None => return Ok(None),
|
||||
Some((_, entry)) => entry.data,
|
||||
};
|
||||
if data.len() != ATTESTATION_PRIVATE_KEY_LENGTH {
|
||||
return Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR);
|
||||
) -> Result<Option<[u8; ATTESTATION_PRIVATE_KEY_LENGTH]>, Ctap2StatusCode> {
|
||||
match self.store.find(key::ATTESTATION_PRIVATE_KEY)? {
|
||||
None => Ok(None),
|
||||
Some(key) if key.len() != ATTESTATION_PRIVATE_KEY_LENGTH => {
|
||||
Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INVALID_PERSISTENT_STORAGE)
|
||||
}
|
||||
Some(key) => Ok(Some(*array_ref![key, 0, ATTESTATION_PRIVATE_KEY_LENGTH])),
|
||||
}
|
||||
Ok(Some(array_ref!(data, 0, ATTESTATION_PRIVATE_KEY_LENGTH)))
|
||||
}
|
||||
|
||||
/// Sets the attestation private key.
|
||||
///
|
||||
/// If it is already defined, it is overwritten.
|
||||
pub fn set_attestation_private_key(
|
||||
&mut self,
|
||||
attestation_private_key: &[u8; ATTESTATION_PRIVATE_KEY_LENGTH],
|
||||
) -> Result<(), Ctap2StatusCode> {
|
||||
let entry = StoreEntry {
|
||||
tag: ATTESTATION_PRIVATE_KEY,
|
||||
data: attestation_private_key,
|
||||
sensitive: false,
|
||||
};
|
||||
match self.store.find_one(&Key::AttestationPrivateKey) {
|
||||
None => self.store.insert(entry)?,
|
||||
Some((index, _)) => self.store.replace(index, entry)?,
|
||||
}
|
||||
Ok(())
|
||||
Ok(self
|
||||
.store
|
||||
.insert(key::ATTESTATION_PRIVATE_KEY, attestation_private_key)?)
|
||||
}
|
||||
|
||||
/// Returns the attestation certificate if defined.
|
||||
pub fn attestation_certificate(&self) -> Result<Option<Vec<u8>>, Ctap2StatusCode> {
|
||||
let data = match self.store.find_one(&Key::AttestationCertificate) {
|
||||
None => return Ok(None),
|
||||
Some((_, entry)) => entry.data,
|
||||
};
|
||||
Ok(Some(data.to_vec()))
|
||||
Ok(self.store.find(key::ATTESTATION_CERTIFICATE)?)
|
||||
}
|
||||
|
||||
/// Sets the attestation certificate.
|
||||
///
|
||||
/// If it is already defined, it is overwritten.
|
||||
pub fn set_attestation_certificate(
|
||||
&mut self,
|
||||
attestation_certificate: &[u8],
|
||||
) -> Result<(), Ctap2StatusCode> {
|
||||
let entry = StoreEntry {
|
||||
tag: ATTESTATION_CERTIFICATE,
|
||||
data: attestation_certificate,
|
||||
sensitive: false,
|
||||
};
|
||||
match self.store.find_one(&Key::AttestationCertificate) {
|
||||
None => self.store.insert(entry)?,
|
||||
Some((index, _)) => self.store.replace(index, entry)?,
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn aaguid(&self) -> Result<[u8; AAGUID_LENGTH], Ctap2StatusCode> {
|
||||
let (_, entry) = self
|
||||
Ok(self
|
||||
.store
|
||||
.find_one(&Key::Aaguid)
|
||||
.ok_or(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR)?;
|
||||
let data = entry.data;
|
||||
if data.len() != AAGUID_LENGTH {
|
||||
return Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR);
|
||||
}
|
||||
Ok(*array_ref![data, 0, AAGUID_LENGTH])
|
||||
.insert(key::ATTESTATION_CERTIFICATE, attestation_certificate)?)
|
||||
}
|
||||
|
||||
/// Returns the AAGUID.
|
||||
pub fn aaguid(&self) -> Result<[u8; AAGUID_LENGTH], Ctap2StatusCode> {
|
||||
let aaguid = self
|
||||
.store
|
||||
.find(key::AAGUID)?
|
||||
.ok_or(Ctap2StatusCode::CTAP2_ERR_VENDOR_INVALID_PERSISTENT_STORAGE)?;
|
||||
if aaguid.len() != AAGUID_LENGTH {
|
||||
return Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INVALID_PERSISTENT_STORAGE);
|
||||
}
|
||||
Ok(*array_ref![aaguid, 0, AAGUID_LENGTH])
|
||||
}
|
||||
|
||||
/// Sets the AAGUID.
|
||||
///
|
||||
/// If it is already defined, it is overwritten.
|
||||
pub fn set_aaguid(&mut self, aaguid: &[u8; AAGUID_LENGTH]) -> Result<(), Ctap2StatusCode> {
|
||||
let entry = StoreEntry {
|
||||
tag: AAGUID,
|
||||
data: aaguid,
|
||||
sensitive: false,
|
||||
};
|
||||
match self.store.find_one(&Key::Aaguid) {
|
||||
None => self.store.insert(entry)?,
|
||||
Some((index, _)) => self.store.replace(index, entry)?,
|
||||
}
|
||||
Ok(())
|
||||
Ok(self.store.insert(key::AAGUID, aaguid)?)
|
||||
}
|
||||
|
||||
/// Resets the store as for a CTAP reset.
|
||||
///
|
||||
/// In particular persistent entries are not reset.
|
||||
pub fn reset(&mut self, rng: &mut impl Rng256) -> Result<(), Ctap2StatusCode> {
|
||||
loop {
|
||||
let index = {
|
||||
let mut iter = self.store.iter().filter(|(_, entry)| should_reset(entry));
|
||||
match iter.next() {
|
||||
None => break,
|
||||
Some((index, _)) => index,
|
||||
}
|
||||
};
|
||||
self.store.delete(index)?;
|
||||
}
|
||||
self.init(rng);
|
||||
self.store.clear(key::NUM_PERSISTENT_KEYS)?;
|
||||
self.init(rng)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl From<StoreError> for Ctap2StatusCode {
|
||||
fn from(error: StoreError) -> Ctap2StatusCode {
|
||||
impl From<persistent_store::StoreError> for Ctap2StatusCode {
|
||||
fn from(error: persistent_store::StoreError) -> Ctap2StatusCode {
|
||||
use persistent_store::StoreError::*;
|
||||
match error {
|
||||
StoreError::StoreFull => Ctap2StatusCode::CTAP2_ERR_KEY_STORE_FULL,
|
||||
StoreError::InvalidTag => unreachable!(),
|
||||
StoreError::InvalidPrecondition => unreachable!(),
|
||||
// This error is expected. The store is full.
|
||||
NoCapacity => Ctap2StatusCode::CTAP2_ERR_KEY_STORE_FULL,
|
||||
// This error is expected. The flash has reached its end of life.
|
||||
NoLifetime => Ctap2StatusCode::CTAP2_ERR_KEY_STORE_FULL,
|
||||
// This error is expected if we don't satisfy the store preconditions. For example we
|
||||
// try to store a credential which is too long.
|
||||
InvalidArgument => Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR,
|
||||
// This error is not expected. The storage has been tampered with. We could erase the
|
||||
// storage.
|
||||
InvalidStorage => Ctap2StatusCode::CTAP2_ERR_VENDOR_INVALID_PERSISTENT_STORAGE,
|
||||
// This error is not expected. The kernel is failing our syscalls.
|
||||
StorageError => Ctap2StatusCode::CTAP1_ERR_OTHER,
|
||||
}
|
||||
}
|
||||
}
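Thanks to this From impl, store errors propagate with `?` from any function returning Result<_, Ctap2StatusCode>. A minimal illustration (the function name is made up; the find signature is as used elsewhere in this diff):

// Illustrative only: the `?` below converts persistent_store::StoreError into
// Ctap2StatusCode through the From impl above.
fn read_counter(
    store: &persistent_store::Store<Storage>,
) -> Result<Option<Vec<u8>>, Ctap2StatusCode> {
    Ok(store.find(key::GLOBAL_SIGNATURE_COUNTER)?)
}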
|
||||
|
||||
fn should_reset(entry: &StoreEntry<'_>) -> bool {
|
||||
match entry.tag {
|
||||
ATTESTATION_PRIVATE_KEY | ATTESTATION_CERTIFICATE | AAGUID => false,
|
||||
_ => true,
|
||||
/// Iterator for credentials.
|
||||
struct IterCredentials<'a> {
|
||||
/// The store being iterated.
|
||||
store: &'a persistent_store::Store<Storage>,
|
||||
|
||||
/// The store iterator.
|
||||
iter: persistent_store::StoreIter<'a, Storage>,
|
||||
|
||||
/// The iteration result.
|
||||
///
|
||||
/// It starts as success and gets written at most once with an error if something fails. The
|
||||
/// iteration stops as soon as an error is encountered.
|
||||
result: &'a mut Result<(), Ctap2StatusCode>,
|
||||
}
|
||||
|
||||
impl<'a> IterCredentials<'a> {
|
||||
/// Creates a credential iterator.
|
||||
fn new(
|
||||
store: &'a persistent_store::Store<Storage>,
|
||||
result: &'a mut Result<(), Ctap2StatusCode>,
|
||||
) -> Result<IterCredentials<'a>, Ctap2StatusCode> {
|
||||
let iter = store.iter()?;
|
||||
Ok(IterCredentials {
|
||||
store,
|
||||
iter,
|
||||
result,
|
||||
})
|
||||
}
|
||||
|
||||
/// Marks the iteration as failed if the content is absent.
|
||||
///
|
||||
/// For convenience, the function takes and returns ownership instead of taking a shared
|
||||
/// reference and returning nothing. This allows using it in both expressions and statements
|
||||
/// instead of statements only.
|
||||
fn unwrap<T>(&mut self, x: Option<T>) -> Option<T> {
|
||||
if x.is_none() {
|
||||
*self.result = Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INVALID_PERSISTENT_STORAGE);
|
||||
}
|
||||
x
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> Iterator for IterCredentials<'a> {
|
||||
type Item = (usize, PublicKeyCredentialSource);
|
||||
|
||||
fn next(&mut self) -> Option<(usize, PublicKeyCredentialSource)> {
|
||||
if self.result.is_err() {
|
||||
return None;
|
||||
}
|
||||
while let Some(next) = self.iter.next() {
|
||||
let handle = self.unwrap(next.ok())?;
|
||||
let key = handle.get_key();
|
||||
if !key::CREDENTIALS.contains(&key) {
|
||||
continue;
|
||||
}
|
||||
let value = self.unwrap(handle.get_value(&self.store).ok())?;
|
||||
let credential = self.unwrap(deserialize_credential(&value))?;
|
||||
return Some((key, credential));
|
||||
}
|
||||
None
|
||||
}
|
||||
}
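The `result` out-parameter keeps the iterator's Item type free of Result. A condensed, store-independent sketch of the same pattern (all names here are illustrative):

// Condensed sketch of the error-capturing iterator pattern used above: the
// iterator yields plain items and records the first failure in `result`,
// then stops.
struct CheckedIter<'a, I> {
    inner: I,
    result: &'a mut Result<(), &'static str>,
}

impl<'a, I: Iterator<Item = Result<u32, &'static str>>> Iterator for CheckedIter<'a, I> {
    type Item = u32;

    fn next(&mut self) -> Option<u32> {
        if self.result.is_err() {
            return None; // Stop as soon as an error was recorded.
        }
        match self.inner.next()? {
            Ok(value) => Some(value),
            Err(error) => {
                *self.result = Err(error);
                None
            }
        }
    }
}

fn sum_checked(items: &[Result<u32, &'static str>]) -> Result<u32, &'static str> {
    let mut result = Ok(());
    let sum: u32 = CheckedIter { inner: items.iter().cloned(), result: &mut result }.sum();
    result.map(|()| sum)
}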
|
||||
|
||||
/// Deserializes a credential from storage representation.
|
||||
fn deserialize_credential(data: &[u8]) -> Option<PublicKeyCredentialSource> {
|
||||
let cbor = cbor::read(data).ok()?;
|
||||
cbor.try_into().ok()
|
||||
}
|
||||
|
||||
/// Serializes a credential to storage representation.
|
||||
fn serialize_credential(credential: PublicKeyCredentialSource) -> Result<Vec<u8>, Ctap2StatusCode> {
|
||||
let mut data = Vec::new();
|
||||
if cbor::write(credential.into(), &mut data) {
|
||||
@@ -648,6 +578,7 @@ fn serialize_credential(credential: PublicKeyCredentialSource) -> Result<Vec<u8>
|
||||
}
|
||||
}
|
||||
|
||||
/// TODO: Help from reviewer needed for documentation.
|
||||
#[cfg(feature = "with_ctap2_1")]
|
||||
fn _deserialize_min_pin_length_rp_ids(data: &[u8]) -> Option<Vec<String>> {
|
||||
let cbor = cbor::read(data).ok()?;
|
||||
@@ -659,6 +590,7 @@ fn _deserialize_min_pin_length_rp_ids(data: &[u8]) -> Option<Vec<String>> {
|
||||
.ok()
|
||||
}
|
||||
|
||||
/// TODO: Help from reviewer needed for documentation.
|
||||
#[cfg(feature = "with_ctap2_1")]
|
||||
fn _serialize_min_pin_length_rp_ids(rp_ids: Vec<String>) -> Result<Vec<u8>, Ctap2StatusCode> {
|
||||
let mut data = Vec::new();
|
||||
@@ -693,28 +625,6 @@ mod test {
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn format_overhead() {
|
||||
// nRF52840 NVMC
|
||||
const WORD_SIZE: usize = 4;
|
||||
const PAGE_SIZE: usize = 0x1000;
|
||||
const NUM_PAGES: usize = 100;
|
||||
let store = vec![0xff; NUM_PAGES * PAGE_SIZE].into_boxed_slice();
|
||||
let options = embedded_flash::BufferOptions {
|
||||
word_size: WORD_SIZE,
|
||||
page_size: PAGE_SIZE,
|
||||
max_word_writes: 2,
|
||||
max_page_erases: 10000,
|
||||
strict_write: true,
|
||||
};
|
||||
let storage = Storage::new(store, options);
|
||||
let store = embedded_flash::Store::new(storage, Config).unwrap();
|
||||
// We can replace 3 bytes with minimal overhead.
|
||||
assert_eq!(store.replace_len(false, 0), 2 * WORD_SIZE);
|
||||
assert_eq!(store.replace_len(false, 3), 3 * WORD_SIZE);
|
||||
assert_eq!(store.replace_len(false, 4), 3 * WORD_SIZE);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_store() {
|
||||
let mut rng = ThreadRng256 {};
|
||||
@@ -974,21 +884,21 @@ mod test {
|
||||
let mut persistent_store = PersistentStore::new(&mut rng);
|
||||
|
||||
// The pin retries is initially at the maximum.
|
||||
assert_eq!(persistent_store.pin_retries().unwrap(), MAX_PIN_RETRIES);
|
||||
assert_eq!(persistent_store.pin_retries(), Ok(MAX_PIN_RETRIES));
|
||||
|
||||
// Decrementing the pin retries decrements the pin retries.
|
||||
for pin_retries in (0..MAX_PIN_RETRIES).rev() {
|
||||
persistent_store.decr_pin_retries().unwrap();
|
||||
assert_eq!(persistent_store.pin_retries().unwrap(), pin_retries);
|
||||
assert_eq!(persistent_store.pin_retries(), Ok(pin_retries));
|
||||
}
|
||||
|
||||
// Decrementing the pin retries after zero does not modify the pin retries.
|
||||
persistent_store.decr_pin_retries().unwrap();
|
||||
assert_eq!(persistent_store.pin_retries().unwrap(), 0);
|
||||
assert_eq!(persistent_store.pin_retries(), Ok(0));
|
||||
|
||||
// Resetting the pin retries resets the pin retries.
|
||||
persistent_store.reset_pin_retries().unwrap();
|
||||
assert_eq!(persistent_store.pin_retries().unwrap(), MAX_PIN_RETRIES);
|
||||
assert_eq!(persistent_store.pin_retries(), Ok(MAX_PIN_RETRIES));
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -1018,7 +928,7 @@ mod test {
|
||||
// The persistent keys stay initialized and preserve their value after a reset.
|
||||
persistent_store.reset(&mut rng).unwrap();
|
||||
assert_eq!(
|
||||
persistent_store.attestation_private_key().unwrap().unwrap(),
|
||||
&persistent_store.attestation_private_key().unwrap().unwrap(),
|
||||
key_material::ATTESTATION_PRIVATE_KEY
|
||||
);
|
||||
assert_eq!(
|
||||
|
||||
src/ctap/storage/key.rs (new file, 135 lines)
@@ -0,0 +1,135 @@
|
||||
// Copyright 2019-2020 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
/// Number of keys that persist across the CTAP reset command.
|
||||
pub const NUM_PERSISTENT_KEYS: usize = 20;
|
||||
|
||||
/// Defines a key given its name and value or range of values.
|
||||
macro_rules! make_key {
|
||||
($(#[$doc: meta])* $name: ident = $key: literal..$end: literal) => {
|
||||
$(#[$doc])* pub const $name: core::ops::Range<usize> = $key..$end;
|
||||
};
|
||||
($(#[$doc: meta])* $name: ident = $key: literal) => {
|
||||
$(#[$doc])* pub const $name: usize = $key;
|
||||
};
|
||||
}
|
||||
|
||||
/// Returns the range of values of a key given its value description.
|
||||
#[cfg(test)]
|
||||
macro_rules! make_range {
|
||||
($key: literal..$end: literal) => {
|
||||
$key..$end
|
||||
};
|
||||
($key: literal) => {
|
||||
$key..$key + 1
|
||||
};
|
||||
}
|
||||
|
||||
/// Helper to define keys as a partial partition of a range.
|
||||
macro_rules! make_partition {
|
||||
($range: expr,
|
||||
$(
|
||||
$(#[$doc: meta])*
|
||||
$name: ident = $key: literal $(.. $end: literal)?;
|
||||
)*) => {
|
||||
$(
|
||||
make_key!($(#[$doc])* $name = $key $(.. $end)?);
|
||||
)*
|
||||
#[cfg(test)]
|
||||
const KEY_RANGE: core::ops::Range<usize> = $range;
|
||||
#[cfg(test)]
|
||||
const ALL_KEYS: &[core::ops::Range<usize>] = &[$(make_range!($key $(.. $end)?)),*];
|
||||
};
|
||||
}
|
||||
|
||||
make_partition! {
|
||||
// We reserve 0 and 2048+ for possible migration purposes. We add persistent entries starting
|
||||
// from 1 and going up. We add non-persistent entries starting from 2047 and going down. This
|
||||
// way, we don't commit to a fixed number of persistent keys.
|
||||
1..2048,
|
||||
|
||||
// WARNING: Keys should not be deleted but prefixed with `_` to avoid accidentally reusing them.
|
||||
|
||||
/// The attestation private key.
|
||||
ATTESTATION_PRIVATE_KEY = 1;
|
||||
|
||||
/// The attestation certificate.
|
||||
ATTESTATION_CERTIFICATE = 2;
|
||||
|
||||
/// The aaguid.
|
||||
AAGUID = 3;
|
||||
|
||||
// This is the persistent key limit:
|
||||
// - When adding a (persistent) key above this message, make sure its value is smaller than
|
||||
// NUM_PERSISTENT_KEYS.
|
||||
// - When adding a (non-persistent) key below this message, make sure its value is greater
//   than or equal to NUM_PERSISTENT_KEYS.
|
||||
|
||||
/// The credentials.
|
||||
///
|
||||
/// Depending on `MAX_SUPPORTED_RESIDENTIAL_KEYS`, only a prefix of those keys is used. Each
|
||||
/// board may configure `MAX_SUPPORTED_RESIDENTIAL_KEYS` depending on the storage size.
|
||||
CREDENTIALS = 1700..2000;
|
||||
|
||||
/// TODO: Help from reviewer needed for documentation.
|
||||
_MIN_PIN_LENGTH_RP_IDS = 2042;
|
||||
|
||||
/// The minimum PIN length.
|
||||
#[cfg(feature = "with_ctap2_1")]
|
||||
MIN_PIN_LENGTH = 2043;
|
||||
|
||||
/// The number of PIN retries.
|
||||
///
|
||||
/// If the entry is absent, the number of PIN retries is `MAX_PIN_RETRIES`.
|
||||
PIN_RETRIES = 2044;
|
||||
|
||||
/// The PIN hash.
|
||||
///
|
||||
/// If the entry is absent, there is no PIN set.
|
||||
PIN_HASH = 2045;
|
||||
|
||||
/// The encryption and hmac keys.
|
||||
///
|
||||
/// This entry is always present. It is generated at startup if absent. This is not a persistent
|
||||
/// key because its value should change after a CTAP reset.
|
||||
MASTER_KEYS = 2046;
|
||||
|
||||
/// The global signature counter.
|
||||
///
|
||||
/// If the entry is absent, the counter is 0.
|
||||
GLOBAL_SIGNATURE_COUNTER = 2047;
|
||||
}
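For readers unfamiliar with these macros, the invocation above expands to plain constants plus the test-only bookkeeping. An approximate sketch of the expansion for a few of the keys (doc comments and cfg attributes elided):

// Approximate expansion of make_partition! for a subset of the keys above.
pub const ATTESTATION_PRIVATE_KEY: usize = 1;
pub const AAGUID: usize = 3;
pub const CREDENTIALS: core::ops::Range<usize> = 1700..2000;
pub const GLOBAL_SIGNATURE_COUNTER: usize = 2047;

// Test-only bookkeeping used by the keys_are_disjoint test below.
#[cfg(test)]
const KEY_RANGE: core::ops::Range<usize> = 1..2048;
#[cfg(test)]
const ALL_KEYS: &[core::ops::Range<usize>] = &[1..2, 3..4, 1700..2000, 2047..2048];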
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn enough_credentials() {
|
||||
use super::super::MAX_SUPPORTED_RESIDENTIAL_KEYS;
|
||||
assert!(MAX_SUPPORTED_RESIDENTIAL_KEYS <= CREDENTIALS.end - CREDENTIALS.start);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn keys_are_disjoint() {
|
||||
// Check that keys are in the range.
|
||||
for keys in ALL_KEYS {
|
||||
assert!(KEY_RANGE.start <= keys.start && keys.end <= KEY_RANGE.end);
|
||||
}
|
||||
// Check that keys are assigned at most once, essentially partitioning the range.
|
||||
for key in KEY_RANGE {
|
||||
assert!(ALL_KEYS.iter().filter(|keys| keys.contains(&key)).count() <= 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,457 +0,0 @@
|
||||
// Copyright 2019 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use super::{Index, Storage, StorageError, StorageResult};
|
||||
use alloc::boxed::Box;
|
||||
use alloc::vec;
|
||||
|
||||
pub struct BufferStorage {
|
||||
storage: Box<[u8]>,
|
||||
options: BufferOptions,
|
||||
word_writes: Box<[usize]>,
|
||||
page_erases: Box<[usize]>,
|
||||
snapshot: Snapshot,
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Debug)]
|
||||
pub struct BufferOptions {
|
||||
/// Size of a word in bytes.
|
||||
pub word_size: usize,
|
||||
|
||||
/// Size of a page in bytes.
|
||||
pub page_size: usize,
|
||||
|
||||
/// How many times a word can be written between page erasures
|
||||
pub max_word_writes: usize,
|
||||
|
||||
/// How many times a page can be erased.
|
||||
pub max_page_erases: usize,
|
||||
|
||||
/// Bits cannot be written from 0 to 1.
|
||||
pub strict_write: bool,
|
||||
}
|
||||
|
||||
impl BufferStorage {
|
||||
/// Creates a fake embedded flash using a buffer.
|
||||
///
|
||||
/// This implementation checks that no words are written more than `max_word_writes` between
|
||||
/// page erasures and that no pages are erased more than `max_page_erases`. If `strict_write` is
|
||||
/// true, it also checks that no bits are written from 0 to 1. It also allows taking snapshots
|
||||
/// of the storage during write and erase operations (although words would still be written or
|
||||
/// erased completely).
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// The following preconditions must hold:
|
||||
/// - `options.word_size` must be a power of two.
|
||||
/// - `options.page_size` must be a power of two.
|
||||
/// - `options.page_size` must be word-aligned.
|
||||
/// - `storage.len()` must be page-aligned.
|
||||
pub fn new(storage: Box<[u8]>, options: BufferOptions) -> BufferStorage {
|
||||
assert!(options.word_size.is_power_of_two());
|
||||
assert!(options.page_size.is_power_of_two());
|
||||
let num_words = storage.len() / options.word_size;
|
||||
let num_pages = storage.len() / options.page_size;
|
||||
let buffer = BufferStorage {
|
||||
storage,
|
||||
options,
|
||||
word_writes: vec![0; num_words].into_boxed_slice(),
|
||||
page_erases: vec![0; num_pages].into_boxed_slice(),
|
||||
snapshot: Snapshot::Ready,
|
||||
};
|
||||
assert!(buffer.is_word_aligned(buffer.options.page_size));
|
||||
assert!(buffer.is_page_aligned(buffer.storage.len()));
|
||||
buffer
|
||||
}
|
||||
|
||||
/// Takes a snapshot of the storage after a given amount of word operations.
|
||||
///
|
||||
/// Each time a word is written or erased, the delay is decremented if positive. Otherwise, a
|
||||
/// snapshot is taken before the operation is executed.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// Panics if a snapshot has been armed and not examined.
|
||||
pub fn arm_snapshot(&mut self, delay: usize) {
|
||||
self.snapshot.arm(delay);
|
||||
}
|
||||
|
||||
/// Unarms and returns the snapshot or the delay remaining.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// Panics if a snapshot was not armed.
|
||||
pub fn get_snapshot(&mut self) -> Result<Box<[u8]>, usize> {
|
||||
self.snapshot.get()
|
||||
}
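A minimal usage sketch of the snapshot hooks, assuming the BufferOptions and Index types from this module: arming with a delay of 1 makes the snapshot capture the flash state just before the second word-level operation.

// Illustrative test sketch: simulate a power loss during the second word write.
#[test]
fn snapshot_captures_state_before_second_word_write() {
    let options = BufferOptions {
        word_size: 4,
        page_size: 0x1000,
        max_word_writes: 2,
        max_page_erases: 10000,
        strict_write: true,
    };
    let mut buffer = BufferStorage::new(vec![0xff; 2 * 0x1000].into_boxed_slice(), options);
    buffer.arm_snapshot(1);
    // 8 bytes = 2 words: the first word is written, the snapshot is taken
    // just before the second word, then the second word is written.
    buffer
        .write_slice(Index { page: 0, byte: 0 }, &[0x00; 8])
        .unwrap();
    let snapshot = buffer.get_snapshot().expect("snapshot should have been taken");
    assert_eq!(&snapshot[..4], &[0x00; 4]); // first word already written
    assert_eq!(&snapshot[4..8], &[0xff; 4]); // second word still blank
}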
|
||||
|
||||
/// Takes a snapshot of the storage.
|
||||
pub fn take_snapshot(&self) -> Box<[u8]> {
|
||||
self.storage.clone()
|
||||
}
|
||||
|
||||
/// Returns the storage.
|
||||
pub fn get_storage(self) -> Box<[u8]> {
|
||||
self.storage
|
||||
}
|
||||
|
||||
fn is_word_aligned(&self, x: usize) -> bool {
|
||||
x & (self.options.word_size - 1) == 0
|
||||
}
|
||||
|
||||
fn is_page_aligned(&self, x: usize) -> bool {
|
||||
x & (self.options.page_size - 1) == 0
|
||||
}
|
||||
|
||||
/// Writes a slice to the storage.
|
||||
///
|
||||
/// The slice `value` is written to `index`. The `erase` boolean specifies whether this is an
|
||||
/// erase operation or a write operation which matters for the checks and updating the shadow
|
||||
/// storage. This also takes a snapshot of the storage if a snapshot was armed and the delay has
|
||||
/// elapsed.
|
||||
///
|
||||
/// The following preconditions should hold:
|
||||
/// - `index` is word-aligned.
|
||||
/// - `value.len()` is word-aligned.
|
||||
///
|
||||
/// The following checks are performed:
|
||||
/// - The region of length `value.len()` starting at `index` fits in a storage page.
|
||||
/// - A word is not written more than `max_word_writes`.
|
||||
/// - A page is not erased more than `max_page_erases`.
|
||||
/// - The new word only switches 1s to 0s (only if `strict_write` is set).
|
||||
fn update_storage(&mut self, index: Index, value: &[u8], erase: bool) -> StorageResult<()> {
|
||||
debug_assert!(self.is_word_aligned(index.byte) && self.is_word_aligned(value.len()));
|
||||
let dst = index.range(value.len(), self)?.step_by(self.word_size());
|
||||
let src = value.chunks(self.word_size());
|
||||
// Check and update page shadow.
|
||||
if erase {
|
||||
let page = index.page;
|
||||
assert!(self.page_erases[page] < self.max_page_erases());
|
||||
self.page_erases[page] += 1;
|
||||
}
|
||||
for (byte, val) in dst.zip(src) {
|
||||
let range = byte..byte + self.word_size();
|
||||
// The driver doesn't write identical words.
|
||||
if &self.storage[range.clone()] == val {
|
||||
continue;
|
||||
}
|
||||
// Check and update word shadow.
|
||||
let word = byte / self.word_size();
|
||||
if erase {
|
||||
self.word_writes[word] = 0;
|
||||
} else {
|
||||
assert!(self.word_writes[word] < self.max_word_writes());
|
||||
self.word_writes[word] += 1;
|
||||
}
|
||||
// Check strict write.
|
||||
if !erase && self.options.strict_write {
|
||||
for (byte, &val) in range.clone().zip(val) {
|
||||
assert_eq!(self.storage[byte] & val, val);
|
||||
}
|
||||
}
|
||||
// Take snapshot if armed and delay expired.
|
||||
self.snapshot.take(&self.storage);
|
||||
// Write storage
|
||||
self.storage[range].copy_from_slice(val);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl Storage for BufferStorage {
|
||||
fn word_size(&self) -> usize {
|
||||
self.options.word_size
|
||||
}
|
||||
|
||||
fn page_size(&self) -> usize {
|
||||
self.options.page_size
|
||||
}
|
||||
|
||||
fn num_pages(&self) -> usize {
|
||||
self.storage.len() / self.options.page_size
|
||||
}
|
||||
|
||||
fn max_word_writes(&self) -> usize {
|
||||
self.options.max_word_writes
|
||||
}
|
||||
|
||||
fn max_page_erases(&self) -> usize {
|
||||
self.options.max_page_erases
|
||||
}
|
||||
|
||||
fn read_slice(&self, index: Index, length: usize) -> StorageResult<&[u8]> {
|
||||
Ok(&self.storage[index.range(length, self)?])
|
||||
}
|
||||
|
||||
fn write_slice(&mut self, index: Index, value: &[u8]) -> StorageResult<()> {
|
||||
if !self.is_word_aligned(index.byte) || !self.is_word_aligned(value.len()) {
|
||||
return Err(StorageError::NotAligned);
|
||||
}
|
||||
self.update_storage(index, value, false)
|
||||
}
|
||||
|
||||
fn erase_page(&mut self, page: usize) -> StorageResult<()> {
|
||||
let index = Index { page, byte: 0 };
|
||||
let value = vec![0xff; self.page_size()];
|
||||
self.update_storage(index, &value, true)
|
||||
}
|
||||
}
|
||||
|
||||
// Controls when a snapshot of the storage is taken.
|
||||
//
|
||||
// This can be used to simulate power-offs while the device is writing to the storage or erasing a
|
||||
// page in the storage.
|
||||
enum Snapshot {
|
||||
// Mutable word operations have normal behavior.
|
||||
Ready,
|
||||
// If the delay is positive, mutable word operations decrement it. If the delay is zero, mutable
|
||||
// word operations take a snapshot of the storage.
|
||||
Armed { delay: usize },
|
||||
// Mutable word operations have normal behavior.
|
||||
Taken { storage: Box<[u8]> },
|
||||
}
|
||||
|
||||
impl Snapshot {
|
||||
fn arm(&mut self, delay: usize) {
|
||||
match self {
|
||||
Snapshot::Ready => *self = Snapshot::Armed { delay },
|
||||
_ => panic!(),
|
||||
}
|
||||
}
|
||||
|
||||
fn get(&mut self) -> Result<Box<[u8]>, usize> {
|
||||
let mut snapshot = Snapshot::Ready;
|
||||
core::mem::swap(self, &mut snapshot);
|
||||
match snapshot {
|
||||
Snapshot::Armed { delay } => Err(delay),
|
||||
Snapshot::Taken { storage } => Ok(storage),
|
||||
_ => panic!(),
|
||||
}
|
||||
}
|
||||
|
||||
fn take(&mut self, storage: &[u8]) {
|
||||
if let Snapshot::Armed { delay } = self {
|
||||
if *delay == 0 {
|
||||
let storage = storage.to_vec().into_boxed_slice();
|
||||
*self = Snapshot::Taken { storage };
|
||||
} else {
|
||||
*delay -= 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
const NUM_PAGES: usize = 2;
|
||||
const OPTIONS: BufferOptions = BufferOptions {
|
||||
word_size: 4,
|
||||
page_size: 16,
|
||||
max_word_writes: 2,
|
||||
max_page_erases: 3,
|
||||
strict_write: true,
|
||||
};
|
||||
// Those words are decreasing bit patterns. Bits are only changed from 1 to 0 and at least one
|
||||
// bit is changed.
|
||||
const BLANK_WORD: &[u8] = &[0xff, 0xff, 0xff, 0xff];
|
||||
const FIRST_WORD: &[u8] = &[0xee, 0xdd, 0xbb, 0x77];
|
||||
const SECOND_WORD: &[u8] = &[0xca, 0xc9, 0xa9, 0x65];
|
||||
const THIRD_WORD: &[u8] = &[0x88, 0x88, 0x88, 0x44];
|
||||
|
||||
fn new_storage() -> Box<[u8]> {
|
||||
vec![0xff; NUM_PAGES * OPTIONS.page_size].into_boxed_slice()
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn words_are_decreasing() {
|
||||
fn assert_is_decreasing(prev: &[u8], next: &[u8]) {
|
||||
for (&prev, &next) in prev.iter().zip(next.iter()) {
|
||||
assert_eq!(prev & next, next);
|
||||
assert!(prev != next);
|
||||
}
|
||||
}
|
||||
assert_is_decreasing(BLANK_WORD, FIRST_WORD);
|
||||
assert_is_decreasing(FIRST_WORD, SECOND_WORD);
|
||||
assert_is_decreasing(SECOND_WORD, THIRD_WORD);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn options_ok() {
|
||||
let buffer = BufferStorage::new(new_storage(), OPTIONS);
|
||||
assert_eq!(buffer.word_size(), OPTIONS.word_size);
|
||||
assert_eq!(buffer.page_size(), OPTIONS.page_size);
|
||||
assert_eq!(buffer.num_pages(), NUM_PAGES);
|
||||
assert_eq!(buffer.max_word_writes(), OPTIONS.max_word_writes);
|
||||
assert_eq!(buffer.max_page_erases(), OPTIONS.max_page_erases);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn read_write_ok() {
|
||||
let mut buffer = BufferStorage::new(new_storage(), OPTIONS);
|
||||
let index = Index { page: 0, byte: 0 };
|
||||
let next_index = Index { page: 0, byte: 4 };
|
||||
assert_eq!(buffer.read_slice(index, 4).unwrap(), BLANK_WORD);
|
||||
buffer.write_slice(index, FIRST_WORD).unwrap();
|
||||
assert_eq!(buffer.read_slice(index, 4).unwrap(), FIRST_WORD);
|
||||
assert_eq!(buffer.read_slice(next_index, 4).unwrap(), BLANK_WORD);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn erase_ok() {
|
||||
let mut buffer = BufferStorage::new(new_storage(), OPTIONS);
|
||||
let index = Index { page: 0, byte: 0 };
|
||||
let other_index = Index { page: 1, byte: 0 };
|
||||
buffer.write_slice(index, FIRST_WORD).unwrap();
|
||||
buffer.write_slice(other_index, FIRST_WORD).unwrap();
|
||||
assert_eq!(buffer.read_slice(index, 4).unwrap(), FIRST_WORD);
|
||||
assert_eq!(buffer.read_slice(other_index, 4).unwrap(), FIRST_WORD);
|
||||
buffer.erase_page(0).unwrap();
|
||||
assert_eq!(buffer.read_slice(index, 4).unwrap(), BLANK_WORD);
|
||||
assert_eq!(buffer.read_slice(other_index, 4).unwrap(), FIRST_WORD);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn invalid_range() {
|
||||
let mut buffer = BufferStorage::new(new_storage(), OPTIONS);
|
||||
let index = Index { page: 0, byte: 12 };
|
||||
let half_index = Index { page: 0, byte: 14 };
|
||||
let over_index = Index { page: 0, byte: 16 };
|
||||
let bad_page = Index { page: 2, byte: 0 };
|
||||
|
||||
// Reading a word in the storage is ok.
|
||||
assert!(buffer.read_slice(index, 4).is_ok());
|
||||
// Reading a half-word in the storage is ok.
|
||||
assert!(buffer.read_slice(half_index, 2).is_ok());
|
||||
// Reading even a single byte outside a page is not ok.
|
||||
assert!(buffer.read_slice(over_index, 1).is_err());
|
||||
// But reading an empty slice just after a page is ok.
|
||||
assert!(buffer.read_slice(over_index, 0).is_ok());
|
||||
// Reading even an empty slice outside the storage is not ok.
|
||||
assert!(buffer.read_slice(bad_page, 0).is_err());
|
||||
|
||||
// Writing a word in the storage is ok.
|
||||
assert!(buffer.write_slice(index, FIRST_WORD).is_ok());
|
||||
// Writing an unaligned word is not ok.
|
||||
assert!(buffer.write_slice(half_index, FIRST_WORD).is_err());
|
||||
// Writing a word outside a page is not ok.
|
||||
assert!(buffer.write_slice(over_index, FIRST_WORD).is_err());
|
||||
// But writing an empty slice just after a page is ok.
|
||||
assert!(buffer.write_slice(over_index, &[]).is_ok());
|
||||
// Writing even an empty slice outside the storage is not ok.
|
||||
assert!(buffer.write_slice(bad_page, &[]).is_err());
|
||||
|
||||
// Only pages in the storage can be erased.
|
||||
assert!(buffer.erase_page(0).is_ok());
|
||||
assert!(buffer.erase_page(2).is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn write_twice_ok() {
|
||||
let mut buffer = BufferStorage::new(new_storage(), OPTIONS);
|
||||
let index = Index { page: 0, byte: 4 };
|
||||
assert!(buffer.write_slice(index, FIRST_WORD).is_ok());
|
||||
assert!(buffer.write_slice(index, SECOND_WORD).is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn write_twice_and_once_ok() {
|
||||
let mut buffer = BufferStorage::new(new_storage(), OPTIONS);
|
||||
let index = Index { page: 0, byte: 0 };
|
||||
let next_index = Index { page: 0, byte: 4 };
|
||||
assert!(buffer.write_slice(index, FIRST_WORD).is_ok());
|
||||
assert!(buffer.write_slice(index, SECOND_WORD).is_ok());
|
||||
assert!(buffer.write_slice(next_index, THIRD_WORD).is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn write_three_times_panics() {
|
||||
let mut buffer = BufferStorage::new(new_storage(), OPTIONS);
|
||||
let index = Index { page: 0, byte: 4 };
|
||||
assert!(buffer.write_slice(index, FIRST_WORD).is_ok());
|
||||
assert!(buffer.write_slice(index, SECOND_WORD).is_ok());
|
||||
let _ = buffer.write_slice(index, THIRD_WORD);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn write_twice_then_once_ok() {
|
||||
let mut buffer = BufferStorage::new(new_storage(), OPTIONS);
|
||||
let index = Index { page: 0, byte: 0 };
|
||||
assert!(buffer.write_slice(index, FIRST_WORD).is_ok());
|
||||
assert!(buffer.write_slice(index, SECOND_WORD).is_ok());
|
||||
assert!(buffer.erase_page(0).is_ok());
|
||||
assert!(buffer.write_slice(index, FIRST_WORD).is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn erase_three_times_ok() {
|
||||
let mut buffer = BufferStorage::new(new_storage(), OPTIONS);
|
||||
assert!(buffer.erase_page(0).is_ok());
|
||||
assert!(buffer.erase_page(0).is_ok());
|
||||
assert!(buffer.erase_page(0).is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn erase_three_times_and_once_ok() {
|
||||
let mut buffer = BufferStorage::new(new_storage(), OPTIONS);
|
||||
assert!(buffer.erase_page(0).is_ok());
|
||||
assert!(buffer.erase_page(0).is_ok());
|
||||
assert!(buffer.erase_page(0).is_ok());
|
||||
assert!(buffer.erase_page(1).is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn erase_four_times_panics() {
|
||||
let mut buffer = BufferStorage::new(new_storage(), OPTIONS);
|
||||
assert!(buffer.erase_page(0).is_ok());
|
||||
assert!(buffer.erase_page(0).is_ok());
|
||||
assert!(buffer.erase_page(0).is_ok());
|
||||
let _ = buffer.erase_page(0).is_ok();
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn switch_zero_to_one_panics() {
|
||||
let mut buffer = BufferStorage::new(new_storage(), OPTIONS);
|
||||
let index = Index { page: 0, byte: 0 };
|
||||
assert!(buffer.write_slice(index, SECOND_WORD).is_ok());
|
||||
let _ = buffer.write_slice(index, FIRST_WORD);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn get_storage_ok() {
|
||||
let mut buffer = BufferStorage::new(new_storage(), OPTIONS);
|
||||
let index = Index { page: 0, byte: 4 };
|
||||
buffer.write_slice(index, FIRST_WORD).unwrap();
|
||||
let storage = buffer.get_storage();
|
||||
assert_eq!(&storage[..4], BLANK_WORD);
|
||||
assert_eq!(&storage[4..8], FIRST_WORD);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn snapshot_ok() {
|
||||
let mut buffer = BufferStorage::new(new_storage(), OPTIONS);
|
||||
let index = Index { page: 0, byte: 0 };
|
||||
let value = [FIRST_WORD, SECOND_WORD].concat();
|
||||
buffer.arm_snapshot(1);
|
||||
buffer.write_slice(index, &value).unwrap();
|
||||
let storage = buffer.get_snapshot().unwrap();
|
||||
assert_eq!(&storage[..8], &[FIRST_WORD, BLANK_WORD].concat()[..]);
|
||||
let storage = buffer.take_snapshot();
|
||||
assert_eq!(&storage[..8], &value[..]);
|
||||
}
|
||||
}
|
||||
@@ -1,4 +1,4 @@
// Copyright 2019 Google LLC
// Copyright 2019-2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -12,16 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#[cfg(any(test, feature = "ram_storage"))]
mod buffer;
mod storage;
mod store;
#[cfg(not(any(test, feature = "ram_storage")))]
mod syscall;

#[cfg(any(test, feature = "ram_storage"))]
pub use self::buffer::{BufferOptions, BufferStorage};
pub use self::storage::{Index, Storage, StorageError, StorageResult};
pub use self::store::{Store, StoreConfig, StoreEntry, StoreError, StoreIndex};
#[cfg(not(any(test, feature = "ram_storage")))]
pub use self::syscall::SyscallStorage;

@@ -1,107 +0,0 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#[derive(Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "std", derive(Debug))]
pub struct Index {
    pub page: usize,
    pub byte: usize,
}

#[derive(Debug)]
pub enum StorageError {
    BadFlash,
    NotAligned,
    OutOfBounds,
    KernelError { code: isize },
}

pub type StorageResult<T> = Result<T, StorageError>;

/// Abstraction for embedded flash storage.
pub trait Storage {
    /// Returns the size of a word in bytes.
    fn word_size(&self) -> usize;

    /// Returns the size of a page in bytes.
    fn page_size(&self) -> usize;

    /// Returns the number of pages in the storage.
    fn num_pages(&self) -> usize;

    /// Returns how many times a word can be written between page erasures.
    fn max_word_writes(&self) -> usize;

    /// Returns how many times a page can be erased in the lifetime of the flash.
    fn max_page_erases(&self) -> usize;

    /// Reads a slice from the storage.
    ///
    /// The slice does not need to be word-aligned.
    ///
    /// # Errors
    ///
    /// The `index` must designate `length` bytes in the storage.
    fn read_slice(&self, index: Index, length: usize) -> StorageResult<&[u8]>;

    /// Writes a word-aligned slice to the storage.
    ///
    /// The written words should not have been written too many times since the last page erasure.
    ///
    /// # Errors
    ///
    /// The following preconditions must hold:
    /// - `index` must be word-aligned.
    /// - `value.len()` must be a multiple of the word size.
    /// - `index` must designate `value.len()` bytes in the storage.
    /// - `value` must be in memory until [read-only allow][tock_1274] is resolved.
    ///
    /// [tock_1274]: https://github.com/tock/tock/issues/1274.
    fn write_slice(&mut self, index: Index, value: &[u8]) -> StorageResult<()>;

    /// Erases a page of the storage.
    ///
    /// # Errors
    ///
    /// The `page` must be in the storage.
    fn erase_page(&mut self, page: usize) -> StorageResult<()>;
}

impl Index {
    /// Returns whether a slice fits in a storage page.
    fn is_valid(self, length: usize, storage: &impl Storage) -> bool {
        self.page < storage.num_pages()
            && storage
                .page_size()
                .checked_sub(length)
                .map(|limit| self.byte <= limit)
                .unwrap_or(false)
    }

    /// Returns the range of a valid slice.
    ///
    /// The range starts at `self` with `length` bytes.
    pub fn range(
        self,
        length: usize,
        storage: &impl Storage,
    ) -> StorageResult<core::ops::Range<usize>> {
        if self.is_valid(length, storage) {
            let start = self.page * storage.page_size() + self.byte;
            Ok(start..start + length)
        } else {
            Err(StorageError::OutOfBounds)
        }
    }
}
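As a point of reference, the bounds check in `Index::is_valid` and the flat offset computed by `Index::range` above boil down to the following standalone sketch (illustrative function and names only, not part of the old or new store API):

fn flat_range(page: usize, byte: usize, length: usize, page_size: usize, num_pages: usize)
    -> Option<core::ops::Range<usize>> {
    // The slice must lie in an existing page and must not cross the page boundary.
    let fits = page < num_pages
        && page_size.checked_sub(length).map_or(false, |limit| byte <= limit);
    if fits {
        // Pages are laid out back to back, so the flat offset is page * page_size + byte.
        let start = page * page_size + byte;
        Some(start..start + length)
    } else {
        None
    }
}

fn main() {
    // With the 2-page, 16-byte-page geometry used in the buffer tests above:
    assert_eq!(flat_range(1, 4, 4, 16, 2), Some(20..24));
    assert_eq!(flat_range(0, 16, 0, 16, 2), Some(16..16)); // empty slice just after a page
    assert_eq!(flat_range(2, 0, 0, 16, 2), None); // page out of bounds
}

The `checked_sub` keeps the comparison overflow-safe when `length` exceeds the page size.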
@@ -1,177 +0,0 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/// Defines a consecutive sequence of bits.
#[derive(Copy, Clone)]
pub struct BitRange {
    /// The first bit of the sequence.
    pub start: usize,

    /// The length in bits of the sequence.
    pub length: usize,
}

impl BitRange {
    /// Returns the first bit following a bit range.
    pub fn end(self) -> usize {
        self.start + self.length
    }
}

/// Defines a consecutive sequence of bytes.
///
/// The bits in those bytes are ignored, which essentially creates a gap in a sequence of bits. The
/// gap is necessarily at byte boundaries. This is used to ignore the user data in an entry,
/// providing a view of the entry information (header and footer).
#[derive(Copy, Clone)]
pub struct ByteGap {
    pub start: usize,
    pub length: usize,
}

/// Empty gap. All bits count.
pub const NO_GAP: ByteGap = ByteGap {
    start: 0,
    length: 0,
};

impl ByteGap {
    /// Translates a bit to skip the gap.
    fn shift(self, bit: usize) -> usize {
        if bit < 8 * self.start {
            bit
        } else {
            bit + 8 * self.length
        }
    }

    /// Returns the slice of `data` corresponding to the gap.
    pub fn slice(self, data: &[u8]) -> &[u8] {
        &data[self.start..self.start + self.length]
    }
}

/// Returns whether a bit is zero in a sequence of bits.
///
/// The sequence of bits is little-endian (both for bytes and bits) and defined by the bits that
/// are in `data` but not in `gap`.
pub fn is_zero(bit: usize, data: &[u8], gap: ByteGap) -> bool {
    let bit = gap.shift(bit);
    debug_assert!(bit < 8 * data.len());
    data[bit / 8] & (1 << (bit % 8)) == 0
}

/// Sets a bit to zero in a sequence of bits.
///
/// The sequence of bits is little-endian (both for bytes and bits) and defined by the bits that
/// are in `data` but not in `gap`.
pub fn set_zero(bit: usize, data: &mut [u8], gap: ByteGap) {
    let bit = gap.shift(bit);
    debug_assert!(bit < 8 * data.len());
    data[bit / 8] &= !(1 << (bit % 8));
}

/// Returns a little-endian value in a sequence of bits.
///
/// The sequence of bits is little-endian (both for bytes and bits) and defined by the bits that
/// are in `data` but not in `gap`. The range of bits where the value is stored is defined by
/// `range`. The value must fit in a `usize`.
pub fn get_range(range: BitRange, data: &[u8], gap: ByteGap) -> usize {
    debug_assert!(range.length <= 8 * core::mem::size_of::<usize>());
    let mut result = 0;
    for i in 0..range.length {
        if !is_zero(range.start + i, data, gap) {
            result |= 1 << i;
        }
    }
    result
}

/// Sets a little-endian value in a sequence of bits.
///
/// The sequence of bits is little-endian (both for bytes and bits) and defined by the bits that
/// are in `data` but not in `gap`. The range of bits where the value is stored is defined by
/// `range`. The bits set to 1 in `value` must also be set to 1 in the sequence of bits.
pub fn set_range(range: BitRange, data: &mut [u8], gap: ByteGap, value: usize) {
    debug_assert!(range.length == 8 * core::mem::size_of::<usize>() || value < 1 << range.length);
    for i in 0..range.length {
        if value & 1 << i == 0 {
            set_zero(range.start + i, data, gap);
        }
    }
}

/// Tests the `is_zero` and `set_zero` pair of functions.
#[test]
fn zero_ok() {
    const GAP: ByteGap = ByteGap {
        start: 2,
        length: 1,
    };
    for i in 0..24 {
        assert!(!is_zero(i, &[0xffu8, 0xff, 0x00, 0xff] as &[u8], GAP));
    }
    // Tests reading and setting a bit. The result should have all bits set to 1 except for the bit
    // to test and the gap.
    fn test(bit: usize, result: &[u8]) {
        assert!(is_zero(bit, result, GAP));
        let mut data = vec![0xff; result.len()];
        // Set the gap bits to 0.
        for i in 0..GAP.length {
            data[GAP.start + i] = 0x00;
        }
        set_zero(bit, &mut data, GAP);
        assert_eq!(data, result);
    }
    test(0, &[0xfe, 0xff, 0x00, 0xff]);
    test(1, &[0xfd, 0xff, 0x00, 0xff]);
    test(2, &[0xfb, 0xff, 0x00, 0xff]);
    test(7, &[0x7f, 0xff, 0x00, 0xff]);
    test(8, &[0xff, 0xfe, 0x00, 0xff]);
    test(15, &[0xff, 0x7f, 0x00, 0xff]);
    test(16, &[0xff, 0xff, 0x00, 0xfe]);
    test(17, &[0xff, 0xff, 0x00, 0xfd]);
    test(23, &[0xff, 0xff, 0x00, 0x7f]);
}

/// Tests the `get_range` and `set_range` pair of functions.
#[test]
fn range_ok() {
    // Tests reading and setting a range. The result should have all bits set to 1 except for the
    // range to test and the gap.
    fn test(start: usize, length: usize, value: usize, result: &[u8], gap: ByteGap) {
        let range = BitRange { start, length };
        assert_eq!(get_range(range, result, gap), value);
        let mut data = vec![0xff; result.len()];
        for i in 0..gap.length {
            data[gap.start + i] = 0x00;
        }
        set_range(range, &mut data, gap, value);
        assert_eq!(data, result);
    }
    test(0, 8, 42, &[42], NO_GAP);
    test(3, 12, 0b11_0101, &[0b1010_1111, 0b1000_0001], NO_GAP);
    test(0, 16, 0x1234, &[0x34, 0x12], NO_GAP);
    test(4, 16, 0x1234, &[0x4f, 0x23, 0xf1], NO_GAP);
    let mut gap = ByteGap {
        start: 1,
        length: 1,
    };
    test(3, 12, 0b11_0101, &[0b1010_1111, 0x00, 0b1000_0001], gap);
    gap.length = 2;
    test(0, 16, 0x1234, &[0x34, 0x00, 0x00, 0x12], gap);
    gap.start = 2;
    gap.length = 1;
    test(4, 16, 0x1234, &[0x4f, 0x23, 0x00, 0xf1], gap);
}
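The gap translation performed by `ByteGap::shift` above can be illustrated with a small standalone sketch (a hypothetical helper mirroring the deleted code, not an API of either library): logical bit positions at or beyond `8 * start` are pushed past the gap, so an entry's footer bits skip over its user data bytes.

fn shift(bit: usize, gap_start: usize, gap_length: usize) -> usize {
    // Bits before the gap keep their position; bits after it are pushed past the gap.
    if bit < 8 * gap_start {
        bit
    } else {
        bit + 8 * gap_length
    }
}

fn main() {
    // With a one-byte gap at byte 2 (as in the `zero_ok` test), logical bit 15 stays in
    // byte 1, while logical bit 16 lands on physical bit 24, i.e. the first bit of byte 3.
    assert_eq!(shift(15, 2, 1), 15);
    assert_eq!(shift(16, 2, 1), 24);
}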
@@ -1,565 +0,0 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use super::super::{Index, Storage};
use super::{bitfield, StoreConfig, StoreEntry, StoreError};
use alloc::vec;
use alloc::vec::Vec;

/// Whether a user entry is a replace entry.
pub enum IsReplace {
    /// This is a replace entry.
    Replace,

    /// This is an insert entry.
    Insert,
}

/// Helpers to parse the store format.
///
/// See the store module-level documentation for information about the format.
pub struct Format {
    pub word_size: usize,
    pub page_size: usize,
    pub num_pages: usize,
    pub max_page_erases: usize,
    pub num_tags: usize,

    /// Whether an entry is present.
    ///
    /// - 0 for entries (user entries or internal entries).
    /// - 1 for free space until the end of the page.
    present_bit: usize,

    /// Whether an entry is deleted.
    ///
    /// - 0 for deleted entries.
    /// - 1 for alive entries.
    deleted_bit: usize,

    /// Whether an entry is internal.
    ///
    /// - 0 for internal entries.
    /// - 1 for user entries.
    internal_bit: usize,

    /// Whether a user entry is a replace entry.
    ///
    /// - 0 for replace entries.
    /// - 1 for insert entries.
    replace_bit: usize,

    /// Whether a user entry has sensitive data.
    ///
    /// - 0 for sensitive data.
    /// - 1 for non-sensitive data.
    ///
    /// When a user entry with sensitive data is deleted, the data is overwritten with zeroes. This
    /// feature is subject to the same guarantees as all other features of the store; in particular,
    /// deleting a sensitive entry is atomic. See the store module-level documentation for more
    /// information.
    sensitive_bit: usize,

    /// The data length of a user entry.
    length_range: bitfield::BitRange,

    /// The tag of a user entry.
    tag_range: bitfield::BitRange,

    /// The page index of a replace entry.
    replace_page_range: bitfield::BitRange,

    /// The byte index of a replace entry.
    replace_byte_range: bitfield::BitRange,

    /// The index of the page to erase.
    ///
    /// This is only present for internal entries.
    old_page_range: bitfield::BitRange,

    /// The current erase count of the page to erase.
    ///
    /// This is only present for internal entries.
    saved_erase_count_range: bitfield::BitRange,

    /// Whether a page is initialized.
    ///
    /// - 0 for initialized pages.
    /// - 1 for uninitialized pages.
    initialized_bit: usize,

    /// The erase count of a page.
    erase_count_range: bitfield::BitRange,

    /// Whether a page is being compacted.
    ///
    /// - 0 for pages being compacted.
    /// - 1 otherwise.
    compacting_bit: usize,

    /// The page index to which a page is being compacted.
    new_page_range: bitfield::BitRange,
}

impl Format {
    /// Returns a helper to parse the store format for a given storage and config.
    ///
    /// # Errors
    ///
    /// Returns `None` if any of the following conditions does not hold:
    /// - The word size must be a power of two.
    /// - The page size must be a power of two.
    /// - There should be at least 2 pages in the storage.
    /// - It should be possible to write a word at least twice.
    /// - It should be possible to erase a page at least once.
    /// - There should be at least 1 tag.
    pub fn new<S: Storage, C: StoreConfig>(storage: &S, config: &C) -> Option<Format> {
        let word_size = storage.word_size();
        let page_size = storage.page_size();
        let num_pages = storage.num_pages();
        let max_word_writes = storage.max_word_writes();
        let max_page_erases = storage.max_page_erases();
        let num_tags = config.num_tags();
        if !(word_size.is_power_of_two()
            && page_size.is_power_of_two()
            && num_pages > 1
            && max_word_writes >= 2
            && max_page_erases > 0
            && num_tags > 0)
        {
            return None;
        }
        // Compute how many bits we need to store the fields.
        let page_bits = num_bits(num_pages);
        let byte_bits = num_bits(page_size);
        let tag_bits = num_bits(num_tags);
        let erase_bits = num_bits(max_page_erases + 1);
        // Compute the bit position of the fields.
        let present_bit = 0;
        let deleted_bit = present_bit + 1;
        let internal_bit = deleted_bit + 1;
        let replace_bit = internal_bit + 1;
        let sensitive_bit = replace_bit + 1;
        let length_range = bitfield::BitRange {
            start: sensitive_bit + 1,
            length: byte_bits,
        };
        let tag_range = bitfield::BitRange {
            start: length_range.end(),
            length: tag_bits,
        };
        let replace_page_range = bitfield::BitRange {
            start: tag_range.end(),
            length: page_bits,
        };
        let replace_byte_range = bitfield::BitRange {
            start: replace_page_range.end(),
            length: byte_bits,
        };
        let old_page_range = bitfield::BitRange {
            start: internal_bit + 1,
            length: page_bits,
        };
        let saved_erase_count_range = bitfield::BitRange {
            start: old_page_range.end(),
            length: erase_bits,
        };
        let initialized_bit = 0;
        let erase_count_range = bitfield::BitRange {
            start: initialized_bit + 1,
            length: erase_bits,
        };
        let compacting_bit = erase_count_range.end();
        let new_page_range = bitfield::BitRange {
            start: compacting_bit + 1,
            length: page_bits,
        };
        let format = Format {
            word_size,
            page_size,
            num_pages,
            max_page_erases,
            num_tags,
            present_bit,
            deleted_bit,
            internal_bit,
            replace_bit,
            sensitive_bit,
            length_range,
            tag_range,
            replace_page_range,
            replace_byte_range,
            old_page_range,
            saved_erase_count_range,
            initialized_bit,
            erase_count_range,
            compacting_bit,
            new_page_range,
        };
        // Make sure all the following conditions hold:
        // - The page header is one word.
        // - The internal entry is one word.
        // - The entry header fits in one word (which is equivalent to the entry header size being
        //   exactly one word for sensitive entries).
        if format.page_header_size() != word_size
            || format.internal_entry_size() != word_size
            || format.header_size(true) != word_size
        {
            return None;
        }
        Some(format)
    }

    /// Ensures a user entry is valid.
    pub fn validate_entry(&self, entry: StoreEntry) -> Result<(), StoreError> {
        if entry.tag >= self.num_tags {
            return Err(StoreError::InvalidTag);
        }
        if entry.data.len() >= self.page_size {
            return Err(StoreError::StoreFull);
        }
        Ok(())
    }

    /// Returns the entry header length in bytes.
    ///
    /// This is the smallest number of bytes necessary to store all fields of the entry info up to
    /// and including `length`. For sensitive entries, the result is word-aligned.
    pub fn header_size(&self, sensitive: bool) -> usize {
        let mut size = self.bits_to_bytes(self.length_range.end());
        if sensitive {
            // We need to align to the next word boundary so that wiping the user data will not
            // count as a write to the header.
            size = self.align_word(size);
        }
        size
    }

    /// Returns the entry header length in bytes.
    ///
    /// This is a convenience function for `header_size` above.
    fn header_offset(&self, entry: &[u8]) -> usize {
        self.header_size(self.is_sensitive(entry))
    }

    /// Returns the entry info length in bytes.
    ///
    /// This is the number of bytes necessary to store all fields of the entry info. This also
    /// includes the internal padding to protect the `committed` bit from the `deleted` bit and to
    /// protect the entry info from the user data for sensitive entries.
    fn info_size(&self, is_replace: IsReplace, sensitive: bool) -> usize {
        let suffix_bits = 2; // committed + complete
        let info_bits = match is_replace {
            IsReplace::Replace => self.replace_byte_range.end() + suffix_bits,
            IsReplace::Insert => self.tag_range.end() + suffix_bits,
        };
        let mut info_size = self.bits_to_bytes(info_bits);
        // If the suffix bits would end up in the header, we need to add one byte for them.
        let header_size = self.header_size(sensitive);
        if info_size <= header_size {
            info_size = header_size + 1;
        }
        // If the entry is sensitive, we need to align to the next word boundary.
        if sensitive {
            info_size = self.align_word(info_size);
        }
        info_size
    }

    /// Returns the length in bytes of an entry.
    ///
    /// This depends on the length of the user data and whether the entry replaces an old entry or
    /// is an insertion. This also includes the internal padding to protect the `committed` bit from
    /// the `deleted` bit.
    pub fn entry_size(&self, is_replace: IsReplace, sensitive: bool, length: usize) -> usize {
        let mut entry_size = length + self.info_size(is_replace, sensitive);
        let word_size = self.word_size;
        entry_size = self.align_word(entry_size);
        // The entry must be at least 2 words such that the `committed` and `deleted` bits are on
        // different words.
        if entry_size == word_size {
            entry_size += word_size;
        }
        entry_size
    }

    /// Returns the length in bytes of an internal entry.
    pub fn internal_entry_size(&self) -> usize {
        let length = self.bits_to_bytes(self.saved_erase_count_range.end());
        self.align_word(length)
    }

    pub fn is_present(&self, header: &[u8]) -> bool {
        bitfield::is_zero(self.present_bit, header, bitfield::NO_GAP)
    }

    pub fn set_present(&self, header: &mut [u8]) {
        bitfield::set_zero(self.present_bit, header, bitfield::NO_GAP)
    }

    pub fn is_deleted(&self, header: &[u8]) -> bool {
        bitfield::is_zero(self.deleted_bit, header, bitfield::NO_GAP)
    }

    /// Returns whether an entry is present and not deleted.
    pub fn is_alive(&self, header: &[u8]) -> bool {
        self.is_present(header) && !self.is_deleted(header)
    }

    pub fn set_deleted(&self, header: &mut [u8]) {
        bitfield::set_zero(self.deleted_bit, header, bitfield::NO_GAP)
    }

    pub fn is_internal(&self, header: &[u8]) -> bool {
        bitfield::is_zero(self.internal_bit, header, bitfield::NO_GAP)
    }

    pub fn set_internal(&self, header: &mut [u8]) {
        bitfield::set_zero(self.internal_bit, header, bitfield::NO_GAP)
    }

    pub fn is_replace(&self, header: &[u8]) -> IsReplace {
        if bitfield::is_zero(self.replace_bit, header, bitfield::NO_GAP) {
            IsReplace::Replace
        } else {
            IsReplace::Insert
        }
    }

    fn set_replace(&self, header: &mut [u8]) {
        bitfield::set_zero(self.replace_bit, header, bitfield::NO_GAP)
    }

    pub fn is_sensitive(&self, header: &[u8]) -> bool {
        bitfield::is_zero(self.sensitive_bit, header, bitfield::NO_GAP)
    }

    pub fn set_sensitive(&self, header: &mut [u8]) {
        bitfield::set_zero(self.sensitive_bit, header, bitfield::NO_GAP)
    }

    pub fn get_length(&self, header: &[u8]) -> usize {
        bitfield::get_range(self.length_range, header, bitfield::NO_GAP)
    }

    fn set_length(&self, header: &mut [u8], length: usize) {
        bitfield::set_range(self.length_range, header, bitfield::NO_GAP, length)
    }

    pub fn get_data<'a>(&self, entry: &'a [u8]) -> &'a [u8] {
        &entry[self.header_offset(entry)..][..self.get_length(entry)]
    }

    /// Returns the span of user data in an entry.
    ///
    /// The complement of this gap in the entry is exactly the entry info. The header is before the
    /// gap and the footer is after the gap.
    pub fn entry_gap(&self, entry: &[u8]) -> bitfield::ByteGap {
        let start = self.header_offset(entry);
        let mut length = self.get_length(entry);
        if self.is_sensitive(entry) {
            length = self.align_word(length);
        }
        bitfield::ByteGap { start, length }
    }

    pub fn get_tag(&self, entry: &[u8]) -> usize {
        bitfield::get_range(self.tag_range, entry, self.entry_gap(entry))
    }

    fn set_tag(&self, entry: &mut [u8], tag: usize) {
        bitfield::set_range(self.tag_range, entry, self.entry_gap(entry), tag)
    }

    pub fn get_replace_index(&self, entry: &[u8]) -> Index {
        let gap = self.entry_gap(entry);
        let page = bitfield::get_range(self.replace_page_range, entry, gap);
        let byte = bitfield::get_range(self.replace_byte_range, entry, gap);
        Index { page, byte }
    }

    fn set_replace_page(&self, entry: &mut [u8], page: usize) {
        bitfield::set_range(self.replace_page_range, entry, self.entry_gap(entry), page)
    }

    fn set_replace_byte(&self, entry: &mut [u8], byte: usize) {
        bitfield::set_range(self.replace_byte_range, entry, self.entry_gap(entry), byte)
    }

    /// Returns the bit position of the `committed` bit.
    ///
    /// This cannot be precomputed like other fields since it depends on the length of the entry.
    fn committed_bit(&self, entry: &[u8]) -> usize {
        8 * entry.len() - 2
    }

    /// Returns the bit position of the `complete` bit.
    ///
    /// This cannot be precomputed like other fields since it depends on the length of the entry.
    fn complete_bit(&self, entry: &[u8]) -> usize {
        8 * entry.len() - 1
    }

    pub fn is_committed(&self, entry: &[u8]) -> bool {
        bitfield::is_zero(self.committed_bit(entry), entry, bitfield::NO_GAP)
    }

    pub fn set_committed(&self, entry: &mut [u8]) {
        bitfield::set_zero(self.committed_bit(entry), entry, bitfield::NO_GAP)
    }

    pub fn is_complete(&self, entry: &[u8]) -> bool {
        bitfield::is_zero(self.complete_bit(entry), entry, bitfield::NO_GAP)
    }

    fn set_complete(&self, entry: &mut [u8]) {
        bitfield::set_zero(self.complete_bit(entry), entry, bitfield::NO_GAP)
    }

    pub fn get_old_page(&self, header: &[u8]) -> usize {
        bitfield::get_range(self.old_page_range, header, bitfield::NO_GAP)
    }

    pub fn set_old_page(&self, header: &mut [u8], old_page: usize) {
        bitfield::set_range(self.old_page_range, header, bitfield::NO_GAP, old_page)
    }

    pub fn get_saved_erase_count(&self, header: &[u8]) -> usize {
        bitfield::get_range(self.saved_erase_count_range, header, bitfield::NO_GAP)
    }

    pub fn set_saved_erase_count(&self, header: &mut [u8], erase_count: usize) {
        bitfield::set_range(
            self.saved_erase_count_range,
            header,
            bitfield::NO_GAP,
            erase_count,
        )
    }

    /// Builds an entry for replace or insert operations.
    pub fn build_entry(&self, replace: Option<Index>, user_entry: StoreEntry) -> Vec<u8> {
        let StoreEntry {
            tag,
            data,
            sensitive,
        } = user_entry;
        let is_replace = match replace {
            None => IsReplace::Insert,
            Some(_) => IsReplace::Replace,
        };
        let entry_len = self.entry_size(is_replace, sensitive, data.len());
        let mut entry = Vec::with_capacity(entry_len);
        // Build the header.
        entry.resize(self.header_size(sensitive), 0xff);
        self.set_present(&mut entry[..]);
        if sensitive {
            self.set_sensitive(&mut entry[..]);
        }
        self.set_length(&mut entry[..], data.len());
        // Add the data.
        entry.extend_from_slice(data);
        // Build the footer.
        entry.resize(entry_len, 0xff);
        self.set_tag(&mut entry[..], tag);
        self.set_complete(&mut entry[..]);
        match replace {
            None => self.set_committed(&mut entry[..]),
            Some(Index { page, byte }) => {
                self.set_replace(&mut entry[..]);
                self.set_replace_page(&mut entry[..], page);
                self.set_replace_byte(&mut entry[..], byte);
            }
        }
        entry
    }

    /// Builds an internal entry used to erase a page.
    pub fn build_erase_entry(&self, old_page: usize, saved_erase_count: usize) -> Vec<u8> {
        let mut entry = vec![0xff; self.internal_entry_size()];
        self.set_present(&mut entry[..]);
        self.set_internal(&mut entry[..]);
        self.set_old_page(&mut entry[..], old_page);
        self.set_saved_erase_count(&mut entry[..], saved_erase_count);
        entry
    }

    /// Returns the length in bytes of a page header entry.
    ///
    /// This includes the word padding.
    pub fn page_header_size(&self) -> usize {
        self.align_word(self.bits_to_bytes(self.erase_count_range.end()))
    }

    pub fn is_initialized(&self, header: &[u8]) -> bool {
        bitfield::is_zero(self.initialized_bit, header, bitfield::NO_GAP)
    }

    pub fn set_initialized(&self, header: &mut [u8]) {
        bitfield::set_zero(self.initialized_bit, header, bitfield::NO_GAP)
    }

    pub fn get_erase_count(&self, header: &[u8]) -> usize {
        bitfield::get_range(self.erase_count_range, header, bitfield::NO_GAP)
    }

    pub fn set_erase_count(&self, header: &mut [u8], count: usize) {
        bitfield::set_range(self.erase_count_range, header, bitfield::NO_GAP, count)
    }

    pub fn is_compacting(&self, header: &[u8]) -> bool {
        bitfield::is_zero(self.compacting_bit, header, bitfield::NO_GAP)
    }

    pub fn set_compacting(&self, header: &mut [u8]) {
        bitfield::set_zero(self.compacting_bit, header, bitfield::NO_GAP)
    }

    pub fn get_new_page(&self, header: &[u8]) -> usize {
        bitfield::get_range(self.new_page_range, header, bitfield::NO_GAP)
    }

    pub fn set_new_page(&self, header: &mut [u8], new_page: usize) {
        bitfield::set_range(self.new_page_range, header, bitfield::NO_GAP, new_page)
    }

    /// Returns the smallest word boundary greater than or equal to a value.
    fn align_word(&self, value: usize) -> usize {
        let word_size = self.word_size;
        (value + word_size - 1) / word_size * word_size
    }

    /// Returns the minimum number of bytes to represent a given number of bits.
    fn bits_to_bytes(&self, bits: usize) -> usize {
        (bits + 7) / 8
    }
}

/// Returns the number of bits necessary to write numbers smaller than `x`.
fn num_bits(x: usize) -> usize {
    x.next_power_of_two().trailing_zeros() as usize
}

#[test]
fn num_bits_ok() {
    assert_eq!(num_bits(0), 0);
    assert_eq!(num_bits(1), 0);
    assert_eq!(num_bits(2), 1);
    assert_eq!(num_bits(3), 2);
    assert_eq!(num_bits(4), 2);
    assert_eq!(num_bits(5), 3);
    assert_eq!(num_bits(8), 3);
    assert_eq!(num_bits(9), 4);
    assert_eq!(num_bits(16), 4);
}
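To make the layout arithmetic above concrete, the following standalone sketch (not part of either store library) shows how `num_bits` determines the field widths for the small test geometry used earlier in this commit: 4-byte words, 16-byte pages, 2 pages, and 3 erases per page; the tag count of 4 is an arbitrary assumption.

fn num_bits(x: usize) -> usize {
    // Number of bits needed to represent values in 0..x.
    x.next_power_of_two().trailing_zeros() as usize
}

fn main() {
    let (page_size, num_pages, max_page_erases, num_tags) = (16usize, 2usize, 3usize, 4usize);
    let byte_bits = num_bits(page_size); // 4: byte offsets within a page
    let page_bits = num_bits(num_pages); // 1: page indices
    let tag_bits = num_bits(num_tags); // 2: user tags
    let erase_bits = num_bits(max_page_erases + 1); // 2: erase counts 0..=3
    assert_eq!((byte_bits, page_bits, tag_bits, erase_bits), (4, 1, 2, 2));
    // Entry header: present + deleted + internal + replace + sensitive + length.
    let header_bits = 5 + byte_bits;
    // 9 bits fit in 2 bytes, i.e. within the one-word header required by `Format::new`.
    assert_eq!((header_bits + 7) / 8, 2);
}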
File diff suppressed because it is too large
@@ -1,4 +1,4 @@
// Copyright 2019 Google LLC
// Copyright 2019-2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -12,9 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use super::{Index, Storage, StorageError, StorageResult};
use alloc::vec::Vec;
use libtock_core::syscalls;
use persistent_store::{Storage, StorageError, StorageIndex, StorageResult};

const DRIVER_NUMBER: usize = 0x50003;

@@ -42,15 +42,13 @@ mod memop_nr {

fn get_info(nr: usize, arg: usize) -> StorageResult<usize> {
    let code = syscalls::command(DRIVER_NUMBER, command_nr::GET_INFO, nr, arg);
    code.map_err(|e| StorageError::KernelError {
        code: e.return_code,
    })
    code.map_err(|_| StorageError::CustomError)
}

fn memop(nr: u32, arg: usize) -> StorageResult<usize> {
    let code = unsafe { syscalls::raw::memop(nr, arg) };
    if code < 0 {
        Err(StorageError::KernelError { code })
        Err(StorageError::CustomError)
    } else {
        Ok(code as usize)
    }
@@ -70,7 +68,7 @@ impl SyscallStorage {
    ///
    /// # Errors
    ///
    /// Returns `BadFlash` if any of the following conditions do not hold:
    /// Returns `CustomError` if any of the following conditions do not hold:
    /// - The word size is a power of two.
    /// - The page size is a power of two.
    /// - The page size is a multiple of the word size.
@@ -90,13 +88,13 @@ impl SyscallStorage {
            || !syscall.page_size.is_power_of_two()
            || !syscall.is_word_aligned(syscall.page_size)
        {
            return Err(StorageError::BadFlash);
            return Err(StorageError::CustomError);
        }
        for i in 0..memop(memop_nr::STORAGE_CNT, 0)? {
            let storage_ptr = memop(memop_nr::STORAGE_PTR, i)?;
            let max_storage_len = memop(memop_nr::STORAGE_LEN, i)?;
            if !syscall.is_page_aligned(storage_ptr) || !syscall.is_page_aligned(max_storage_len) {
                return Err(StorageError::BadFlash);
                return Err(StorageError::CustomError);
            }
            let storage_len = core::cmp::min(num_pages * syscall.page_size, max_storage_len);
            num_pages -= storage_len / syscall.page_size;
@@ -141,12 +139,12 @@ impl Storage for SyscallStorage {
        self.max_page_erases
    }

    fn read_slice(&self, index: Index, length: usize) -> StorageResult<&[u8]> {
    fn read_slice(&self, index: StorageIndex, length: usize) -> StorageResult<&[u8]> {
        let start = index.range(length, self)?.start;
        find_slice(&self.storage_locations, start, length)
    }

    fn write_slice(&mut self, index: Index, value: &[u8]) -> StorageResult<()> {
    fn write_slice(&mut self, index: StorageIndex, value: &[u8]) -> StorageResult<()> {
        if !self.is_word_aligned(index.byte) || !self.is_word_aligned(value.len()) {
            return Err(StorageError::NotAligned);
        }
@@ -163,28 +161,24 @@ impl Storage for SyscallStorage {
            )
        };
        if code < 0 {
            return Err(StorageError::KernelError { code });
            return Err(StorageError::CustomError);
        }

        let code = syscalls::command(DRIVER_NUMBER, command_nr::WRITE_SLICE, ptr, value.len());
        if let Err(e) = code {
            return Err(StorageError::KernelError {
                code: e.return_code,
            });
        if code.is_err() {
            return Err(StorageError::CustomError);
        }

        Ok(())
    }

    fn erase_page(&mut self, page: usize) -> StorageResult<()> {
        let index = Index { page, byte: 0 };
        let index = StorageIndex { page, byte: 0 };
        let length = self.page_size();
        let ptr = self.read_slice(index, length)?.as_ptr() as usize;
        let code = syscalls::command(DRIVER_NUMBER, command_nr::ERASE_PAGE, ptr, length);
        if let Err(e) = code {
            return Err(StorageError::KernelError {
                code: e.return_code,
            });
        if code.is_err() {
            return Err(StorageError::CustomError);
        }
        Ok(())
    }