improvements to large blob storage
@@ -60,9 +60,7 @@ const DEFAULT_MIN_PIN_LENGTH_RP_IDS: Vec<String> = Vec::new();
// This constant is an attempt to limit storage requirements. If you don't set it to 0,
// the stored strings can still be unbounded, but that is true for all RP IDs.
pub const MAX_RP_IDS_LENGTH: usize = 8;
const SHARD_SIZE: usize = 1023;
pub const MAX_LARGE_BLOB_ARRAY_SIZE: usize =
SHARD_SIZE * (key::LARGE_BLOB_SHARDS.end - key::LARGE_BLOB_SHARDS.start);
pub const MAX_LARGE_BLOB_ARRAY_SIZE: usize = 2048;
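// Illustrative sketch, not part of this commit: the CTAP large-blob extension
// requires at least 1024 bytes of serialized large-blob storage, which is why
// the test module below asserts MAX_LARGE_BLOB_ARRAY_SIZE >= 1024. On a recent
// toolchain the same invariant could also be checked at compile time; the
// sketch constant here merely stands in for the one defined above.
const MAX_LARGE_BLOB_ARRAY_SIZE_SKETCH: usize = 2048;
const _: () = assert!(MAX_LARGE_BLOB_ARRAY_SIZE_SKETCH >= 1024);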

/// Wrapper for master keys.
pub struct MasterKeys {
@@ -471,38 +469,55 @@ impl PersistentStore {
)?)
}

/// The size used for shards of large blobs.
///
/// This value is constant during the lifetime of the device.
fn shard_size(&self) -> usize {
self.store.max_value_length()
}
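// Illustrative sketch, not part of this commit: with shard_size() taken from
// the store's maximum value length, a blob of n bytes occupies
// ceil(n / shard_size) consecutive entries starting at key::LARGE_BLOB_SHARDS.start.
// The test test_max_large_blob_array_size below checks that MAX_LARGE_BLOB_ARRAY_SIZE
// never needs more shards than that key range provides.
fn shards_needed(blob_len: usize, shard_size: usize) -> usize {
    // Ceiling division; an empty blob occupies no shards, and reads then fall
    // back to the 17-byte default described below.
    (blob_len + shard_size - 1) / shard_size
}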

/// Reads the byte vector stored as the serialized large blobs array.
///
/// If too few bytes exist at that offset, return the maximum number
/// available. This includes the case of the offset being beyond the stored array.
///
/// If no large blob is committed to the store, get responds as if an empty
/// CBOR array (0x80) was written, together with the 16-byte prefix of its
/// SHA256, to a total length of 17 bytes (which is the shortest legitimate
/// large blob entry possible).
pub fn get_large_blob_array(
&self,
mut byte_count: usize,
mut offset: usize,
mut byte_count: usize,
) -> Result<Vec<u8>, Ctap2StatusCode> {
if self.store.find(key::LARGE_BLOB_SHARDS.start)?.is_none() {
return Ok(vec![
0x80, 0x76, 0xBE, 0x8B, 0x52, 0x8D, 0x00, 0x75, 0xF7, 0xAA, 0xE9, 0x8D, 0x6F, 0xA5,
0x7A, 0x6D, 0x3C,
]);
}
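// Illustrative sketch, not part of this commit: the 17-byte default returned
// above is the empty CBOR array (0x80) followed by the first 16 bytes of its
// SHA-256 digest. Assuming the third-party sha2 crate purely for illustration,
// the hard-coded constant can be reproduced like this:
use sha2::{Digest, Sha256};

fn default_large_blob_array() -> Vec<u8> {
    let mut blob = vec![0x80u8];
    // The first 16 bytes of SHA-256(0x80) act as the integrity checksum prefix.
    let digest = Sha256::digest([0x80u8]);
    blob.extend_from_slice(&digest[..16]);
    blob // 17 bytes: 0x80, 0x76, 0xBE, ..., 0x3C
}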
let mut output = Vec::with_capacity(byte_count);
while byte_count > 0 {
let shard_offset = offset % SHARD_SIZE;
let shard_length = cmp::min(SHARD_SIZE - shard_offset, byte_count);

let shard_key = key::LARGE_BLOB_SHARDS.start + offset / SHARD_SIZE;
let shard_key = key::LARGE_BLOB_SHARDS.start + offset / self.shard_size();
if !key::LARGE_BLOB_SHARDS.contains(&shard_key) {
// This request should have been caught at application level.
return Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR);
}
let shard_entry = self.store.find(shard_key)?.unwrap_or_default();
if shard_entry.len() < shard_offset + shard_length {
// If fewer bytes exist than requested, return them all.
output.extend(&shard_entry[shard_offset..]);
return Ok(output);
let shard_entry = self.store.find(shard_key)?;
let shard_entry = if shard_key == key::LARGE_BLOB_SHARDS.start {
shard_entry.unwrap_or_else(|| {
vec![
0x80, 0x76, 0xBE, 0x8B, 0x52, 0x8D, 0x00, 0x75, 0xF7, 0xAA, 0xE9, 0x8D,
0x6F, 0xA5, 0x7A, 0x6D, 0x3C,
]
})
} else {
shard_entry.unwrap_or_default()
};

let shard_offset = offset % self.shard_size();
if shard_entry.len() < shard_offset {
break;
}
let shard_length = cmp::min(shard_entry.len() - shard_offset, byte_count);
output.extend(&shard_entry[shard_offset..][..shard_length]);
if shard_entry.len() < self.shard_size() {
break;
}
output.extend(&shard_entry[shard_offset..shard_offset + shard_length]);
offset += shard_length;
byte_count -= shard_length;
}
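// Illustrative sketch, not part of this commit: the loop above walks shards by
// translating the global offset into a storage key plus an offset inside that
// shard; only the last stored shard may be shorter than shard_size, which is
// why hitting a short shard ends the read. A standalone version of the arithmetic:
fn locate(offset: usize, shard_size: usize, first_key: usize) -> (usize, usize) {
    // (storage key holding this byte, position of the byte within that shard)
    (first_key + offset / shard_size, offset % shard_size)
}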
@@ -517,22 +532,18 @@ impl PersistentStore {
if large_blob_array.len() > MAX_LARGE_BLOB_ARRAY_SIZE {
return Err(Ctap2StatusCode::CTAP2_ERR_VENDOR_INTERNAL_ERROR);
}
const MIN_SHARD_KEY: usize = key::LARGE_BLOB_SHARDS.start;
const SHARD_COUNT: usize = key::LARGE_BLOB_SHARDS.end - MIN_SHARD_KEY;
let mut transactions = Vec::with_capacity(SHARD_COUNT);
for shard_key in MIN_SHARD_KEY..key::LARGE_BLOB_SHARDS.end {
let large_blob_index = (shard_key - MIN_SHARD_KEY) * SHARD_SIZE;
if large_blob_array.len() > large_blob_index {
let shard_length = cmp::min(SHARD_SIZE, large_blob_array.len() - large_blob_index);
transactions.push(StoreUpdate::Insert {
key: shard_key,
value: &large_blob_array[large_blob_index..large_blob_index + shard_length],
});
} else {
transactions.push(StoreUpdate::Remove { key: shard_key });
}

let mut shards = large_blob_array.chunks(self.shard_size());
let mut updates = Vec::with_capacity(shards.len());
for key in key::LARGE_BLOB_SHARDS {
let update = match shards.next() {
Some(value) => StoreUpdate::Insert { key, value },
None if self.store.find(key)?.is_some() => StoreUpdate::Remove { key },
_ => break,
};
updates.push(update);
}
Ok(self.store.transaction(&transactions)?)
Ok(self.store.transaction(&updates)?)
}
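// Illustrative sketch, not part of this commit: the rewritten commit path above
// relies on slice::chunks, which yields full shard_size chunks followed by one
// possibly shorter tail chunk; keys past the last chunk are removed only while
// they still hold stale data, and the whole plan is applied as a single store
// transaction. The resulting shard lengths can be modelled as:
fn chunk_lengths(blob_len: usize, shard_size: usize) -> Vec<usize> {
    (0..blob_len)
        .step_by(shard_size)
        .map(|start| core::cmp::min(shard_size, blob_len - start))
        .collect()
}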

/// Returns the attestation private key if defined.
@@ -1213,9 +1224,19 @@ mod test {
}

#[test]
#[allow(clippy::assertions_on_constants)]
fn test_max_large_blob_array_size() {
assert!(MAX_LARGE_BLOB_ARRAY_SIZE >= 1024);
let mut rng = ThreadRng256 {};
let persistent_store = PersistentStore::new(&mut rng);

#[allow(clippy::assertions_on_constants)]
{
assert!(MAX_LARGE_BLOB_ARRAY_SIZE >= 1024);
}
assert!(
MAX_LARGE_BLOB_ARRAY_SIZE
<= persistent_store.shard_size()
* (key::LARGE_BLOB_SHARDS.end - key::LARGE_BLOB_SHARDS.start)
);
}

#[test]
@@ -1227,25 +1248,29 @@ mod test {
assert!(persistent_store
.commit_large_blob_array(&large_blob_array)
.is_ok());
let restored_large_blob_array = persistent_store.get_large_blob_array(1, 0).unwrap();
let restored_large_blob_array = persistent_store.get_large_blob_array(0, 1).unwrap();
assert_eq!(vec![0x01], restored_large_blob_array);
let restored_large_blob_array = persistent_store.get_large_blob_array(1, 1).unwrap();
assert_eq!(vec![0x02], restored_large_blob_array);
let restored_large_blob_array = persistent_store.get_large_blob_array(1, 2).unwrap();
let restored_large_blob_array = persistent_store.get_large_blob_array(2, 1).unwrap();
assert_eq!(vec![0x03], restored_large_blob_array);
let restored_large_blob_array = persistent_store.get_large_blob_array(2, 2).unwrap();
assert_eq!(vec![0x03], restored_large_blob_array);
let restored_large_blob_array = persistent_store.get_large_blob_array(3, 1).unwrap();
assert_eq!(Vec::<u8>::new(), restored_large_blob_array);
let restored_large_blob_array = persistent_store.get_large_blob_array(4, 1).unwrap();
assert_eq!(Vec::<u8>::new(), restored_large_blob_array);

let large_blob_array = vec![0xC0; SHARD_SIZE];
let large_blob_array = vec![0xC0; persistent_store.shard_size()];
assert!(persistent_store
.commit_large_blob_array(&large_blob_array)
.is_ok());
let restored_large_blob_array = persistent_store
.get_large_blob_array(SHARD_SIZE, 0)
.get_large_blob_array(0, persistent_store.shard_size())
.unwrap();
assert_eq!(large_blob_array, restored_large_blob_array);
let restored_large_blob_array = persistent_store
.get_large_blob_array(SHARD_SIZE + 1, 0)
.get_large_blob_array(0, persistent_store.shard_size() + 1)
.unwrap();
assert_eq!(large_blob_array, restored_large_blob_array);
}
@@ -1255,32 +1280,32 @@ mod test {
let mut rng = ThreadRng256 {};
let mut persistent_store = PersistentStore::new(&mut rng);

let large_blob_array = vec![0xC0; SHARD_SIZE + 1];
let large_blob_array = vec![0xC0; persistent_store.shard_size() + 1];
assert!(persistent_store
.commit_large_blob_array(&large_blob_array)
.is_ok());
let restored_large_blob_array = persistent_store
.get_large_blob_array(SHARD_SIZE, 0)
.get_large_blob_array(0, persistent_store.shard_size())
.unwrap();
assert_eq!(
large_blob_array[..SHARD_SIZE],
large_blob_array[..persistent_store.shard_size()],
restored_large_blob_array[..]
);
let restored_large_blob_array = persistent_store
.get_large_blob_array(SHARD_SIZE + 1, 0)
.get_large_blob_array(0, persistent_store.shard_size() + 1)
.unwrap();
assert_eq!(large_blob_array, restored_large_blob_array);

let large_blob_array = vec![0xC0; 2 * SHARD_SIZE];
let large_blob_array = vec![0xC0; 2 * persistent_store.shard_size()];
assert!(persistent_store
.commit_large_blob_array(&large_blob_array)
.is_ok());
let restored_large_blob_array = persistent_store
.get_large_blob_array(2 * SHARD_SIZE, 0)
.get_large_blob_array(0, 2 * persistent_store.shard_size())
.unwrap();
assert_eq!(large_blob_array, restored_large_blob_array);
let restored_large_blob_array = persistent_store
.get_large_blob_array(2 * SHARD_SIZE + 1, 0)
.get_large_blob_array(0, 2 * persistent_store.shard_size() + 1)
.unwrap();
assert_eq!(large_blob_array, restored_large_blob_array);
}
@@ -1290,37 +1315,46 @@ mod test {
let mut rng = ThreadRng256 {};
let mut persistent_store = PersistentStore::new(&mut rng);

let mut large_blob_array = vec![0x11; SHARD_SIZE];
large_blob_array.extend([0x22; SHARD_SIZE].iter());
large_blob_array.extend([0x33; 1].iter());
let mut large_blob_array = vec![0x11; persistent_store.shard_size()];
large_blob_array.extend(vec![0x22; persistent_store.shard_size()]);
large_blob_array.extend(&[0x33; 1]);
assert!(persistent_store
.commit_large_blob_array(&large_blob_array)
.is_ok());
let restored_large_blob_array = persistent_store
.get_large_blob_array(2 * SHARD_SIZE + 1, 0)
.get_large_blob_array(0, 2 * persistent_store.shard_size() + 1)
.unwrap();
assert_eq!(large_blob_array, restored_large_blob_array);
let restored_large_blob_array = persistent_store
.get_large_blob_array(3 * SHARD_SIZE, 0)
.get_large_blob_array(0, 3 * persistent_store.shard_size())
.unwrap();
assert_eq!(large_blob_array, restored_large_blob_array);
let shard1 = persistent_store
.get_large_blob_array(SHARD_SIZE, 0)
.get_large_blob_array(0, persistent_store.shard_size())
.unwrap();
let shard2 = persistent_store
.get_large_blob_array(SHARD_SIZE, SHARD_SIZE)
.get_large_blob_array(persistent_store.shard_size(), persistent_store.shard_size())
.unwrap();
let shard3 = persistent_store
.get_large_blob_array(1, 2 * SHARD_SIZE)
.get_large_blob_array(2 * persistent_store.shard_size(), 1)
.unwrap();
assert_eq!(large_blob_array[..SHARD_SIZE], shard1[..]);
assert_eq!(large_blob_array[SHARD_SIZE..2 * SHARD_SIZE], shard2[..]);
assert_eq!(large_blob_array[2 * SHARD_SIZE..], shard3[..]);
assert_eq!(
large_blob_array[..persistent_store.shard_size()],
shard1[..]
);
assert_eq!(
large_blob_array[persistent_store.shard_size()..2 * persistent_store.shard_size()],
shard2[..]
);
assert_eq!(
large_blob_array[2 * persistent_store.shard_size()..],
shard3[..]
);
let shard12 = persistent_store
.get_large_blob_array(2, SHARD_SIZE - 1)
.get_large_blob_array(persistent_store.shard_size() - 1, 2)
.unwrap();
let shard23 = persistent_store
.get_large_blob_array(2, 2 * SHARD_SIZE - 1)
.get_large_blob_array(2 * persistent_store.shard_size() - 1, 2)
.unwrap();
assert_eq!(vec![0x11, 0x22], shard12);
assert_eq!(vec![0x22, 0x33], shard23);
@@ -1331,32 +1365,48 @@ mod test {
let mut rng = ThreadRng256 {};
let mut persistent_store = PersistentStore::new(&mut rng);

let large_blob_array = vec![0x11; SHARD_SIZE + 1];
let large_blob_array = vec![0x11; persistent_store.shard_size() + 1];
assert!(persistent_store
.commit_large_blob_array(&large_blob_array)
.is_ok());
let large_blob_array = vec![0x22; SHARD_SIZE];
let large_blob_array = vec![0x22; persistent_store.shard_size()];
assert!(persistent_store
.commit_large_blob_array(&large_blob_array)
.is_ok());
let restored_large_blob_array = persistent_store
.get_large_blob_array(SHARD_SIZE + 1, 0)
.get_large_blob_array(0, persistent_store.shard_size() + 1)
.unwrap();
assert_eq!(large_blob_array, restored_large_blob_array);
let restored_large_blob_array = persistent_store
.get_large_blob_array(1, SHARD_SIZE)
.get_large_blob_array(persistent_store.shard_size(), 1)
.unwrap();
assert_eq!(Vec::<u8>::new(), restored_large_blob_array);

assert!(persistent_store.commit_large_blob_array(&[]).is_ok());
let restored_large_blob_array = persistent_store
.get_large_blob_array(SHARD_SIZE + 1, 0)
.get_large_blob_array(0, persistent_store.shard_size() + 1)
.unwrap();
// Committing an empty array resets to the default blob of 17 bytes.
assert_eq!(restored_large_blob_array.len(), 17);
}

#[test]
fn test_commit_get_large_blob_array_no_commit() {
let mut rng = ThreadRng256 {};
let persistent_store = PersistentStore::new(&mut rng);

let empty_blob_array = vec![
0x80, 0x76, 0xbe, 0x8b, 0x52, 0x8d, 0x00, 0x75, 0xf7, 0xaa, 0xe9, 0x8d, 0x6f, 0xa5,
0x7a, 0x6d, 0x3c,
0x80, 0x76, 0xBE, 0x8B, 0x52, 0x8D, 0x00, 0x75, 0xF7, 0xAA, 0xE9, 0x8D, 0x6F, 0xA5,
0x7A, 0x6D, 0x3C,
];
let restored_large_blob_array = persistent_store
.get_large_blob_array(0, persistent_store.shard_size())
.unwrap();
assert_eq!(empty_blob_array, restored_large_blob_array);
let restored_large_blob_array = persistent_store.get_large_blob_array(0, 1).unwrap();
assert_eq!(vec![0x80], restored_large_blob_array);
let restored_large_blob_array = persistent_store.get_large_blob_array(16, 1).unwrap();
assert_eq!(vec![0x3C], restored_large_blob_array);
}

#[test]