.travis.yml (deleted)
@@ -1,46 +0,0 @@
-# Copyright 2019 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-language: rust
-rust:
-  - nightly-2020-01-16
-
-os:
-  - linux
-  - osx
-
-addons:
-  apt:
-    packages:
-      - "python3"
-      - "python3-pip"
-      - "python3-setuptools"
-      - "python3-wheel"
-
-cache:
-  - rust
-  - cargo
-
-before-install:
-  - openssl version
-
-install:
-  - rustup target add thumbv7em-none-eabi
-  - rustup component add rustfmt
-  - cargo install cargo-audit
-
-script:
-  - ./setup.sh
-  - cargo audit
-  - ./run_desktop_tests.sh
Cargo.toml
@@ -28,6 +28,7 @@ ram_storage = []
 verbose = ["debug_ctap", "libtock_drivers/verbose_usb"]
 with_ctap1 = ["crypto/with_ctap1"]
 with_ctap2_1 = []
+with_nfc = ["libtock_drivers/with_nfc"]
 
 [dev-dependencies]
 elf2tab = "0.6.0"
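
The new `with_nfc` feature only takes effect where code gates on it. A minimal sketch of the usual pattern (the function name is illustrative, not taken from this diff):

```rust
// Hypothetical gate: this body is compiled only when the crate is built
// with `--features with_nfc`.
#[cfg(feature = "with_nfc")]
fn init_nfc_transport() {
    // NFC-specific initialization would go here.
}

// Empty fallback when the feature is disabled, so call sites need no cfg.
#[cfg(not(feature = "with_nfc"))]
fn init_nfc_transport() {}
```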

deploy.py
@@ -846,6 +846,13 @@ if __name__ == "__main__":
       help=("Compiles the OpenSK application with backward compatible "
             "support for CTAP2.1 protocol."),
   )
+  main_parser.add_argument(
+      "--nfc",
+      action="append_const",
+      const="with_nfc",
+      dest="features",
+      help=("Compiles the OpenSK application with support for NFC."),
+  )
   main_parser.add_argument(
       "--regen-keys",
       action="store_true",
@@ -921,13 +928,6 @@ if __name__ == "__main__":
       const="console_test",
       help=("Compiles and installs the console_test example that tests the "
             "console driver with messages of various lengths."))
-  apps_group.add_argument(
-      "--nfct_test",
-      dest="application",
-      action="store_const",
-      const="nfct_test",
-      help=("Compiles and installs the nfct_test example that tests the "
-            "NFC driver."))
 
   main_parser.set_defaults(features=["with_ctap1"])
 
nfct_test example (file deleted)
@@ -1,69 +0,0 @@
-#![no_std]
-
-extern crate alloc;
-extern crate lang_items;
-
-use core::fmt::Write;
-use libtock_drivers::console::Console;
-use libtock_drivers::nfc::NfcTag;
-
-#[allow(dead_code)]
-/// Helper function to write a slice into a fixed
-/// length transmission buffer.
-fn write_tx_buffer(buf: &mut [u8], slice: &[u8]) {
-    for (i, &byte) in slice.iter().enumerate() {
-        buf[i] = byte;
-    }
-}
-
-fn main() {
-    let mut console = Console::new();
-
-    writeln!(console, "****************************************").unwrap();
-    writeln!(console, "nfct_test application is installed").unwrap();
-
-    // 1. Configure Type 4 tag
-    if NfcTag::configure(4) {
-        writeln!(console, " -- TAG CONFIGURED").unwrap();
-    }
-    // 2. Subscribe to a SELECTED CALLBACK
-    if NfcTag::selected() {
-        writeln!(console, " -- TAG SELECTED").unwrap();
-        // 0xfffff results in 1048575 / 13.56e6 = 77ms
-        NfcTag::set_framedelaymax(0xfffff);
-    }
-    /*
-    TODO: Enable tag emulation (currently the tag is always activated);
-    needs field detection support at the driver level.
-    */
-    let mut rx_buf = [0; 64];
-    let mut unknown_cmd_cntr = 0;
-    loop {
-        NfcTag::receive(&mut rx_buf);
-        match rx_buf[0] {
-            0xe0 /* RATS */ => {
-                let mut answer_to_select = [0x05, 0x78, 0x80, 0xB1, 0x00];
-                let amount = answer_to_select.len();
-                NfcTag::transmit(&mut answer_to_select, amount);
-            }
-            0xc2 /* DESELECT */ => {
-                // Ignore the request
-                let mut command_error = [0x6A, 0x81];
-                let amount = command_error.len();
-                NfcTag::transmit(&mut command_error, amount);
-            }
-            0x02 | 0x03 /* APDU Prefix */ => {
-                let mut reply = [rx_buf[0], 0x90, 0x00];
-                let amount = reply.len();
-                NfcTag::transmit(&mut reply, amount);
-            }
-            _ => {
-                unknown_cmd_cntr += 1;
-            }
-        }
-        if unknown_cmd_cntr > 50 {
-            break;
-        }
-    }
-    writeln!(console, "****************************************").unwrap();
-}
fuzz/ctap2_commands_parameters_corpus.json (new file)
@@ -0,0 +1,152 @@
+[
+  {
+    "hex": "1903e8",
+    "cbor": "unsigned(1000)",
+    "description": "cbor value"
+  },
+  {
+    "hex": "3829",
+    "cbor": "negative(41)",
+    "description": "cbor value"
+  },
+  {
+    "hex": "c349010000000000000000",
+    "cbor": "-18446744073709551617",
+    "description": "cbor value"
+  },
+  {
+    "hex": "f90000",
+    "cbor": "primitive(0)",
+    "description": "cbor value"
+  },
+  {
+    "hex": "f90001",
+    "cbor": "primitive(1) = 5.960464477539063e-8",
+    "description": "cbor value"
+  },
+  {
+    "hex": "fa7fc00000",
+    "cbor": "primitive(2143289344) = NaN",
+    "description": "cbor value"
+  },
+  {
+    "hex": "f818",
+    "cbor": "simple(24)",
+    "description": "cbor value"
+  },
+  {
+    "hex": "d74401020304",
+    "cbor": "tag 23(h'01020304')",
+    "description": "cbor value"
+  },
+  {
+    "hex": "6449455446",
+    "cbor": "IETF",
+    "description": "cbor value"
+  },
+  {
+    "hex": "62225c",
+    "cbor": "\"\\",
+    "description": "cbor value"
+  },
+  {
+    "hex": "41a8",
+    "cbor": "bytes(a8)",
+    "description": "cbor value"
+  },
+  {
+    "hex": "623a41",
+    "cbor": "text(:A)",
+    "description": "cbor value"
+  },
+  {
+    "hex": "83019f0203ff820405",
+    "cbor": "array [1, [2, 3], [4, 5]]",
+    "description": "cbor value"
+  },
+  {
+    "hex": "9f018202039f0405ffff",
+    "cbor": "indefinite length array [1, [2, 3], [4, 5]]",
+    "description": "cbor value"
+  },
+  {
+    "hex": "5f44aabbccdd43eeff99ff",
+    "cbor": "indefinite byte string (_ h'AABBCCDD', h'EEFF99')",
+    "description": "cbor value"
+  },
+  {
+    "hex": "7f657374726561646d696e67ff",
+    "cbor": "indefinite byte string (_ \"strea\", \"ming\")",
+    "description": "cbor value"
+  },
+  {
+    "hex": "a26161016162820203",
+    "cbor": "map {\"a\": 1, \"b\": [2, 3]}",
+    "description": "cbor value"
+  },
+  {
+    "hex": "bf6346756ef563416d7421ff",
+    "cbor": "indefinite length map {\"Fun\": true, \"Amt\": -2}",
+    "description": "cbor value"
+  },
+  {
+    "hex": "a4015820cdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcd02a14269645770696E5F616273656E63652E6578616D706C652E636F6D03a262696458201D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D646e616d65644164616d0481a263616c672664747970656a7075626C69632D6B6579",
+    "cbor": "{1: h'CDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCD', 2: {\"id\": \"pin_absence.example.com\"}, 3: {\"id\": h'1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D', \"name\": \"Adam\"}, 4: [{\"alg\": -7, \"type\": \"public-key\"}]}",
+    "description": "make credential parameter 1"
+  },
+  {
+    "hex": "a9015820687134968222ec17202e42505f8ed2b16ae22f16bb05b88c25db9e602645f14102a3626964781a6d616b655f6261645f74797065732e6578616d706c652e636f6d6469636f6e6f687474703a2f2f69636f6e2e706e67646e616d65676578616d706c6503a462696458201d1d1d1d1d1d1d1d1d1d1d1d1d1d1d1d1d1d1d1d1d1d1d1d1d1d1d1d1d1d1d1d6469636f6e6f687474703a2f2f69636f6e2e706e67646e616d65684a6f686e20446f656b646973706c61794e616d65624a440481a263616c672664747970656a7075626c69632d6b65790581a26269644064747970656a7075626c69632d6b657906a007a362726bf4627570f5627576f40850610c58506c864a708e26dd0ccf4be3d90901",
+    "cbor": "{1: h'687134968222EC17202E42505F8ED2B16AE22F16BB05B88C25DB9E602645F141', 2: {\"id\": \"make_bad_types.example.com\", \"icon\": \"http://icon.png\", \"name\": \"example\"}, 3: {\"id\": h'1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D', \"icon\": \"http://icon.png\", \"name\": \"John Doe\", \"displayName\": \"JD\"}, 4: [{\"alg\": -7, \"type\": \"public-key\"}], 5: [{\"id\": h'', \"type\": \"public-key\"}], 6: {}, 7: {\"rk\": false, \"up\": true, \"uv\": false}, 8: h'610C58506C864A708E26DD0CCF4BE3D9', 9: 1}",
+    "description": "make credential parameters 2"
+  },
+  {
+    "hex": "a9015820cdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcd02a3626964781a6d616b655f6261645f74797065732e6578616d706c652e636f6d6469636f6e6f687474703a2f2f69636f6e2e706e67646e616d65646A6F686E03a462696458201d1d1d1d1d1d1d1d1d1d1d1d1d1d1d1d1d1d1d1d1d1d1d1d1d1d1d1d1d1d1d1d6469636f6e6f687474703a2f2f69636f6e2e706e67646e616d65684a6f686e20446f656b646973706c61794e616d65624a440481a263616c672664747970656a7075626c69632d6b65790581a26269644064747970656a7075626c69632d6b657906a007a362726bf4627570f5627576f40850610c58506c864a708e26dd0ccf4be3d90901",
+    "cbor": "{1: h'CDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCD', 2: {\"id\": \"make_bad_types.example.com\", \"icon\": \"http://icon.png\", \"name\": \"john\"}, 3: {\"id\": h'1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D1D', \"icon\": \"http://icon.png\", \"name\": \"John Doe\", \"displayName\": \"JD\"}, 4: [{\"alg\": -7, \"type\": \"public-key\"}], 5: [{\"id\": h'', \"type\": \"public-key\"}], 6: {}, 7: {\"rk\": false, \"up\": true, \"uv\": false}, 8: h'610C58506C864A708E26DD0CCF4BE3D9', 9: 1}",
+    "description": "make credential parameters 3"
+  },
+  {
+    "hex": "a5015820687134968222ec17202e42505f8ed2b16ae22f16bb05b88c25db9e602645f14102a26269646b6578616d706c652e636f6d646e616d656441636d6503a462696458203082019330820138a0030201023082019330820138a0030201023082019330826469636f6e782b68747470733a2f2f706963732e6578616d706c652e636f6d2f30302f702f61426a6a6a707150622e706e67646e616d65766a6f686e70736d697468406578616d706c652e636f6d6b646973706c61794e616d656d4a6f686e20502e20536d6974680482a263616c672664747970656a7075626C69632D6B6579a263616c6739010064747970656a7075626C69632D6B657907a162726bf5",
+    "cbor": "{1: h'687134968222EC17202E42505F8ED2B16AE22F16BB05B88C25DB9E602645F141', 2: {\"id\": \"example.com\", \"name\": \"Acme\"}, 3: {\"id\": h'3082019330820138A0030201023082019330820138A003020102308201933082', \"icon\": \"https://pics.example.com/00/p/aBjjjpqPb.png\", \"name\": \"johnpsmith@example.com\", \"displayName\": \"John P. Smith\"}, 4: [{\"alg\": -7, \"type\": \"public-key\"}, {\"alg\": -257, \"type\": \"public-key\"}], 7: {\"rk\": true}}",
+    "description": "make credential parameters 4 (ex 4)"
+  },
+  {
+    "hex": "a301667061636b65640258f49d04bd8a43be42e45b38aee975ed4ff0b525e745051ac08014260bc12c86e7dd4100000000e00f21f9fc624cf200000000000000000070a148e03e8a315920691cead2a56117675a35857539dc16b51cc12a3acb525baeb124377f38026c29bf42f5b840285c1cf4b81783f3279f224b52dbff40523df87cc2a391a3ab888356002a5c7478385c9cc74fd0aea2a721247fb9023b3e4f6b080c59ebf62f2faa8345693dcb481932a50102032620012158202e3deacb152877fccbdc3bb03694178ba1c48fdd3943d49701c30a65144405202258204cdb1d0b76685e2652dfec4b5558e7e6dbf093dac0139919f9e14de98d0825cd03a263616c67266373696758453043022034870247c8292052f01ed3be4eccd22bb0ebb0344affce83733e2ac978f0d48b021f7f955405cb09b60f005c1c243f492865e2ca70871aeb35c7791365430ad4bd",
+    "cbor": "{1: \"packed\", 2: h'9D04BD8A43BE42E45B38AEE975ED4FF0B525E745051AC08014260BC12C86E7DD4100000000E00F21F9FC624CF200000000000000000070A148E03E8A315920691CEAD2A56117675A35857539DC16B51CC12A3ACB525BAEB124377F38026C29BF42F5B840285C1CF4B81783F3279F224B52DBFF40523DF87CC2A391A3AB888356002A5C7478385C9CC74FD0AEA2A721247FB9023B3E4F6B080C59EBF62F2FAA8345693DCB481932A50102032620012158202E3DEACB152877FCCBDC3BB03694178BA1C48FDD3943D49701C30A65144405202258204CDB1D0B76685E2652DFEC4B5558E7E6DBF093DAC0139919F9E14DE98D0825CD', 3: {\"alg\": -7, \"sig\": h'3043022034870247C8292052F01ED3BE4ECCD22BB0EBB0344AFFCE83733E2AC978F0D48B021F7F955405CB09B60F005C1C243F492865E2CA70871AEB35C7791365430AD4BD'}}",
+    "description": "get assertion parameters 1"
+  },
+  {
+    "hex": "a70178196765745f6261645f74797065732e6578616d706c652e636f6d025820cdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcd0381a262696458701c0746a765e28acf5305ed91025263648e4b95fe94b19ecc63fa01e2a5b83a933755ca8d7e4c460c6cca9be1bba428c8b3079920e4f5bf4abd327abeb0bc731fedd34f7fe15ad2e45290945122d38f9b1462618a186abd66209aebb8886fad71ae1e83429b628b096b7f2db0c157c00e64747970656a7075626c69632d6b657904a005a2627570f4627576f40650610c58506c864a708e26dd0ccf4be3d90701",
+    "cbor": "{1: \"get_bad_types.example.com\", 2: h'CDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCD', 3: [{\"id\": h'1C0746A765E28ACF5305ED91025263648E4B95FE94B19ECC63FA01E2A5B83A933755CA8D7E4C460C6CCA9BE1BBA428C8B3079920E4F5BF4ABD327ABEB0BC731FEDD34F7FE15AD2E45290945122D38F9B1462618A186ABD66209AEBB8886FAD71AE1E83429B628B096B7F2DB0C157C00E', \"type\": \"public-key\"}], 4: {}, 5: {\"up\": false, \"uv\": false}, 6: h'610C58506C864A708E26DD0CCF4BE3D9', 7: 1}",
+    "description": "get assertion parameters 2"
+  },
+  {
+    "hex": "a70178196765745f6261645f74797065732e6578616d706c652e636f6d025820cdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcd0381a262696458701c0746a765e28acf5305ed91025263648e4b95fe94b19ecc63fa01e2a5b83a933755ca8d7e4c460c6cca9be1bba428c8b3079920e4f5bf4abd327abeb0bc731fedd34f7fe15ad2e45290945122d38f9b1462618a186abd66209aebb8886fad71ae1e83429b628b096b7f2db0c157c00e64747970656a7075626c69632d6b657904a00650610c58506c864a708e26dd0ccf4be3d90701",
+    "cbor": "{1: \"get_bad_types.example.com\", 2: h'CDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCDCD', 3: [{\"id\": h'1C0746A765E28ACF5305ED91025263648E4B95FE94B19ECC63FA01E2A5B83A933755CA8D7E4C460C6CCA9BE1BBA428C8B3079920E4F5BF4ABD327ABEB0BC731FEDD34F7FE15AD2E45290945122D38F9B1462618A186ABD66209AEBB8886FAD71AE1E83429B628B096B7F2DB0C157C00E', \"type\": \"public-key\"}], 4: {}, 6: h'610C58506C864A708E26DD0CCF4BE3D9', 7: 1}",
+    "description": "get assertion parameters 3 (no key 5)"
+  },
+  {
+    "hex": "a4016b6578616d706c652e636f6d025820687134968222ec17202e42505f8ed2b1687134968222ec17202e42505f8ed2b10382a26269645840f22006de4f905af68a43942f024f2a5ece603d9c6d4b3df8be08ed01fc442646d034858ac75bed3fd580bf9808d94fcbee82b9b2ef6677af0adcc35852ea6b9e64747970656a7075626C69632D6B6579a26269645832030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030364747970656a7075626C69632D6B657905a1627576f5",
+    "cbor": "{1: \"example.com\", 2: h'687134968222EC17202E42505F8ED2B1687134968222EC17202E42505F8ED2B1', 3: [{\"id\": h'F22006DE4F905AF68A43942F024F2A5ECE603D9C6D4B3DF8BE08ED01FC442646D034858AC75BED3FD580BF9808D94FCBEE82B9B2EF6677AF0ADCC35852EA6B9E', \"type\": \"public-key\"}, {\"id\": h'0303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303', \"type\": \"public-key\"}], 5: {\"uv\": true}}",
+    "description": "get assertion parameters 4 (ex 5)"
+  },
+  {
+    "hex": "a201010202",
+    "cbor": "{1: 1, 2: 2}",
+    "description": "client pin parameters 1 (only key 1,2)"
+  },
+  {
+    "hex": "a50101020303a501020338182001215820b20717fbc7c82517f511027d9e80888abd33a1837ce835a50ceffd4dea14337b2258209d132823edd852dcc21e4923168df96fe69ea591e1c2d13e98e4920673ec31b004400540",
+    "cbor": "{1: 1, 2: 3, 3: {1: 2, 3: -25, -1: 1, -2: h'B20717FBC7C82517F511027D9E80888ABD33A1837CE835A50CEFFD4DEA14337B', -3: h'9D132823EDD852DCC21E4923168DF96FE69EA591E1C2D13E98E4920673EC31B0'}, 4: h'', 5: h''}",
+    "description": "client pin parameters 2"
+  },
+  {
+    "hex": "a50101020303a50102033818200121582060a086e3e9d1d95618826e706000a66b0809fadd29fbc50bb430d1fd21512f89225820c7d8411433be1e2728a397c66cca8d8b36b738cda54ee027d5efaf72c0db050a04504518a1ba83801245c6f8cad90952cda5055840a9eed54033b9f8fad7f76c69c8469f69c2e623ccb7819a31520b4da7756fc9bd1d4d4fc8d82df3284e9b3f600f03e994c6492a75fc2ed660a33ad343917aa7e2",
+    "cbor": "{1: 1, 2: 3, 3: {1: 2, 3: -25, -1: 1, -2: h'60A086E3E9D1D95618826E706000A66B0809FADD29FBC50BB430D1FD21512F89', -3: h'C7D8411433BE1E2728A397C66CCA8D8B36B738CDA54EE027D5EFAF72C0DB050A'}, 4: h'4518A1BA83801245C6F8CAD90952CDA5', 5: h'A9EED54033B9F8FAD7F76C69C8469F69C2E623CCB7819A31520B4DA7756FC9BD1D4D4FC8D82DF3284E9B3F600F03E994C6492A75FC2ED660A33AD343917AA7E2'}",
+    "description": "client pin parameters 3"
+  },
+  {
+    "hex": "a40101020503a50102033818200121582060a086e3e9d1d95618826e706000a66b0809fadd29fbc50bb430d1fd21512f89225820c7d8411433be1e2728a397c66cca8d8b36b738cda54ee027d5efaf72c0db050a06509cac212d435c7f03d0ffa29caedf0e35",
+    "cbor": "{1: 1, 2: 5, 3: {1: 2, 3: -25, -1: 1, -2: h'60A086E3E9D1D95618826E706000A66B0809FADD29FBC50BB430D1FD21512F89', -3: h'C7D8411433BE1E2728A397C66CCA8D8B36B738CDA54EE027D5EFAF72C0DB050A'}, 6: h'9CAC212D435C7F03D0FFA29CAEDF0E35'}",
+    "description": "client pin parameters 4"
+  }
+]
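
Each `hex` field above is the raw byte string a seed file contains; `cbor` is only its human-readable diagnostic notation. A hedged sketch of the kind of fuzz target these seeds would feed (the target name and the decoder entry point are assumptions, not part of this diff):

```rust
// Hypothetical libFuzzer-style entry point: seeds are raw CBOR bytes,
// so the harness hands them directly to the CBOR decoder.
fn fuzz_ctap2_parameters(data: &[u8]) {
    // `cbor::read` is assumed to be the crate's decoding entry point;
    // the fuzzer only cares that decoding never panics.
    let _ = cbor::read(data);
}
```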

fuzz/make_corpus.py (new file)
@@ -0,0 +1,60 @@
+import argparse
+import json
+import os.path
+
+# Creates a directory containing seed inputs from a json file having
+# the following structure:
+# [
+#   {
+#     "hex": "a901a1182a182a02a3626964781a6d616b655f6261645f7...",
+#     "cbor": "{1: h'42', 2: {\"id\": \"make.example.com\", ...",
+#     "description": "make credential parameters"
+#   },
+#   ...
+# ]
+#
+# Usage:
+# - pass the resulting corpus directory path as the first argument
+# - pass the json file path to make the corpus from as the second argument
+# Example:
+# python make_corpus.py ./corpus ./corpus_file.json
+
+
+# Creates a corpus directory at the given path from the given json file.
+def make_corpus(corpus_dir, corpus_json):
+  if not os.path.exists(corpus_dir):
+    os.makedirs(corpus_dir)
+  elif not os.path.isdir(corpus_dir):
+    raise NotADirectoryError
+
+  if os.path.isfile(corpus_json) and \
+      os.path.splitext(corpus_json)[-1] == ".json":
+    with open(corpus_json) as corpus_file:
+      corpus = json.load(corpus_file)
+  else:
+    raise TypeError
+
+  for i, seed_file in enumerate(corpus):
+    seed_file_name = "seed_file_" + str(i)
+    raw_hex = bytes.fromhex(seed_file["hex"])
+    with open(os.path.join(corpus_dir, seed_file_name), "wb") as f:
+      f.write(raw_hex)
+
+
+def main():
+  parser = argparse.ArgumentParser()
+  parser.add_argument(
+      "corpus_directory", help="the resulting corpus directory path")
+  parser.add_argument(
+      "corpus_json", help="the json file path to make the corpus from")
+  args = parser.parse_args()
+  try:
+    make_corpus(args.corpus_directory, args.corpus_json)
+  except NotADirectoryError:
+    print(args.corpus_directory + " is not a directory.")
+  except TypeError:
+    print(args.corpus_json + " must be a json file.")
+
+
+if __name__ == "__main__":
+  main()
libraries/persistent_store/src/bitfield.rs
@@ -13,8 +13,6 @@
 // limitations under the License.
 
 //! Helps manipulate bit fields in 32-bit words.
-// TODO(ia0): Remove when the module is used.
-#![cfg_attr(not(test), allow(dead_code, unused_macros))]
 
 use crate::{StoreError, StoreResult};
 
@@ -180,24 +178,29 @@ macro_rules! bitfield_impl {
     // - Inputs are bit field descriptors
     // - Position is the number of bits used by prior bit fields
     // - Outputs are the bit field definitions
-    ([$($output: tt)*]{ pos: $pos: expr }[$name: ident: Bit, $($input: tt)*]) => {
+    ([$($output: tt)*]{ pos: $pos: expr }
+     [$(#[$meta: meta])* $name: ident: Bit, $($input: tt)*]) => {
         bitfield_impl! {
-            [$($output)* const $name: Bit = Bit { pos: $pos };]
+            [$($output)* $(#[$meta])* const $name: Bit = Bit { pos: $pos };]
             { pos: $pos + 1 }
             [$($input)*]
         }
     };
-    ([$($output: tt)*]{ pos: $pos: expr }[$name: ident: Field <= $max: expr, $($input: tt)*]) => {
+    ([$($output: tt)*]{ pos: $pos: expr }
+     [$(#[$meta: meta])* $name: ident: Field <= $max: expr, $($input: tt)*]) => {
         bitfield_impl! {
-            [$($output)* const $name: Field = Field { pos: $pos, len: num_bits($max) };]
+            [$($output)* $(#[$meta])* const $name: Field = Field {
+                pos: $pos,
+                len: num_bits($max),
+            };]
             { pos: $pos + $name.len }
             [$($input)*]
         }
     };
     ([$($output: tt)*]{ pos: $pos: expr }
-     [$name: ident: Checksum <= $max: expr, $($input: tt)*]) => {
+     [$(#[$meta: meta])* $name: ident: Checksum <= $max: expr, $($input: tt)*]) => {
         bitfield_impl! {
-            [$($output)* const $name: Checksum = Checksum {
+            [$($output)* $(#[$meta])* const $name: Checksum = Checksum {
                 field: Field { pos: $pos, len: num_bits($max) }
             };]
             { pos: $pos + $name.field.len }
@@ -213,9 +216,9 @@ macro_rules! bitfield_impl {
         }
     };
     ([$($output: tt)*]{ pos: $pos: expr }
-     [$name: ident: ConstField = $bits: tt, $($input: tt)*]) => {
+     [$(#[$meta: meta])* $name: ident: ConstField = $bits: tt, $($input: tt)*]) => {
         bitfield_impl! {
-            Reverse $name []$bits
+            Reverse $(#[$meta])* $name []$bits
             [$($output)*]{ pos: $pos }[$($input)*]
         }
     };
@@ -224,17 +227,17 @@ macro_rules! bitfield_impl {
     // Auxiliary rules for constant bit fields:
     // - Input is a sequence of bits
     // - Output is the reversed sequence of bits
-    (Reverse $name: ident [$($output_bits: tt)*] [$bit: tt $($input_bits: tt)*]
+    (Reverse $(#[$meta: meta])* $name: ident [$($output_bits: tt)*] [$bit: tt $($input_bits: tt)*]
      [$($output: tt)*]{ pos: $pos: expr }[$($input: tt)*]) => {
         bitfield_impl! {
-            Reverse $name [$bit $($output_bits)*][$($input_bits)*]
+            Reverse $(#[$meta])* $name [$bit $($output_bits)*][$($input_bits)*]
             [$($output)*]{ pos: $pos }[$($input)*]
         }
     };
-    (Reverse $name: ident $bits: tt []
+    (Reverse $(#[$meta: meta])* $name: ident $bits: tt []
      [$($output: tt)*]{ pos: $pos: expr }[$($input: tt)*]) => {
         bitfield_impl! {
-            ConstField $name { len: 0, val: 0 }$bits
+            ConstField $(#[$meta])* $name { len: 0, val: 0 }$bits
             [$($output)*]{ pos: $pos }[$($input)*]
         }
     };
@@ -242,10 +245,10 @@ macro_rules! bitfield_impl {
     // Auxiliary rules for constant bit fields:
     // - Input is a sequence of bits in reversed order
    // - Output is the constant bit field definition with the sequence of bits as value
-    (ConstField $name: ident { len: $len: expr, val: $val: expr }[]
+    (ConstField $(#[$meta: meta])* $name: ident { len: $len: expr, val: $val: expr }[]
     [$($output: tt)*]{ pos: $pos: expr }[$($input: tt)*]) => {
        bitfield_impl! {
-            [$($output)* const $name: ConstField = ConstField {
+            [$($output)* $(#[$meta])* const $name: ConstField = ConstField {
                field: Field { pos: $pos, len: $len },
                value: $val,
            };]
@@ -253,10 +256,10 @@ macro_rules! bitfield_impl {
             [$($input)*]
         }
     };
-    (ConstField $name: ident { len: $len: expr, val: $val: expr }[$bit: tt $($bits: tt)*]
-     [$($output: tt)*]{ pos: $pos: expr }[$($input: tt)*]) => {
+    (ConstField $(#[$meta: meta])* $name: ident { len: $len: expr, val: $val: expr }
+     [$bit: tt $($bits: tt)*][$($output: tt)*]{ pos: $pos: expr }[$($input: tt)*]) => {
         bitfield_impl! {
-            ConstField $name { len: $len + 1, val: $val * 2 + $bit }[$($bits)*]
+            ConstField $(#[$meta])* $name { len: $len + 1, val: $val * 2 + $bit }[$($bits)*]
             [$($output)*]{ pos: $pos }[$($input)*]
        }
    };
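
The point of threading `$(#[$meta: meta])*` through every rule is that callers can now attach attributes, typically doc comments, to each bit field descriptor. A hedged sketch of an invocation after this change (the front-end `bitfield!` macro name and the field names are illustrative, not taken from this diff):

```rust
bitfield! {
    /// Whether the entry is deleted.
    DELETED: Bit,
    /// Length of the value in bytes.
    LENGTH: Field <= 1023,
    /// Constant marker distinguishing this word kind.
    MARKER: ConstField = [0 1 1],
}
```

Since `/// ...` desugars to `#[doc = "..."]`, it matches the new `$(#[$meta: meta])*` fragment and is forwarded onto the generated `const` definitions.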

libraries/persistent_store/src/buffer.rs (new file)
@@ -0,0 +1,683 @@
+// Copyright 2019-2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::{Storage, StorageError, StorageIndex, StorageResult};
+use alloc::borrow::Borrow;
+use alloc::boxed::Box;
+use alloc::vec;
+
+/// Simulates a flash storage using a buffer in memory.
+///
+/// This buffer storage can be used in place of an actual flash storage. It is particularly useful
+/// for tests and fuzzing, for which it has dedicated functionalities.
+///
+/// This storage tracks how many times words are written between page erase cycles, how many times
+/// pages are erased, and whether an operation flips bits in the wrong direction (optional).
+/// Operations panic if those conditions are broken. This storage also allows interrupting
+/// operations, either to inspect the storage or to corrupt the interrupted operation.
+#[derive(Clone)]
+pub struct BufferStorage {
+    /// Content of the storage.
+    storage: Box<[u8]>,
+
+    /// Options of the storage.
+    options: BufferOptions,
+
+    /// Number of times a word was written since the last time its page was erased.
+    word_writes: Box<[usize]>,
+
+    /// Number of times a page was erased.
+    page_erases: Box<[usize]>,
+
+    /// Interruption state.
+    interruption: Interruption,
+}
+
+/// Options of a buffer storage.
+#[derive(Clone, Debug)]
+pub struct BufferOptions {
+    /// Size of a word in bytes.
+    pub word_size: usize,
+
+    /// Size of a page in bytes.
+    pub page_size: usize,
+
+    /// How many times a word can be written between page erase cycles.
+    pub max_word_writes: usize,
+
+    /// How many times a page can be erased.
+    pub max_page_erases: usize,
+
+    /// Whether writes may only flip bits from 1 to 0 (strict flash semantics).
+    pub strict_write: bool,
+}
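
To make the options concrete, a hedged sketch of constructing the simulator with flash-like geometry (the numbers are illustrative, not taken from this diff):

```rust
// Sketch: a 20-page storage with 4 KiB pages and nRF52840-like limits.
let options = BufferOptions {
    word_size: 4,
    page_size: 0x1000,
    max_word_writes: 2,
    max_page_erases: 10000,
    strict_write: true,
};
// Flash reads as all-ones when erased, hence the 0xff fill.
let storage = vec![0xff; 20 * options.page_size].into_boxed_slice();
let buffer = BufferStorage::new(storage, options);
```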
+
+/// Corrupts a slice given actual and expected value.
+///
+/// A corruption function is called exactly once and takes two arguments:
+/// - A mutable slice representing the storage before the interrupted operation.
+/// - A shared slice representing what the storage would have been if the operation had not been
+///   interrupted.
+///
+/// The corruption function may flip an arbitrary number of bits in the mutable slice, but may only
+/// flip bits that differ between both slices.
+pub type BufferCorruptFunction<'a> = Box<dyn FnOnce(&mut [u8], &[u8]) + 'a>;
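
A hedged example of a closure satisfying this contract: it flips only the lowest bit that differs between the two slices, modeling a write that was cut off after a single bit (a sketch, not part of the diff):

```rust
// Sketch: corrupt by flipping the first differing bit only, which stays
// within the contract that only differing bits may be flipped.
let corrupt: BufferCorruptFunction = Box::new(|before, after| {
    for (b, &a) in before.iter_mut().zip(after.iter()) {
        let diff = *b ^ a;
        if diff != 0 {
            *b ^= diff & diff.wrapping_neg(); // isolate the lowest differing bit
            return;
        }
    }
});
```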
+
+impl BufferStorage {
+    /// Creates a buffer storage.
+    ///
+    /// # Panics
+    ///
+    /// The following preconditions must hold:
+    /// - `options.word_size` must be a power of two.
+    /// - `options.page_size` must be a power of two.
+    /// - `options.page_size` must be word-aligned.
+    /// - `storage.len()` must be page-aligned.
+    pub fn new(storage: Box<[u8]>, options: BufferOptions) -> BufferStorage {
+        assert!(options.word_size.is_power_of_two());
+        assert!(options.page_size.is_power_of_two());
+        let num_words = storage.len() / options.word_size;
+        let num_pages = storage.len() / options.page_size;
+        let buffer = BufferStorage {
+            storage,
+            options,
+            word_writes: vec![0; num_words].into_boxed_slice(),
+            page_erases: vec![0; num_pages].into_boxed_slice(),
+            interruption: Interruption::Ready,
+        };
+        assert!(buffer.is_word_aligned(buffer.options.page_size));
+        assert!(buffer.is_page_aligned(buffer.storage.len()));
+        buffer
+    }
+
+    /// Arms an interruption after a given delay.
+    ///
+    /// Before each subsequent mutable operation (write or erase), the delay is decremented if
+    /// positive. If the delay has elapsed, the operation is saved and an error is returned.
+    /// Subsequent operations will panic until the interrupted operation is [corrupted] or the
+    /// interruption is [reset].
+    ///
+    /// # Panics
+    ///
+    /// Panics if an interruption is already armed.
+    ///
+    /// [corrupted]: struct.BufferStorage.html#method.corrupt_operation
+    /// [reset]: struct.BufferStorage.html#method.reset_interruption
+    pub fn arm_interruption(&mut self, delay: usize) {
+        self.interruption.arm(delay);
+    }
+
+    /// Disarms an interruption that did not trigger.
+    ///
+    /// Returns the remaining delay.
+    ///
+    /// # Panics
+    ///
+    /// Panics if any of the following conditions hold:
+    /// - An interruption was not [armed].
+    /// - An interruption was armed and it has triggered.
+    ///
+    /// [armed]: struct.BufferStorage.html#method.arm_interruption
+    pub fn disarm_interruption(&mut self) -> usize {
+        self.interruption.get().err().unwrap()
+    }
+
+    /// Resets an interruption regardless of triggering.
+    ///
+    /// # Panics
+    ///
+    /// Panics if an interruption was not [armed].
+    ///
+    /// [armed]: struct.BufferStorage.html#method.arm_interruption
+    pub fn reset_interruption(&mut self) {
+        let _ = self.interruption.get();
+    }
+
+    /// Corrupts an interrupted operation.
+    ///
+    /// Applies the [corruption function] to the storage. Counters are updated accordingly:
+    /// - If a word is fully written, its counter is incremented regardless of whether other words
+    ///   of the same operation have been fully written.
+    /// - If a page is fully erased, its counter is incremented (and its word counters are reset).
+    ///
+    /// # Panics
+    ///
+    /// Panics if any of the following conditions hold:
+    /// - An interruption was not [armed].
+    /// - An interruption was armed but did not trigger.
+    /// - The corruption function corrupts more bits than allowed.
+    /// - The interrupted operation itself would have panicked.
+    ///
+    /// [armed]: struct.BufferStorage.html#method.arm_interruption
+    /// [corruption function]: type.BufferCorruptFunction.html
+    pub fn corrupt_operation(&mut self, corrupt: BufferCorruptFunction) {
+        let operation = self.interruption.get().unwrap();
+        let range = self.operation_range(&operation).unwrap();
+        let mut before = self.storage[range.clone()].to_vec().into_boxed_slice();
+        match operation {
+            BufferOperation::Write { value: after, .. } => {
+                corrupt(&mut before, &after);
+                self.incr_word_writes(range.start, &before, &after);
+            }
+            BufferOperation::Erase { page } => {
+                let after = vec![0xff; self.page_size()].into_boxed_slice();
+                corrupt(&mut before, &after);
+                if before == after {
+                    self.incr_page_erases(page);
+                }
+            }
+        };
+        self.storage[range].copy_from_slice(&before);
+    }
+
+    /// Returns the number of times a word was written.
+    pub fn get_word_writes(&self, word: usize) -> usize {
+        self.word_writes[word]
+    }
+
+    /// Returns the number of times a page was erased.
+    pub fn get_page_erases(&self, page: usize) -> usize {
+        self.page_erases[page]
+    }
+
+    /// Sets the number of times a page was erased.
+    pub fn set_page_erases(&mut self, page: usize, cycle: usize) {
+        self.page_erases[page] = cycle;
+    }
+
+    /// Returns whether a number is word-aligned.
+    fn is_word_aligned(&self, x: usize) -> bool {
+        x & (self.options.word_size - 1) == 0
+    }
+
+    /// Returns whether a number is page-aligned.
+    fn is_page_aligned(&self, x: usize) -> bool {
+        x & (self.options.page_size - 1) == 0
+    }
+
+    /// Updates the counters as if a page was erased.
+    ///
+    /// The page counter of that page is incremented and the word counters of that page are reset.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the maximum number of erase cycles per page is reached.
+    fn incr_page_erases(&mut self, page: usize) {
+        assert!(self.page_erases[page] < self.max_page_erases());
+        self.page_erases[page] += 1;
+        let num_words = self.page_size() / self.word_size();
+        for word in 0..num_words {
+            self.word_writes[page * num_words + word] = 0;
+        }
+    }
+
+    /// Updates the word counters as if a partial write occurred.
+    ///
+    /// The partial write is described as if `complete` was supposed to be written to the storage
+    /// starting at byte `index`, but actually only `value` was written. Word counters are
+    /// incremented only if their value would change and they would be completely written.
+    ///
+    /// # Preconditions
+    ///
+    /// - `index` must be word-aligned.
+    /// - `value` and `complete` must have the same word-aligned length.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the maximum number of writes per word is reached.
+    fn incr_word_writes(&mut self, index: usize, value: &[u8], complete: &[u8]) {
+        let word_size = self.word_size();
+        for i in 0..value.len() / word_size {
+            let range = core::ops::Range {
+                start: i * word_size,
+                end: (i + 1) * word_size,
+            };
+            // Partial word writes do not count.
+            if value[range.clone()] != complete[range.clone()] {
+                continue;
+            }
+            // Words are written only if necessary.
+            if value[range.clone()] == self.storage[index..][range] {
+                continue;
+            }
+            let word = index / word_size + i;
+            assert!(self.word_writes[word] < self.max_word_writes());
+            self.word_writes[word] += 1;
+        }
+    }
+
+    /// Returns the storage range of an operation.
+    fn operation_range(
+        &self,
+        operation: &BufferOperation<impl Borrow<[u8]>>,
+    ) -> StorageResult<core::ops::Range<usize>> {
+        match *operation {
+            BufferOperation::Write { index, ref value } => index.range(value.borrow().len(), self),
+            BufferOperation::Erase { page } => {
+                StorageIndex { page, byte: 0 }.range(self.page_size(), self)
+            }
+        }
+    }
+}
+
+impl Storage for BufferStorage {
+    fn word_size(&self) -> usize {
+        self.options.word_size
+    }
+
+    fn page_size(&self) -> usize {
+        self.options.page_size
+    }
+
+    fn num_pages(&self) -> usize {
+        self.storage.len() / self.options.page_size
+    }
+
+    fn max_word_writes(&self) -> usize {
+        self.options.max_word_writes
+    }
+
+    fn max_page_erases(&self) -> usize {
+        self.options.max_page_erases
+    }
+
+    fn read_slice(&self, index: StorageIndex, length: usize) -> StorageResult<&[u8]> {
+        Ok(&self.storage[index.range(length, self)?])
+    }
+
+    fn write_slice(&mut self, index: StorageIndex, value: &[u8]) -> StorageResult<()> {
+        if !self.is_word_aligned(index.byte) || !self.is_word_aligned(value.len()) {
+            return Err(StorageError::NotAligned);
+        }
+        let operation = BufferOperation::Write { index, value };
+        let range = self.operation_range(&operation)?;
+        // Interrupt the operation if armed and the delay has expired.
+        self.interruption.tick(&operation)?;
+        // Check and update counters.
+        self.incr_word_writes(range.start, value, value);
+        // Check strict write.
+        if self.options.strict_write {
+            for (byte, &val) in range.clone().zip(value.iter()) {
+                assert_eq!(self.storage[byte] & val, val);
+            }
+        }
+        // Write to the storage.
+        self.storage[range].copy_from_slice(value);
+        Ok(())
+    }
+
+    fn erase_page(&mut self, page: usize) -> StorageResult<()> {
+        let operation = BufferOperation::Erase { page };
+        let range = self.operation_range(&operation)?;
+        // Interrupt the operation if armed and the delay has expired.
+        self.interruption.tick(&operation)?;
+        // Check and update counters.
+        self.incr_page_erases(page);
+        // Write to the storage.
+        for byte in &mut self.storage[range] {
+            *byte = 0xff;
+        }
+        Ok(())
+    }
+}
+
+impl core::fmt::Display for BufferStorage {
+    fn fmt(&self, f: &mut core::fmt::Formatter) -> Result<(), core::fmt::Error> {
+        let num_pages = self.num_pages();
+        let num_words = self.page_size() / self.word_size();
+        let num_bytes = self.word_size();
+        for page in 0..num_pages {
+            write!(f, "[{}]", self.page_erases[page])?;
+            for word in 0..num_words {
+                write!(f, " [{}]", self.word_writes[page * num_words + word])?;
+                for byte in 0..num_bytes {
+                    let index = (page * num_words + word) * num_bytes + byte;
+                    write!(f, "{:02x}", self.storage[index])?;
+                }
+            }
+            writeln!(f)?;
+        }
+        Ok(())
+    }
+}
+
+/// Represents a storage operation.
+///
+/// It is polymorphic over the ownership of the byte slice to avoid unnecessary copies.
+#[derive(Clone, Debug, PartialEq, Eq)]
+enum BufferOperation<ByteSlice: Borrow<[u8]>> {
+    /// Represents a write operation.
+    Write {
+        /// The storage index at which the write should occur.
+        index: StorageIndex,
+
+        /// The slice that should be written.
+        value: ByteSlice,
+    },
+
+    /// Represents an erase operation.
+    Erase {
+        /// The page that should be erased.
+        page: usize,
+    },
+}
+
+/// Represents a storage operation owning its byte slices.
+type OwnedBufferOperation = BufferOperation<Box<[u8]>>;
+
+/// Represents a storage operation sharing its byte slices.
+type SharedBufferOperation<'a> = BufferOperation<&'a [u8]>;
+
+impl<'a> SharedBufferOperation<'a> {
+    fn to_owned(&self) -> OwnedBufferOperation {
+        match *self {
+            BufferOperation::Write { index, value } => BufferOperation::Write {
+                index,
+                value: value.to_vec().into_boxed_slice(),
+            },
+            BufferOperation::Erase { page } => BufferOperation::Erase { page },
+        }
+    }
+}
+
+/// Controls when an operation is interrupted.
+///
+/// This can be used to simulate power-offs while the device is writing to the storage or erasing a
+/// page in the storage.
+#[derive(Clone)]
+enum Interruption {
+    /// Mutable operations have normal behavior.
+    Ready,
+
+    /// If the delay is positive, mutable operations decrement it. If the count is zero, mutable
+    /// operations fail and are saved.
+    Armed { delay: usize },
+
+    /// Mutable operations panic.
+    Saved { operation: OwnedBufferOperation },
+}
+
+impl Interruption {
+    /// Arms an interruption for a given delay.
+    ///
+    /// # Panics
+    ///
+    /// Panics if an interruption is already armed.
+    fn arm(&mut self, delay: usize) {
+        match self {
+            Interruption::Ready => *self = Interruption::Armed { delay },
+            _ => panic!(),
+        }
+    }
+
+    /// Disarms an interruption.
+    ///
+    /// Returns the interrupted operation if any, otherwise the remaining delay.
+    ///
+    /// # Panics
+    ///
+    /// Panics if an interruption was not armed.
+    fn get(&mut self) -> Result<OwnedBufferOperation, usize> {
+        let mut interruption = Interruption::Ready;
+        core::mem::swap(self, &mut interruption);
+        match interruption {
+            Interruption::Armed { delay } => Err(delay),
+            Interruption::Saved { operation } => Ok(operation),
+            _ => panic!(),
+        }
+    }
+
+    /// Interrupts an operation if the delay is over.
+    ///
+    /// Decrements the delay if positive. Otherwise, the operation is stored and an error is
+    /// returned to interrupt the operation.
+    ///
+    /// # Panics
+    ///
+    /// Panics if an operation has already been interrupted and the interruption has not been
+    /// disarmed.
+    fn tick(&mut self, operation: &SharedBufferOperation) -> StorageResult<()> {
+        match self {
+            Interruption::Ready => (),
+            Interruption::Armed { delay } if *delay == 0 => {
+                let operation = operation.to_owned();
+                *self = Interruption::Saved { operation };
+                return Err(StorageError::CustomError);
+            }
+            Interruption::Armed { delay } => *delay -= 1,
+            Interruption::Saved { .. } => panic!(),
+        }
+        Ok(())
+    }
+}
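
Putting arm/tick/corrupt together, a hedged sketch of the intended power-loss workflow, relying only on the API above (not part of the diff):

```rust
// Sketch: interrupt the first mutable operation and model a clean power
// loss where nothing reached the flash.
fn simulate_power_loss(buffer: &mut BufferStorage) {
    buffer.arm_interruption(0);
    let index = StorageIndex { page: 0, byte: 0 };
    // The write is interrupted by `tick` and returns an error.
    assert!(buffer.write_slice(index, &[0x00; 4]).is_err());
    // A no-op corruption leaves the storage exactly as it was before the write.
    buffer.corrupt_operation(Box::new(|_before, _after| {}));
}
```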
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    const NUM_PAGES: usize = 2;
+    const OPTIONS: BufferOptions = BufferOptions {
+        word_size: 4,
+        page_size: 16,
+        max_word_writes: 2,
+        max_page_erases: 3,
+        strict_write: true,
+    };
+    // These words are decreasing bit patterns: bits are only changed from 1 to 0 and at least one
+    // bit is changed.
+    const BLANK_WORD: &[u8] = &[0xff, 0xff, 0xff, 0xff];
+    const FIRST_WORD: &[u8] = &[0xee, 0xdd, 0xbb, 0x77];
+    const SECOND_WORD: &[u8] = &[0xca, 0xc9, 0xa9, 0x65];
+    const THIRD_WORD: &[u8] = &[0x88, 0x88, 0x88, 0x44];
+
+    fn new_storage() -> Box<[u8]> {
+        vec![0xff; NUM_PAGES * OPTIONS.page_size].into_boxed_slice()
+    }
+
+    #[test]
+    fn words_are_decreasing() {
+        fn assert_is_decreasing(prev: &[u8], next: &[u8]) {
+            for (&prev, &next) in prev.iter().zip(next.iter()) {
+                assert_eq!(prev & next, next);
+                assert!(prev != next);
+            }
+        }
+        assert_is_decreasing(BLANK_WORD, FIRST_WORD);
+        assert_is_decreasing(FIRST_WORD, SECOND_WORD);
+        assert_is_decreasing(SECOND_WORD, THIRD_WORD);
+    }
+
+    #[test]
+    fn options_ok() {
+        let buffer = BufferStorage::new(new_storage(), OPTIONS);
+        assert_eq!(buffer.word_size(), OPTIONS.word_size);
+        assert_eq!(buffer.page_size(), OPTIONS.page_size);
+        assert_eq!(buffer.num_pages(), NUM_PAGES);
+        assert_eq!(buffer.max_word_writes(), OPTIONS.max_word_writes);
+        assert_eq!(buffer.max_page_erases(), OPTIONS.max_page_erases);
+    }
+
+    #[test]
+    fn read_write_ok() {
+        let mut buffer = BufferStorage::new(new_storage(), OPTIONS);
+        let index = StorageIndex { page: 0, byte: 0 };
+        let next_index = StorageIndex { page: 0, byte: 4 };
+        assert_eq!(buffer.read_slice(index, 4).unwrap(), BLANK_WORD);
+        buffer.write_slice(index, FIRST_WORD).unwrap();
+        assert_eq!(buffer.read_slice(index, 4).unwrap(), FIRST_WORD);
+        assert_eq!(buffer.read_slice(next_index, 4).unwrap(), BLANK_WORD);
+    }
+
+    #[test]
+    fn erase_ok() {
+        let mut buffer = BufferStorage::new(new_storage(), OPTIONS);
+        let index = StorageIndex { page: 0, byte: 0 };
+        let other_index = StorageIndex { page: 1, byte: 0 };
+        buffer.write_slice(index, FIRST_WORD).unwrap();
+        buffer.write_slice(other_index, FIRST_WORD).unwrap();
+        assert_eq!(buffer.read_slice(index, 4).unwrap(), FIRST_WORD);
+        assert_eq!(buffer.read_slice(other_index, 4).unwrap(), FIRST_WORD);
+        buffer.erase_page(0).unwrap();
+        assert_eq!(buffer.read_slice(index, 4).unwrap(), BLANK_WORD);
+        assert_eq!(buffer.read_slice(other_index, 4).unwrap(), FIRST_WORD);
+    }
+
+    #[test]
+    fn invalid_range() {
+        let mut buffer = BufferStorage::new(new_storage(), OPTIONS);
+        let index = StorageIndex { page: 0, byte: 12 };
+        let half_index = StorageIndex { page: 0, byte: 14 };
+        let over_index = StorageIndex { page: 0, byte: 16 };
+        let bad_page = StorageIndex { page: 2, byte: 0 };
+
+        // Reading a word in the storage is ok.
+        assert!(buffer.read_slice(index, 4).is_ok());
+        // Reading a half-word in the storage is ok.
+        assert!(buffer.read_slice(half_index, 2).is_ok());
+        // Reading even a single byte outside a page is not ok.
+        assert!(buffer.read_slice(over_index, 1).is_err());
+        // But reading an empty slice just after a page is ok.
+        assert!(buffer.read_slice(over_index, 0).is_ok());
+        // Reading even an empty slice outside the storage is not ok.
+        assert!(buffer.read_slice(bad_page, 0).is_err());
+
+        // Writing a word in the storage is ok.
+        assert!(buffer.write_slice(index, FIRST_WORD).is_ok());
+        // Writing an unaligned word is not ok.
+        assert!(buffer.write_slice(half_index, FIRST_WORD).is_err());
+        // Writing a word outside a page is not ok.
+        assert!(buffer.write_slice(over_index, FIRST_WORD).is_err());
+        // But writing an empty slice just after a page is ok.
+        assert!(buffer.write_slice(over_index, &[]).is_ok());
+        // Writing even an empty slice outside the storage is not ok.
+        assert!(buffer.write_slice(bad_page, &[]).is_err());
+
+        // Only pages in the storage can be erased.
+        assert!(buffer.erase_page(0).is_ok());
+        assert!(buffer.erase_page(2).is_err());
+    }
+
+    #[test]
+    fn write_twice_ok() {
+        let mut buffer = BufferStorage::new(new_storage(), OPTIONS);
+        let index = StorageIndex { page: 0, byte: 4 };
+        assert!(buffer.write_slice(index, FIRST_WORD).is_ok());
+        assert!(buffer.write_slice(index, SECOND_WORD).is_ok());
+    }
+
+    #[test]
+    fn write_twice_and_once_ok() {
+        let mut buffer = BufferStorage::new(new_storage(), OPTIONS);
+        let index = StorageIndex { page: 0, byte: 0 };
+        let next_index = StorageIndex { page: 0, byte: 4 };
+        assert!(buffer.write_slice(index, FIRST_WORD).is_ok());
+        assert!(buffer.write_slice(index, SECOND_WORD).is_ok());
+        assert!(buffer.write_slice(next_index, THIRD_WORD).is_ok());
+    }
+
+    #[test]
+    #[should_panic]
+    fn write_three_times_panics() {
+        let mut buffer = BufferStorage::new(new_storage(), OPTIONS);
+        let index = StorageIndex { page: 0, byte: 4 };
+        assert!(buffer.write_slice(index, FIRST_WORD).is_ok());
+        assert!(buffer.write_slice(index, SECOND_WORD).is_ok());
+        let _ = buffer.write_slice(index, THIRD_WORD);
+    }
+
+    #[test]
+    fn write_twice_then_once_ok() {
+        let mut buffer = BufferStorage::new(new_storage(), OPTIONS);
+        let index = StorageIndex { page: 0, byte: 0 };
+        assert!(buffer.write_slice(index, FIRST_WORD).is_ok());
+        assert!(buffer.write_slice(index, SECOND_WORD).is_ok());
+        assert!(buffer.erase_page(0).is_ok());
+        assert!(buffer.write_slice(index, FIRST_WORD).is_ok());
+    }
+
+    #[test]
+    fn erase_three_times_ok() {
+        let mut buffer = BufferStorage::new(new_storage(), OPTIONS);
+        assert!(buffer.erase_page(0).is_ok());
+        assert!(buffer.erase_page(0).is_ok());
+        assert!(buffer.erase_page(0).is_ok());
+    }
+
+    #[test]
+    fn erase_three_times_and_once_ok() {
+        let mut buffer = BufferStorage::new(new_storage(), OPTIONS);
+        assert!(buffer.erase_page(0).is_ok());
+        assert!(buffer.erase_page(0).is_ok());
+        assert!(buffer.erase_page(0).is_ok());
+        assert!(buffer.erase_page(1).is_ok());
+    }
+
+    #[test]
+    #[should_panic]
+    fn erase_four_times_panics() {
+        let mut buffer = BufferStorage::new(new_storage(), OPTIONS);
+        assert!(buffer.erase_page(0).is_ok());
+        assert!(buffer.erase_page(0).is_ok());
+        assert!(buffer.erase_page(0).is_ok());
+        let _ = buffer.erase_page(0).is_ok();
+    }
+
+    #[test]
+    #[should_panic]
+    fn switch_zero_to_one_panics() {
+        let mut buffer = BufferStorage::new(new_storage(), OPTIONS);
+        let index = StorageIndex { page: 0, byte: 0 };
+        assert!(buffer.write_slice(index, SECOND_WORD).is_ok());
+        let _ = buffer.write_slice(index, FIRST_WORD);
+    }
+
+    #[test]
+    fn interrupt_delay_ok() {
+        let mut buffer = BufferStorage::new(new_storage(), OPTIONS);
+
+        // Interrupt the second operation.
+        buffer.arm_interruption(1);
+
+        // The first operation should not fail.
+        buffer
+            .write_slice(StorageIndex { page: 0, byte: 0 }, &[0x5c; 8])
+            .unwrap();
+        // The delay should be decremented.
+        assert_eq!(buffer.disarm_interruption(), 0);
+        // The storage should have been modified.
+        assert_eq!(&buffer.storage[..8], &[0x5c; 8]);
+        assert!(buffer.storage[8..].iter().all(|&x| x == 0xff));
+    }
+
+    #[test]
+    fn interrupt_save_ok() {
+        let mut buffer = BufferStorage::new(new_storage(), OPTIONS);
+
+        // Interrupt the second operation.
+        buffer.arm_interruption(1);
+
+        // The first operation should succeed; the second should fail.
+        buffer
+            .write_slice(StorageIndex { page: 0, byte: 0 }, &[0x5c; 8])
+            .unwrap();
+        assert!(buffer
+            .write_slice(StorageIndex { page: 0, byte: 8 }, &[0x93; 8])
+            .is_err());
+        // The saved operation should represent the attempted change.
+        buffer.corrupt_operation(Box::new(|_, value| assert_eq!(value, &[0x93; 8])));
+        // The interrupted write should not have modified the storage.
+        assert_eq!(&buffer.storage[..8], &[0x5c; 8]);
+        assert!(buffer.storage[8..].iter().all(|&x| x == 0xff));
+    }
+}
957
libraries/persistent_store/src/format.rs
Normal file
@@ -0,0 +1,957 @@
// Copyright 2019-2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// TODO(ia0): Remove when the module is used.
#![allow(dead_code)]

use crate::bitfield::*;
use crate::{Storage, StorageIndex, StoreError, StoreResult};
use alloc::vec::Vec;
use core::cmp::min;

type WORD = u32;

/// Size of a word in bytes.
///
/// Currently, the store only supports storages where a word is 4 bytes.
const WORD_SIZE: usize = core::mem::size_of::<WORD>();

/// Minimum number of words per page.
///
/// Currently, the store only supports storages where pages have at least 8 words.
const MIN_NUM_WORDS_PER_PAGE: usize = 8;

/// Maximum size of a page in bytes.
///
/// Currently, the store only supports storages where pages are between 8 and 1024 [words].
///
/// [words]: constant.WORD_SIZE.html
const MAX_PAGE_SIZE: usize = 4096;

/// Maximum number of erase cycles.
///
/// Currently, the store only supports storages where the maximum number of erase cycles fits in 16
/// bits.
const MAX_ERASE_CYCLE: usize = 65535;

/// Minimum number of pages.
///
/// Currently, the store only supports storages with at least 3 pages.
const MIN_NUM_PAGES: usize = 3;

/// Maximum page index.
///
/// Thus the maximum number of pages is one more than this number. Currently, the store only
/// supports storages where the number of pages is between 3 and 64.
const MAX_PAGE_INDEX: usize = 63;

/// Maximum key index.
///
/// Thus the number of keys is one more than this number. Currently, the store only supports 4096
/// keys.
const MAX_KEY_INDEX: usize = 4095;

/// Maximum length in bytes of a user payload.
///
/// Currently, the store only supports values smaller than 1024 bytes.
const MAX_VALUE_LEN: usize = 1023;

/// Maximum number of updates per transaction.
///
/// Currently, the store only supports transactions with at most 31 updates.
const MAX_UPDATES: usize = 31;

/// Maximum number of words per virtual page.
const MAX_VIRT_PAGE_SIZE: usize = div_ceil(MAX_PAGE_SIZE, WORD_SIZE) - CONTENT_WORD;

/// Word with all bits set to one.
const ERASED_WORD: WORD = !(0 as WORD);

/// Helpers for a given storage configuration.
#[derive(Clone, Debug)]
pub struct Format {
    /// The size in bytes of a page in the storage.
    ///
    /// # Invariant
    ///
    /// - Words divide a page evenly.
    /// - There are at least 8 words in a page.
    /// - There are at most `MAX_PAGE_SIZE` bytes in a page.
    page_size: usize,

    /// The number of pages in the storage.
    ///
    /// # Invariant
    ///
    /// - There are at least 3 pages.
    /// - There are at most `MAX_PAGE_INDEX + 1` pages.
    num_pages: usize,

    /// The maximum number of times a page can be erased.
    ///
    /// # Invariant
    ///
    /// - A page can be erased at most `MAX_ERASE_CYCLE` times.
    max_page_erases: usize,
}

impl Format {
    /// Extracts the format from a storage.
    ///
    /// Returns `None` if the storage is not [supported].
    ///
    /// [supported]: struct.Format.html#method.is_storage_supported
    pub fn new<S: Storage>(storage: &S) -> Option<Format> {
        if Format::is_storage_supported(storage) {
            Some(Format {
                page_size: storage.page_size(),
                num_pages: storage.num_pages(),
                max_page_erases: storage.max_page_erases(),
            })
        } else {
            None
        }
    }

    /// Returns whether a storage is supported.
    ///
    /// A storage is supported if the following conditions hold:
    /// - The size of a word is [`WORD_SIZE`] bytes.
    /// - The size of a word evenly divides the size of a page.
    /// - A page contains at least [`MIN_NUM_WORDS_PER_PAGE`] words.
    /// - A page contains at most [`MAX_PAGE_SIZE`] bytes.
    /// - There are at least [`MIN_NUM_PAGES`] pages.
    /// - There are at most [`MAX_PAGE_INDEX`]` + 1` pages.
    /// - A word can be written at least twice between erase cycles.
    /// - The maximum number of erase cycles is at most [`MAX_ERASE_CYCLE`].
    ///
    /// [`WORD_SIZE`]: constant.WORD_SIZE.html
    /// [`MIN_NUM_WORDS_PER_PAGE`]: constant.MIN_NUM_WORDS_PER_PAGE.html
    /// [`MAX_PAGE_SIZE`]: constant.MAX_PAGE_SIZE.html
    /// [`MIN_NUM_PAGES`]: constant.MIN_NUM_PAGES.html
    /// [`MAX_PAGE_INDEX`]: constant.MAX_PAGE_INDEX.html
    /// [`MAX_ERASE_CYCLE`]: constant.MAX_ERASE_CYCLE.html
    fn is_storage_supported<S: Storage>(storage: &S) -> bool {
        let word_size = storage.word_size();
        let page_size = storage.page_size();
        let num_pages = storage.num_pages();
        let max_word_writes = storage.max_word_writes();
        let max_page_erases = storage.max_page_erases();
        word_size == WORD_SIZE
            && page_size % word_size == 0
            && (MIN_NUM_WORDS_PER_PAGE * word_size <= page_size && page_size <= MAX_PAGE_SIZE)
            && (MIN_NUM_PAGES <= num_pages && num_pages <= MAX_PAGE_INDEX + 1)
            && max_word_writes >= 2
            && max_page_erases <= MAX_ERASE_CYCLE
    }

    /// The size of a word in bytes.
    pub fn word_size(&self) -> usize {
        WORD_SIZE
    }

    /// The size of a page in bytes.
    ///
    /// We have `MIN_NUM_WORDS_PER_PAGE * self.word_size() <= self.page_size() <= MAX_PAGE_SIZE`.
    pub fn page_size(&self) -> usize {
        self.page_size
    }

    /// The number of pages in the storage, denoted by `N`.
    ///
    /// We have `MIN_NUM_PAGES <= N <= MAX_PAGE_INDEX + 1`.
    pub fn num_pages(&self) -> usize {
        self.num_pages
    }

    /// The maximum page index.
    ///
    /// We have `2 <= self.max_page() <= MAX_PAGE_INDEX`.
    pub fn max_page(&self) -> usize {
        self.num_pages - 1
    }

    /// The maximum number of times a page can be erased, denoted by `E`.
    ///
    /// We have `E <= MAX_ERASE_CYCLE`.
    pub fn max_page_erases(&self) -> usize {
        self.max_page_erases
    }

    /// The maximum key.
    pub fn max_key(&self) -> usize {
        MAX_KEY_INDEX
    }

    /// The maximum number of updates per transaction.
    pub fn max_updates(&self) -> usize {
        MAX_UPDATES
    }

    /// The size of a virtual page in words, denoted by `Q`.
    ///
    /// A virtual page is stored in a physical page after the page header.
    ///
    /// We have `MIN_NUM_WORDS_PER_PAGE - 2 <= Q <= MAX_VIRT_PAGE_SIZE`.
    pub fn virt_page_size(&self) -> usize {
        self.page_size() / self.word_size() - CONTENT_WORD
    }

    /// The maximum length in bytes of a user payload.
    ///
    /// We have `(MIN_NUM_WORDS_PER_PAGE - 3) * self.word_size() <= self.max_value_len() <=
    /// MAX_VALUE_LEN`.
    pub fn max_value_len(&self) -> usize {
        min(
            (self.virt_page_size() - 1) * self.word_size(),
            MAX_VALUE_LEN,
        )
    }

    /// The maximum prefix length in words, denoted by `M`.
    ///
    /// A prefix is the first words of a virtual page that belong to the last entry of the previous
    /// virtual page. This happens because entries may overlap up to 2 virtual pages.
    ///
    /// We have `MIN_NUM_WORDS_PER_PAGE - 3 <= M < Q`.
    pub fn max_prefix_len(&self) -> usize {
        self.bytes_to_words(self.max_value_len())
    }

    /// The total virtual capacity in words, denoted by `V`.
    ///
    /// We have `V = (N - 1) * (Q - 1) - M`.
    ///
    /// We can show `V >= (N - 2) * (Q - 1)` with the following steps:
    /// - `M <= Q - 1` from `M < Q` from [`M`] definition
    /// - `-M >= -(Q - 1)` from above
    /// - `V >= (N - 1) * (Q - 1) - (Q - 1)` from `V` definition
    ///
    /// [`M`]: struct.Format.html#method.max_prefix_len
    pub fn virt_size(&self) -> usize {
        (self.num_pages() - 1) * (self.virt_page_size() - 1) - self.max_prefix_len()
    }

    /// The total user capacity in words, denoted by `C`.
    ///
    /// We have `C = V - N = (N - 1) * (Q - 2) - M - 1`.
    ///
    /// We can show `C >= (N - 2) * (Q - 2) - 2` with the following steps:
    /// - `V >= (N - 2) * (Q - 1)` from [`V`] definition
    /// - `C >= (N - 2) * (Q - 1) - N` from `C` definition
    /// - `(N - 2) * (Q - 1) - N = (N - 2) * (Q - 2) - 2` by calculus
    ///
    /// [`V`]: struct.Format.html#method.virt_size
    pub fn total_capacity(&self) -> usize {
        // From the virtual capacity, we reserve N - 1 words for `Erase` entries and 1 word for a
        // `Clear` entry.
        self.virt_size() - self.num_pages()
    }

    /// The total virtual lifetime in words, denoted by `L`.
    ///
    /// We have `L = (E * N + N - 1) * Q`.
    pub fn total_lifetime(&self) -> Position {
        Position::new(self, self.max_page_erases(), self.num_pages() - 1, 0)
    }

    /// Returns the word position of the first entry of a page.
    ///
    /// The init info of the page must be provided to know where the first entry of the page
    /// starts.
    pub fn page_head(&self, init: InitInfo, page: usize) -> Position {
        Position::new(self, init.cycle, page, init.prefix)
    }

    /// Returns the storage index of the init info of a page.
    pub fn index_init(&self, page: usize) -> StorageIndex {
        let byte = INIT_WORD * self.word_size();
        StorageIndex { page, byte }
    }

    /// Parses the init info of a page from its storage representation.
    pub fn parse_init(&self, word: WORD) -> StoreResult<WordState<InitInfo>> {
        Ok(if word == ERASED_WORD {
            WordState::Erased
        } else if WORD_CHECKSUM.get(word)? != 0 {
            WordState::Partial
        } else {
            let cycle = INIT_CYCLE.get(word);
            let prefix = INIT_PREFIX.get(word);
            if cycle > self.max_page_erases() || prefix > self.max_prefix_len() {
                return Err(StoreError::InvalidStorage);
            }
            WordState::Valid(InitInfo { cycle, prefix })
        })
    }

    /// Builds the storage representation of an init info.
    pub fn build_init(&self, init: InitInfo) -> [u8; WORD_SIZE] {
        let mut word = ERASED_WORD;
        INIT_CYCLE.set(&mut word, init.cycle);
        INIT_PREFIX.set(&mut word, init.prefix);
        WORD_CHECKSUM.set(&mut word, 0);
        word.to_ne_bytes()
    }

    /// Returns the storage index of the compact info of a page.
    pub fn index_compact(&self, page: usize) -> StorageIndex {
        let byte = COMPACT_WORD * self.word_size();
        StorageIndex { page, byte }
    }

    /// Parses the compact info of a page from its storage representation.
    pub fn parse_compact(&self, word: WORD) -> StoreResult<WordState<CompactInfo>> {
        Ok(if word == ERASED_WORD {
            WordState::Erased
        } else if WORD_CHECKSUM.get(word)? != 0 {
            WordState::Partial
        } else {
            let tail = COMPACT_TAIL.get(word);
            if tail > self.virt_size() + self.max_prefix_len() {
                return Err(StoreError::InvalidStorage);
            }
            WordState::Valid(CompactInfo { tail })
        })
    }

    /// Builds the storage representation of a compact info.
    pub fn build_compact(&self, compact: CompactInfo) -> [u8; WORD_SIZE] {
        let mut word = ERASED_WORD;
        COMPACT_TAIL.set(&mut word, compact.tail);
        WORD_CHECKSUM.set(&mut word, 0);
        word.to_ne_bytes()
    }

    /// Builds the storage representation of an internal entry.
    pub fn build_internal(&self, internal: InternalEntry) -> [u8; WORD_SIZE] {
        let mut word = ERASED_WORD;
        match internal {
            InternalEntry::Erase { page } => {
                ID_ERASE.set(&mut word);
                ERASE_PAGE.set(&mut word, page);
            }
            InternalEntry::Clear { min_key } => {
                ID_CLEAR.set(&mut word);
                CLEAR_MIN_KEY.set(&mut word, min_key);
            }
            InternalEntry::Marker { count } => {
                ID_MARKER.set(&mut word);
                MARKER_COUNT.set(&mut word, count);
            }
            InternalEntry::Remove { key } => {
                ID_REMOVE.set(&mut word);
                REMOVE_KEY.set(&mut word, key);
            }
        }
        WORD_CHECKSUM.set(&mut word, 0);
        word.to_ne_bytes()
    }

    /// Parses the first word of an entry from its storage representation.
    pub fn parse_word(&self, word: WORD) -> StoreResult<WordState<ParsedWord>> {
        let valid = if ID_PADDING.check(word) {
            ParsedWord::Padding(Padding { length: 0 })
        } else if ID_HEADER.check(word) {
            if HEADER_DELETED.get(word) {
                let length = HEADER_LENGTH.get(word);
                if length > self.max_value_len() {
                    return Err(StoreError::InvalidStorage);
                }
                let length = self.bytes_to_words(length);
                ParsedWord::Padding(Padding { length })
            } else {
                let flipped = HEADER_FLIPPED.get(word);
                let length = HEADER_LENGTH.get(word);
                let key = HEADER_KEY.get(word);
                let checksum = HEADER_CHECKSUM.get(word)?;
                ParsedWord::Header(Header {
                    flipped,
                    length,
                    key,
                    checksum,
                })
            }
        } else if ID_ERASE.check(word) {
            let page = ERASE_PAGE.get(word);
            ParsedWord::Internal(InternalEntry::Erase { page })
        } else if ID_CLEAR.check(word) {
            let min_key = CLEAR_MIN_KEY.get(word);
            ParsedWord::Internal(InternalEntry::Clear { min_key })
        } else if ID_MARKER.check(word) {
            let count = MARKER_COUNT.get(word);
            ParsedWord::Internal(InternalEntry::Marker { count })
        } else if ID_REMOVE.check(word) {
            let key = REMOVE_KEY.get(word);
            ParsedWord::Internal(InternalEntry::Remove { key })
        } else if word == ERASED_WORD {
            return Ok(WordState::Erased);
        } else {
            return Ok(WordState::Partial);
        };
        if let ParsedWord::Internal(internal) = &valid {
            if WORD_CHECKSUM.get(word)? != 0 {
                return Ok(WordState::Partial);
            }
            let invalid = match internal {
                InternalEntry::Erase { page } => *page > self.max_page(),
                InternalEntry::Clear { min_key } => *min_key > self.max_key(),
                InternalEntry::Marker { count } => *count > MAX_UPDATES,
                InternalEntry::Remove { key } => *key > self.max_key(),
            };
            if invalid {
                return Err(StoreError::InvalidStorage);
            }
        }
        Ok(WordState::Valid(valid))
    }

    /// Builds the storage representation of a user entry.
    pub fn build_user(&self, key: usize, value: &[u8]) -> Vec<u8> {
        let length = value.len();
        let word_size = self.word_size();
        let footer = self.bytes_to_words(length);
        let mut result = vec![0xff; (1 + footer) * word_size];
        result[word_size..][..length].copy_from_slice(value);
        let mut word = ERASED_WORD;
        ID_HEADER.set(&mut word);
        if footer > 0 && is_erased(&result[footer * word_size..]) {
            HEADER_FLIPPED.set(&mut word);
            *result.last_mut().unwrap() = 0x7f;
        }
        HEADER_LENGTH.set(&mut word, length);
        HEADER_KEY.set(&mut word, key);
        HEADER_CHECKSUM.set(&mut word, count_zeros(&result[footer * word_size..]));
        result[..word_size].copy_from_slice(&word.to_ne_bytes());
        result
    }

    /// Sets the padding bit in the first word of a user entry.
    pub fn set_padding(&self, word: &mut WORD) {
        ID_PADDING.set(word);
    }

    /// Sets the deleted bit in the first word of a user entry.
    pub fn set_deleted(&self, word: &mut WORD) {
        HEADER_DELETED.set(word);
    }

    /// Returns the minimum number of words to represent a given number of bytes.
    ///
    /// # Preconditions
    ///
    /// - `bytes + self.word_size()` does not overflow.
    pub fn bytes_to_words(&self, bytes: usize) -> usize {
        div_ceil(bytes, self.word_size())
    }
}
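
// A worked example of the geometry above, as plain arithmetic (illustrative only).
// It assumes a hypothetical storage with 20 pages of 4096 bytes and 4-byte words;
// the computed values mirror `virt_page_size`, `virt_size`, and `total_capacity`.
#[cfg(test)]
mod capacity_example {
    #[test]
    fn formulas_match_documentation() {
        let (num_pages, page_words) = (20usize, 4096 / 4);
        // Q: two words of each page hold the init and compact info.
        let q = page_words - 2;
        // M: the maximum value length (1023 bytes) rounded up to words.
        let m = (1023 + 3) / 4;
        // V = (N - 1) * (Q - 1) - M and C = V - N, as documented above.
        let v = (num_pages - 1) * (q - 1) - m;
        let c = v - num_pages;
        assert_eq!((q, m, v, c), (1022, 256, 19143, 19123));
    }
}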

/// The word index of the init info in a page.
const INIT_WORD: usize = 0;

/// The word index of the compact info in a page.
const COMPACT_WORD: usize = 1;

/// The word index of the content of a page.
///
/// Since a page is at least 8 words, there are always at least 6 words of content.
const CONTENT_WORD: usize = 2;

/// The checksum for a single word.
///
/// Since checksums are the number of bits set to zero and a word is 32 bits, we need 5 bits to
/// store numbers between 0 and 27 (which is 32 - 5).
const WORD_CHECKSUM: Checksum = Checksum {
    field: Field { pos: 27, len: 5 },
};
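
// Illustrative sketch of why counting zero bits works as a checksum here: flash
// writes can only flip bits from 1 to 0, so an interrupted write can only
// increase the number of zero bits, which a stored count makes detectable.
#[cfg(test)]
mod checksum_example {
    #[test]
    fn zero_bit_count_detects_partial_writes() {
        let complete: u32 = 0b1010_1100;
        // Simulate an interrupted write that cleared one extra bit.
        let partial: u32 = complete & !0b1000;
        assert!(partial.count_zeros() > complete.count_zeros());
    }
}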

// The fields of the init info of a page.
bitfield! {
    /// The number of times the page has been erased.
    INIT_CYCLE: Field <= MAX_ERASE_CYCLE,

    /// The word index of the first entry in this virtual page.
    INIT_PREFIX: Field <= div_ceil(MAX_VALUE_LEN, WORD_SIZE),

    #[cfg(test)]
    LEN_INIT: Length,
}

// The fields of the compact info of a page.
bitfield! {
    /// The distance in words between head and tail at compaction.
    ///
    /// In particular, compaction copies non-deleted user entries from the head to the tail as long
    /// as entries span the page to be compacted.
    COMPACT_TAIL: Field <= MAX_VIRT_PAGE_SIZE * MAX_PAGE_INDEX,

    #[cfg(test)]
    LEN_COMPACT: Length,
}

// Overview of the first word of the different kinds of entries.
//
// Each column represents a bit of the word. The first 2 lines give the position in hexadecimal of
// the bit in the word (the exponent of 2 when the word is written in binary). Each entry starts
// with the sequence of bits of its identifier. The dots following the identifier are the number of
// bits necessary to hold the information of the entry (including the checksum). The remaining free
// bits after the dots are not used by the entry.
//
//            0               1
//            0123456789abcdef0123456789abcdef
//   padding  0
//   header   10..............................
//   erase    11000...........
//   clear    11001.................
//   marker   11010..........
//   remove   11011.................
//
// NOTE: We could pad the internal entries to the right by extending their identifier. This would
// free some space, allowing shorter identifiers for future kinds of entries.

// The fields of a padding entry.
bitfield! {
    /// The identifier for padding entries.
    ID_PADDING: ConstField = [0],
}

// The fields of a user entry.
bitfield! {
    /// The identifier for user entries.
    ID_HEADER: ConstField = [1 0],

    /// Whether the user entry is deleted.
    HEADER_DELETED: Bit,

    /// Whether the last bit of the user data is flipped.
    HEADER_FLIPPED: Bit,

    /// The length in bytes of the user data.
    // NOTE: It is possible to support values of length 1024 by having a separate kind of entries
    // when the value is empty. We could then subtract one from the length here.
    HEADER_LENGTH: Field <= MAX_VALUE_LEN,

    /// The key of the user entry.
    HEADER_KEY: Field <= MAX_KEY_INDEX,

    /// The checksum of the user entry.
    ///
    /// This counts the number of bits set to zero in both the first and last words of the user
    /// entry, except in the checksum itself. So it needs 6 bits to store numbers between 0 and 58.
    // NOTE: It may be possible to save one bit by storing:
    // - the footer checksum (as a field) if the value is not empty
    // - the header checksum (as a checksum) if the value is empty
    HEADER_CHECKSUM: Checksum <= 58,

    #[cfg(test)]
    LEN_HEADER: Length,
}

// The fields of an erase entry.
bitfield! {
    /// The identifier for erase entries.
    ID_ERASE: ConstField = [1 1 0 0 0],

    /// The page to be erased.
    ERASE_PAGE: Field <= MAX_PAGE_INDEX,

    #[cfg(test)]
    LEN_ERASE: Length,
}

// The fields of a clear entry.
bitfield! {
    /// The identifier for clear entries.
    ID_CLEAR: ConstField = [1 1 0 0 1],

    /// The minimum key to be cleared.
    ///
    /// Entries with a key below this limit are kept. All other entries are deleted.
    CLEAR_MIN_KEY: Field <= MAX_KEY_INDEX,

    #[cfg(test)]
    LEN_CLEAR: Length,
}

// The fields of a marker entry.
bitfield! {
    /// The identifier for marker entries.
    ID_MARKER: ConstField = [1 1 0 1 0],

    /// The number of updates in this transaction.
    ///
    /// The update entries follow this marker entry.
    MARKER_COUNT: Field <= MAX_UPDATES,

    #[cfg(test)]
    LEN_MARKER: Length,
}

// The fields of a remove entry.
bitfield! {
    /// The identifier for remove entries.
    ID_REMOVE: ConstField = [1 1 0 1 1],

    /// The key of the user entry to be removed.
    REMOVE_KEY: Field <= MAX_KEY_INDEX,

    #[cfg(test)]
    LEN_REMOVE: Length,
}

/// The position of a word in the virtual storage.
///
/// With the notations defined in `Format`, let:
/// - `w` a virtual word offset in a page which is between `0` and `Q - 1`
/// - `p` a page offset which is between `0` and `N - 1`
/// - `c` the number of erase cycles of a page which is between `0` and `E`
///
/// Then the position of a word is `(c*N + p)*Q + w`. This position monotonically increases and
/// represents the consumed lifetime of the storage.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct Position(usize);

impl core::ops::Add<usize> for Position {
    type Output = Position;

    fn add(self, delta: usize) -> Position {
        Position(self.0 + delta)
    }
}

impl core::ops::Sub<Position> for Position {
    type Output = usize;

    fn sub(self, base: Position) -> usize {
        self.0 - base.0
    }
}

impl core::ops::AddAssign<usize> for Position {
    fn add_assign(&mut self, delta: usize) {
        self.0 += delta;
    }
}

impl Position {
    /// Creates a word position given its coordinates.
    ///
    /// The coordinates of a word are:
    /// - Its word index in its page.
    /// - Its page index in the storage.
    /// - The number of times that page was erased.
    pub fn new(format: &Format, cycle: usize, page: usize, word: usize) -> Position {
        Position((cycle * format.num_pages() + page) * format.virt_page_size() + word)
    }

    /// Accesses the underlying position as a natural number.
    pub fn get(self) -> usize {
        self.0
    }

    /// Returns the associated storage index.
    pub fn index(self, format: &Format) -> StorageIndex {
        let page = self.page(format);
        let word = CONTENT_WORD + self.word(format);
        let byte = word * format.word_size();
        StorageIndex { page, byte }
    }

    /// Returns the beginning of the current virtual page.
    pub fn page_begin(self, format: &Format) -> Position {
        let virt_page_size = format.virt_page_size();
        Position((self.0 / virt_page_size) * virt_page_size)
    }

    /// Returns the beginning of the next virtual page.
    pub fn next_page(self, format: &Format) -> Position {
        let virt_page_size = format.virt_page_size();
        Position((self.0 / virt_page_size + 1) * virt_page_size)
    }

    /// Returns the number of times the current page was erased.
    pub fn cycle(self, format: &Format) -> usize {
        (self.0 / format.virt_page_size()) / format.num_pages()
    }

    /// Returns the current page index.
    pub fn page(self, format: &Format) -> usize {
        (self.0 / format.virt_page_size()) % format.num_pages()
    }

    /// Returns the current word index in the page.
    pub fn word(self, format: &Format) -> usize {
        self.0 % format.virt_page_size()
    }
}
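
// Worked example of the position encoding above (illustrative only): with
// hypothetical parameters N = 4 pages and Q = 10 words per virtual page, the
// position (c * N + p) * Q + w decomposes back into its coordinates exactly as
// `cycle`, `page`, and `word` do.
#[cfg(test)]
mod position_example {
    #[test]
    fn coordinates_round_trip() {
        let (n, q) = (4usize, 10usize);
        let (cycle, page, word) = (3usize, 2usize, 7usize);
        let pos = (cycle * n + page) * q + word;
        assert_eq!(pos, 147);
        assert_eq!((pos / q) / n, cycle);
        assert_eq!((pos / q) % n, page);
        assert_eq!(pos % q, word);
    }
}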

/// Possible states of some storage representation as a word.
pub enum WordState<T> {
    /// The word is still erased.
    Erased,

    /// The word is partially written.
    Partial,

    /// Holds the decoded version of a valid word.
    Valid(T),
}

/// Information for an initialized page.
pub struct InitInfo {
    /// The number of times this page has been erased.
    pub cycle: usize,

    /// The word index of the first entry in this virtual page.
    pub prefix: usize,
}

/// Information for a page being compacted.
pub struct CompactInfo {
    /// The distance in words between head and tail at compaction.
    pub tail: usize,
}

/// The first word of an entry.
#[derive(Debug)]
pub enum ParsedWord {
    /// Padding entry.
    Padding(Padding),

    /// Header of a user entry.
    Header(Header),

    /// Internal entry.
    Internal(InternalEntry),
}

/// Padding entry.
#[derive(Debug)]
pub struct Padding {
    /// The number of padding words following the first word of the padding entry.
    pub length: usize,
}

/// Header of a user entry.
#[derive(Debug)]
pub struct Header {
    /// Whether the last bit of the user data is flipped.
    pub flipped: bool,

    /// The length in bytes of the user data.
    pub length: usize,

    /// The key of the user entry.
    pub key: usize,

    /// The checksum of the user entry.
    pub checksum: usize,
}

impl Header {
    /// Checks the validity of a user entry.
    ///
    /// If the user entry has no payload, the `footer` must be set to `None`. Otherwise it should
    /// be the last word of the entry.
    pub fn check(&self, footer: Option<&[u8]>) -> bool {
        footer.map_or(0, count_zeros) == self.checksum
    }
}

/// Internal entry.
#[derive(Debug)]
pub enum InternalEntry {
    /// Indicates that a page should be erased.
    Erase {
        /// The page to be erased.
        page: usize,
    },

    /// Indicates that user entries with high keys should be deleted.
    Clear {
        /// The minimum key a user entry should have to be deleted.
        min_key: usize,
    },

    /// Marks the start of a transaction.
    ///
    /// The marker is followed by a given number of updates, which are either user entries or
    /// remove entries.
    Marker {
        /// The number of updates in the transaction.
        count: usize,
    },

    /// Indicates that a user entry should be removed.
    ///
    /// This is only useful (and valid) as part of a transaction, since removing a single entry is
    /// already atomic.
    Remove {
        /// The key of the user entry to be removed.
        key: usize,
    },
}

/// Returns whether a slice has all bits equal to one.
pub fn is_erased(slice: &[u8]) -> bool {
    slice.iter().all(|&x| x == 0xff)
}

/// Divides then takes ceiling.
///
/// Returns `ceil(x / m)` in mathematical notation (not Rust code).
///
/// # Preconditions
///
/// - `x + m` does not overflow.
const fn div_ceil(x: usize, m: usize) -> usize {
    (x + m - 1) / m
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn size_of_format() {
        assert_eq!(std::mem::size_of::<Format>(), 24);
    }

    #[test]
    fn checksum_ok() {
        let Field { pos, len } = WORD_CHECKSUM.field;
        // There are enough bits to represent the number of zeros preceding the checksum.
        assert_eq!(len, num_bits(pos));
        // The checksum is the last field of a word.
        assert_eq!(pos + len, 8 * WORD_SIZE);
        // The data of words using the checksum don't overlap the checksum.
        let words = &[
            &LEN_INIT,
            &LEN_COMPACT,
            &LEN_ERASE,
            &LEN_CLEAR,
            &LEN_MARKER,
            &LEN_REMOVE,
        ];
        for word in words {
            assert!(word.pos < pos);
        }
    }

    #[test]
    fn init_ok() {
        assert_eq!(INIT_CYCLE.pos, 0);
        assert_eq!(INIT_CYCLE.len, 16);
        assert_eq!(INIT_PREFIX.pos, 16);
        assert_eq!(INIT_PREFIX.len, 9);
        assert_eq!(LEN_INIT.pos, 25);
    }

    #[test]
    fn compact_ok() {
        assert_eq!(COMPACT_TAIL.pos, 0);
        assert_eq!(COMPACT_TAIL.len, 16);
        assert_eq!(LEN_COMPACT.pos, 16);
    }

    #[test]
    fn header_ok() {
        assert_eq!(ID_HEADER.field.pos, 0);
        assert_eq!(ID_HEADER.field.len, 2);
        assert_eq!(ID_HEADER.value, 0b01);
        assert_eq!(HEADER_DELETED.pos, 2);
        assert_eq!(HEADER_FLIPPED.pos, 3);
        assert_eq!(HEADER_LENGTH.pos, 4);
        assert_eq!(HEADER_LENGTH.len, 10);
        assert_eq!(HEADER_KEY.pos, 14);
        assert_eq!(HEADER_KEY.len, 12);
        assert_eq!(HEADER_CHECKSUM.field.pos, 26);
        assert_eq!(HEADER_CHECKSUM.field.len, 6);
        assert_eq!(LEN_HEADER.pos, 32);
    }

    #[test]
    fn erase_ok() {
        assert_eq!(ID_ERASE.field.pos, 0);
        assert_eq!(ID_ERASE.field.len, 5);
        assert_eq!(ID_ERASE.value, 0b00011);
        assert_eq!(ERASE_PAGE.pos, 5);
        assert_eq!(ERASE_PAGE.len, 6);
        assert_eq!(LEN_ERASE.pos, 11);
    }

    #[test]
    fn clear_ok() {
        assert_eq!(ID_CLEAR.field.pos, 0);
        assert_eq!(ID_CLEAR.field.len, 5);
        assert_eq!(ID_CLEAR.value, 0b10011);
        assert_eq!(CLEAR_MIN_KEY.pos, 5);
        assert_eq!(CLEAR_MIN_KEY.len, 12);
        assert_eq!(LEN_CLEAR.pos, 17);
    }

    #[test]
    fn marker_ok() {
        assert_eq!(ID_MARKER.field.pos, 0);
        assert_eq!(ID_MARKER.field.len, 5);
        assert_eq!(ID_MARKER.value, 0b01011);
        assert_eq!(MARKER_COUNT.pos, 5);
        assert_eq!(MARKER_COUNT.len, 5);
        assert_eq!(LEN_MARKER.pos, 10);
    }

    #[test]
    fn remove_ok() {
        assert_eq!(ID_REMOVE.field.pos, 0);
        assert_eq!(ID_REMOVE.field.len, 5);
        assert_eq!(ID_REMOVE.value, 0b11011);
        assert_eq!(REMOVE_KEY.pos, 5);
        assert_eq!(REMOVE_KEY.len, 12);
        assert_eq!(LEN_REMOVE.pos, 17);
    }

    #[test]
    fn is_erased_ok() {
        assert!(is_erased(&[]));
        assert!(is_erased(&[0xff]));
        assert!(is_erased(&[0xff, 0xff]));
        assert!(!is_erased(&[0x00]));
        assert!(!is_erased(&[0xff, 0xfe]));
        assert!(!is_erased(&[0x7f, 0xff]));
    }

    #[test]
    fn div_ceil_ok() {
        assert_eq!(div_ceil(0, 1), 0);
        assert_eq!(div_ceil(1, 1), 1);
        assert_eq!(div_ceil(2, 1), 2);
        assert_eq!(div_ceil(0, 2), 0);
        assert_eq!(div_ceil(1, 2), 1);
        assert_eq!(div_ceil(2, 2), 1);
        assert_eq!(div_ceil(3, 2), 2);
    }

    #[test]
    fn positions_fit_in_a_word() {
        // All reachable positions are smaller than this value, which is one past the last
        // position. It is simply the total number of virtual words, i.e. the number of words per
        // virtual page times the number of virtual pages times the number of times a virtual page
        // can be used (one more than the number of times it can be erased, since we can write
        // before the first erase cycle and after the last erase cycle).
        assert_eq!(
            (MAX_ERASE_CYCLE + 1) * (MAX_PAGE_INDEX + 1) * MAX_VIRT_PAGE_SIZE,
            0xff800000
        );
    }
}
@@ -12,12 +12,353 @@
// See the License for the specific language governing permissions and
// limitations under the License.

// TODO(ia0): Add links once the code is complete.
//! Store abstraction for flash storage
//!
//! # Specification
//!
//! The store provides a partial function from keys to values on top of a storage
//! interface. The store's total capacity depends on the size of the storage. Store
//! updates may be bundled in transactions. Mutable operations are atomic, including
//! when interrupted.
//!
//! The store is flash-efficient in the sense that it uses the storage lifetime
//! efficiently. For each page, all words are written at least once between erase
//! cycles and all erase cycles are used. However, not all written words are user
//! content: lifetime is also consumed by metadata and compaction.
//!
//! The store is extensible to entries other than key-values. It is essentially a
//! framework providing access to the storage lifetime. The partial function is
//! simply the most common usage and can be used to encode other usages.
//!
//! ## Definitions
//!
//! An _entry_ is a pair of a key and a value. A _key_ is a number between 0
//! and 4095. A _value_ is a byte slice with a length between 0 and 1023 bytes (for
//! large enough pages).
//!
//! The store provides the following _updates_:
//! - Given a key and a value, `Insert` updates the store such that the value is
//!   associated with the key. The values for other keys are left unchanged.
//! - Given a key, `Remove` updates the store such that no value is associated with
//!   the key. The values for other keys are left unchanged. Additionally, if there
//!   was a value associated with the key, the value is wiped from the storage
//!   (all its bits are set to 0).
//!
//! The store provides the following _read-only operations_:
//! - `Iter` iterates through the store returning all entries exactly once. The
//!   iteration order is not specified but stable between mutable operations.
//! - `Capacity` returns how many words can be stored before the store is full.
//! - `Lifetime` returns how many words can be written before the storage lifetime
//!   is consumed.
//!
//! The store provides the following _mutable operations_:
//! - Given a set of independent updates, `Transaction` applies the sequence of
//!   updates.
//! - Given a threshold, `Clear` removes all entries with a key greater than or
//!   equal to the threshold.
//! - Given a length in words, `Prepare` makes one step of compaction unless that
//!   many words can be written without compaction. This operation has no effect
//!   on the store but may still mutate its storage. In particular, the store has
//!   the same capacity but a possibly reduced lifetime.
//!
//! A mutable operation is _atomic_ if, when power is lost during the operation, the
//! store is either updated (as if the operation succeeded) or left unchanged (as if
//! the operation did not occur). If the store is left unchanged, lifetime may still
//! be consumed.
//!
//! The store relies on the following _storage interface_ (a sketch follows the list):
//! - It is possible to read a byte slice. The slice won't span multiple pages.
//! - It is possible to write a word slice. The slice won't span multiple pages.
//! - It is possible to erase a page.
//! - The pages are sequentially indexed from 0. If the actual underlying storage
//!   is segmented, then the storage layer should translate those indices to
//!   actual page addresses.
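//!
//! A minimal sketch of the shape of this interface, based on the `Storage` trait
//! exported by this crate (signatures abbreviated; the `storage` module is
//! authoritative):
//!
//! ```text
//! trait Storage {
//!     fn word_size(&self) -> usize;
//!     fn page_size(&self) -> usize;
//!     fn num_pages(&self) -> usize;
//!     fn max_word_writes(&self) -> usize;
//!     fn max_page_erases(&self) -> usize;
//!     fn read_slice(&self, index: StorageIndex, length: usize) -> StorageResult<_>;
//!     fn write_slice(&mut self, index: StorageIndex, value: &[u8]) -> StorageResult<_>;
//!     fn erase_page(&mut self, page: usize) -> StorageResult<_>;
//! }
//! ```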
//!
//! The store has a _total capacity_ of `C = (N - 1) * (P - 4) - M - 1` words, where
//! `P` is the number of words per page, `N` is the number of pages, and `M` is the
//! maximum length in words of a value (256 for large enough pages). The capacity
//! used by each mutable operation is given below (a transient word only uses
//! capacity during the operation); a worked example follows the list:
//! - `Insert` uses `1 + ceil(len / 4)` words where `len` is the length of the
//!   value in bytes. If an entry was replaced, the words used by its insertion
//!   are freed.
//! - `Remove` doesn't use capacity if alone in the transaction and 1 transient
//!   word otherwise. If an entry was deleted, the words used by its insertion are
//!   freed.
//! - `Transaction` uses 1 transient word. In addition, the updates of the
//!   transaction use and free words as described above.
//! - `Clear` doesn't use capacity and frees the words used by the insertion of
//!   the deleted entries.
//! - `Prepare` doesn't use capacity.
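//!
//! As a worked example (illustrative numbers only), a storage with `N = 20` pages
//! of `P = 1024` words and `M = 256` has a total capacity of:
//!
//! ```text
//! C = (20 - 1) * (1024 - 4) - 256 - 1 = 19 * 1020 - 257 = 19123 words
//! ```
//!
//! Inserting a 100-byte value then uses `1 + ceil(100 / 4) = 26` of those words.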
//!
//! The _total lifetime_ of the store is below `L = ((E + 1) * N - 1) * (P - 2)` and
//! above `L - M` words, where `E` is the maximum number of erase cycles. The
//! lifetime is used when capacity is used, including transiently, as well as when
//! compaction occurs. Compaction frequency and lifetime consumption are positively
//! correlated with the store load factor (the ratio of used capacity to total capacity).
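//!
//! For the same illustrative configuration with `E = 9999` erase cycles:
//!
//! ```text
//! L = ((9999 + 1) * 20 - 1) * (1024 - 2) = 199999 * 1022 = 204398978 words
//! ```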
//!
//! It is possible to approximate the cost of transient words in terms of capacity:
//! `L` transient words are equivalent to `C - x` words of capacity, where `x` is the
//! average capacity (including transient) of operations.
//!
//! ## Preconditions
//!
//! The following assumptions need to hold, or the store may behave in unexpected ways:
//! - A word can be written twice between erase cycles.
//! - A page can be erased `E` times after the first boot of the store.
//! - When power is lost while writing a slice or erasing a page, the next read
//!   returns a slice where a subset (possibly none or all) of the bits that
//!   should have been modified have been modified.
//! - Reading a slice is deterministic. When power is lost while writing a slice
//!   or erasing a slice (erasing a page containing that slice), reading that
//!   slice repeatedly returns the same result (until it is overwritten or its
//!   page is erased).
//! - To decide whether a page has been erased, it is enough to test if all its
//!   bits are equal to 1.
//! - When power is lost while writing a slice or erasing a page, that operation
//!   does not count towards the limits. However, completing that write or erase
//!   operation would count towards the limits, as if the number of writes per
//!   word and number of erase cycles could be fractional.
//! - The storage is only modified by the store. Note that completely erasing the
//!   storage is supported, essentially losing all content and lifetime tracking.
//!   It is preferred to use `Clear` with a threshold of 0 to keep the lifetime
//!   tracking.
//!
//! The store properties may still hold outside some of those assumptions, but with
//! an increasing chance of failure.
//!
//! # Implementation
//!
//! We define the following constants:
//! - `E < 65536` the number of times a page can be erased.
//! - `3 <= N <= 64` the number of pages in the storage.
//! - `8 <= P <= 1024` the number of words in a page.
//! - `Q = P - 2` the number of words in a virtual page.
//! - `K = 4096` the maximum number of keys.
//! - `M = min(Q - 1, 256)` the maximum length in words of a value.
//! - `V = (N - 1) * (Q - 1) - M` the virtual capacity.
//! - `C = V - N` the user capacity.
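//!
//! For instance (illustrative numbers only), `P = 1024` gives `Q = 1022` and
//! `M = 256`, so a storage with `N = 20` pages has `V = 19 * 1021 - 256 = 19143`
//! words of virtual capacity and `C = 19143 - 20 = 19123` words of user capacity,
//! matching the specification formula above.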
//!
//! We build a virtual storage from the physical storage using the first 2 words of
//! each page:
//! - The first word contains the number of times the page has been erased.
//! - The second word contains the starting word to which this page is being moved
//!   during compaction.
//!
//! The virtual storage has a length of `(E + 1) * N * Q` words and represents the
//! lifetime of the store. (We reserve the last `Q + M` words to support adding
//! emergency lifetime.) This virtual storage has a linear address space.
//!
//! We define a set of overlapping windows of `N * Q` words at each `Q`-aligned
//! boundary. We call `i` the window spanning from `i * Q` to `(i + N) * Q`. Only
//! those windows actually exist in the underlying storage. We use compaction to
//! shift the current window from `i` to `i + 1`, preserving the content of the
//! store.
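//!
//! For example (illustrative only), with `N = 3` pages, window 0 covers virtual
//! pages 0 to 2 and window 1 covers virtual pages 1 to 3. Virtual page 3 maps to
//! physical page 0 in its next erase cycle, so shifting from window 0 to window 1
//! copies the live entries of virtual page 0 towards the tail and then erases
//! physical page 0, making virtual page 3 available.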
//!
//! For a given state of the virtual storage, we define `h_i` as the position of the
//! first entry of the window `i`. We call it the head of the window `i`. Because
//! entries are at most `M + 1` words, they can overlap on the next page only by `M`
//! words. So we have `i * Q <= h_i <= i * Q + M`. Since there are no entries
//! before the first page, we have `h_0 = 0`.
//!
//! We define `t_i` as one past the last entry of the window `i`. If there are no
//! entries in that window, we have `t_i = h_i`. We call `t_i` the tail of the
//! window `i`. We define the compaction invariant as `t_i - h_i <= V`.
//!
//! We define `|x|` as the capacity used before position `x`. We have `|x| <= x`. We
//! define the capacity invariant as `|t_i| - |h_i| <= C`.
//!
//! Using this virtual storage, entries are appended to the tail as long as there is
//! both virtual capacity to preserve the compaction invariant and capacity to
//! preserve the capacity invariant. When virtual capacity runs out, the first page
//! of the window is compacted and the window is shifted.
//!
//! Entries are identified by a prefix of bits. The prefix has to contain at least
//! one bit set to zero to differentiate it from the tail. Entries can be one of:
//! - Padding: A word whose first bit is set to zero. The rest is arbitrary. This
//!   entry is used to mark words partially written after an interrupted operation
//!   as padding such that they are ignored by future operations.
//! - Header: A word whose second bit is set to zero. It contains the following fields:
//!   - A bit indicating whether the entry is deleted.
//!   - A bit indicating whether the value is word-aligned and has all bits set
//!     to 1 in its last word. The last word of an entry is used to detect that
//!     an entry has been fully written. As such it must contain at least one
//!     bit equal to zero.
//!   - The key of the entry.
//!   - The length in bytes of the value. The value follows the header. The
//!     entry is word-aligned if the value is not.
//!   - The checksum of the first and last word of the entry.
//! - Erase: A word used during compaction. It contains the page to be erased and
//!   a checksum.
//! - Clear: A word used during the `Clear` operation. It contains the threshold
//!   and a checksum.
//! - Marker: A word used during the `Transaction` operation. It contains the
//!   number of updates following the marker and a checksum.
//! - Remove: A word used during the `Transaction` operation. It contains the key
//!   of the entry to be removed and a checksum.
//!
//! Checksums are the number of bits equal to 0.
//!
//! # Proofs
//!
//! ## Compaction
//!
//! It should always be possible to fully compact the store, after which the
//! remaining capacity should be available in the current window (restoring the
//! compaction invariant). We consider all notations on the virtual storage after
//! the full compaction. We will use the `|x|` notation although we update the state
//! of the virtual storage. This is fine because compaction doesn't change the
//! status of an existing word.
//!
//! We want to show that the next `N - 1` compactions won't move the tail past the
//! last page of their window, with `I` the initial window:
//!
//! ```text
//! forall 1 <= i <= N - 1, t_{I + i} <= (I + i + N - 1) * Q
//! ```
//!
//! We assume `i` is between `1` and `N - 1`.
//!
//! One step of compaction advances the tail by how many words were used in the
//! first page of the window, with the last entry possibly overlapping on the next
//! page.
//!
//! ```text
//! forall j, t_{j + 1} = t_j + |h_{j + 1}| - |h_j| + 1
//! ```
//!
//! By induction, we have:
//!
//! ```text
//! t_{I + i} <= t_I + |h_{I + i}| - |h_I| + i
//! ```
//!
//! We have the following properties:
//!
//! ```text
//! t_I <= h_I + V
//! |h_{I + i}| - |h_I| <= h_{I + i} - h_I
//! h_{I + i} <= (I + i) * Q + M
//! ```
//!
//! Substituting these into the previous inequality, we can conclude:
//!
//! ```text
//! t_{I + i} <= t_I + |h_{I + i}| - |h_I| + i
//!           <= h_I + V + (I + i) * Q + M - h_I + i
//!            = (N - 1) * (Q - 1) - M + (I + i) * Q + M + i
//!            = (N - 1) * (Q - 1) + (I + i) * Q + i
//!            = (I + i + N - 1) * Q + i - (N - 1)
//!           <= (I + i + N - 1) * Q
//! ```
//!
//! We also want to show that after `N - 1` compactions, the remaining capacity is
//! available without compaction.
//!
//! ```text
//! V - (t_{I + N - 1} - h_{I + N - 1}) >=      // The available words in the window.
//!     C - (|t_{I + N - 1}| - |h_{I + N - 1}|) // The remaining capacity.
//!     + 1                                     // Reserved for Clear.
//! ```
//!
//! We can replace the definition of `C` and simplify:
//!
//! ```text
//!     V - (t_{I + N - 1} - h_{I + N - 1}) >= V - N - (|t_{I + N - 1}| - |h_{I + N - 1}|) + 1
//! iff t_{I + N - 1} - h_{I + N - 1} <= |t_{I + N - 1}| - |h_{I + N - 1}| + N - 1
//! ```
//!
//! We have the following properties:
//!
//! ```text
//! t_{I + N - 1} = t_I + |h_{I + N - 1}| - |h_I| + N - 1
//! |t_{I + N - 1}| - |h_{I + N - 1}| = |t_I| - |h_I| // Compaction preserves capacity.
//! |h_{I + N - 1}| - |t_I| <= h_{I + N - 1} - t_I
//! ```
//!
//! From which we conclude:
//!
//! ```text
//!     t_{I + N - 1} - h_{I + N - 1} <= |t_{I + N - 1}| - |h_{I + N - 1}| + N - 1
//! iff t_I + |h_{I + N - 1}| - |h_I| + N - 1 - h_{I + N - 1} <= |t_I| - |h_I| + N - 1
//! iff t_I + |h_{I + N - 1}| - h_{I + N - 1} <= |t_I|
//! iff |h_{I + N - 1}| - |t_I| <= h_{I + N - 1} - t_I
//! ```
//!
//! ## Checksum
//!
//! The main property we want is that all partially written/erased words are either
//! the initial word, the final word, or invalid.
//!
//! We say that a bit sequence `TARGET` is reachable from a bit sequence `SOURCE` if
//! both have the same length and `SOURCE & TARGET == TARGET`, where `&` is the
//! bitwise AND operation on bit sequences of that length. In other words, when
//! `SOURCE` has a bit equal to 0 then `TARGET` also has that bit equal to 0.
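//!
//! For example (illustrative only), `0b1110` can reach `0b1010` since writing can
//! only flip bits from 1 to 0, but `0b1010` cannot reach `0b1110` because that
//! would require flipping a 0 back to 1, which only an erase can do.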
|
||||||
|
//!
|
||||||
|
//! The only written entries start with `101` or `110` and are written from an
|
||||||
|
//! erased word. Marking an entry as padding or deleted is a single bit operation,
|
||||||
|
//! so the property trivially holds. For those cases, the proof relies on the fact
|
||||||
|
//! that there is exactly one bit equal to 0 in the 3 first bits. Either the 3 first
|
||||||
|
//! bits are still `111` in which case we expect the remaining bits to be equal
|
||||||
|
//! to 1. Otherwise we can use the checksum of the given type of entry because those
|
||||||
|
//! 2 types of entries are not reachable from each other. Here is a visualization of
|
||||||
|
//! the partitioning based on the first 3 bits:
|
||||||
|
//!
|
||||||
|
//! | First 3 bits | Description | How to check |
|
||||||
|
//! | ------------:| ------------------ | ---------------------------- |
|
||||||
|
//! | `111` | Erased word | All bits set to `1` |
|
||||||
|
//! | `101` | User entry | Contains a checksum |
|
||||||
|
//! | `110` | Internal entry | Contains a checksum |
|
||||||
|
//! | `100` | Deleted user entry | No check, atomically written |
|
||||||
|
//! | `0??` | Padding entry | No check, atomically written |
|
||||||
|
//!
|
||||||
|
//! To show that valid entries of a given type are not reachable from each other, we
|
||||||
|
//! show 3 lemmas:
|
||||||
|
//!
|
||||||
|
//! 1. A bit sequence is not reachable from another if its number of bits equal to
|
||||||
|
//! 0 is smaller.
|
||||||
|
//!
|
||||||
|
//! 2. A bit sequence is not reachable from another if they have the same number of
|
||||||
|
//! bits equals to 0 and are different.
|
||||||
|
//!
|
||||||
|
//! 3. A bit sequence is not reachable from another if it is bigger when they are
|
||||||
|
//! interpreted as numbers in binary representation.
|
||||||
|
//!
|
||||||
|
//! From those lemmas we consider the 2 cases. If both entries have the same number
|
||||||
|
//! of bits equal to 0, they are either equal or not reachable from each other
|
||||||
|
//! because of the second lemma. If they don't have the same number of bits equal to
|
||||||
|
//! 0, then the one with less bits equal to 0 is not reachable from the other
|
||||||
|
//! because of the first lemma and the one with more bits equal to 0 is not
|
||||||
|
//! reachable from the other because of the third lemma and the definition of the
|
||||||
|
//! checksum.
|
||||||
|
//!
|
||||||
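To illustrate the reachability relation, here is a minimal sketch using 32-bit words (the store's actual word size is not assumed here): flash writes can only clear bits, so a word can only move toward more bits equal to 0.

```rust
/// `target` is reachable from `source` iff writes, which can only clear
/// bits, can turn `source` into `target`.
fn reachable(source: u32, target: u32) -> bool {
    source & target == target
}

fn main() {
    assert!(reachable(0b111, 0b101)); // erased word -> user entry prefix
    assert!(reachable(0b101, 0b100)); // user entry -> deleted user entry
    assert!(!reachable(0b101, 0b110)); // user and internal entry prefixes
    assert!(!reachable(0b110, 0b101)); // are not reachable from each other
}
```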
+//! # Fuzzing
+//!
+//! For any sequence of operations and interruptions starting from an erased
+//! storage, the store is checked against its model and some internal invariants at
+//! each step.
+//!
+//! For any sequence of operations and interruptions starting from an arbitrary
+//! storage, the store is checked not to crash.
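A sketch of the first strategy's checking step. The names `Store::apply` and `Store::check_invariants` are hypothetical stand-ins for the real store entry points, which this diff does not show; the real harness additionally drives the storage and injects interruptions.

```rust
// Hypothetical API: `store.apply` and `store.check_invariants` are assumed
// here, only `model.apply` is part of this diff.
fn fuzz_step(store: &mut Store, model: &mut StoreModel, operation: StoreOperation) {
    let store_result = store.apply(operation.clone());
    let model_result = model.apply(operation);
    // The store and its model must agree on whether the operation succeeds.
    assert_eq!(store_result.is_ok(), model_result.is_ok());
    store.check_invariants();
}
```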
 #![cfg_attr(not(feature = "std"), no_std)]
 
+#[macro_use]
+extern crate alloc;
 
 #[macro_use]
 mod bitfield;
+mod buffer;
+mod format;
+#[cfg(feature = "std")]
+mod model;
 mod storage;
 mod store;
 
+pub use self::buffer::{BufferCorruptFunction, BufferOptions, BufferStorage};
+#[cfg(feature = "std")]
+pub use self::model::{StoreModel, StoreOperation};
 pub use self::storage::{Storage, StorageError, StorageIndex, StorageResult};
-pub use self::store::{StoreError, StoreResult};
+pub use self::store::{StoreError, StoreRatio, StoreResult, StoreUpdate};
libraries/persistent_store/src/model.rs (new file, 168 lines)
@@ -0,0 +1,168 @@
+// Copyright 2019-2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::format::Format;
+use crate::{StoreError, StoreRatio, StoreResult, StoreUpdate};
+use std::collections::{HashMap, HashSet};
+
+/// Models the mutable operations of a store.
+///
+/// The model doesn't model the storage and read-only operations. This is done by the driver.
+#[derive(Clone, Debug)]
+pub struct StoreModel {
+    /// Represents the content of the store.
+    content: HashMap<usize, Box<[u8]>>,
+
+    /// The modeled storage configuration.
+    format: Format,
+}
+
+/// Mutable operations on a store.
+#[derive(Clone, Debug)]
+pub enum StoreOperation {
+    /// Applies a transaction.
+    Transaction {
+        /// The list of updates to be applied.
+        updates: Vec<StoreUpdate>,
+    },
+
+    /// Deletes all keys above a threshold.
+    Clear {
+        /// The minimum key to be deleted.
+        min_key: usize,
+    },
+
+    /// Compacts the store until a given capacity is immediately available.
+    Prepare {
+        /// How much capacity should be immediately available after compaction.
+        length: usize,
+    },
+}
+
+impl StoreModel {
+    /// Creates an empty model for a given storage configuration.
+    pub fn new(format: Format) -> StoreModel {
+        let content = HashMap::new();
+        StoreModel { content, format }
+    }
+
+    /// Returns the modeled content.
+    pub fn content(&self) -> &HashMap<usize, Box<[u8]>> {
+        &self.content
+    }
+
+    /// Returns the storage configuration.
+    pub fn format(&self) -> &Format {
+        &self.format
+    }
+
+    /// Simulates a store operation.
+    pub fn apply(&mut self, operation: StoreOperation) -> StoreResult<()> {
+        match operation {
+            StoreOperation::Transaction { updates } => self.transaction(updates),
+            StoreOperation::Clear { min_key } => self.clear(min_key),
+            StoreOperation::Prepare { length } => self.prepare(length),
+        }
+    }
+
+    /// Returns the capacity according to the model.
+    pub fn capacity(&self) -> StoreRatio {
+        let total = self.format.total_capacity();
+        let used: usize = self.content.values().map(|x| self.entry_size(x)).sum();
+        StoreRatio { used, total }
+    }
+
+    /// Applies a transaction.
+    fn transaction(&mut self, updates: Vec<StoreUpdate>) -> StoreResult<()> {
+        // Fail if there are too many updates.
+        if updates.len() > self.format.max_updates() {
+            return Err(StoreError::InvalidArgument);
+        }
+        // Fail if an update is invalid.
+        if !updates.iter().all(|x| self.update_valid(x)) {
+            return Err(StoreError::InvalidArgument);
+        }
+        // Fail if the updates are not disjoint, i.e. there are duplicate keys.
+        let keys: HashSet<_> = updates.iter().map(|x| x.key()).collect();
+        if keys.len() != updates.len() {
+            return Err(StoreError::InvalidArgument);
+        }
+        // Fail if there is not enough capacity.
+        let capacity = match updates.len() {
+            // An empty transaction doesn't consume anything.
+            0 => 0,
+            // Transactions with a single update are optimized by avoiding a marker entry.
+            1 => match &updates[0] {
+                StoreUpdate::Insert { value, .. } => self.entry_size(value),
+                // A transaction whose single update is a removal doesn't consume anything.
+                StoreUpdate::Remove { .. } => 0,
+            },
+            // A transaction consumes one word for the marker entry in addition to its updates.
+            _ => 1 + updates.iter().map(|x| self.update_size(x)).sum::<usize>(),
+        };
+        if self.capacity().remaining() < capacity {
+            return Err(StoreError::NoCapacity);
+        }
+        // Apply the updates.
+        for update in updates {
+            match update {
+                StoreUpdate::Insert { key, value } => {
+                    self.content.insert(key, value.into_boxed_slice());
+                }
+                StoreUpdate::Remove { key } => {
+                    self.content.remove(&key);
+                }
+            }
+        }
+        Ok(())
+    }
+
+    /// Applies a clear operation.
+    fn clear(&mut self, min_key: usize) -> StoreResult<()> {
+        if min_key > self.format.max_key() {
+            return Err(StoreError::InvalidArgument);
+        }
+        self.content.retain(|&k, _| k < min_key);
+        Ok(())
+    }
+
+    /// Applies a prepare operation.
+    fn prepare(&self, length: usize) -> StoreResult<()> {
+        if self.capacity().remaining() < length {
+            return Err(StoreError::NoCapacity);
+        }
+        Ok(())
+    }
+
+    /// Returns the word capacity of an update.
+    fn update_size(&self, update: &StoreUpdate) -> usize {
+        match update {
+            StoreUpdate::Insert { value, .. } => self.entry_size(value),
+            StoreUpdate::Remove { .. } => 1,
+        }
+    }
+
+    /// Returns the word capacity of an entry.
+    fn entry_size(&self, value: &[u8]) -> usize {
+        1 + self.format.bytes_to_words(value.len())
+    }
+
+    /// Returns whether an update is valid.
+    fn update_valid(&self, update: &StoreUpdate) -> bool {
+        update.key() <= self.format.max_key()
+            && update
+                .value()
+                .map_or(true, |x| x.len() <= self.format.max_value_len())
+    }
+}
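For reference, a short sketch of how a driver might exercise this model. It assumes an already-constructed `Format` value, which this diff does not show how to build. Note the word arithmetic in `entry_size`: with 4-byte words, inserting a 5-byte value costs `1 + ceil(5/4) = 3` words, assuming `bytes_to_words` rounds up.

```rust
// Sketch only: `format` is an assumed, already-constructed `Format`.
fn demo(format: Format) -> StoreResult<()> {
    let mut model = StoreModel::new(format);
    // A two-update transaction: one insert and one removal.
    model.apply(StoreOperation::Transaction {
        updates: vec![
            StoreUpdate::Insert { key: 1, value: vec![0xAA; 5] },
            StoreUpdate::Remove { key: 2 },
        ],
    })?;
    // The modeled content reflects both updates.
    assert_eq!(model.content().get(&1).map(|v| v.len()), Some(5));
    assert!(!model.content().contains_key(&2));
    Ok(())
}
```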
@@ -13,7 +13,9 @@
 // limitations under the License.
 
 use crate::StorageError;
+use alloc::vec::Vec;
 
+/// Errors returned by store operations.
 #[derive(Debug, PartialEq, Eq)]
 pub enum StoreError {
     /// Invalid argument.
@@ -60,4 +62,69 @@ impl From<StorageError> for StoreError {
     }
 }
 
+/// Result of store operations.
 pub type StoreResult<T> = Result<T, StoreError>;
+
+/// Progression ratio for store metrics.
+///
+/// This is used for the [capacity] and [lifetime] metrics. Those metrics are measured in words.
+///
+/// # Invariant
+///
+/// - The used value does not exceed the total: `used <= total`.
+///
+/// [capacity]: struct.Store.html#method.capacity
+/// [lifetime]: struct.Store.html#method.lifetime
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub struct StoreRatio {
+    /// How much of the metric is used.
+    pub(crate) used: usize,
+
+    /// How much of the metric can be used at most.
+    pub(crate) total: usize,
+}
+
+impl StoreRatio {
+    /// How much of the metric is used.
+    pub fn used(self) -> usize {
+        self.used
+    }
+
+    /// How much of the metric can be used at most.
+    pub fn total(self) -> usize {
+        self.total
+    }
+
+    /// How much of the metric is remaining.
+    pub fn remaining(self) -> usize {
+        self.total - self.used
+    }
+}
+
+/// Represents an update to the store as part of a transaction.
+#[derive(Clone, Debug)]
+pub enum StoreUpdate {
+    /// Inserts or replaces an entry in the store.
+    Insert { key: usize, value: Vec<u8> },
+
+    /// Removes an entry from the store.
+    Remove { key: usize },
+}
+
+impl StoreUpdate {
+    /// Returns the key affected by the update.
+    pub fn key(&self) -> usize {
+        match *self {
+            StoreUpdate::Insert { key, .. } => key,
+            StoreUpdate::Remove { key } => key,
+        }
+    }
+
+    /// Returns the value written by the update.
+    pub fn value(&self) -> Option<&[u8]> {
+        match self {
+            StoreUpdate::Insert { value, .. } => Some(value),
+            StoreUpdate::Remove { .. } => None,
+        }
+    }
+}
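A quick sketch of the accessors above, written from inside the crate (where the `StoreRatio` fields are visible; external callers would use `used()` and `total()` instead):

```rust
fn demo_accessors() {
    // remaining() is simply total - used, safe by the `used <= total` invariant.
    let capacity = StoreRatio { used: 40, total: 100 };
    assert_eq!(capacity.remaining(), 60);

    // StoreUpdate exposes its key, and its value only for inserts.
    let update = StoreUpdate::Insert { key: 7, value: vec![1, 2, 3] };
    assert_eq!(update.key(), 7);
    assert_eq!(update.value(), Some(&[1u8, 2, 3][..]));
    assert_eq!(StoreUpdate::Remove { key: 7 }.value(), None);
}
```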
@@ -1,9 +1,9 @@
 dd5920dfb172d9371b29d019b6a37fae1a995bf9d814000944d9ef36bad31513 third_party/tock/target/thumbv7em-none-eabi/release/nrf52840dk.bin
-cf2fb98364ab8520d771090aa59859c1e628c32c01c7b73c000162f579411fc7 target/nrf52840dk_merged.hex
+18f111cd3f86b9e06979f2f16465d217bde6af4613e561883c32235992b57099 target/nrf52840dk_merged.hex
 e4acfa602a5cc5d7c61d465f873918e8e0858628d0e5f8e0db26a7b7dd0b94d4 third_party/tock/target/thumbv7em-none-eabi/release/nrf52840_dongle.bin
-588c55ece0bf45d783ca513e12d4072668d9749b9320a13380fcc498a2855a7b target/nrf52840_dongle_merged.hex
+dd283a1949a9ffb7bf81ef5a0dcd4c45adfb8dda8344a672bff160a917e0b6b9 target/nrf52840_dongle_merged.hex
 c0ace9f13ef3fd18c576a735ae23b3956bf8dd346f20c6217086e748d6bad8a2 third_party/tock/target/thumbv7em-none-eabi/release/nrf52840_dongle_dfu.bin
-4b3f87e9bb992198f5885cc689e1cf1f8e93519f107614d316473ef43f8be7a4 target/nrf52840_dongle_dfu_merged.hex
+11c0dad7abd513066732952fd5ad9988b0b45339683d93fcd8f4660d62d93aa1 target/nrf52840_dongle_dfu_merged.hex
 06a38a0d6d356145467a73c765e28a945878f663664016f888393207097bfe10 third_party/tock/target/thumbv7em-none-eabi/release/nrf52840_mdk_dfu.bin
-c35a1501eed3024821f0f17f2a50e1c27925ccabee4c633e10fa10e5bcd197ac target/nrf52840_mdk_dfu_merged.hex
+c584d6e22b0a4a80fccc1e557ed95c744a02f12107f7a5b3c5ec31f06a0f781f target/nrf52840_mdk_dfu_merged.hex
-17cd41e1ab1bfa683b5ec79333779a95965668fea146d2a178ca35ab20b16ece target/tab/ctap2.tab
+c56962d46ad423b61bb0edc39d8a4a135c22e59fee440ddbfcade3f8136e7b39 target/tab/ctap2.tab
@@ -1,9 +1,9 @@
 2426ee9a6c75e325537818081d45445d95468a4c0a77feacdc6133d7d9aa227a third_party/tock/target/thumbv7em-none-eabi/release/nrf52840dk.bin
-c0cdbad7d4d7d9bd64ad7db43046ada19990b8f43a88e6ec3f508bb233827966 target/nrf52840dk_merged.hex
+a5c6deb3931715c003ad79ccd2847bf5085b20d888908f5b589531077f55752f target/nrf52840dk_merged.hex
 c53d1e1db72df25950fa6d28699a2d38757def0dcbeb0d09d2366481cf0149a6 third_party/tock/target/thumbv7em-none-eabi/release/nrf52840_dongle.bin
-7bb26ae63b60e136852133c0da1d1cbf550a98012f9ba3bb4d27fc682a504880 target/nrf52840_dongle_merged.hex
+eff4f034398895536391d52e3874c15b7952dd113e455994b09847ed9fb04160 target/nrf52840_dongle_merged.hex
 233b5ba4459523759e3171cee83cdb3a383bbe65727c8ece64dfe5321d6ebe34 third_party/tock/target/thumbv7em-none-eabi/release/nrf52840_dongle_dfu.bin
-fbcc441dc5faf87baffe57226fd52c8e63804cd0d76f04057474954a74985217 target/nrf52840_dongle_dfu_merged.hex
+29c664a35a3e400a1608573e56313bf1364e648174467e4a64de78c434a5caf0 target/nrf52840_dongle_dfu_merged.hex
 1baaf518a74c6077cb936d9cf178b6dd0232e7562fa56174886b05b77886cc32 third_party/tock/target/thumbv7em-none-eabi/release/nrf52840_mdk_dfu.bin
-80c430b911c967dcf1d60d7456d381162a342733e4e42a6f64f855120339de35 target/nrf52840_mdk_dfu_merged.hex
+0d175e760518c1734b425e291f0d60c39b4f5e8c96dbffca5f17f4fc225551f0 target/nrf52840_mdk_dfu_merged.hex
-62b4bea855a5757fe70c2690f8c05d8ed0078d688de07b6695f9f73bc265f2f6 target/tab/ctap2.tab
+cfad3b9f3d6ee1a80f4e47a66af49875c19c37c363699780529f946c6c9c29b9 target/tab/ctap2.tab
@@ -6,8 +6,8 @@ Min RAM size from segments in ELF: 20 bytes
 Number of writeable flash regions: 0
 Adding .crt0_header section. Offset: 64 (0x40). Length: 64 (0x40) bytes.
 Entry point is in .text section
-Adding .text section. Offset: 128 (0x80). Length: 187320 (0x2dbb8) bytes.
-Adding .stack section. Offset: 187448 (0x2dc38). Length: 16384 (0x4000) bytes.
+Adding .text section. Offset: 128 (0x80). Length: 187288 (0x2db98) bytes.
+Adding .stack section. Offset: 187416 (0x2dc18). Length: 16384 (0x4000) bytes.
 Searching for .rel.X sections to add.
 TBF Header:
     version: 2 0x2
@@ -30,8 +30,8 @@ Min RAM size from segments in ELF: 20 bytes
 Number of writeable flash regions: 0
 Adding .crt0_header section. Offset: 64 (0x40). Length: 64 (0x40) bytes.
 Entry point is in .text section
-Adding .text section. Offset: 128 (0x80). Length: 187320 (0x2dbb8) bytes.
-Adding .stack section. Offset: 187448 (0x2dc38). Length: 16384 (0x4000) bytes.
+Adding .text section. Offset: 128 (0x80). Length: 187288 (0x2db98) bytes.
+Adding .stack section. Offset: 187416 (0x2dc18). Length: 16384 (0x4000) bytes.
 Searching for .rel.X sections to add.
 TBF Header:
     version: 2 0x2
@@ -54,8 +54,8 @@ Min RAM size from segments in ELF: 20 bytes
 Number of writeable flash regions: 0
 Adding .crt0_header section. Offset: 64 (0x40). Length: 64 (0x40) bytes.
 Entry point is in .text section
-Adding .text section. Offset: 128 (0x80). Length: 187320 (0x2dbb8) bytes.
-Adding .stack section. Offset: 187448 (0x2dc38). Length: 16384 (0x4000) bytes.
+Adding .text section. Offset: 128 (0x80). Length: 187288 (0x2db98) bytes.
+Adding .stack section. Offset: 187416 (0x2dc18). Length: 16384 (0x4000) bytes.
 Searching for .rel.X sections to add.
 TBF Header:
     version: 2 0x2
@@ -78,8 +78,8 @@ Min RAM size from segments in ELF: 20 bytes
 Number of writeable flash regions: 0
 Adding .crt0_header section. Offset: 64 (0x40). Length: 64 (0x40) bytes.
 Entry point is in .text section
-Adding .text section. Offset: 128 (0x80). Length: 187320 (0x2dbb8) bytes.
-Adding .stack section. Offset: 187448 (0x2dc38). Length: 16384 (0x4000) bytes.
+Adding .text section. Offset: 128 (0x80). Length: 187288 (0x2db98) bytes.
+Adding .stack section. Offset: 187416 (0x2dc18). Length: 16384 (0x4000) bytes.
 Searching for .rel.X sections to add.
 TBF Header:
     version: 2 0x2
@@ -6,8 +6,8 @@ Min RAM size from segments in ELF: 20 bytes
 Number of writeable flash regions: 0
 Adding .crt0_header section. Offset: 64 (0x40). Length: 64 (0x40) bytes.
 Entry point is in .text section
-Adding .text section. Offset: 128 (0x80). Length: 187008 (0x2da80) bytes.
-Adding .stack section. Offset: 187136 (0x2db00). Length: 16384 (0x4000) bytes.
+Adding .text section. Offset: 128 (0x80). Length: 186992 (0x2da70) bytes.
+Adding .stack section. Offset: 187120 (0x2daf0). Length: 16384 (0x4000) bytes.
 Searching for .rel.X sections to add.
 TBF Header:
     version: 2 0x2
@@ -30,8 +30,8 @@ Min RAM size from segments in ELF: 20 bytes
 Number of writeable flash regions: 0
 Adding .crt0_header section. Offset: 64 (0x40). Length: 64 (0x40) bytes.
 Entry point is in .text section
-Adding .text section. Offset: 128 (0x80). Length: 187008 (0x2da80) bytes.
-Adding .stack section. Offset: 187136 (0x2db00). Length: 16384 (0x4000) bytes.
+Adding .text section. Offset: 128 (0x80). Length: 186992 (0x2da70) bytes.
+Adding .stack section. Offset: 187120 (0x2daf0). Length: 16384 (0x4000) bytes.
 Searching for .rel.X sections to add.
 TBF Header:
     version: 2 0x2
@@ -54,8 +54,8 @@ Min RAM size from segments in ELF: 20 bytes
 Number of writeable flash regions: 0
 Adding .crt0_header section. Offset: 64 (0x40). Length: 64 (0x40) bytes.
 Entry point is in .text section
-Adding .text section. Offset: 128 (0x80). Length: 187008 (0x2da80) bytes.
-Adding .stack section. Offset: 187136 (0x2db00). Length: 16384 (0x4000) bytes.
+Adding .text section. Offset: 128 (0x80). Length: 186992 (0x2da70) bytes.
+Adding .stack section. Offset: 187120 (0x2daf0). Length: 16384 (0x4000) bytes.
 Searching for .rel.X sections to add.
 TBF Header:
     version: 2 0x2
@@ -78,8 +78,8 @@ Min RAM size from segments in ELF: 20 bytes
 Number of writeable flash regions: 0
 Adding .crt0_header section. Offset: 64 (0x40). Length: 64 (0x40) bytes.
 Entry point is in .text section
-Adding .text section. Offset: 128 (0x80). Length: 187008 (0x2da80) bytes.
-Adding .stack section. Offset: 187136 (0x2db00). Length: 16384 (0x4000) bytes.
+Adding .text section. Offset: 128 (0x80). Length: 186992 (0x2da70) bytes.
+Adding .stack section. Offset: 187120 (0x2daf0). Length: 16384 (0x4000) bytes.
 Searching for .rel.X sections to add.
 TBF Header:
     version: 2 0x2
@@ -373,7 +373,7 @@ where
             }
             (extensions.hmac_secret, cred_protect)
         } else {
-            (false, None)
+            (false, DEFAULT_CRED_PROTECT)
         };
 
         let cred_random = if use_hmac_extension {
third_party/libtock-drivers/Cargo.toml (vendored, 1 line changed)
@@ -14,3 +14,4 @@ libtock_core = { path = "../../third_party/libtock-rs/core" }
 [features]
 debug_ctap = []
 verbose_usb = ["debug_ctap"]
+with_nfc = []
third_party/libtock-drivers/src/lib.rs (vendored, 1 line changed)
@@ -3,6 +3,7 @@
 pub mod buttons;
 pub mod console;
 pub mod led;
+#[cfg(feature = "with_nfc")]
 pub mod nfc;
 pub mod result;
 pub mod rng;
third_party/libtock-drivers/src/nfc.rs (vendored, 143 lines changed)
@@ -1,21 +1,22 @@
+use crate::result::TockResult;
 use crate::util;
 use core::cell::Cell;
+use core::mem;
 use libtock_core::{callback, syscalls};
 
 const DRIVER_NUMBER: usize = 0x30003;
 
 mod command_nr {
+    pub const CHECK: usize = 0;
     pub const TRANSMIT: usize = 1;
     pub const RECEIVE: usize = 2;
     pub const EMULATE: usize = 3;
     pub const CONFIGURE: usize = 4;
-    pub const FRAMEDELAYMAX: usize = 5;
 }
 
 mod subscribe_nr {
     pub const TRANSMIT: usize = 1;
     pub const RECEIVE: usize = 2;
-    pub const SELECT: usize = 3;
 }
 
 mod allow_nr {
@@ -23,122 +24,80 @@ mod allow_nr {
     pub const RECEIVE: usize = 2;
 }
 
+#[allow(dead_code)]
+#[derive(Clone, Copy)]
+pub struct RecvOp {
+    pub result_code: usize,
+    pub recv_amount: usize,
+}
+
 pub struct NfcTag {}
 
 impl NfcTag {
-    pub fn enable_emulation() {
-        NfcTag::emulate(true);
+    /// Check the existence of an NFC driver.
+    pub fn setup() -> bool {
+        syscalls::command(DRIVER_NUMBER, command_nr::CHECK, 0, 0).is_ok()
     }
 
-    pub fn disable_emulation() {
-        NfcTag::emulate(false);
+    pub fn enable_emulation() -> bool {
+        NfcTag::emulate(true)
     }
 
-    pub fn emulate(enabled: bool) -> bool {
-        let result_code =
-            syscalls::command(DRIVER_NUMBER, command_nr::EMULATE, enabled as usize, 0);
-        if result_code.is_err() {
-            return false;
-        }
-
-        true
+    pub fn disable_emulation() -> bool {
+        NfcTag::emulate(false)
     }
 
-    /// Subscribe to the tag being SELECTED callback.
-    pub fn selected() -> bool {
-        let is_selected = Cell::new(false);
-        let mut is_selected_alarm = || is_selected.set(true);
-        let subscription = syscalls::subscribe::<callback::Identity0Consumer, _>(
-            DRIVER_NUMBER,
-            subscribe_nr::SELECT,
-            &mut is_selected_alarm,
-        );
-        if subscription.is_err() {
-            return false;
-        }
-
-        util::yieldk_for(|| is_selected.get());
-        true
+    fn emulate(enabled: bool) -> bool {
+        syscalls::command(DRIVER_NUMBER, command_nr::EMULATE, enabled as usize, 0).is_ok()
     }
 
     /// Configure the tag type command.
     pub fn configure(tag_type: u8) -> bool {
-        let result_code =
-            syscalls::command(DRIVER_NUMBER, command_nr::CONFIGURE, tag_type as usize, 0);
-        if result_code.is_err() {
-            return false;
-        }
-
-        true
-    }
-
-    /// Set the maximum frame delay value to support
-    /// transmission with the reader.
-    pub fn set_framedelaymax(delay: u32) -> bool {
-        let result_code =
-            syscalls::command(DRIVER_NUMBER, command_nr::FRAMEDELAYMAX, delay as usize, 0);
-        if result_code.is_err() {
-            return false;
-        }
-
-        true
+        syscalls::command(DRIVER_NUMBER, command_nr::CONFIGURE, tag_type as usize, 0).is_ok()
     }
 
     /// 1. Share with the driver a buffer.
     /// 2. Subscribe to having a successful receive callback.
     /// 3. Issue the request for reception.
-    pub fn receive(buf: &mut [u8]) -> bool {
-        let result = syscalls::allow(DRIVER_NUMBER, allow_nr::RECEIVE, buf);
-        if result.is_err() {
-            return false;
-        }
-
-        let done = Cell::new(false);
-        let mut alarm = || done.set(true);
-        let subscription = syscalls::subscribe::<callback::Identity0Consumer, _>(
+    pub fn receive(buf: &mut [u8; 256]) -> TockResult<RecvOp> {
+        let result = syscalls::allow(DRIVER_NUMBER, allow_nr::RECEIVE, buf)?;
+        // Set a callback with 2 arguments, to receive the ReturnCode and the RX amount.
+        let recv_data = Cell::new(None);
+        let mut callback = |result, amount| {
+            recv_data.set(Some(RecvOp {
+                result_code: result,
+                recv_amount: amount,
+            }))
+        };
+        let subscription = syscalls::subscribe::<callback::Identity2Consumer, _>(
             DRIVER_NUMBER,
             subscribe_nr::RECEIVE,
-            &mut alarm,
-        );
-        if subscription.is_err() {
-            return false;
-        }
-
-        let result_code = syscalls::command(DRIVER_NUMBER, command_nr::RECEIVE, 0, 0);
-        if result_code.is_err() {
-            return false;
-        }
-
-        util::yieldk_for(|| done.get());
-        true
+            &mut callback,
+        )?;
+        syscalls::command(DRIVER_NUMBER, command_nr::RECEIVE, 0, 0)?;
+        util::yieldk_for(|| recv_data.get().is_some());
+        mem::drop(subscription);
+        mem::drop(result);
+        Ok(recv_data.get().unwrap())
     }
 
     /// 1. Share with the driver a buffer containing the app's reply.
     /// 2. Subscribe to having a successful transmission callback.
     /// 3. Issue the request for transmitting.
-    pub fn transmit(buf: &mut [u8], amount: usize) -> bool {
-        let result = syscalls::allow(DRIVER_NUMBER, allow_nr::TRANSMIT, buf);
-        if result.is_err() {
-            return false;
-        }
-
-        let done = Cell::new(false);
-        let mut alarm = || done.set(true);
-        let subscription = syscalls::subscribe::<callback::Identity0Consumer, _>(
+    pub fn transmit(buf: &mut [u8], amount: usize) -> TockResult<usize> {
+        let result = syscalls::allow(DRIVER_NUMBER, allow_nr::TRANSMIT, buf)?;
+        // Set a callback with 1 argument, to receive the ReturnCode.
+        let result_code = Cell::new(None);
+        let mut callback = |result| result_code.set(Some(result));
+        let subscription = syscalls::subscribe::<callback::Identity1Consumer, _>(
             DRIVER_NUMBER,
             subscribe_nr::TRANSMIT,
-            &mut alarm,
-        );
-        if subscription.is_err() {
-            return false;
-        }
-
-        let result_code = syscalls::command(DRIVER_NUMBER, command_nr::TRANSMIT, amount, 0);
-        if result_code.is_err() {
-            return false;
-        }
-
-        util::yieldk_for(|| done.get());
-        true
+            &mut callback,
+        )?;
+        syscalls::command(DRIVER_NUMBER, command_nr::TRANSMIT, amount, 0)?;
+        util::yieldk_for(|| result_code.get().is_some());
+        mem::drop(subscription);
+        mem::drop(result);
+        Ok(result_code.get().unwrap())
     }
 }
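With this API, a caller might drive tag emulation as sketched below. The crate path, the tag type `4`, and the echo behavior are illustrative assumptions, not part of this diff.

```rust
use libtock_drivers::nfc::NfcTag;

fn emulate_once() {
    // Bail out if the kernel was built without the NFC capsule.
    if !NfcTag::setup() {
        return;
    }
    NfcTag::enable_emulation();
    // Hypothetical tag type; the driver's accepted values are not shown here.
    if !NfcTag::configure(4) {
        return;
    }
    // The driver shares a fixed 256-byte frame buffer.
    let mut buf = [0u8; 256];
    if let Ok(recv) = NfcTag::receive(&mut buf) {
        // Illustrative only: echo the received frame back to the reader.
        let _ = NfcTag::transmit(&mut buf, recv.recv_amount);
    }
    NfcTag::disable_emulation();
}
```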