Merge branch 'master' into master

Jean-Michel Picod authored 2020-04-24 15:37:46 +02:00, committed by GitHub
31 changed files with 446 additions and 185 deletions

View File

@@ -27,15 +27,11 @@ jobs:
- name: Set up OpenSK
run: ./setup.sh
- name: Building board nrf52840dk
run: ./deploy.py --board=nrf52840dk --no-app --programmer=none
- name: Building board nrf52840_dongle
run: ./deploy.py --board=nrf52840_dongle --no-app --programmer=none
- name: Building board nrf52840_dongle_dfu
run: ./deploy.py --board=nrf52840_dongle_dfu --no-app --programmer=none
- name: Building board nrf52840_mdk_dfu
run: ./deploy.py --board=nrf52840_mdk_dfu --no-app --programmer=none
- name: Create a long build directory
run: mkdir this-is-a-long-build-directory-0123456789abcdefghijklmnopqrstuvwxyz && mv third_party this-is-a-long-build-directory-0123456789abcdefghijklmnopqrstuvwxyz/
- name: Building board nrf52840dk
run: make -C this-is-a-long-build-directory-0123456789abcdefghijklmnopqrstuvwxyz/third_party/tock/boards/nordic/nrf52840dk
- name: Building board nrf52840_dongle
run: make -C this-is-a-long-build-directory-0123456789abcdefghijklmnopqrstuvwxyz/third_party/tock/boards/nordic/nrf52840_dongle

View File

@@ -64,17 +64,23 @@ jobs:
command: check
args: --target thumbv7em-none-eabi --release --features ram_storage
- name: Check OpenSK verbose
uses: actions-rs/cargo@v1
with:
command: check
args: --target thumbv7em-none-eabi --release --features verbose
- name: Check OpenSK debug_ctap,with_ctap1
uses: actions-rs/cargo@v1
with:
command: check
args: --target thumbv7em-none-eabi --release --features debug_ctap,with_ctap1
-- name: Check OpenSK debug_ctap,with_ctap1,panic_console,debug_allocations
+- name: Check OpenSK debug_ctap,with_ctap1,panic_console,debug_allocations,verbose
uses: actions-rs/cargo@v1
with:
command: check
-args: --target thumbv7em-none-eabi --release --features debug_ctap,with_ctap1,panic_console,debug_allocations
+args: --target thumbv7em-none-eabi --release --features debug_ctap,with_ctap1,panic_console,debug_allocations,verbose
- name: Check examples
uses: actions-rs/cargo@v1

View File

@@ -20,6 +20,7 @@ jobs:
- uses: actions-rs/toolchain@v1
with:
target: thumbv7em-none-eabi
components: rustfmt
- uses: actions/setup-python@v1
with:
python-version: 3.7

View File

@@ -18,12 +18,13 @@ arrayref = "0.3.6"
subtle = { version = "2.2", default-features = false, features = ["nightly"] }
[features]
-std = ["cbor/std", "crypto/std", "crypto/derive_debug"]
-debug_ctap = ["crypto/derive_debug"]
-with_ctap1 = ["crypto/with_ctap1"]
-panic_console = ["libtock/panic_console"]
debug_allocations = ["libtock/debug_allocations"]
+debug_ctap = ["crypto/derive_debug"]
+panic_console = ["libtock/panic_console"]
+std = ["cbor/std", "crypto/std", "crypto/derive_debug"]
ram_storage = []
+verbose = ["debug_ctap"]
+with_ctap1 = ["crypto/with_ctap1"]
[dev-dependencies]
elf2tab = "0.4.0"
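Note on the reordered feature list above: the new `verbose` feature also enables `debug_ctap` (per `verbose = ["debug_ctap"]`), and it only takes effect at compile time through `#[cfg(feature = ...)]` gates, as the USB transport changes later in this commit show. A minimal, hypothetical sketch of that pattern; plain `println!` stands in here for the Tock console used in the real code:

    // Extra logging is compiled in only when built with `--features verbose`.
    fn log_packet(buf: &[u8]) {
        #[cfg(feature = "verbose")]
        println!("Received packet = {:02x?}", buf);
        // Without the feature the argument is simply unused.
        #[cfg(not(feature = "verbose"))]
        let _ = buf;
    }

    fn main() {
        log_packet(&[0x01, 0x02, 0x03]);
    }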

View File

@@ -0,0 +1,18 @@
[package]
name = "nrf52840_dongle_dfu"
version = "0.1.0"
authors = ["Tock Project Developers <tock-dev@googlegroups.com>"]
build = "build.rs"
edition = "2018"
[[bin]]
path = "../nrf52840_dongle/src/main.rs"
name = "nrf52840_dongle_dfu"
[dependencies]
components = { path = "../../components" }
cortexm4 = { path = "../../../arch/cortex-m4" }
capsules = { path = "../../../capsules" }
kernel = { path = "../../../kernel" }
nrf52840 = { path = "../../../chips/nrf52840" }
nrf52dk_base = { path = "../nrf52dk_base" }

View File

@@ -4,7 +4,7 @@ TOCK_ARCH=cortex-m4
TARGET=thumbv7em-none-eabi
PLATFORM=nrf52840_dongle_dfu
-include ../../third_party/tock/boards/Makefile.common
+include ../../Makefile.common
TOCKLOADER=tockloader
@@ -20,10 +20,10 @@ TOCKLOADER_JTAG_FLAGS = --jlink --arch $(TOCK_ARCH) --board $(PLATFORM) --page-s
# Upload the kernel over JTAG
.PHONY: flash
-flash: target/$(TARGET)/release/$(PLATFORM).bin
+flash: $(TOCK_ROOT_DIRECTORY)target/$(TARGET)/release/$(PLATFORM).bin
$(TOCKLOADER) $(TOCKLOADER_GENERAL_FLAGS) flash --address $(KERNEL_ADDRESS) $(TOCKLOADER_JTAG_FLAGS) $<
# Upload the kernel over serial/bootloader
.PHONY: program
-program: target/$(TARGET)/release/$(PLATFORM).hex
+program: $(TOCK_ROOT_DIRECTORY)target/$(TARGET)/release/$(PLATFORM).hex
$(error Cannot program nRF52 Dongle over USB. Use \`make flash\` and JTAG)

View File

@@ -0,0 +1,4 @@
fn main() {
println!("cargo:rerun-if-changed=layout.ld");
println!("cargo:rerun-if-changed=../../kernel_layout.ld");
}

View File

@@ -7,4 +7,4 @@ MEMORY
MPU_MIN_ALIGN = 8K;
-INCLUDE ../../third_party/tock/boards/kernel_layout.ld
+INCLUDE ../../kernel_layout.ld

View File

@@ -0,0 +1,14 @@
[package]
name = "nrf52840_mdk_dfu"
version = "0.1.0"
authors = ["Yihui Xiong <yihui.xiong@hotmail.com>"]
build = "build.rs"
edition = "2018"
[dependencies]
components = { path = "../../components" }
cortexm4 = { path = "../../../arch/cortex-m4" }
capsules = { path = "../../../capsules" }
kernel = { path = "../../../kernel" }
nrf52840 = { path = "../../../chips/nrf52840" }
nrf52dk_base = { path = "../nrf52dk_base" }

View File

@@ -4,7 +4,7 @@ TOCK_ARCH=cortex-m4
TARGET=thumbv7em-none-eabi
PLATFORM=nrf52840_mdk_dfu
-include ../../third_party/tock/boards/Makefile.common
+include ../../Makefile.common
TOCKLOADER=tockloader
@@ -20,10 +20,10 @@ TOCKLOADER_JTAG_FLAGS = --jlink --arch $(TOCK_ARCH) --board $(PLATFORM) --page-s
# Upload the kernel over JTAG
.PHONY: flash
-flash: target/$(TARGET)/release/$(PLATFORM).bin
+flash: $(TOCK_ROOT_DIRECTORY)target/$(TARGET)/release/$(PLATFORM).bin
$(TOCKLOADER) $(TOCKLOADER_GENERAL_FLAGS) flash --address $(KERNEL_ADDRESS) $(TOCKLOADER_JTAG_FLAGS) $<
# Upload the kernel over serial/bootloader
.PHONY: program
-program: target/$(TARGET)/release/$(PLATFORM).hex
+program: $(TOCK_ROOT_DIRECTORY)target/$(TARGET)/release/$(PLATFORM).hex
-$(error Cannot program nRF52 Dongle over USB. Use \`make flash\` and JTAG)
+$(error Cannot program nRF52840-MDK over USB. Use \`make flash\` and JTAG)

View File

@@ -0,0 +1,4 @@
fn main() {
println!("cargo:rerun-if-changed=layout.ld");
println!("cargo:rerun-if-changed=../../kernel_layout.ld");
}

View File

@@ -7,4 +7,4 @@ MEMORY
MPU_MIN_ALIGN = 8K;
-INCLUDE ../../third_party/tock/boards/kernel_layout.ld
+INCLUDE ../../kernel_layout.ld

View File

@@ -1,30 +0,0 @@
[package]
name = "nrf52840_dongle_dfu"
version = "0.1.0"
authors = ["Tock Project Developers <tock-dev@googlegroups.com>"]
build = "build.rs"
edition = "2018"
[profile.dev]
panic = "abort"
lto = false
opt-level = "z"
debug = true
[profile.release]
panic = "abort"
lto = true
opt-level = "z"
debug = true
[[bin]]
path = "../../third_party/tock/boards/nordic/nrf52840_dongle/src/main.rs"
name = "nrf52840_dongle_dfu"
[dependencies]
components = { path = "../../third_party/tock/boards/components" }
cortexm4 = { path = "../../third_party/tock/arch/cortex-m4" }
capsules = { path = "../../third_party/tock/capsules" }
kernel = { path = "../../third_party/tock/kernel" }
nrf52840 = { path = "../../third_party/tock/chips/nrf52840" }
nrf52dk_base = { path = "../../third_party/tock/boards/nordic/nrf52dk_base" }

View File

@@ -1,4 +0,0 @@
fn main() {
println!("cargo:rerun-if-changed=layout.ld");
println!("cargo:rerun-if-changed=../../third_party/tock/boards/kernel_layout.ld");
}

View File

@@ -1,26 +0,0 @@
[package]
name = "nrf52840_mdk_dfu"
version = "0.1.0"
authors = ["Yihui Xiong <yihui.xiong@hotmail.com>"]
build = "build.rs"
edition = "2018"
[profile.dev]
panic = "abort"
lto = false
opt-level = "z"
debug = true
[profile.release]
panic = "abort"
lto = true
opt-level = "z"
debug = true
[dependencies]
components = { path = "../../third_party/tock/boards/components" }
cortexm4 = { path = "../../third_party/tock/arch/cortex-m4" }
capsules = { path = "../../third_party/tock/capsules" }
kernel = { path = "../../third_party/tock/kernel" }
nrf52840 = { path = "../../third_party/tock/chips/nrf52840" }
nrf52dk_base = { path = "../../third_party/tock/boards/nordic/nrf52dk_base" }

View File

@@ -1,4 +0,0 @@
fn main() {
println!("cargo:rerun-if-changed=layout.ld");
println!("cargo:rerun-if-changed=../../third_party/tock/boards/kernel_layout.ld");
}

View File

@@ -115,7 +115,7 @@ SUPPORTED_BOARDS = {
),
"nrf52840_dongle_dfu":
OpenSKBoard(
-path="boards/nrf52840_dongle_dfu",
+path="third_party/tock/boards/nordic/nrf52840_dongle_dfu",
arch="thumbv7em-none-eabi",
page_size=4096,
kernel_address=0x1000,
@@ -132,7 +132,7 @@ SUPPORTED_BOARDS = {
),
"nrf52840_mdk_dfu":
OpenSKBoard(
-path="boards/nrf52840_mdk_dfu",
+path="third_party/tock/boards/nordic/nrf52840_mdk_dfu",
arch="thumbv7em-none-eabi",
page_size=4096,
kernel_address=0x1000,
@@ -304,7 +304,8 @@ class OpenSKInstaller:
def build_tockos(self):
info("Building Tock OS for board {}".format(self.args.board))
props = SUPPORTED_BOARDS[self.args.board]
-out_directory = os.path.join(props.path, "target", props.arch, "release")
+out_directory = os.path.join("third_party", "tock", "target", props.arch,
+                             "release")
os.makedirs(out_directory, exist_ok=True)
self.checked_command_output(["make"], cwd=props.path)
@@ -418,8 +419,9 @@ class OpenSKInstaller:
def install_tock_os(self):
board_props = SUPPORTED_BOARDS[self.args.board]
-kernel_file = os.path.join(board_props.path, "target", board_props.arch,
-                           "release", "{}.bin".format(self.args.board))
+kernel_file = os.path.join("third_party", "tock", "target",
+                           board_props.arch, "release",
+                           "{}.bin".format(self.args.board))
info("Flashing file {}.".format(kernel_file))
with open(kernel_file, "rb") as f:
kernel = f.read()
@@ -481,8 +483,9 @@ class OpenSKInstaller:
if self.args.tockos:
# Process kernel
-kernel_path = os.path.join(board_props.path, "target", board_props.arch,
-                           "release", "{}.bin".format(self.args.board))
+kernel_path = os.path.join("third_party", "tock", "target",
+                           board_props.arch, "release",
+                           "{}.bin".format(self.args.board))
with open(kernel_path, "rb") as kernel:
kern_hex = intelhex.IntelHex()
kern_hex.frombytes(kernel.read(), offset=board_props.kernel_address)
@@ -705,6 +708,15 @@ if __name__ == "__main__":
"output messages before starting blinking the LEDs on the "
"board."),
)
main_parser.add_argument(
"--debug",
action="append_const",
const="debug_ctap",
dest="features",
help=("Compiles and installs the OpenSK application in debug mode "
"(i.e. more debug messages will be sent over the console port "
"such as hexdumps of packets)."),
)
main_parser.add_argument(
"--debug-allocations",
action="append_const",
@@ -713,6 +725,14 @@ if __name__ == "__main__":
help=("The console will be used to output allocator statistics every "
"time an allocation/deallocation happens."),
)
main_parser.add_argument(
"--verbose",
action="append_const",
const="verbose",
dest="features",
help=("The console will be used to output verbose information about the "
"OpenSK application. This also automatically activates --debug."),
)
main_parser.add_argument(
"--no-u2f",
action=RemoveConstAction,
@@ -731,15 +751,6 @@ if __name__ == "__main__":
"This is useful to allow flashing multiple OpenSK authenticators "
"in a row without them being considered clones."),
)
main_parser.add_argument(
"--debug",
action="append_const",
const="debug_ctap",
dest="features",
help=("Compiles and installs the OpenSK application in debug mode "
"(i.e. more debug messages will be sent over the console port "
"such as hexdumps of packets)."),
)
main_parser.add_argument(
"--no-persistent-storage",
action="append_const",

View File

@@ -50,8 +50,9 @@ In order to compile and flash a working OpenSK firmware, you will need the
following:
* rustup (can be installed with [Rustup](https://rustup.rs/))
-* python3 and pip
+* python3 and pip (can be installed with the `python3-pip` package on Debian)
-* the OpenSSL command line tool
+* the OpenSSL command line tool (can be installed with the `libssl-dev`
+  package on Debian)
The scripts provided in this project have been tested under Linux and OS X. We
haven't tested them on Windows and other platforms.

View File

@@ -0,0 +1,13 @@
diff --git a/Cargo.toml b/Cargo.toml
index 18f4a10d..db88dc1d 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -13,6 +13,8 @@ members = [
"boards/launchxl",
"boards/nordic/nrf52840dk",
"boards/nordic/nrf52840_dongle",
+ "boards/nordic/nrf52840_dongle_dfu",
+ "boards/nordic/nrf52840_mdk_dfu",
"boards/nordic/nrf52dk",
"boards/nucleo_f429zi",
"boards/nucleo_f446re",

View File

@@ -1,21 +0,0 @@
diff --git a/chips/nrf52/src/crt1.rs b/chips/nrf52/src/crt1.rs
index 9703aac..281ceeb 100644
--- a/chips/nrf52/src/crt1.rs
+++ b/chips/nrf52/src/crt1.rs
@@ -1,4 +1,4 @@
-use cortexm4::{generic_isr, hard_fault_handler, nvic, svc_handler, systick_handler};
+use cortexm4::{generic_isr, hard_fault_handler, nvic, scb, svc_handler, systick_handler};
use tock_rt0;
/*
@@ -168,5 +168,9 @@ pub unsafe extern "C" fn init() {
tock_rt0::init_data(&mut _etext, &mut _srelocate, &mut _erelocate);
tock_rt0::zero_bss(&mut _szero, &mut _ezero);
+ // Ensure that we are compatible with a bootloader.
+ // For this we need to offset our vector table
+ scb::set_vector_table_offset(BASE_VECTORS.as_ptr() as *const ());
+
nvic::enable_all();
}

View File

@@ -34,8 +34,9 @@ cargo check --release --target=thumbv7em-none-eabi --features debug_ctap
cargo check --release --target=thumbv7em-none-eabi --features panic_console
cargo check --release --target=thumbv7em-none-eabi --features debug_allocations
cargo check --release --target=thumbv7em-none-eabi --features ram_storage
cargo check --release --target=thumbv7em-none-eabi --features verbose
cargo check --release --target=thumbv7em-none-eabi --features debug_ctap,with_ctap1
-cargo check --release --target=thumbv7em-none-eabi --features debug_ctap,with_ctap1,panic_console,debug_allocations
+cargo check --release --target=thumbv7em-none-eabi --features debug_ctap,with_ctap1,panic_console,debug_allocations,verbose
echo "Checking that examples build properly..."
cargo check --release --target=thumbv7em-none-eabi --examples
@@ -49,8 +50,16 @@ make -C third_party/tock/boards/nordic/nrf52840dk
make -C third_party/tock/boards/nordic/nrf52840_dongle
echo "Checking that other boards build properly..."
-make -C boards/nrf52840_dongle_dfu
+make -C third_party/tock/boards/nordic/nrf52840_dongle_dfu
-make -C boards/nrf52840_mdk_dfu
+make -C third_party/tock/boards/nordic/nrf52840_mdk_dfu
echo "Checking deployment of supported boards..."
./deploy.py --board=nrf52840dk --no-app --programmer=none
./deploy.py --board=nrf52840_dongle --no-app --programmer=none
echo "Checking deployment of other boards..."
./deploy.py --board=nrf52840_dongle_dfu --no-app --programmer=none
./deploy.py --board=nrf52840_mdk_dfu --no-app --programmer=none
if [ -z "${TRAVIS_OS_NAME}" -o "${TRAVIS_OS_NAME}" = "linux" ]
then

View File

@@ -46,6 +46,11 @@ EOF
exit 1
}
# Copy additional boards to the kernel.
echo -n '[-] Copying additional boards to Tock... '
cp -r boards/* third_party/tock/boards
echo $done_text
# Apply patches to kernel. Do that in a sub-shell
(
cd third_party/tock/ && \

View File

@@ -462,6 +462,8 @@ pub struct CoseKey(pub BTreeMap<cbor::KeyType, cbor::Value>);
// here: https://www.iana.org/assignments/cose/cose.xhtml#algorithms
// In fact, this is just used for compatibility with older specification versions.
const ECDH_ALGORITHM: i64 = -25;
// This is the identifier used by OpenSSH. To be compatible, we accept both.
const ES256_ALGORITHM: i64 = -7;
const EC2_KEY_TYPE: i64 = 2;
const P_256_CURVE: i64 = 1;
@@ -497,7 +499,7 @@ impl TryFrom<CoseKey> for ecdh::PubKey {
return Err(Ctap2StatusCode::CTAP2_ERR_UNSUPPORTED_ALGORITHM);
}
let algorithm = read_integer(ok_or_missing(cose_key.0.get(&cbor_int!(3)))?)?;
-if algorithm != ECDH_ALGORITHM {
+if algorithm != ECDH_ALGORITHM && algorithm != ES256_ALGORITHM {
return Err(Ctap2StatusCode::CTAP2_ERR_UNSUPPORTED_ALGORITHM);
}
let curve = read_integer(ok_or_missing(cose_key.0.get(&cbor_int!(-1)))?)?;
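To make the intent of the relaxed check explicit, here is a self-contained sketch of the same logic: accept either the ECDH identifier (-25) or the ES256 identifier (-7). The constants mirror the diff; the function and error type are illustrative stand-ins, not the OpenSK API:

    const ECDH_ALGORITHM: i64 = -25;
    const ES256_ALGORITHM: i64 = -7;

    #[derive(Debug, PartialEq)]
    enum CoseError {
        UnsupportedAlgorithm,
    }

    // Accept both identifiers, mirroring the relaxed condition in the diff.
    fn check_algorithm(algorithm: i64) -> Result<(), CoseError> {
        if algorithm != ECDH_ALGORITHM && algorithm != ES256_ALGORITHM {
            return Err(CoseError::UnsupportedAlgorithm);
        }
        Ok(())
    }

    fn main() {
        assert_eq!(check_algorithm(-25), Ok(()));
        assert_eq!(check_algorithm(-7), Ok(()));
        assert_eq!(check_algorithm(-8), Err(CoseError::UnsupportedAlgorithm));
    }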

View File

@@ -198,6 +198,7 @@ impl PersistentStore {
.insert(StoreEntry {
tag: MASTER_KEYS,
data: &master_keys,
sensitive: true,
})
.unwrap();
}
@@ -206,6 +207,7 @@ impl PersistentStore {
.insert(StoreEntry {
tag: PIN_RETRIES,
data: &[MAX_PIN_RETRIES],
sensitive: false,
})
.unwrap();
}
@@ -245,6 +247,7 @@ impl PersistentStore {
let new_entry = StoreEntry {
tag: TAG_CREDENTIAL,
data: &credential,
sensitive: true,
};
match old_entry {
None => self.store.insert(new_entry)?,
@@ -299,6 +302,7 @@ impl PersistentStore {
.insert(StoreEntry {
tag: GLOBAL_SIGNATURE_COUNTER,
data: &buffer,
sensitive: false,
})
.unwrap();
}
@@ -312,6 +316,7 @@ impl PersistentStore {
StoreEntry {
tag: GLOBAL_SIGNATURE_COUNTER,
data: &buffer,
sensitive: false,
},
)
.unwrap();
@@ -339,6 +344,7 @@ impl PersistentStore {
let entry = StoreEntry {
tag: PIN_HASH,
data: pin_hash,
sensitive: true,
};
match self.store.find_one(&Key::PinHash) {
None => self.store.insert(entry).unwrap(),
@@ -368,6 +374,7 @@ impl PersistentStore {
StoreEntry {
tag: PIN_RETRIES,
data: &[new_value],
sensitive: false,
},
)
.unwrap();
@@ -381,6 +388,7 @@ impl PersistentStore {
StoreEntry {
tag: PIN_RETRIES,
data: &[MAX_PIN_RETRIES],
sensitive: false,
},
)
.unwrap();
@@ -466,9 +474,9 @@ mod test {
let storage = Storage::new(store, options);
let store = embedded_flash::Store::new(storage, Config).unwrap();
// We can replace 3 bytes with minimal overhead.
-assert_eq!(store.replace_len(0), 2 * WORD_SIZE);
+assert_eq!(store.replace_len(false, 0), 2 * WORD_SIZE);
-assert_eq!(store.replace_len(3), 2 * WORD_SIZE);
+assert_eq!(store.replace_len(false, 3), 3 * WORD_SIZE);
-assert_eq!(store.replace_len(4), 3 * WORD_SIZE);
+assert_eq!(store.replace_len(false, 4), 3 * WORD_SIZE);
}
#[test]

View File

@@ -55,6 +55,11 @@ impl ByteGap {
bit + 8 * self.length
}
}
/// Returns the slice of `data` corresponding to the gap.
pub fn slice(self, data: &[u8]) -> &[u8] {
&data[self.start..self.start + self.length]
}
}
/// Returns whether a bit is set in a sequence of bits.
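The new `slice` helper above just views the bytes covered by a gap. A self-contained sketch of the same idea, using a stand-in struct instead of the crate's `ByteGap` (the field names match the diff; the rest is illustrative only):

    // Stand-in for ByteGap: a start offset and a length, both in bytes.
    #[derive(Clone, Copy)]
    struct Gap {
        start: usize,
        length: usize,
    }

    impl Gap {
        // Returns the sub-slice of `data` covered by the gap.
        fn slice(self, data: &[u8]) -> &[u8] {
            &data[self.start..self.start + self.length]
        }
    }

    fn main() {
        let entry = [0xAA, 0x11, 0x22, 0x33, 0xBB];
        let gap = Gap { start: 1, length: 3 };
        assert_eq!(gap.slice(&entry), &[0x11, 0x22, 0x33]);
    }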

View File

@@ -59,6 +59,17 @@ pub struct Format {
/// - 1 for insert entries.
replace_bit: usize,
/// Whether a user entry has sensitive data.
///
/// - 0 for sensitive data.
/// - 1 for non-sensitive data.
///
/// When a user entry with sensitive data is deleted, the data is overwritten with zeroes. This
/// feature is subject to the same guarantees as all other features of the store, in particular
/// deleting a sensitive entry is atomic. See the store module-level documentation for more
/// information.
sensitive_bit: usize,
/// The data length of a user entry.
length_range: bitfield::BitRange,
@@ -138,8 +149,9 @@ impl Format {
let deleted_bit = present_bit + 1;
let internal_bit = deleted_bit + 1;
let replace_bit = internal_bit + 1;
let sensitive_bit = replace_bit + 1;
let length_range = bitfield::BitRange {
-start: replace_bit + 1,
+start: sensitive_bit + 1,
length: byte_bits,
};
let tag_range = bitfield::BitRange {
@@ -182,6 +194,7 @@ impl Format {
deleted_bit,
internal_bit,
replace_bit,
sensitive_bit,
length_range,
tag_range,
replace_page_range,
@@ -196,10 +209,11 @@ impl Format {
// Make sure all the following conditions hold:
// - The page header is one word.
// - The internal entry is one word.
-// - The entry header fits in one word.
+// - The entry header fits in one word (which is equivalent to the entry header size being
+//   exactly one word for sensitive entries).
if format.page_header_size() != word_size
|| format.internal_entry_size() != word_size
-|| format.header_size() > word_size
+|| format.header_size(true) != word_size
{
return None;
}
@@ -220,28 +234,46 @@ impl Format {
/// Returns the entry header length in bytes.
///
/// This is the smallest number of bytes necessary to store all fields of the entry info up to
-/// and including `length`.
+/// and including `length`. For sensitive entries, the result is word-aligned.
-pub fn header_size(&self) -> usize {
+pub fn header_size(&self, sensitive: bool) -> usize {
-self.bits_to_bytes(self.length_range.end())
+let mut size = self.bits_to_bytes(self.length_range.end());
+if sensitive {
+// We need to align to the next word boundary so that wiping the user data will not
+// count as a write to the header.
+size = self.align_word(size);
+}
+size
}
+/// Returns the entry header length in bytes.
+///
+/// This is a convenience function for `header_size` above.
+fn header_offset(&self, entry: &[u8]) -> usize {
+self.header_size(self.is_sensitive(entry))
+}
/// Returns the entry info length in bytes.
///
/// This is the number of bytes necessary to store all fields of the entry info. This also
-/// includes the internal padding to protect the `committed` bit from the `deleted` bit.
+/// includes the internal padding to protect the `committed` bit from the `deleted` bit and to
+/// protect the entry info from the user data for sensitive entries.
-fn info_size(&self, is_replace: IsReplace) -> usize {
+fn info_size(&self, is_replace: IsReplace, sensitive: bool) -> usize {
let suffix_bits = 2; // committed + complete
let info_bits = match is_replace {
IsReplace::Replace => self.replace_byte_range.end() + suffix_bits,
IsReplace::Insert => self.tag_range.end() + suffix_bits,
};
-let info_size = self.bits_to_bytes(info_bits);
+let mut info_size = self.bits_to_bytes(info_bits);
// If the suffix bits would end up in the header, we need to add one byte for them.
-if info_size == self.header_size() {
-info_size + 1
-} else {
-info_size
-}
+let header_size = self.header_size(sensitive);
+if info_size <= header_size {
+info_size = header_size + 1;
+}
+// If the entry is sensitive, we need to align to the next word boundary.
+if sensitive {
+info_size = self.align_word(info_size);
+}
+info_size
}
/// Returns the length in bytes of an entry.
@@ -249,8 +281,8 @@ impl Format {
/// This depends on the length of the user data and whether the entry replaces an old entry or
/// is an insertion. This also includes the internal padding to protect the `committed` bit from
/// the `deleted` bit.
-pub fn entry_size(&self, is_replace: IsReplace, length: usize) -> usize {
+pub fn entry_size(&self, is_replace: IsReplace, sensitive: bool, length: usize) -> usize {
-let mut entry_size = length + self.info_size(is_replace);
+let mut entry_size = length + self.info_size(is_replace, sensitive);
let word_size = self.word_size;
entry_size = self.align_word(entry_size);
// The entry must be at least 2 words such that the `committed` and `deleted` bits are on
@@ -308,6 +340,14 @@ impl Format {
bitfield::set_zero(self.replace_bit, header, bitfield::NO_GAP)
}
pub fn is_sensitive(&self, header: &[u8]) -> bool {
bitfield::is_zero(self.sensitive_bit, header, bitfield::NO_GAP)
}
pub fn set_sensitive(&self, header: &mut [u8]) {
bitfield::set_zero(self.sensitive_bit, header, bitfield::NO_GAP)
}
pub fn get_length(&self, header: &[u8]) -> usize {
bitfield::get_range(self.length_range, header, bitfield::NO_GAP)
}
@@ -317,16 +357,19 @@ impl Format {
}
pub fn get_data<'a>(&self, entry: &'a [u8]) -> &'a [u8] {
-&entry[self.header_size()..][..self.get_length(entry)]
+&entry[self.header_offset(entry)..][..self.get_length(entry)]
}
/// Returns the span of user data in an entry.
///
/// The complement of this gap in the entry is exactly the entry info. The header is before the
/// gap and the footer is after the gap.
-fn entry_gap(&self, entry: &[u8]) -> bitfield::ByteGap {
+pub fn entry_gap(&self, entry: &[u8]) -> bitfield::ByteGap {
-let start = self.header_size();
+let start = self.header_offset(entry);
-let length = self.get_length(entry);
+let mut length = self.get_length(entry);
+if self.is_sensitive(entry) {
+length = self.align_word(length);
+}
bitfield::ByteGap { start, length }
}
@@ -406,16 +449,23 @@ impl Format {
/// Builds an entry for replace or insert operations.
pub fn build_entry(&self, replace: Option<Index>, user_entry: StoreEntry) -> Vec<u8> {
-let StoreEntry { tag, data } = user_entry;
+let StoreEntry {
+tag,
+data,
+sensitive,
+} = user_entry;
let is_replace = match replace {
None => IsReplace::Insert,
Some(_) => IsReplace::Replace,
};
-let entry_len = self.entry_size(is_replace, data.len());
+let entry_len = self.entry_size(is_replace, sensitive, data.len());
let mut entry = Vec::with_capacity(entry_len);
// Build the header.
-entry.resize(self.header_size(), 0xff);
+entry.resize(self.header_size(sensitive), 0xff);
self.set_present(&mut entry[..]);
+if sensitive {
+self.set_sensitive(&mut entry[..]);
+}
self.set_length(&mut entry[..], data.len());
// Add the data.
entry.extend_from_slice(data);
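The word-alignment rule added to `header_size` is the core of the sensitive-entry support: padding a sensitive entry's header to a word boundary guarantees that zeroing the user data later never touches the header word. A small illustrative sketch of that rounding, assuming 4-byte flash words (the real code takes the word size from the underlying storage):

    const WORD_SIZE: usize = 4; // assumption: 4-byte flash words

    // Round `size` up to the next multiple of the word size.
    fn align_word(size: usize) -> usize {
        (size + WORD_SIZE - 1) / WORD_SIZE * WORD_SIZE
    }

    // Sensitive entries get a word-aligned header, as in the diff above.
    fn header_size(raw_header_bytes: usize, sensitive: bool) -> usize {
        if sensitive {
            align_word(raw_header_bytes)
        } else {
            raw_header_bytes
        }
    }

    fn main() {
        // A 2-byte header stays 2 bytes for a normal entry...
        assert_eq!(header_size(2, false), 2);
        // ...but is padded to a full word for a sensitive entry.
        assert_eq!(header_size(2, true), 4);
    }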

View File

@@ -43,6 +43,28 @@
//! The data-structure can be configured with the `StoreConfig` trait. By implementing this trait,
//! the number of possible tags and the association between keys and entries are defined.
//!
//! # Properties
//!
//! The data-structure provides the following properties:
//! - When an operation returns success, then the represented multi-set is updated accordingly. For
//! example, an inserted entry can be found without alteration until replaced or deleted.
//! - When an operation returns an error, the resulting multi-set state is described in the error
//! documentation.
//! - When power is lost before an operation returns, the operation will either succeed or be
//! rolled-back on the next initialization. So the multi-set would be either left unchanged or
//! updated accordingly.
//!
//! Those properties rely on the following assumptions:
//! - Writing a word to flash is atomic. When power is lost, the word is either fully written or not
//! written at all.
//! - Reading a word from flash is deterministic. When power is lost while writing or erasing a word
//! (erasing a page containing that word), reading that word repeatedly returns the same result
//! (until it is written or its page is erased).
//! - To decide whether a page has been erased, it is enough to test if all its bits are equal to 1.
//!
//! The properties may still hold outside those assumptions but with weaker probabilities as the
//! usage diverges from the assumptions.
//!
//! # Implementation
//!
//! The store is a page-aligned sequence of bits. It matches the following grammar:
@@ -57,7 +79,7 @@
//! new_page:page_bits
//! Padding(word)
//! Entry := Header Data Footer
-//! // Let X be the byte following `length` in `Info`.
+//! // Let X be the byte (word-aligned for sensitive queries) following `length` in `Info`.
//! Header := Info[..X] // must fit in one word
//! Footer := Info[X..] // must fit in one word
//! Info :=
@@ -65,6 +87,7 @@
//! deleted:1
//! internal=1
//! replace:1
//! sensitive:1
//! length:byte_bits
//! tag:tag_bits
//! [ // present if `replace` is 0
@@ -109,15 +132,16 @@
//! 0.1 deleted
//! 0.2 internal
//! 0.3 replace
-//! 0.4 length (9 bits)
-//! 1.5 tag (least significant 3 bits out of 5)
+//! 0.4 sensitive
+//! 0.5 length (9 bits)
+//! 1.6 tag (least significant 2 bits out of 5)
//! (the header ends at the first byte boundary after `length`)
//! 2.0 <user data> (2 bytes in this example)
//! (the footer starts immediately after the user data)
-//! 4.0 tag (most significant 2 bits out of 5)
-//! 4.2 replace_page (6 bits)
-//! 5.0 replace_byte (9 bits)
-//! 6.1 padding (make sure the 2 properties below hold)
+//! 4.0 tag (most significant 3 bits out of 5)
+//! 4.3 replace_page (6 bits)
+//! 5.1 replace_byte (9 bits)
+//! 6.2 padding (make sure the 2 properties below hold)
//! 7.6 committed
//! 7.7 complete (on a different word than `present`)
//! 8.0 <end> (word-aligned)
@@ -203,6 +227,11 @@ pub struct StoreEntry<'a> {
/// The data of the entry.
pub data: &'a [u8],
/// Whether the data is sensitive.
///
/// Sensitive data is overwritten with zeroes when the entry is deleted.
pub sensitive: bool,
}
/// Implements a configurable multi-set on top of any storage.
@@ -262,6 +291,7 @@ impl<S: Storage, C: StoreConfig> Store<S, C> {
StoreEntry {
tag: self.format.get_tag(entry),
data: self.format.get_data(entry),
sensitive: self.format.is_sensitive(entry),
},
))
} else {
@@ -326,7 +356,7 @@ impl<S: Storage, C: StoreConfig> Store<S, C> {
self.format.validate_entry(new)?;
let mut old_index = old.index;
// Find a slot.
-let entry_len = self.replace_len(new.data.len());
+let entry_len = self.replace_len(new.sensitive, new.data.len());
let index = self.find_slot_for_write(entry_len, Some(&mut old_index))?;
// Build a new entry replacing the old one.
let entry = self.format.build_entry(Some(old_index), new);
@@ -360,17 +390,20 @@ impl<S: Storage, C: StoreConfig> Store<S, C> {
/// Returns the byte cost of a replace operation.
///
/// Computes the length in bytes that would be used in the storage if a replace operation is
-/// executed provided the data of the new entry has `length` bytes.
+/// executed provided the data of the new entry has `length` bytes and whether this data is
+/// sensitive.
-pub fn replace_len(&self, length: usize) -> usize {
+pub fn replace_len(&self, sensitive: bool, length: usize) -> usize {
-self.format.entry_size(IsReplace::Replace, length)
+self.format
+.entry_size(IsReplace::Replace, sensitive, length)
}
/// Returns the byte cost of an insert operation.
///
/// Computes the length in bytes that would be used in the storage if an insert operation is
-/// executed provided the data of the inserted entry has `length` bytes.
+/// executed provided the data of the inserted entry has `length` bytes and whether this data is
+/// sensitive.
-pub fn insert_len(&self, length: usize) -> usize {
+pub fn insert_len(&self, sensitive: bool, length: usize) -> usize {
-self.format.entry_size(IsReplace::Insert, length)
+self.format.entry_size(IsReplace::Insert, sensitive, length)
}
/// Returns the erase count of all pages.
@@ -410,8 +443,11 @@ impl<S: Storage, C: StoreConfig> Store<S, C> {
let entry_index = index;
let entry = self.read_entry(index);
index.byte += entry.len();
-if !self.format.is_alive(entry) {
+if !self.format.is_present(entry) {
-// Skip deleted entries (or the page padding).
+// Reached the end of the page.
+} else if self.format.is_deleted(entry) {
+// Wipe sensitive data if needed.
+self.wipe_sensitive_data(entry_index);
} else if self.format.is_internal(entry) {
// Finish page compaction.
self.erase_page(entry_index);
@@ -449,6 +485,31 @@ impl<S: Storage, C: StoreConfig> Store<S, C> {
/// The provided index must point to the beginning of an entry.
fn delete_index(&mut self, index: Index) {
self.update_word(index, |format, word| format.set_deleted(word));
self.wipe_sensitive_data(index);
}
/// Wipes the data of a sensitive entry.
///
/// If the entry at the provided index is sensitive, overwrites the data with zeroes. Otherwise,
/// does nothing.
fn wipe_sensitive_data(&mut self, mut index: Index) {
let entry = self.read_entry(index);
debug_assert!(self.format.is_present(entry));
debug_assert!(self.format.is_deleted(entry));
if self.format.is_internal(entry) || !self.format.is_sensitive(entry) {
// No need to wipe the data.
return;
}
let gap = self.format.entry_gap(entry);
let data = gap.slice(entry);
if data.iter().all(|&byte| byte == 0x00) {
// The data is already wiped.
return;
}
index.byte += gap.start;
self.storage
.write_slice(index, &vec![0; gap.length])
.unwrap();
}
/// Finds a page with enough free space.
@@ -555,10 +616,13 @@ impl<S: Storage, C: StoreConfig> Store<S, C> {
} else if self.format.is_internal(first_byte) {
self.format.internal_entry_size()
} else {
-let header = self.read_slice(index, self.format.header_size());
+// We don't know if the entry is sensitive or not, but it doesn't matter here. We just
+// need to read the replace, sensitive, and length fields.
+let header = self.read_slice(index, self.format.header_size(false));
let replace = self.format.is_replace(header);
+let sensitive = self.format.is_sensitive(header);
let length = self.format.get_length(header);
-self.format.entry_size(replace, length)
+self.format.entry_size(replace, sensitive, length)
};
// Truncate the length to fit the page. This can only happen in case of corruption or
// partial writes.
@@ -673,7 +737,7 @@ impl<S: Storage, C: StoreConfig> Store<S, C> {
// Save the old page index and erase count to the new page.
let erase_index = new_index;
let erase_entry = self.format.build_erase_entry(old_page, erase_count);
-self.storage.write_slice(new_index, &erase_entry).unwrap();
+self.write_entry(new_index, &erase_entry);
// Erase the page.
self.erase_page(erase_index);
// Increase generation.
@@ -728,6 +792,25 @@ impl<C: StoreConfig> Store<BufferStorage, C> {
pub fn set_erase_count(&mut self, page: usize, erase_count: usize) {
self.initialize_page(page, erase_count);
}
/// Returns whether all deleted sensitive entries have been wiped.
pub fn deleted_entries_are_wiped(&self) -> bool {
for (_, entry) in Iter::new(self) {
if !self.format.is_present(entry)
|| !self.format.is_deleted(entry)
|| self.format.is_internal(entry)
|| !self.format.is_sensitive(entry)
{
continue;
}
let gap = self.format.entry_gap(entry);
let data = gap.slice(entry);
if !data.iter().all(|&byte| byte == 0x00) {
return false;
}
}
true
}
}
/// Maps an index from an old page to a new page if needed.
@@ -843,7 +926,27 @@ mod tests {
let tag = 0;
let key = 1;
let data = &[key, 2];
-let entry = StoreEntry { tag, data };
+let entry = StoreEntry {
tag,
data,
sensitive: false,
};
store.insert(entry).unwrap();
assert_eq!(store.iter().count(), 1);
assert_eq!(store.find_one(&key).unwrap().1, entry);
}
#[test]
fn insert_sensitive_ok() {
let mut store = new_store();
let tag = 0;
let key = 1;
let data = &[key, 4];
let entry = StoreEntry {
tag,
data,
sensitive: true,
};
store.insert(entry).unwrap();
assert_eq!(store.iter().count(), 1);
assert_eq!(store.find_one(&key).unwrap().1, entry);
@@ -857,6 +960,7 @@ mod tests {
let entry = StoreEntry {
tag,
data: &[key, 2],
sensitive: false,
};
store.insert(entry).unwrap();
assert_eq!(store.find_all(&key).count(), 1);
@@ -866,6 +970,25 @@ mod tests {
assert_eq!(store.iter().count(), 0);
}
#[test]
fn delete_sensitive_ok() {
let mut store = new_store();
let tag = 0;
let key = 1;
let entry = StoreEntry {
tag,
data: &[key, 2],
sensitive: true,
};
store.insert(entry).unwrap();
assert_eq!(store.find_all(&key).count(), 1);
let (index, _) = store.find_one(&key).unwrap();
store.delete(index).unwrap();
assert_eq!(store.find_all(&key).count(), 0);
assert_eq!(store.iter().count(), 0);
assert!(store.deleted_entries_are_wiped());
}
#[test]
fn insert_until_full() {
let mut store = new_store();
@@ -875,6 +998,7 @@ mod tests {
.insert(StoreEntry {
tag,
data: &[key, 0],
sensitive: false,
})
.is_ok()
{
@@ -892,6 +1016,7 @@ mod tests {
.insert(StoreEntry {
tag,
data: &[key, 0],
sensitive: false,
})
.is_ok()
{
@@ -903,6 +1028,7 @@ mod tests {
.insert(StoreEntry {
tag: 0,
data: &[key, 0],
sensitive: false,
})
.unwrap();
for k in 1..=key {
@@ -916,7 +1042,11 @@ mod tests {
let tag = 0;
let key = 1;
let data = &[key, 2];
-let entry = StoreEntry { tag, data };
+let entry = StoreEntry {
tag,
data,
sensitive: false,
};
store.insert(entry).unwrap();
// Reboot the store.
@@ -934,10 +1064,12 @@ mod tests {
let old_entry = StoreEntry {
tag,
data: &[key, 2, 3, 4, 5, 6],
sensitive: false,
};
let new_entry = StoreEntry {
tag,
data: &[key, 7, 8, 9],
sensitive: false,
};
let mut delay = 0;
loop {
@@ -973,6 +1105,7 @@ mod tests {
.insert(StoreEntry {
tag,
data: &[key, 0],
sensitive: false,
})
.is_ok()
{
@@ -983,7 +1116,14 @@ mod tests {
let (index, _) = store.find_one(&1).unwrap();
store.arm_snapshot(delay);
store
-.replace(index, StoreEntry { tag, data: &[1, 1] })
+.replace(
index,
StoreEntry {
tag,
data: &[1, 1],
sensitive: false,
},
)
.unwrap();
let (complete, store) = match store.get_snapshot() {
Err(_) => (true, store.get_storage()),
@@ -995,7 +1135,11 @@ mod tests {
assert_eq!(store.find_all(&k).count(), 1);
assert_eq!(
store.find_one(&k).unwrap().1,
-StoreEntry { tag, data: &[k, 0] }
+StoreEntry {
tag,
data: &[k, 0],
sensitive: false,
}
);
}
assert_eq!(store.find_all(&1).count(), 1);
@@ -1012,7 +1156,11 @@ mod tests {
#[test]
fn invalid_tag() {
let mut store = new_store();
-let entry = StoreEntry { tag: 1, data: &[] };
+let entry = StoreEntry {
tag: 1,
data: &[],
sensitive: false,
};
assert_eq!(store.insert(entry), Err(StoreError::InvalidTag));
}
@@ -1022,6 +1170,7 @@ mod tests {
let entry = StoreEntry {
tag: 0,
data: &[0; PAGE_SIZE],
sensitive: false,
};
assert_eq!(store.insert(entry), Err(StoreError::StoreFull));
}
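One design point in the store changes above is worth spelling out: `wipe_sensitive_data` first checks whether the data is already all zeroes, so replaying the wipe during the next initialization (for example after a power loss) is a harmless no-op and costs no extra flash write. A toy, self-contained sketch of that idempotent wipe (not the actual store code):

    // Overwrite sensitive bytes with zeroes, skipping the write if already wiped.
    fn wipe(data: &mut [u8], writes: &mut usize) {
        if data.iter().all(|&byte| byte == 0x00) {
            return; // already wiped, avoid a redundant flash write
        }
        for byte in data.iter_mut() {
            *byte = 0x00;
        }
        *writes += 1;
    }

    fn main() {
        let mut data = vec![0xde, 0xad, 0xbe, 0xef];
        let mut writes = 0;
        wipe(&mut data, &mut writes); // first call writes zeroes
        wipe(&mut data, &mut writes); // second call is a no-op
        assert!(data.iter().all(|&byte| byte == 0x00));
        assert_eq!(writes, 1);
    }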

View File

@@ -165,6 +165,57 @@ pub fn send_or_recv(buf: &mut [u8; 64]) -> SendOrRecvStatus {
pub fn recv_with_timeout(
buf: &mut [u8; 64],
timeout_delay: Duration<isize>,
) -> Option<SendOrRecvStatus> {
#[cfg(feature = "verbose")]
writeln!(
Console::new(),
"Receiving packet with timeout of {}ms",
timeout_delay.ms(),
)
.unwrap();
let result = recv_with_timeout_detail(buf, timeout_delay);
#[cfg(feature = "verbose")]
{
if let Some(SendOrRecvStatus::Received) = result {
writeln!(Console::new(), "Received packet = {:02x?}", buf as &[u8]).unwrap();
}
}
result
}
// Same as send_or_recv, but with a timeout.
// If the timeout elapses, return None.
pub fn send_or_recv_with_timeout(
buf: &mut [u8; 64],
timeout_delay: Duration<isize>,
) -> Option<SendOrRecvStatus> {
#[cfg(feature = "verbose")]
writeln!(
Console::new(),
"Sending packet with timeout of {}ms = {:02x?}",
timeout_delay.ms(),
buf as &[u8]
)
.unwrap();
let result = send_or_recv_with_timeout_detail(buf, timeout_delay);
#[cfg(feature = "verbose")]
{
if let Some(SendOrRecvStatus::Received) = result {
writeln!(Console::new(), "Received packet = {:02x?}", buf as &[u8]).unwrap();
}
}
result
}
fn recv_with_timeout_detail(
buf: &mut [u8; 64],
timeout_delay: Duration<isize>,
) -> Option<SendOrRecvStatus> {
let result = syscalls::allow(DRIVER_NUMBER, allow_nr::RECEIVE, buf);
if result.is_err() {
@@ -225,7 +276,7 @@ pub fn recv_with_timeout(
// Cancel USB transaction if necessary.
if status.get().is_none() {
-#[cfg(feature = "debug_ctap")]
+#[cfg(feature = "verbose")]
writeln!(Console::new(), "Cancelling USB receive due to timeout").unwrap();
let result_code = unsafe { syscalls::command(DRIVER_NUMBER, command_nr::CANCEL, 0, 0) };
match result_code {
@@ -249,9 +300,7 @@ pub fn recv_with_timeout(
status.get()
}
-// Same as send_or_recv, but with a timeout.
-// If the timeout elapses, return None.
-pub fn send_or_recv_with_timeout(
+fn send_or_recv_with_timeout_detail(
buf: &mut [u8; 64],
timeout_delay: Duration<isize>,
) -> Option<SendOrRecvStatus> {
@@ -317,7 +366,7 @@ pub fn send_or_recv_with_timeout(
// Cancel USB transaction if necessary.
if status.get().is_none() {
-#[cfg(feature = "debug_ctap")]
+#[cfg(feature = "verbose")]
writeln!(Console::new(), "Cancelling USB transaction due to timeout").unwrap();
let result_code = unsafe { syscalls::command(DRIVER_NUMBER, command_nr::CANCEL, 0, 0) };
match result_code {