Configure the number of pages in the board

This allows apps to be portable across boards.
Julien Cretin
2021-08-15 22:39:13 +02:00
committed by Julien Cretin
parent c1f2551d0d
commit 2d5fdd1034
12 changed files with 77 additions and 79 deletions
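
In short, apps no longer hard-code NUM_PAGES; the board declares the storage region and the page count is derived from it at runtime. A minimal, self-contained sketch of that direction follows (mock type, not the real persistent_store API; the addresses, sizes, and page count are taken from the hunks below):

// Mock sketch only: MockStorage stands in for the storage handle returned by
// new_storage() in the diffs below; the real API lives in persistent_store.
struct MockStorage {
    num_pages: usize,
}

impl MockStorage {
    // The board declares the region (address 0xC0000, size 0x14000 below);
    // the page count follows from the region size and the flash page size.
    fn from_region(region_size: usize, page_size: usize) -> MockStorage {
        MockStorage {
            num_pages: region_size / page_size,
        }
    }

    fn num_pages(&self) -> usize {
        self.num_pages
    }
}

fn main() {
    // nRF52840 flash pages are 0x1000 bytes (see the test configuration below).
    let storage = MockStorage::from_region(0x14000, 0x1000);
    assert_eq!(storage.num_pages(), 20); // matches the "NUM_PAGES = 20" comments
}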

View File

@@ -66,7 +66,8 @@ static mut PROCESSES: [Option<&'static dyn kernel::procs::ProcessType>; NUM_PROC
 static mut STORAGE_LOCATIONS: [kernel::StorageLocation; 1] = [kernel::StorageLocation {
     address: 0xC0000,
-    size: 0x40000,
+    size: 0x14000, // NUM_PAGES = 20
+    unallocated_size: 0x40000, // MPU limitation
     storage_type: kernel::StorageType::STORE,
 }];

View File

@@ -60,7 +60,8 @@ static mut PROCESSES: [Option<&'static dyn kernel::procs::ProcessType>; NUM_PROC
 static mut STORAGE_LOCATIONS: [kernel::StorageLocation; 1] = [kernel::StorageLocation {
     address: 0xC0000,
-    size: 0x40000,
+    size: 0x14000, // NUM_PAGES = 20
+    unallocated_size: 0x40000, // MPU limitation
     storage_type: kernel::StorageType::STORE,
 }];

View File

@@ -130,7 +130,8 @@ static mut PROCESSES: [Option<&'static dyn kernel::procs::ProcessType>; NUM_PROC
 static mut STORAGE_LOCATIONS: [kernel::StorageLocation; 1] = [kernel::StorageLocation {
     address: 0xC0000,
-    size: 0x40000,
+    size: 0x14000, // NUM_PAGES = 20
+    unallocated_size: 0x40000, // MPU limitation
     storage_type: kernel::StorageType::STORE,
 }];

View File

@@ -94,7 +94,7 @@ SUPPORTED_BOARDS = {
         app_ldscript="nrf52840_layout.ld",
         app_address=0x40000,
         storage_address=0xC0000,
-        storage_size=0x40000,
+        storage_size=0x14000,
         pyocd_target="nrf52840",
         openocd_board="nordic_nrf52840_dongle.cfg",
         openocd_options=[],
@@ -113,7 +113,7 @@ SUPPORTED_BOARDS = {
         app_ldscript="nrf52840_layout.ld",
         app_address=0x40000,
         storage_address=0xC0000,
-        storage_size=0x40000,
+        storage_size=0x14000,
         pyocd_target="nrf52840",
         openocd_board="nordic_nrf52840_dongle.cfg",
         openocd_options=[],
@@ -132,7 +132,7 @@ SUPPORTED_BOARDS = {
         app_ldscript="nrf52840_layout.ld",
         app_address=0x40000,
         storage_address=0xC0000,
-        storage_size=0x40000,
+        storage_size=0x14000,
         pyocd_target="nrf52840",
         openocd_board="nordic_nrf52840_dongle.cfg",
         openocd_options=[],
@@ -151,7 +151,7 @@ SUPPORTED_BOARDS = {
         app_ldscript="nrf52840_layout.ld",
         app_address=0x40000,
         storage_address=0xC0000,
-        storage_size=0x40000,
+        storage_size=0x14000,
         pyocd_target="nrf52840",
         openocd_board="nordic_nrf52840_dongle.cfg",
         openocd_options=[],

View File

@@ -37,10 +37,10 @@ fn is_page_erased(storage: &dyn Storage, page: usize) -> bool {
 fn main() {
     led::get(1).flex_unwrap().on().flex_unwrap(); // red on dongle
-    const NUM_PAGES: usize = 20; // should be at least ctap::storage::NUM_PAGES
-    let mut storage = new_storage(NUM_PAGES);
-    writeln!(Console::new(), "Erase {} pages of storage:", NUM_PAGES).unwrap();
-    for page in 0..NUM_PAGES {
+    let mut storage = new_storage().unwrap();
+    let num_pages = storage.num_pages();
+    writeln!(Console::new(), "Erase {} pages of storage:", num_pages).unwrap();
+    for page in 0..num_pages {
         write!(Console::new(), "- Page {} ", page).unwrap();
         if is_page_erased(&storage, page) {
             writeln!(Console::new(), "skipped (was already erased).").unwrap();

View File

@@ -26,7 +26,7 @@ use libtock_drivers::console::Console;
 use libtock_drivers::timer::{self, Duration, Timer, Timestamp};
 use persistent_store::Store;

-libtock_core::stack_size! {0x800}
+libtock_core::stack_size! {0x2000}

 fn timestamp(timer: &Timer) -> Timestamp<f64> {
     Timestamp::<f64>::from_clock_value(timer.get_current_clock().ok().unwrap())
@@ -40,20 +40,35 @@ fn measure<T>(timer: &Timer, operation: impl FnOnce() -> T) -> (T, Duration<f64>
 }

 // Only use one store at a time.
-unsafe fn boot_store(num_pages: usize, erase: bool) -> Store<Storage> {
-    let mut storage = new_storage(num_pages);
+unsafe fn boot_store(erase: bool) -> Store<Storage> {
+    use persistent_store::Storage;
+    let mut storage = new_storage().unwrap();
+    let num_pages = storage.num_pages();
     if erase {
         for page in 0..num_pages {
-            use persistent_store::Storage;
             storage.erase_page(page).unwrap();
         }
     }
     Store::new(storage).ok().unwrap()
 }

+#[derive(Debug)]
+struct StorageConfig {
+    page_size: usize,
+    num_pages: usize,
+}
+
+fn storage_config() -> StorageConfig {
+    use persistent_store::Storage;
+    let storage = new_storage().unwrap();
+    StorageConfig {
+        page_size: storage.page_size(),
+        num_pages: storage.num_pages(),
+    }
+}
+
 #[derive(Default)]
 struct Stat {
-    num_pages: usize,
     key_increment: usize,
     entry_length: usize, // words
     boot_ms: f64,
@@ -69,7 +84,6 @@ fn compute_latency(
     word_length: usize,
 ) -> Stat {
     let mut stat = Stat {
-        num_pages,
         key_increment,
         entry_length: word_length,
         ..Default::default()
@@ -78,12 +92,12 @@ fn compute_latency(
     let mut console = Console::new();
     writeln!(
         console,
-        "\nLatency for num_pages={} key_increment={} word_length={}.",
-        num_pages, key_increment, word_length
+        "\nLatency for key_increment={} word_length={}.",
+        key_increment, word_length
     )
     .unwrap();

-    let mut store = unsafe { boot_store(num_pages, true) };
+    let mut store = unsafe { boot_store(true) };
     let total_capacity = store.capacity().unwrap().total();
     assert_eq!(store.capacity().unwrap().used(), 0);
     assert_eq!(store.lifetime().unwrap().used(), 0);
@@ -121,7 +135,7 @@ fn compute_latency(
     );

     // Measure latency of boot.
-    let (mut store, time) = measure(&timer, || unsafe { boot_store(num_pages, false) });
+    let (mut store, time) = measure(&timer, || unsafe { boot_store(false) });
     writeln!(console, "Boot: {:.1}ms.", time.ms()).unwrap();
     stat.boot_ms = time.ms();
@@ -150,19 +164,17 @@ fn compute_latency(
 fn main() {
     let mut with_callback = timer::with_callback(|_, _| {});
     let timer = with_callback.init().ok().unwrap();
+    let config = storage_config();

     let mut stats = Vec::new();
-    writeln!(Console::new(), "\nRunning 4 tests...").unwrap();
-    // Those non-overwritten 50 words entries simulate credentials.
-    stats.push(compute_latency(&timer, 3, 1, 50));
-    stats.push(compute_latency(&timer, 20, 1, 50));
-    // Those overwritten 1 word entries simulate counters.
-    stats.push(compute_latency(&timer, 3, 0, 1));
-    stats.push(compute_latency(&timer, 20, 0, 1));
+    writeln!(Console::new(), "\nRunning 2 tests...").unwrap();
+    // Simulate a store full of credentials (of 50 words).
+    stats.push(compute_latency(&timer, config.num_pages, 1, 50));
+    // Simulate a store full of increments of a single counter.
+    stats.push(compute_latency(&timer, config.num_pages, 0, 1));
     writeln!(Console::new(), "\nDone.\n").unwrap();

     const HEADERS: &[&str] = &[
-        "Pages",
         "Overwrite",
         "Length",
         "Boot",
@@ -173,7 +185,6 @@ fn main() {
     let mut matrix = vec![HEADERS.iter().map(|x| x.to_string()).collect()];
     for stat in stats {
         matrix.push(vec![
-            format!("{}", stat.num_pages),
             if stat.key_increment == 0 { "yes" } else { "no" }.to_string(),
             format!("{} words", stat.entry_length),
             format!("{:.1} ms", stat.boot_ms),
@@ -182,14 +193,15 @@ fn main() {
             format!("{:.1} ms", stat.remove_ms),
         ]);
     }
+    writeln!(Console::new(), "Copy to examples/store_latency.rs:\n").unwrap();
+    writeln!(Console::new(), "{:?}", config).unwrap();
     write_matrix(matrix);

-    // Results on nrf52840dk_opensk:
-    // Pages  Overwrite  Length    Boot      Compaction  Insert   Remove
-    // 3      no         50 words  5.3 ms    141.9 ms    8.0 ms   3.3 ms
-    // 20     no         50 words  18.7 ms   148.6 ms    21.0 ms  9.8 ms
-    // 3      yes        1 words   37.8 ms   100.2 ms    11.3 ms  5.5 ms
-    // 20     yes        1 words   336.5 ms  100.3 ms    11.5 ms  5.6 ms
+    // Results for nrf52840dk_opensk:
+    // StorageConfig { page_size: 4096, num_pages: 20 }
+    // Overwrite  Length    Boot      Compaction  Insert   Remove
+    // no         50 words  16.2 ms   143.8 ms    18.3 ms  8.4 ms
+    // yes        1 words   303.8 ms  97.9 ms     9.7 ms   4.7 ms
 }

 fn align(x: &str, n: usize) {

View File

@@ -349,7 +349,7 @@ index 348c746a5..5465c95f4 100644
      }
  }
 diff --git a/kernel/src/process.rs b/kernel/src/process.rs
-index c52754be3..ae6a58341 100644
+index c52754be3..26a7c47d3 100644
 --- a/kernel/src/process.rs
 +++ b/kernel/src/process.rs
 @@ -359,6 +359,15 @@ pub trait ProcessType {
@@ -415,7 +415,7 @@ index c52754be3..ae6a58341 100644
 +            .mpu()
 +            .allocate_region(
 +                storage_location.address as *const u8,
-+                storage_location.size,
++                storage_location.unallocated_size,
 +                storage_location.size,
 +                mpu::Permissions::ReadOnly,
 +                &mut mpu_config,
@@ -439,10 +439,10 @@ index c52754be3..ae6a58341 100644
          // memory space just for kernel and grant state. We need to make
          // sure we allocate enough memory just for that.
 diff --git a/kernel/src/sched.rs b/kernel/src/sched.rs
-index 10626a2e1..8844bc6c3 100644
+index 10626a2e1..61401b04a 100644
 --- a/kernel/src/sched.rs
 +++ b/kernel/src/sched.rs
-@@ -118,6 +118,12 @@ pub enum SchedulingDecision {
+@@ -118,6 +118,13 @@ pub enum SchedulingDecision {
      TrySleep,
  }
@@ -450,12 +450,13 @@ index 10626a2e1..8844bc6c3 100644
 +pub struct StorageLocation {
 +    pub address: usize,
 +    pub size: usize,
++    pub unallocated_size: usize,
 +}
 +
  /// Main object for the kernel. Each board will need to create one.
  pub struct Kernel {
      /// How many "to-do" items exist at any given time. These include
-@@ -127,6 +133,9 @@ pub struct Kernel {
+@@ -127,6 +134,9 @@ pub struct Kernel {
      /// This holds a pointer to the static array of Process pointers.
      processes: &'static [Option<&'static dyn process::ProcessType>],
@@ -465,7 +466,7 @@ index 10626a2e1..8844bc6c3 100644
      /// A counter which keeps track of how many process identifiers have been
      /// created. This is used to create new unique identifiers for processes.
      process_identifier_max: Cell<usize>,
-@@ -170,9 +179,17 @@ pub enum StoppedExecutingReason {
+@@ -170,9 +180,17 @@ pub enum StoppedExecutingReason {
  impl Kernel {
      pub fn new(processes: &'static [Option<&'static dyn process::ProcessType>]) -> Kernel {
@@ -483,7 +484,7 @@ index 10626a2e1..8844bc6c3 100644
          process_identifier_max: Cell::new(0),
          grant_counter: Cell::new(0),
          grants_finalized: Cell::new(false),
-@@ -900,4 +917,8 @@ impl Kernel {
+@@ -900,4 +918,8 @@ impl Kernel {
          (return_reason, time_executed_us)
      }

View File

@@ -31,10 +31,10 @@ index 5465c95f4..e596648f7 100644
      }
  }
 diff --git a/kernel/src/sched.rs b/kernel/src/sched.rs
-index 031159500..0cbfea929 100644
+index 61401b04a..e9a58c018 100644
 --- a/kernel/src/sched.rs
 +++ b/kernel/src/sched.rs
-@@ -118,10 +118,19 @@ pub enum SchedulingDecision {
+@@ -118,11 +118,20 @@ pub enum SchedulingDecision {
      TrySleep,
  }
@@ -50,6 +50,7 @@ index 031159500..0cbfea929 100644
  pub struct StorageLocation {
      pub address: usize,
      pub size: usize,
+     pub unallocated_size: usize,
 +    pub storage_type: StorageType,
  }
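
Taken together, the two kernel patches above leave the board-facing struct looking roughly like the sketch below. The field set is read off the hunks; the derives, doc comments, and the exact variants of StorageType are not shown in this commit, so the enum here is a stub (only the STORE variant named in the board files).

// Rough shape after both kernel patches above; not a verbatim excerpt.
#[allow(non_camel_case_types)]
pub enum StorageType {
    STORE, // referenced as kernel::StorageType::STORE in the board files
}

pub struct StorageLocation {
    pub address: usize,
    // Bytes actually used by the persistent store (0x14000 on the boards above).
    pub size: usize,
    // Extra flash kept available so the MPU can allocate a valid region (the
    // boards set this to 0x40000 with an "MPU limitation" comment); passed to
    // allocate_region as the unallocated size in the process.rs patch above.
    pub unallocated_size: usize,
    pub storage_type: StorageType,
}

fn main() {
    // Mirrors the nRF52840 board entries above.
    let location = StorageLocation {
        address: 0xC0000,
        size: 0x14000,
        unallocated_size: 0x40000,
        storage_type: StorageType::STORE,
    };
    assert_eq!(location.size / 0x1000, 20); // NUM_PAGES = 20
}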

View File

@@ -219,23 +219,10 @@ pub const MAX_RP_IDS_LENGTH: usize = 8;
 ///
 /// - The storage key CREDENTIALS must fit at least this number of credentials.
 ///
-/// This value has implications on the flash lifetime, please see the
-/// documentation for NUM_PAGES below.
-pub const MAX_SUPPORTED_RESIDENT_KEYS: usize = 150;
-
-/// Sets the number of pages used for persistent storage.
-///
-/// The number of pages should be at least 3 and at most what the flash can
-/// hold. There should be no reason to put a small number here, except that the
-/// latency of flash operations is linear in the number of pages. This may
-/// improve in the future. Currently, using 20 pages gives between 20ms and
-/// 240ms per operation. The rule of thumb is between 1ms and 12ms per
-/// additional page.
-///
 /// Limiting the number of resident keys permits to ensure a minimum number of
 /// counter increments.
 /// Let:
-/// - P the number of pages (NUM_PAGES)
+/// - P the number of pages (NUM_PAGES in the board definition)
 /// - K the maximum number of resident keys (MAX_SUPPORTED_RESIDENT_KEYS)
 /// - S the maximum size of a resident key (about 500)
 /// - C the number of erase cycles (10000)
@@ -245,7 +232,7 @@ pub const MAX_SUPPORTED_RESIDENT_KEYS: usize = 150;
 ///
 /// With P=20 and K=150, we have I=2M which is enough for 500 increments per day
 /// for 10 years.
-pub const NUM_PAGES: usize = 20;
+pub const MAX_SUPPORTED_RESIDENT_KEYS: usize = 150;

 #[cfg(test)]
 mod test {
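
The surviving doc comment keeps the claim that I = 2M increments is enough for 500 increments per day over 10 years. The formula that yields I for given P, K, S, C sits outside the shown hunks, so only that final sentence is checked in the small sketch below.

// Sanity check of the "enough for 10 years" arithmetic in the doc comment
// above; the derivation of I = 2M itself is not part of these hunks.
fn main() {
    let increments_per_day: u64 = 500;
    let days: u64 = 365 * 10;
    let claimed_minimum: u64 = 2_000_000; // I = 2M
    assert!(increments_per_day * days <= claimed_minimum); // 1_825_000 <= 2_000_000
}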

View File

@@ -18,7 +18,6 @@ use crate::ctap::client_pin::PIN_AUTH_LENGTH;
 use crate::ctap::customization::{
     DEFAULT_MIN_PIN_LENGTH, DEFAULT_MIN_PIN_LENGTH_RP_IDS, ENFORCE_ALWAYS_UV,
     MAX_LARGE_BLOB_ARRAY_SIZE, MAX_PIN_RETRIES, MAX_RP_IDS_LENGTH, MAX_SUPPORTED_RESIDENT_KEYS,
-    NUM_PAGES,
 };
 use crate::ctap::data_formats::{
     extract_array, extract_text_string, CredentialProtectionPolicy, PublicKeyCredentialSource,
@@ -68,7 +67,7 @@ impl PersistentStore {
     ///
     /// This should be at most one instance of persistent store per program lifetime.
     pub fn new(rng: &mut impl Rng256) -> PersistentStore {
-        let storage = new_storage(NUM_PAGES);
+        let storage = new_storage().ok().unwrap();
         let mut store = PersistentStore {
             store: persistent_store::Store::new(storage).ok().unwrap(),
         };

View File

@@ -28,8 +28,8 @@ mod prod {
     pub type Storage = SyscallStorage;

-    pub fn new_storage(num_pages: usize) -> Storage {
-        Storage::new(num_pages).unwrap()
+    pub fn new_storage() -> persistent_store::StorageResult<Storage> {
+        Storage::new()
     }

     pub type UpgradeLocations = SyscallUpgradeStorage;
@@ -44,9 +44,11 @@ mod test {
     pub type Storage = persistent_store::BufferStorage;

-    pub fn new_storage(num_pages: usize) -> Storage {
+    pub fn new_storage() -> persistent_store::StorageResult<Storage> {
+        // Use the Nordic configuration.
         const PAGE_SIZE: usize = 0x1000;
-        let store = vec![0xff; num_pages * PAGE_SIZE].into_boxed_slice();
+        const NUM_PAGES: usize = 20;
+        let store = vec![0xff; NUM_PAGES * PAGE_SIZE].into_boxed_slice();
         let options = persistent_store::BufferOptions {
             word_size: 4,
             page_size: PAGE_SIZE,
@@ -54,7 +56,7 @@ mod test {
             max_page_erases: 10000,
             strict_mode: true,
         };
-        Storage::new(store, options)
+        Ok(Storage::new(store, options))
     }

     pub type UpgradeLocations = BufferUpgradeStorage;

View File

@@ -134,13 +134,11 @@ impl SyscallStorage {
     /// - The page size is a power of two.
     /// - The page size is a multiple of the word size.
     /// - The storage is page-aligned.
-    ///
-    /// Returns `OutOfBounds` the number of pages does not fit in the storage.
-    pub fn new(mut num_pages: usize) -> StorageResult<SyscallStorage> {
+    pub fn new() -> StorageResult<SyscallStorage> {
         let mut syscall = SyscallStorage {
             word_size: get_info(command_nr::get_info_nr::WORD_SIZE, 0)?,
             page_size: get_info(command_nr::get_info_nr::PAGE_SIZE, 0)?,
-            num_pages,
+            num_pages: 0,
             max_word_writes: get_info(command_nr::get_info_nr::MAX_WORD_WRITES, 0)?,
             max_page_erases: get_info(command_nr::get_info_nr::MAX_PAGE_ERASES, 0)?,
             storage_locations: Vec::new(),
@@ -156,20 +154,15 @@ impl SyscallStorage {
                 continue;
             }
             let storage_ptr = memop(memop_nr::STORAGE_PTR, i)?;
-            let max_storage_len = memop(memop_nr::STORAGE_LEN, i)?;
-            if !syscall.is_page_aligned(storage_ptr) || !syscall.is_page_aligned(max_storage_len) {
+            let storage_len = memop(memop_nr::STORAGE_LEN, i)?;
+            if !syscall.is_page_aligned(storage_ptr) || !syscall.is_page_aligned(storage_len) {
                 return Err(StorageError::CustomError);
             }
-            let storage_len = core::cmp::min(num_pages * syscall.page_size, max_storage_len);
-            num_pages -= storage_len / syscall.page_size;
+            syscall.num_pages += storage_len / syscall.page_size;
             syscall
                 .storage_locations
                 .push(unsafe { core::slice::from_raw_parts(storage_ptr as *mut u8, storage_len) });
         }
-        if num_pages > 0 {
-            // The storage locations don't have enough pages.
-            return Err(StorageError::OutOfBounds);
-        }
         Ok(syscall)
     }
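
The new constructor no longer takes a page count; it derives one by summing the pages of every syscall-reported storage location. A small standalone sketch of that accumulation, using the single location and page size declared by the boards above:

// Standalone sketch of the accumulation done by the new SyscallStorage::new;
// values taken from the board definitions (one 0x14000-byte STORE location,
// 0x1000-byte pages).
fn main() {
    let page_size: usize = 0x1000;
    let storage_lens = [0x14000usize]; // one storage location on these boards
    let num_pages: usize = storage_lens.iter().map(|len| len / page_size).sum();
    assert_eq!(num_pages, 20);
}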