Merge branch 'master' into v2_lib
libraries/persistent_store/fuzz/examples/store.rs | 116 (new file)
@@ -0,0 +1,116 @@
// Copyright 2019-2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use fuzz_store::{fuzz, StatKey, Stats};
use std::io::Write;
use std::io::{stdout, Read};
use std::path::Path;

fn usage(program: &str) {
    println!(
        r#"Usage: {} {{ [<artifact_file>] | <corpus_directory> <bucket_predicate>.. }}

If <artifact_file> is not provided, it is read from standard input.

When <bucket_predicate>.. are provided, only runs matching all predicates are shown. The format of
each <bucket_predicate> is <bucket_key>=<bucket_value>."#,
        program
    );
}

fn debug(data: &[u8]) {
    println!("{:02x?}", data);
    fuzz(data, true, None);
}

/// Bucket predicate.
struct Predicate {
    /// Bucket key.
    key: StatKey,

    /// Bucket value.
    value: usize,
}

impl std::str::FromStr for Predicate {
    type Err = String;

    fn from_str(input: &str) -> Result<Self, Self::Err> {
        let predicate: Vec<&str> = input.split('=').collect();
        if predicate.len() != 2 {
            return Err("Predicate should have exactly one equal sign.".to_string());
        }
        let key = predicate[0]
            .parse()
            .map_err(|_| format!("Predicate key `{}` is not recognized.", predicate[0]))?;
        let value: usize = predicate[1]
            .parse()
            .map_err(|_| format!("Predicate value `{}` is not a number.", predicate[1]))?;
        if value != 0 && !value.is_power_of_two() {
            return Err(format!(
                "Predicate value `{}` is not a bucket.",
                predicate[1]
            ));
        }
        Ok(Predicate { key, value })
    }
}

fn analyze(corpus: &Path, predicates: Vec<Predicate>) {
    let mut stats = Stats::default();
    let mut count = 0;
    let total = std::fs::read_dir(corpus).unwrap().count();
    for entry in std::fs::read_dir(corpus).unwrap() {
        let data = std::fs::read(entry.unwrap().path()).unwrap();
        let mut stat = Stats::default();
        fuzz(&data, false, Some(&mut stat));
        if predicates
            .iter()
            .all(|p| stat.get_count(p.key, p.value).is_some())
        {
            stats.merge(&stat);
        }
        count += 1;
        print!("\u{1b}[K{} / {}\r", count, total);
        stdout().flush().unwrap();
    }
    // NOTE: To avoid reloading the corpus each time we want to check a different filter, we can
    // start an interactive loop here taking filters as input and printing the filtered stats. We
    // would keep all individual stats for each run in a vector.
    print!("{}", stats);
}

fn main() {
    let args: Vec<String> = std::env::args().collect();
    // No arguments reads from stdin.
    if args.len() <= 1 {
        let stdin = std::io::stdin();
        let mut data = Vec::new();
        stdin.lock().read_to_end(&mut data).unwrap();
        return debug(&data);
    }
    let path = Path::new(&args[1]);
    // File argument assumes artifact.
    if path.is_file() && args.len() == 2 {
        return debug(&std::fs::read(path).unwrap());
    }
    // Directory argument assumes corpus.
    if path.is_dir() {
        match args[2..].iter().map(|x| x.parse()).collect() {
            Ok(predicates) => return analyze(path, predicates),
            Err(error) => eprintln!("Error: {}", error),
        }
    }
    usage(&args[0]);
}
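As a side note (not part of the commit), here is a minimal sketch of how a <bucket_predicate> argument flows through the FromStr implementation above. The key spelling "PageSize" is an assumption borrowed from the StatKey::PageSize usage later in this diff; the accepted key names are defined by fuzz_store and not shown here.

// Hypothetical usage sketch, assuming it lives next to the code above in the same crate.
fn predicate_examples() {
    // No '=' sign: rejected before any key or value is parsed.
    assert!("PageSize".parse::<Predicate>().is_err());
    // Value is neither 0 nor a power of two: rejected (by the key lookup or the bucket check).
    assert!("PageSize=1000".parse::<Predicate>().is_err());
    // A well-formed predicate; whether the key is accepted depends on
    // fuzz_store::StatKey's FromStr, which is not shown in this diff.
    let _maybe: Result<Predicate, String> = "PageSize=1024".parse();
}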
@@ -133,7 +133,7 @@ impl<'a> Fuzzer<'a> {
             page_size: 1 << self.entropy.read_range(5, 12),
             max_word_writes: 2,
             max_page_erases: self.entropy.read_range(0, 50000),
-            strict_write: true,
+            strict_mode: true,
         };
         let num_pages = self.entropy.read_range(3, 64);
         self.record(StatKey::PageSize, options.page_size);
@@ -156,7 +156,7 @@ impl<'a> Fuzzer<'a> {
             if self.debug {
                 println!("Start with dirty storage.");
             }
-            options.strict_write = false;
+            options.strict_mode = false;
             let storage = BufferStorage::new(storage, options);
             StoreDriver::Off(StoreDriverOff::new_dirty(storage))
         } else if self.entropy.read_bit() {

@@ -23,9 +23,9 @@ use alloc::vec;
 /// for tests and fuzzing, for which it has dedicated functionalities.
 ///
 /// This storage tracks how many times words are written between page erase cycles, how many times
-/// pages are erased, and whether an operation flips bits in the wrong direction (optional).
-/// Operations panic if those conditions are broken. This storage also permits to interrupt
-/// operations for inspection or to corrupt the operation.
+/// pages are erased, and whether an operation flips bits in the wrong direction. Operations panic
+/// if those conditions are broken (optional). This storage also permits to interrupt operations for
+/// inspection or to corrupt the operation.
 #[derive(Clone)]
 pub struct BufferStorage {
     /// Content of the storage.
@@ -59,8 +59,13 @@ pub struct BufferOptions {
     /// How many times a page can be erased.
     pub max_page_erases: usize,
 
-    /// Whether bits cannot be written from 0 to 1.
-    pub strict_write: bool,
+    /// Whether the storage should check the flash invariant.
+    ///
+    /// When set, the following conditions would panic:
+    /// - A bit is written from 0 to 1.
+    /// - A word is written more than `max_word_writes`.
+    /// - A page is erased more than `max_page_erases`.
+    pub strict_mode: bool,
 }
 
 /// Corrupts a slice given actual and expected value.
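A minimal standalone sketch of the guard pattern this option introduces (the type and field names below are invented for illustration, not BufferStorage's): the invariant checks stay in place but only panic when strict_mode is set, which is what the fuzzer relies on when it clears options.strict_mode for dirty storage.

// Illustrative sketch of the strict_mode guard pattern; not the actual BufferStorage code.
struct ErasureCounter {
    erases: usize,
    max_erases: usize,
    strict_mode: bool,
}

impl ErasureCounter {
    fn incr(&mut self) {
        // Enforce the flash invariant only when strict_mode is set; the counter is
        // still updated so statistics remain meaningful in non-strict runs.
        if self.strict_mode {
            assert!(self.erases < self.max_erases);
        }
        self.erases += 1;
    }
}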
@@ -214,7 +219,10 @@ impl BufferStorage {
     ///
     /// Panics if the maximum number of erase cycles per page is reached.
     fn incr_page_erases(&mut self, page: usize) {
-        assert!(self.page_erases[page] < self.max_page_erases());
+        // Check that pages are not erased too many times.
+        if self.options.strict_mode {
+            assert!(self.page_erases[page] < self.max_page_erases());
+        }
         self.page_erases[page] += 1;
         let num_words = self.page_size() / self.word_size();
         for word in 0..num_words {
@@ -252,7 +260,10 @@ impl BufferStorage {
                 continue;
             }
             let word = index / word_size + i;
-            assert!(self.word_writes[word] < self.max_word_writes());
+            // Check that words are not written too many times.
+            if self.options.strict_mode {
+                assert!(self.word_writes[word] < self.max_word_writes());
+            }
             self.word_writes[word] += 1;
         }
     }
@@ -306,8 +317,8 @@ impl Storage for BufferStorage {
         self.interruption.tick(&operation)?;
         // Check and update counters.
         self.incr_word_writes(range.start, value, value);
-        // Check strict write.
-        if self.options.strict_write {
+        // Check that bits are correctly flipped.
+        if self.options.strict_mode {
             for (byte, &val) in range.clone().zip(value.iter()) {
                 assert_eq!(self.storage[byte] & val, val);
             }
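A short worked example of the bit-flip check above (values chosen for illustration): flash writes may only clear bits, so every bit set in the new value must already be set in the stored byte, which is exactly what storage[byte] & val == val verifies.

// Worked example of the `storage[byte] & val == val` invariant (illustrative values).
fn bit_flip_examples() {
    let stored: u8 = 0b1110;
    let ok_write: u8 = 0b1100; // only clears bit 1, so stored & ok_write == ok_write
    let bad_write: u8 = 0b1101; // would flip bit 0 from 0 to 1, so the check fails
    assert_eq!(stored & ok_write, ok_write);
    assert_ne!(stored & bad_write, bad_write);
}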
@@ -472,7 +483,7 @@ mod tests {
             page_size: 16,
             max_word_writes: 2,
             max_page_erases: 3,
-            strict_write: true,
+            strict_mode: true,
         };
         // Those words are decreasing bit patterns. Bits are only changed from 1 to 0 and at least one
         // bit is changed.

@@ -1257,7 +1257,7 @@ mod tests {
             page_size: self.page_size,
             max_word_writes: self.max_word_writes,
             max_page_erases: self.max_page_erases,
-            strict_write: true,
+            strict_mode: true,
         };
         StoreDriverOff::new(options, self.num_pages)
     }