7 changes: 7 additions & 0 deletions fuzz/Cargo.toml
@@ -67,6 +67,13 @@ test = false
doc = false
bench = false

[[bin]]
name = "prune_value"
path = "fuzz_targets/prune_value.rs"
test = false
doc = false
bench = false

[[bin]]
name = "regression_286"
path = "fuzz_targets/regression_286.rs"
22 changes: 11 additions & 11 deletions fuzz/fuzz_lib/lib.rs
@@ -63,8 +63,8 @@ impl<'f> Extractor<'f> {
}

/// Attempt to yield a type from the fuzzer.
pub fn extract_final_type(&mut self) -> Option<Arc<FinalTy>> {
// We can costruct extremely large types by duplicating Arcs; there
pub fn extract_final_type(&mut self, allow_blowup: bool) -> Option<Arc<FinalTy>> {
// We can construct extremely large types by duplicating Arcs; there
// is no need to have an exponential blowup in the number of tasks.
const MAX_N_TASKS: usize = 300;

@@ -83,7 +83,7 @@ impl<'f> Extractor<'f> {
result_stack.push(FinalTy::unit());
} else {
let is_sum = self.extract_bit()?;
let dupe = task_stack.len() >= MAX_N_TASKS || self.extract_bit()?;
let dupe = allow_blowup && (task_stack.len() >= MAX_N_TASKS || self.extract_bit()?);
task_stack.push(StackElem::Binary { is_sum, dupe });
if !dupe {
task_stack.push(StackElem::NeedType)
@@ -113,7 +113,7 @@ impl<'f> Extractor<'f> {
/// Attempt to yield a value from the fuzzer by constructing a type and then
/// reading a bitstring of that type, in the padded value encoding.
pub fn extract_value_padded(&mut self) -> Option<Value> {
let ty = self.extract_final_type()?;
let ty = self.extract_final_type(false)?;
if ty.bit_width() > 64 * 1024 * 1024 {
// little fuzzing value in producing massive values
return None;
@@ -128,7 +128,7 @@ impl<'f> Extractor<'f> {
/// Attempt to yield a value from the fuzzer by constructing a type and then
/// reading a bitstring of that type, in the compact value encoding.
pub fn extract_value_compact(&mut self) -> Option<Value> {
let ty = self.extract_final_type()?;
let ty = self.extract_final_type(true)?;
if ty.bit_width() > 64 * 1024 * 1024 {
// little fuzzing value in producing massive values
return None;
@@ -184,15 +184,15 @@ impl<'f> Extractor<'f> {
}
StackElem::Left => {
let child = result_stack.pop().unwrap();
let ty = self.extract_final_type()?;
let ty = self.extract_final_type(true)?;
if ty.bit_width() > MAX_TY_WIDTH {
return None;
}
result_stack.push(Value::left(child, ty));
}
StackElem::Right => {
let child = result_stack.pop().unwrap();
let ty = self.extract_final_type()?;
let ty = self.extract_final_type(true)?;
if ty.bit_width() > MAX_TY_WIDTH {
return None;
}
@@ -205,7 +205,7 @@ impl<'f> Extractor<'f> {
}

/// Attempt to yield a type from the fuzzer.
pub fn extract_old_final_type(&mut self) -> Option<Arc<OldFinalTy>> {
pub fn extract_old_final_type(&mut self, allow_blowup: bool) -> Option<Arc<OldFinalTy>> {
// We can costruct extremely large types by duplicating Arcs; there
// is no need to have an exponential blowup in the number of tasks.
const MAX_N_TASKS: usize = 300;
@@ -225,7 +225,7 @@ impl<'f> Extractor<'f> {
result_stack.push(OldFinalTy::unit());
} else {
let is_sum = self.extract_bit()?;
let dupe = task_stack.len() >= MAX_N_TASKS || self.extract_bit()?;
let dupe = allow_blowup && (task_stack.len() >= MAX_N_TASKS || self.extract_bit()?);
task_stack.push(StackElem::Binary { is_sum, dupe });
if !dupe {
task_stack.push(StackElem::NeedType)
@@ -297,15 +297,15 @@ impl<'f> Extractor<'f> {
}
StackElem::Left => {
let child = result_stack.pop().unwrap();
let ty = self.extract_old_final_type()?;
let ty = self.extract_old_final_type(true)?;
if ty.bit_width() > MAX_TY_WIDTH {
return None;
}
result_stack.push(OldValue::left(child, ty));
}
StackElem::Right => {
let child = result_stack.pop().unwrap();
let ty = self.extract_old_final_type()?;
let ty = self.extract_old_final_type(true)?;
if ty.bit_width() > MAX_TY_WIDTH {
return None;
}
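A minimal sketch of why the extractor caps Arc duplication (illustration only, not part of the diff; it assumes Final::two_two_n, Final::product, and bit_width behave as they are used elsewhere in this PR, and that two_two_n(0) is the 1-bit type): pairing a shared subtree with itself doubles the compact bit width at every step while the in-memory structure stays constant-sized, which is why allow_blowup is turned off when extracting types for the padded encoding.

use std::sync::Arc;
use simplicity::types::Final;

fn main() {
    // Start from the 1-bit type 2^(2^0) = 2, as in the regression tests below.
    let mut ty: Arc<Final> = Final::two_two_n(0);
    for _ in 0..20 {
        // A product of a type with itself doubles its bit width, but the
        // shared Arc keeps the structure small in memory.
        ty = Final::product(Arc::clone(&ty), ty);
    }
    // Twenty duplications already yield a million-bit type; the up-to-300
    // duplications permitted by MAX_N_TASKS would be astronomically larger,
    // hence the allow_blowup gate.
    assert_eq!(ty.bit_width(), 1usize << 20);
}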
2 changes: 1 addition & 1 deletion fuzz/fuzz_targets/construct_type.rs
@@ -5,7 +5,7 @@
#[cfg(any(fuzzing, test))]
fn do_test(data: &[u8]) {
let mut extractor = simplicity_fuzz::Extractor::new(data);
let _ = extractor.extract_final_type();
let _ = extractor.extract_final_type(true);
}

#[cfg(fuzzing)]
105 changes: 105 additions & 0 deletions fuzz/fuzz_targets/prune_value.rs
@@ -0,0 +1,105 @@
// SPDX-License-Identifier: CC0-1.0

#![cfg_attr(fuzzing, no_main)]

#[cfg(any(fuzzing, test))]
fn do_test(data: &[u8]) -> Option<()> {
use simplicity::dag::{DagLike, NoSharing};
use simplicity::types::{CompleteBound, Final};

let mut extractor = simplicity_fuzz::Extractor::new(data);

let val = extractor.extract_value_padded()?;
let ty = val.ty();

// Construct a smaller type
let mut stack = vec![];
for node in ty.post_order_iter::<NoSharing>() {
match node.node.bound() {
CompleteBound::Unit => stack.push(Final::unit()),
CompleteBound::Sum(..) => {
let right = stack.pop().unwrap();
let left = stack.pop().unwrap();
stack.push(Final::sum(left, right));
}
CompleteBound::Product(..) => {
let mut right = stack.pop().unwrap();
let mut left = stack.pop().unwrap();
if extractor.extract_bit()? {
left = Final::unit();
}
if extractor.extract_bit()? {
right = Final::unit();
}
stack.push(Final::product(left, right));
}
}
}
let pruned_ty = stack.pop().unwrap();
assert!(stack.is_empty());


// Prune the value
let pruned_val = match val.prune(&pruned_ty) {
Some(val) => val,
None => panic!("Failed to prune value {val} from {ty} to {pruned_ty}"),
};

/*
// If you have a regression you likely want to uncomment these printlns.
println!("Original Value Bits: {:?}", val.iter_padded().collect::<Vec<bool>>());
println!("Original Value: {val}");
println!(" Original Type: {ty}");
println!(" Pruned Value: {pruned_val}");
println!(" Pruned Type: {pruned_ty}");
*/

// Check that pruning made sense by going through the compact bit iterator
// and checking that the pruned value is obtained from the original by
// just deleting bits.
let mut orig_iter = val.iter_compact();
let mut prune_iter = pruned_val.iter_compact();

loop {
match (orig_iter.next(), prune_iter.next()) {
(Some(true), Some(true)) => {},
(Some(false), Some(false)) => {},
(Some(_), Some(prune_bit)) => {
// We get here if the pruned and the original iterator disagree.
// This should happen iff we deleted some bits from the pruned
// value, meaning that we just need to ratchet forward the
// original iterator until we're back on track.
loop {
match orig_iter.next() {
Some(orig_bit) => {
if orig_bit == prune_bit { break }
},
None => panic!("original iterator ran out before pruned iterator did"),
}
}
}
(None, Some(_)) => panic!("original iterator ran out before pruned iterator did"),
(_, None) => break, // done once the pruned iterator runs out
}
}
Some(())
}

#[cfg(fuzzing)]
libfuzzer_sys::fuzz_target!(|data| { let _ = do_test(data); });

#[cfg(not(fuzzing))]
fn main() {}

#[cfg(test)]
mod tests {
use base64::Engine;

#[test]
fn duplicate_crash() {
let data = base64::prelude::BASE64_STANDARD
.decode("Cg==")
.expect("base64 should be valid");
let _ = super::do_test(&data);
}
}
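A standalone sketch of the invariant the loop above checks: the pruned compact bit string must be obtainable from the original by deleting bits only, i.e. it must be a subsequence. The helper below is hypothetical and only mirrors the ratcheting logic of the fuzz target.

fn is_subsequence(original: &[bool], pruned: &[bool]) -> bool {
    let mut orig = original.iter();
    // Greedily advance the original iterator until each pruned bit is matched;
    // `any` consumes bits, so original bits are skipped or matched, never reused.
    pruned.iter().all(|&bit| orig.any(|&o| o == bit))
}

fn main() {
    assert!(is_subsequence(&[true, false, true], &[true, true])); // middle bit deleted
    assert!(!is_subsequence(&[false, false], &[true])); // a bit cannot be invented
}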
36 changes: 36 additions & 0 deletions src/value.rs
@@ -1262,6 +1262,42 @@ mod tests {
let new_v = Value::from_padded_bits(&mut iter, &v.ty).unwrap();
assert_eq!(v, new_v);
}

#[test]
fn prune_regression_337_1() {
// Two values that differ only in padding bits are nonetheless equal
let ty_2x1_opt = Final::sum(Final::unit(), Final::product(Final::two_two_n(0), Final::unit()));

// L(ε) as all zeros
let mut iter = BitIter::new(Some(0b0000_0000u8).into_iter());
let value_1 = Value::from_padded_bits(&mut iter, &ty_2x1_opt).unwrap();
// L(ε) with a one in its padding bit
let mut iter = BitIter::new(Some(0b0100_0000u8).into_iter());
let value_2 = Value::from_padded_bits(&mut iter, &ty_2x1_opt).unwrap();

assert_eq!(value_1, value_2);
}

#[test]
fn prune_regression_337_2() {
let ty_2x1_opt = Final::sum(Final::unit(), Final::product(Final::two_two_n(0), Final::unit()));
let ty_1x1_opt = Final::sum(Final::unit(), Final::product(Final::unit(), Final::unit()));

// Bits [false, true] - first bit is false (left sum), second bit is true (unused padding)
let mut iter = BitIter::new(Some(0b0100_0000u8).into_iter());

// Parse as (2 × 1)? then prune to (1 × 1)?
let value = Value::from_padded_bits(&mut iter, &ty_2x1_opt).unwrap();
let pruned = value.prune(&ty_1x1_opt).unwrap();

// Expected: L(ε) - still in the left (unit) branch
let expected = Value::left(Value::unit(), Final::product(Final::unit(), Final::unit()));

// BUG: This fails because pruning incorrectly returns R((ε,ε)). We first compare string
// serializations since a direct comparison might only test `prune_regression_337_1`.
assert_eq!(pruned.to_string(), expected.to_string());
assert_eq!(pruned, expected);
}
}

#[cfg(bench)]
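A hedged companion sketch for the two regression tests above. Assumptions: the same imports as the surrounding test module, the padded width of (2 × 1)? is two bits (one tag bit plus one bit for the wider right arm, which the left arm reads as padding), and Value equality compares structurally as those tests rely on. It spells out the value that both bit strings in prune_regression_337_1 decode to.

#[test]
fn padded_bit_ignored_sketch() {
    let ty_2x1_opt = Final::sum(
        Final::unit(),
        Final::product(Final::two_two_n(0), Final::unit()),
    );
    // L(ε): the tag bit selects the left (unit) arm, so the decoder reads
    // no data bits and skips one padding bit.
    let expected = Value::left(
        Value::unit(),
        Final::product(Final::two_two_n(0), Final::unit()),
    );
    for byte in [0b0000_0000u8, 0b0100_0000u8] {
        let mut iter = BitIter::new(Some(byte).into_iter());
        let value = Value::from_padded_bits(&mut iter, &ty_2x1_opt).unwrap();
        assert_eq!(value, expected);
    }
}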