From 34c203be8869e8fc1613c0c293aba3a1f1512732 Mon Sep 17 00:00:00 2001 From: Sean Young Date: Sun, 18 Feb 2024 09:25:53 +0000 Subject: [PATCH 1/4] instruction exerciser --- .github/workflows/main.yml | 1 + tests/exercise_instructions.rs | 501 +++++++++++++++++++++++++++++++++ 2 files changed, 502 insertions(+) create mode 100644 tests/exercise_instructions.rs diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index f098da1a..17caf6f7 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -38,6 +38,7 @@ jobs: export RUSTFLAGS="-D warnings" cargo build --verbose cargo test --verbose + cargo test --test exercise_instructions --verbose shell: bash - name: CLI - Lint run: | diff --git a/tests/exercise_instructions.rs b/tests/exercise_instructions.rs new file mode 100644 index 00000000..ef611261 --- /dev/null +++ b/tests/exercise_instructions.rs @@ -0,0 +1,501 @@ +#![allow(clippy::arithmetic_side_effects)] +#![cfg(all(feature = "jit", not(target_os = "windows"), target_arch = "x86_64"))] +// Copyright 2020 Solana Maintainers +// +// Licensed under the Apache License, Version 2.0 or +// the MIT license , at your option. This file may not be +// copied, modified, or distributed except according to those terms. + +extern crate byteorder; +extern crate libc; +extern crate solana_rbpf; +extern crate test_utils; +extern crate thiserror; + +use rand::{rngs::SmallRng, RngCore, SeedableRng}; +use solana_rbpf::{ + assembler::assemble, + ebpf, + memory_region::MemoryRegion, + program::{BuiltinFunction, BuiltinProgram, FunctionRegistry}, + static_analysis::Analysis, + verifier::RequisiteVerifier, + vm::{Config, ContextObject, TestContextObject}, +}; +use std::sync::Arc; +use test_utils::create_vm; + +macro_rules! 
test_interpreter_and_jit { (register, $function_registry:expr, $location:expr => $syscall_function:expr) => { $function_registry .register_function_hashed($location.as_bytes(), $syscall_function) .unwrap(); }; ($executable:expr, $mem:tt, $context_object:expr $(,)?) => { let expected_instruction_count = $context_object.get_remaining(); #[allow(unused_mut)] let mut context_object = $context_object; $executable.verify::<RequisiteVerifier>().unwrap(); let ( instruction_count_interpreter, interpreter_final_pc, _tracer_interpreter, interpreter_result, interpreter_mem, ) = { let mut mem = $mem.clone(); let mem_region = MemoryRegion::new_writable(&mut mem, ebpf::MM_INPUT_START); let mut context_object = context_object.clone(); create_vm!( vm, &$executable, &mut context_object, stack, heap, vec![mem_region], None ); let (instruction_count_interpreter, result) = vm.execute_program(&$executable, true); ( instruction_count_interpreter, vm.registers[11], vm.context_object_pointer.clone(), result.unwrap(), mem, ) }; #[cfg(all(not(windows), target_arch = "x86_64"))] { #[allow(unused_mut)] $executable.jit_compile().unwrap(); let mut mem = $mem; let mem_region = MemoryRegion::new_writable(&mut mem, ebpf::MM_INPUT_START); create_vm!( vm, &$executable, &mut context_object, stack, heap, vec![mem_region], None ); let (instruction_count_jit, result) = vm.execute_program(&$executable, false); let tracer_jit = &vm.context_object_pointer; if !TestContextObject::compare_trace_log(&_tracer_interpreter, tracer_jit) { let analysis = Analysis::from_executable(&$executable).unwrap(); let stdout = std::io::stdout(); analysis .disassemble_trace_log(&mut stdout.lock(), &_tracer_interpreter.trace_log) .unwrap(); analysis .disassemble_trace_log(&mut stdout.lock(), &tracer_jit.trace_log) .unwrap(); panic!(); } assert_eq!( result.unwrap(), interpreter_result, "Unexpected result for 
JIT" ); assert_eq!( instruction_count_interpreter, instruction_count_jit, "Interpreter and JIT instruction meter diverged", ); assert_eq!( interpreter_final_pc, vm.registers[11], "Interpreter and JIT instruction final PC diverged", ); assert_eq!(interpreter_mem, mem, "Interpreter and JIT memory diverged",); } if $executable.get_config().enable_instruction_meter { assert_eq!( instruction_count_interpreter, expected_instruction_count, "Instruction meter did not consume expected amount" ); } }; } macro_rules! test_interpreter_and_jit_asm { ($source:expr, $config:expr, $mem:tt, ($($location:expr => $syscall_function:expr),* $(,)?), $context_object:expr $(,)?) => { #[allow(unused_mut)] { let mut config = $config; config.enable_instruction_tracing = true; let mut function_registry = FunctionRegistry::<BuiltinFunction<TestContextObject>>::default(); $(test_interpreter_and_jit!(register, function_registry, $location => $syscall_function);)* let loader = Arc::new(BuiltinProgram::new_loader(config, function_registry)); let mut executable = assemble($source, loader).unwrap(); test_interpreter_and_jit!(executable, $mem, $context_object); } }; ($source:expr, $mem:tt, ($($location:expr => $syscall_function:expr),* $(,)?), $context_object:expr $(,)?) 
=> { + #[allow(unused_mut)] + { + test_interpreter_and_jit_asm!($source, Config::default(), $mem, ($($location => $syscall_function),*), $context_object); + } + }; +} + +// BPF_ALU : Arithmetic and Logic +#[test] +fn fuzz_alu() { + let seed = 0xC2DB2F8F282284A0; + let mut prng = SmallRng::seed_from_u64(seed); + + for src in 0..10 { + for dst in 0..10 { + for _ in 0..10 { + test_ins(format!("mov64 r{src}, r{dst}"), &mut prng, 21); + test_ins(format!("add64 r{src}, r{dst}"), &mut prng, 21); + test_ins(format!("sub64 r{src}, r{dst}"), &mut prng, 21); + test_ins(format!("xor64 r{src}, r{dst}"), &mut prng, 21); + test_ins(format!("and64 r{src}, r{dst}"), &mut prng, 21); + test_ins(format!("lmul64 r{src}, r{dst}"), &mut prng, 21); + test_ins(format!("uhmul64 r{src}, r{dst}"), &mut prng, 21); + test_ins(format!("shmul64 r{src}, r{dst}"), &mut prng, 21); + test_ins(format!("udiv64 r{src}, r{dst}"), &mut prng, 21); + test_ins(format!("urem64 r{src}, r{dst}"), &mut prng, 21); + test_ins(format!("sdiv64 r{src}, r{dst}"), &mut prng, 21); + + test_ins(format!("lsh64 r{src}, r{dst}"), &mut prng, 21); + test_ins(format!("rsh64 r{src}, r{dst}"), &mut prng, 21); + test_ins(format!("arsh64 r{src}, r{dst}"), &mut prng, 21); + + test_ins(format!("mov32 r{src}, r{dst}"), &mut prng, 21); + test_ins(format!("add32 r{src}, r{dst}"), &mut prng, 21); + test_ins(format!("sub32 r{src}, r{dst}"), &mut prng, 21); + test_ins(format!("xor32 r{src}, r{dst}"), &mut prng, 21); + test_ins(format!("and32 r{src}, r{dst}"), &mut prng, 21); + test_ins(format!("lmul32 r{src}, r{dst}"), &mut prng, 21); + // test_ins(format!("uhmul32 r{src}, r{dst}"), &mut p, 21rng); + // test_ins(format!("shmul32 r{src}, r{dst}"), &mut p, 21rng); + test_ins(format!("udiv32 r{src}, r{dst}"), &mut prng, 21); + test_ins(format!("urem32 r{src}, r{dst}"), &mut prng, 21); + test_ins(format!("sdiv32 r{src}, r{dst}"), &mut prng, 21); + + test_ins(format!("lsh32 r{src}, r{dst}"), &mut prng, 21); + test_ins(format!("rsh32 r{src}, 
r{dst}"), &mut prng, 21); + test_ins(format!("arsh32 r{src}, r{dst}"), &mut prng, 21); + + // test load, store + let rand = prng.next_u32() as i64; + let offset = prng.next_u32() as i16; + let addr = rand % 80 + 0x4_0000_0000i64 - offset as i64; + let mut tmp = (src + 1) % 10; + if dst == tmp { + tmp = (src + 2) % 10; + } + + test_ins( + format!( + "mov32 r{tmp},{} + mov64 r{src},{:#x} + lsh64 r{src},32 + or64 r{src},r{tmp} + ldxb r{dst}, [r{src}{offset:+}]", + addr as i32, + addr >> 32, + ), + &mut prng, + 25, + ); + + test_ins( + format!( + "mov32 r{tmp},{} + mov64 r{src},{:#x} + lsh64 r{src},32 + or64 r{src},r{tmp} + stxb [r{src}{offset:+}], r{dst}", + addr as i32, + addr >> 32, + ), + &mut prng, + 25, + ); + + let addr = rand % 79 + 0x4_0000_0000i64 - offset as i64; + + test_ins( + format!( + "mov32 r{tmp},{} + mov64 r{src},{:#x} + lsh64 r{src},32 + or64 r{src},r{tmp} + ldxh r{dst}, [r{src}{offset:+}]", + addr as i32, + addr >> 32, + ), + &mut prng, + 25, + ); + + test_ins( + format!( + "mov32 r{tmp},{} + mov64 r{src},{:#x} + lsh64 r{src},32 + or64 r{src},r{tmp} + stxh [r{src}{offset:+}], r{dst}", + addr as i32, + addr >> 32, + ), + &mut prng, + 25, + ); + + let addr = rand % 77 + 0x4_0000_0000i64 - offset as i64; + test_ins( + format!( + "mov32 r{tmp},{} + mov64 r{src},{:#x} + lsh64 r{src},32 + or64 r{src},r{tmp} + ldxw r{dst}, [r{src}{offset:+}]", + addr as i32, + addr >> 32, + ), + &mut prng, + 25, + ); + test_ins( + format!( + "mov32 r{tmp},{} + mov64 r{src},{:#x} + lsh64 r{src},32 + or64 r{src},r{tmp} + stxw [r{src}{offset:+}], r{dst}", + addr as i32, + addr >> 32, + ), + &mut prng, + 25, + ); + + let addr = rand % 73 + 0x4_0000_0000i64 - offset as i64; + + test_ins( + format!( + "mov32 r{tmp},{} + mov64 r{src},{:#x} + lsh64 r{src},32 + or64 r{src},r{tmp} + ldxdw r{dst}, [r{src}{offset:+}]", + addr as i32, + addr >> 32, + ), + &mut prng, + 25, + ); + test_ins( + format!( + "mov32 r{tmp},{} + mov64 r{src},{:#x} + lsh64 r{src},32 + or64 r{src},r{tmp} + stxdw 
[r{src}{offset:+}], r{dst}", + addr as i32, + addr >> 32, + ), + &mut prng, + 25, + ); + + // test conditionals + for jc in [ + "jeq", "jgt", "jge", "jlt", "jle", "jset", "jne", "jsgt", "jsge", "jslt", + "jsle", + ] { + test_ins( + format!( + "{jc} r{src}, r{dst}, l1 + or64 r{src},0x12345678 + ja l2 + l1: + and64 r{dst},0x12345678 + ja l2 + l2:", + ), + &mut prng, + 23, + ); + } + } + } + + for _ in 0..10 { + let mut imm = prng.next_u32() as i32; + + test_ins(format!("mov64 r{src}, {imm}"), &mut prng, 21); + test_ins(format!("add64 r{src}, {imm}"), &mut prng, 21); + test_ins(format!("sub64 r{src}, {imm}"), &mut prng, 21); + test_ins(format!("xor64 r{src}, {imm}"), &mut prng, 21); + test_ins(format!("and64 r{src}, {imm}"), &mut prng, 21); + test_ins(format!("lmul64 r{src}, {imm}"), &mut prng, 21); + test_ins(format!("uhmul64 r{src}, {imm}"), &mut prng, 21); + test_ins(format!("shmul64 r{src}, {imm}"), &mut prng, 21); + test_ins(format!("udiv64 r{src}, {imm}"), &mut prng, 21); + test_ins(format!("urem64 r{src}, {imm}"), &mut prng, 21); + test_ins(format!("sdiv64 r{src}, {imm}"), &mut prng, 21); + test_ins(format!("srem64 r{src}, {imm}"), &mut prng, 21); + + test_ins(format!("mov32 r{src}, {imm}"), &mut prng, 21); + test_ins(format!("add32 r{src}, {imm}"), &mut prng, 21); + test_ins(format!("sub32 r{src}, {imm}"), &mut prng, 21); + test_ins(format!("xor32 r{src}, {imm}"), &mut prng, 21); + test_ins(format!("and32 r{src}, {imm}"), &mut prng, 21); + test_ins(format!("lmul32 r{src}, {imm}"), &mut prng, 21); + test_ins(format!("udiv32 r{src}, {imm}"), &mut prng, 21); + test_ins(format!("urem32 r{src}, {imm}"), &mut prng, 21); + test_ins(format!("sdiv32 r{src}, {imm}"), &mut prng, 21); + test_ins(format!("srem32 r{src}, {imm}"), &mut prng, 21); + + // test st imm + let rand = prng.next_u32() as i64; + let offset = prng.next_u32() as i16; + let tmp = (src + 1) % 10; + + let addr = rand % 80 + 0x4_0000_0000i64 - offset as i64; + test_ins( + format!( + "mov32 r{tmp},{} + 
mov64 r{src},{:#x} + lsh64 r{src},32 + or64 r{src},r{tmp} + stb [r{src}{offset:+}], {}", + addr as i32, + addr >> 32, + imm as i8, + ), + &mut prng, + 25, + ); + + let addr = rand % 79 + 0x4_0000_0000i64 - offset as i64; + test_ins( + format!( + "mov32 r{tmp},{} + mov64 r{src},{:#x} + lsh64 r{src},32 + or64 r{src},r{tmp} + sth [r{src}{offset:+}], {}", + addr as i32, + addr >> 32, + imm as i16 + ), + &mut prng, + 25, + ); + + let addr = rand % 77 + 0x4_0000_0000i64 - offset as i64; + test_ins( + format!( + "mov32 r{tmp},{} + mov64 r{src},{:#x} + lsh64 r{src},32 + or64 r{src},r{tmp} + stw [r{src}{offset:+}], {imm}", + addr as i32, + addr >> 32, + ), + &mut prng, + 25, + ); + + let addr = rand % 73 + 0x4_0000_0000i64 - offset as i64; + test_ins( + format!( + "mov32 r{tmp},{} + mov64 r{src},{:#x} + lsh64 r{src},32 + or64 r{src},r{tmp} + stdw [r{src}{offset:+}], {imm}", + addr as i32, + addr >> 32, + ), + &mut prng, + 25, + ); + + // unconditional jump + test_ins( + format!( + "ja 1 + xor64 r{src},0x12345678 + 1:", + ), + &mut prng, + 21, + ); + + for jc in [ + "jeq", "jgt", "jge", "jlt", "jle", "jset", "jne", "jsgt", "jsge", "jslt", "jsle", + ] { + test_ins( + format!( + "{jc} r{src}, {imm}, l1 + or64 r{src},0x12345678 + ja l2 + l1: + and64 r{src},0x12345678 + ja l2 + l2:", + ), + &mut prng, + 23, + ); + } + + imm &= 63; + + test_ins(format!("lsh64 r{src}, {imm}"), &mut prng, 21); + test_ins(format!("rsh64 r{src}, {imm}"), &mut prng, 21); + test_ins(format!("arsh64 r{src}, {imm}"), &mut prng, 21); + test_ins(format!("hor64 r{src}, {imm}"), &mut prng, 21); + + imm &= 31; + + test_ins(format!("lsh32 r{src}, {imm}"), &mut prng, 21); + test_ins(format!("rsh32 r{src}, {imm}"), &mut prng, 21); + test_ins(format!("arsh32 r{src}, {imm}"), &mut prng, 21); + + test_ins(format!("be64 r{src}"), &mut prng, 21); + test_ins(format!("be32 r{src}"), &mut prng, 21); + test_ins(format!("be16 r{src}"), &mut prng, 21); + } + } +} + +fn test_ins(ins: String, prng: &mut SmallRng, cu: u64) { 
+ let mut input = [0u8; 80]; + + prng.fill_bytes(&mut input); + + let asm = format!( + " + ldxdw r9, [r1+72] + ldxdw r8, [r1+64] + ldxdw r7, [r1+56] + ldxdw r6, [r1+48] + ldxdw r5, [r1+40] + ldxdw r4, [r1+32] + ldxdw r3, [r1+24] + ldxdw r2, [r1+16] + ldxdw r0, [r1+0] + ldxdw r1, [r1+8] + {ins} + xor64 r0, r1 + xor64 r0, r2 + xor64 r0, r3 + xor64 r0, r4 + xor64 r0, r5 + xor64 r0, r6 + xor64 r0, r7 + xor64 r0, r8 + xor64 r0, r9 + exit" + ); + + test_interpreter_and_jit_asm!(asm.as_str(), input, (), TestContextObject::new(cu)); +} From 87ab5b9d14ef067c33dcb28541a9ab69388c4a7f Mon Sep 17 00:00:00 2001 From: Sean Young Date: Mon, 8 Apr 2024 16:44:14 +0100 Subject: [PATCH 2/4] Appease clippy nightly --- src/static_analysis.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/static_analysis.rs b/src/static_analysis.rs index 6bea90a1..aa5d0ae3 100644 --- a/src/static_analysis.rs +++ b/src/static_analysis.rs @@ -180,7 +180,9 @@ impl<'a> Analysis<'a> { } let mut result = Self { // Removes the generic ContextObject which is safe because we are not going to execute the program - executable: unsafe { std::mem::transmute(executable) }, + executable: unsafe { + std::mem::transmute::<&Executable<C>, &Executable<TestContextObject>>(executable) + }, instructions, functions, cfg_nodes: BTreeMap::new(), From 9a97d3c7d5f8cb1b6e55c09b3ac6166afcf17db1 Mon Sep 17 00:00:00 2001 From: Sean Young Date: Mon, 8 Apr 2024 18:09:04 +0100 Subject: [PATCH 3/4] Add missing or and all signed/unsigned arithmetic --- tests/exercise_instructions.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tests/exercise_instructions.rs b/tests/exercise_instructions.rs index ef611261..0196717d 100644 --- a/tests/exercise_instructions.rs +++ b/tests/exercise_instructions.rs @@ -149,6 +149,7 @@ fn fuzz_alu() { test_ins(format!("mov64 r{src}, r{dst}"), &mut prng, 21); test_ins(format!("add64 r{src}, r{dst}"), &mut prng, 21); test_ins(format!("sub64 r{src}, r{dst}"), &mut prng, 21); + 
test_ins(format!("or64 r{src}, r{dst}"), &mut prng, 21); test_ins(format!("xor64 r{src}, r{dst}"), &mut prng, 21); test_ins(format!("and64 r{src}, r{dst}"), &mut prng, 21); test_ins(format!("lmul64 r{src}, r{dst}"), &mut prng, 21); @@ -156,7 +157,9 @@ fn fuzz_alu() { test_ins(format!("shmul64 r{src}, r{dst}"), &mut prng, 21); test_ins(format!("udiv64 r{src}, r{dst}"), &mut prng, 21); test_ins(format!("urem64 r{src}, r{dst}"), &mut prng, 21); + test_ins(format!("srem64 r{src}, r{dst}"), &mut prng, 21); test_ins(format!("sdiv64 r{src}, r{dst}"), &mut prng, 21); + test_ins(format!("udiv64 r{src}, r{dst}"), &mut prng, 21); test_ins(format!("lsh64 r{src}, r{dst}"), &mut prng, 21); test_ins(format!("rsh64 r{src}, r{dst}"), &mut prng, 21); @@ -165,14 +168,16 @@ fn fuzz_alu() { test_ins(format!("mov32 r{src}, r{dst}"), &mut prng, 21); test_ins(format!("add32 r{src}, r{dst}"), &mut prng, 21); test_ins(format!("sub32 r{src}, r{dst}"), &mut prng, 21); + test_ins(format!("or32 r{src}, r{dst}"), &mut prng, 21); test_ins(format!("xor32 r{src}, r{dst}"), &mut prng, 21); test_ins(format!("and32 r{src}, r{dst}"), &mut prng, 21); test_ins(format!("lmul32 r{src}, r{dst}"), &mut prng, 21); // test_ins(format!("uhmul32 r{src}, r{dst}"), &mut p, 21rng); // test_ins(format!("shmul32 r{src}, r{dst}"), &mut p, 21rng); test_ins(format!("udiv32 r{src}, r{dst}"), &mut prng, 21); - test_ins(format!("urem32 r{src}, r{dst}"), &mut prng, 21); test_ins(format!("sdiv32 r{src}, r{dst}"), &mut prng, 21); + test_ins(format!("srem32 r{src}, r{dst}"), &mut prng, 21); + test_ins(format!("urem32 r{src}, r{dst}"), &mut prng, 21); test_ins(format!("lsh32 r{src}, r{dst}"), &mut prng, 21); test_ins(format!("rsh32 r{src}, r{dst}"), &mut prng, 21); From 3d0cae3293b2718bd1b43936ec2369764cb5ba5e Mon Sep 17 00:00:00 2001 From: Sean Young Date: Tue, 9 Apr 2024 09:47:08 +0100 Subject: [PATCH 4/4] Add missing sbfv1 instructions Signed-off-by: Sean Young --- tests/exercise_instructions.rs | 190 
++++++++++++++++++++------------- 1 file changed, 113 insertions(+), 77 deletions(-) diff --git a/tests/exercise_instructions.rs b/tests/exercise_instructions.rs index 0196717d..78532f48 100644 --- a/tests/exercise_instructions.rs +++ b/tests/exercise_instructions.rs @@ -129,12 +129,6 @@ macro_rules! test_interpreter_and_jit_asm { test_interpreter_and_jit!(executable, $mem, $context_object); } }; - ($source:expr, $mem:tt, ($($location:expr => $syscall_function:expr),* $(,)?), $context_object:expr $(,)?) => { - #[allow(unused_mut)] - { - test_interpreter_and_jit_asm!($source, Config::default(), $mem, ($($location => $syscall_function),*), $context_object); - } - }; } // BPF_ALU : Arithmetic and Logic @@ -146,42 +140,49 @@ fn fuzz_alu() { for src in 0..10 { for dst in 0..10 { for _ in 0..10 { - test_ins(format!("mov64 r{src}, r{dst}"), &mut prng, 21); - test_ins(format!("add64 r{src}, r{dst}"), &mut prng, 21); - test_ins(format!("sub64 r{src}, r{dst}"), &mut prng, 21); - test_ins(format!("or64 r{src}, r{dst}"), &mut prng, 21); - test_ins(format!("xor64 r{src}, r{dst}"), &mut prng, 21); - test_ins(format!("and64 r{src}, r{dst}"), &mut prng, 21); - test_ins(format!("lmul64 r{src}, r{dst}"), &mut prng, 21); - test_ins(format!("uhmul64 r{src}, r{dst}"), &mut prng, 21); - test_ins(format!("shmul64 r{src}, r{dst}"), &mut prng, 21); - test_ins(format!("udiv64 r{src}, r{dst}"), &mut prng, 21); - test_ins(format!("urem64 r{src}, r{dst}"), &mut prng, 21); - test_ins(format!("srem64 r{src}, r{dst}"), &mut prng, 21); - test_ins(format!("sdiv64 r{src}, r{dst}"), &mut prng, 21); - test_ins(format!("udiv64 r{src}, r{dst}"), &mut prng, 21); - - test_ins(format!("lsh64 r{src}, r{dst}"), &mut prng, 21); - test_ins(format!("rsh64 r{src}, r{dst}"), &mut prng, 21); - test_ins(format!("arsh64 r{src}, r{dst}"), &mut prng, 21); - - test_ins(format!("mov32 r{src}, r{dst}"), &mut prng, 21); - test_ins(format!("add32 r{src}, r{dst}"), &mut prng, 21); - test_ins(format!("sub32 r{src}, r{dst}"), 
&mut prng, 21); - test_ins(format!("or32 r{src}, r{dst}"), &mut prng, 21); - test_ins(format!("xor32 r{src}, r{dst}"), &mut prng, 21); - test_ins(format!("and32 r{src}, r{dst}"), &mut prng, 21); - test_ins(format!("lmul32 r{src}, r{dst}"), &mut prng, 21); + test_ins(false, format!("mov64 r{src}, r{dst}"), &mut prng, 21); + test_ins(false, format!("add64 r{src}, r{dst}"), &mut prng, 21); + test_ins(false, format!("sub64 r{src}, r{dst}"), &mut prng, 21); + test_ins(false, format!("or64 r{src}, r{dst}"), &mut prng, 21); + test_ins(false, format!("xor64 r{src}, r{dst}"), &mut prng, 21); + test_ins(false, format!("and64 r{src}, r{dst}"), &mut prng, 21); + test_ins(false, format!("lmul64 r{src}, r{dst}"), &mut prng, 21); + test_ins(false, format!("uhmul64 r{src}, r{dst}"), &mut prng, 21); + test_ins(false, format!("shmul64 r{src}, r{dst}"), &mut prng, 21); + test_ins(false, format!("udiv64 r{src}, r{dst}"), &mut prng, 21); + test_ins(false, format!("urem64 r{src}, r{dst}"), &mut prng, 21); + test_ins(false, format!("srem64 r{src}, r{dst}"), &mut prng, 21); + test_ins(false, format!("sdiv64 r{src}, r{dst}"), &mut prng, 21); + test_ins(false, format!("udiv64 r{src}, r{dst}"), &mut prng, 21); + + test_ins(false, format!("lsh64 r{src}, r{dst}"), &mut prng, 21); + test_ins(false, format!("rsh64 r{src}, r{dst}"), &mut prng, 21); + test_ins(false, format!("arsh64 r{src}, r{dst}"), &mut prng, 21); + + test_ins(false, format!("mov32 r{src}, r{dst}"), &mut prng, 21); + test_ins(false, format!("add32 r{src}, r{dst}"), &mut prng, 21); + test_ins(false, format!("sub32 r{src}, r{dst}"), &mut prng, 21); + test_ins(false, format!("or32 r{src}, r{dst}"), &mut prng, 21); + test_ins(false, format!("xor32 r{src}, r{dst}"), &mut prng, 21); + test_ins(false, format!("and32 r{src}, r{dst}"), &mut prng, 21); + test_ins(false, format!("lmul32 r{src}, r{dst}"), &mut prng, 21); // test_ins(format!("uhmul32 r{src}, r{dst}"), &mut p, 21rng); // test_ins(format!("shmul32 r{src}, r{dst}"), &mut p, 
21rng); - test_ins(format!("udiv32 r{src}, r{dst}"), &mut prng, 21); - test_ins(format!("sdiv32 r{src}, r{dst}"), &mut prng, 21); - test_ins(format!("srem32 r{src}, r{dst}"), &mut prng, 21); - test_ins(format!("urem32 r{src}, r{dst}"), &mut prng, 21); - - test_ins(format!("lsh32 r{src}, r{dst}"), &mut prng, 21); - test_ins(format!("rsh32 r{src}, r{dst}"), &mut prng, 21); - test_ins(format!("arsh32 r{src}, r{dst}"), &mut prng, 21); + test_ins(false, format!("udiv32 r{src}, r{dst}"), &mut prng, 21); + test_ins(false, format!("sdiv32 r{src}, r{dst}"), &mut prng, 21); + test_ins(false, format!("srem32 r{src}, r{dst}"), &mut prng, 21); + test_ins(false, format!("urem32 r{src}, r{dst}"), &mut prng, 21); + + test_ins(false, format!("lsh32 r{src}, r{dst}"), &mut prng, 21); + test_ins(false, format!("rsh32 r{src}, r{dst}"), &mut prng, 21); + test_ins(false, format!("arsh32 r{src}, r{dst}"), &mut prng, 21); + + test_ins(true, format!("mul64 r{src}, r{dst}"), &mut prng, 21); + test_ins(true, format!("mod64 r{src}, r{dst}"), &mut prng, 21); + test_ins(true, format!("div64 r{src}, r{dst}"), &mut prng, 21); + test_ins(true, format!("mul32 r{src}, r{dst}"), &mut prng, 21); + test_ins(true, format!("mod32 r{src}, r{dst}"), &mut prng, 21); + test_ins(true, format!("div32 r{src}, r{dst}"), &mut prng, 21); // test load, store let rand = prng.next_u32() as i64; @@ -193,6 +194,7 @@ fn fuzz_alu() { } test_ins( + false, format!( "mov32 r{tmp},{} mov64 r{src},{:#x} @@ -207,6 +209,7 @@ fn fuzz_alu() { ); test_ins( + false, format!( "mov32 r{tmp},{} mov64 r{src},{:#x} @@ -223,6 +226,7 @@ fn fuzz_alu() { let addr = rand % 79 + 0x4_0000_0000i64 - offset as i64; test_ins( + false, format!( "mov32 r{tmp},{} mov64 r{src},{:#x} @@ -237,6 +241,7 @@ fn fuzz_alu() { ); test_ins( + false, format!( "mov32 r{tmp},{} mov64 r{src},{:#x} @@ -252,6 +257,7 @@ fn fuzz_alu() { let addr = rand % 77 + 0x4_0000_0000i64 - offset as i64; test_ins( + false, format!( "mov32 r{tmp},{} mov64 r{src},{:#x} @@ -265,6 
+271,7 @@ fn fuzz_alu() { 25, ); test_ins( + false, format!( "mov32 r{tmp},{} mov64 r{src},{:#x} @@ -281,6 +288,7 @@ fn fuzz_alu() { let addr = rand % 73 + 0x4_0000_0000i64 - offset as i64; test_ins( + false, format!( "mov32 r{tmp},{} mov64 r{src},{:#x} @@ -294,6 +302,7 @@ fn fuzz_alu() { 25, ); test_ins( + false, format!( "mov32 r{tmp},{} mov64 r{src},{:#x} @@ -313,6 +322,7 @@ fn fuzz_alu() { "jsle", ] { test_ins( + false, format!( "{jc} r{src}, r{dst}, l1 or64 r{src},0x12345678 @@ -330,31 +340,47 @@ fn fuzz_alu() { } for _ in 0..10 { - let mut imm = prng.next_u32() as i32; - - test_ins(format!("mov64 r{src}, {imm}"), &mut prng, 21); - test_ins(format!("add64 r{src}, {imm}"), &mut prng, 21); - test_ins(format!("sub64 r{src}, {imm}"), &mut prng, 21); - test_ins(format!("xor64 r{src}, {imm}"), &mut prng, 21); - test_ins(format!("and64 r{src}, {imm}"), &mut prng, 21); - test_ins(format!("lmul64 r{src}, {imm}"), &mut prng, 21); - test_ins(format!("uhmul64 r{src}, {imm}"), &mut prng, 21); - test_ins(format!("shmul64 r{src}, {imm}"), &mut prng, 21); - test_ins(format!("udiv64 r{src}, {imm}"), &mut prng, 21); - test_ins(format!("urem64 r{src}, {imm}"), &mut prng, 21); - test_ins(format!("sdiv64 r{src}, {imm}"), &mut prng, 21); - test_ins(format!("srem64 r{src}, {imm}"), &mut prng, 21); - - test_ins(format!("mov32 r{src}, {imm}"), &mut prng, 21); - test_ins(format!("add32 r{src}, {imm}"), &mut prng, 21); - test_ins(format!("sub32 r{src}, {imm}"), &mut prng, 21); - test_ins(format!("xor32 r{src}, {imm}"), &mut prng, 21); - test_ins(format!("and32 r{src}, {imm}"), &mut prng, 21); - test_ins(format!("lmul32 r{src}, {imm}"), &mut prng, 21); - test_ins(format!("udiv32 r{src}, {imm}"), &mut prng, 21); - test_ins(format!("urem32 r{src}, {imm}"), &mut prng, 21); - test_ins(format!("sdiv32 r{src}, {imm}"), &mut prng, 21); - test_ins(format!("srem32 r{src}, {imm}"), &mut prng, 21); + let imm = prng.next_u64() as i64; + test_ins(true, format!("lddw r{src}, {imm}"), &mut prng, 21); + 
+ let mut imm = imm as i32; + + test_ins(true, format!("neg64 r{src}"), &mut prng, 21); + test_ins(true, format!("neg32 r{src}"), &mut prng, 21); + + test_ins(true, format!("mul64 r{src}, {imm}"), &mut prng, 21); + test_ins(true, format!("mod64 r{src}, {imm}"), &mut prng, 21); + test_ins(true, format!("div64 r{src}, {imm}"), &mut prng, 21); + + test_ins(true, format!("mul32 r{src}, {imm}"), &mut prng, 21); + test_ins(true, format!("mod32 r{src}, {imm}"), &mut prng, 21); + test_ins(true, format!("div32 r{src}, {imm}"), &mut prng, 21); + + test_ins(false, format!("mov64 r{src}, {imm}"), &mut prng, 21); + test_ins(false, format!("add64 r{src}, {imm}"), &mut prng, 21); + test_ins(false, format!("sub64 r{src}, {imm}"), &mut prng, 21); + test_ins(false, format!("or64 r{src}, {imm}"), &mut prng, 21); + test_ins(false, format!("xor64 r{src}, {imm}"), &mut prng, 21); + test_ins(false, format!("and64 r{src}, {imm}"), &mut prng, 21); + test_ins(false, format!("lmul64 r{src}, {imm}"), &mut prng, 21); + test_ins(false, format!("uhmul64 r{src}, {imm}"), &mut prng, 21); + test_ins(false, format!("shmul64 r{src}, {imm}"), &mut prng, 21); + test_ins(false, format!("udiv64 r{src}, {imm}"), &mut prng, 21); + test_ins(false, format!("urem64 r{src}, {imm}"), &mut prng, 21); + test_ins(false, format!("sdiv64 r{src}, {imm}"), &mut prng, 21); + test_ins(false, format!("srem64 r{src}, {imm}"), &mut prng, 21); + + test_ins(false, format!("mov32 r{src}, {imm}"), &mut prng, 21); + test_ins(false, format!("add32 r{src}, {imm}"), &mut prng, 21); + test_ins(false, format!("sub32 r{src}, {imm}"), &mut prng, 21); + test_ins(false, format!("or32 r{src}, {imm}"), &mut prng, 21); + test_ins(false, format!("xor32 r{src}, {imm}"), &mut prng, 21); + test_ins(false, format!("and32 r{src}, {imm}"), &mut prng, 21); + test_ins(false, format!("lmul32 r{src}, {imm}"), &mut prng, 21); + test_ins(false, format!("udiv32 r{src}, {imm}"), &mut prng, 21); + test_ins(false, format!("urem32 r{src}, {imm}"), &mut 
prng, 21); + test_ins(false, format!("sdiv32 r{src}, {imm}"), &mut prng, 21); + test_ins(false, format!("srem32 r{src}, {imm}"), &mut prng, 21); // test st imm let rand = prng.next_u32() as i64; @@ -363,6 +389,7 @@ fn fuzz_alu() { let addr = rand % 80 + 0x4_0000_0000i64 - offset as i64; test_ins( + false, format!( "mov32 r{tmp},{} mov64 r{src},{:#x} @@ -379,6 +406,7 @@ fn fuzz_alu() { let addr = rand % 79 + 0x4_0000_0000i64 - offset as i64; test_ins( + false, format!( "mov32 r{tmp},{} mov64 r{src},{:#x} @@ -395,6 +423,7 @@ fn fuzz_alu() { let addr = rand % 77 + 0x4_0000_0000i64 - offset as i64; test_ins( + false, format!( "mov32 r{tmp},{} mov64 r{src},{:#x} @@ -410,6 +439,7 @@ fn fuzz_alu() { let addr = rand % 73 + 0x4_0000_0000i64 - offset as i64; test_ins( + false, format!( "mov32 r{tmp},{} mov64 r{src},{:#x} @@ -425,6 +455,7 @@ fn fuzz_alu() { // unconditional jump test_ins( + false, format!( "ja 1 xor64 r{src},0x12345678 @@ -438,6 +469,7 @@ fn fuzz_alu() { "jeq", "jgt", "jge", "jlt", "jle", "jset", "jne", "jsgt", "jsge", "jslt", "jsle", ] { test_ins( + false, format!( "{jc} r{src}, {imm}, l1 or64 r{src},0x12345678 @@ -454,25 +486,25 @@ fn fuzz_alu() { imm &= 63; - test_ins(format!("lsh64 r{src}, {imm}"), &mut prng, 21); - test_ins(format!("rsh64 r{src}, {imm}"), &mut prng, 21); - test_ins(format!("arsh64 r{src}, {imm}"), &mut prng, 21); - test_ins(format!("hor64 r{src}, {imm}"), &mut prng, 21); + test_ins(false, format!("lsh64 r{src}, {imm}"), &mut prng, 21); + test_ins(false, format!("rsh64 r{src}, {imm}"), &mut prng, 21); + test_ins(false, format!("arsh64 r{src}, {imm}"), &mut prng, 21); + test_ins(false, format!("hor64 r{src}, {imm}"), &mut prng, 21); imm &= 31; - test_ins(format!("lsh32 r{src}, {imm}"), &mut prng, 21); - test_ins(format!("rsh32 r{src}, {imm}"), &mut prng, 21); - test_ins(format!("arsh32 r{src}, {imm}"), &mut prng, 21); + test_ins(false, format!("lsh32 r{src}, {imm}"), &mut prng, 21); + test_ins(false, format!("rsh32 r{src}, {imm}"), &mut 
prng, 21); + test_ins(false, format!("arsh32 r{src}, {imm}"), &mut prng, 21); - test_ins(format!("be64 r{src}"), &mut prng, 21); - test_ins(format!("be32 r{src}"), &mut prng, 21); - test_ins(format!("be16 r{src}"), &mut prng, 21); + test_ins(false, format!("be64 r{src}"), &mut prng, 21); + test_ins(false, format!("be32 r{src}"), &mut prng, 21); + test_ins(false, format!("be16 r{src}"), &mut prng, 21); } } } -fn test_ins(ins: String, prng: &mut SmallRng, cu: u64) { +fn test_ins(v1: bool, ins: String, prng: &mut SmallRng, cu: u64) { let mut input = [0u8; 80]; prng.fill_bytes(&mut input); @@ -502,5 +534,9 @@ fn test_ins(ins: String, prng: &mut SmallRng, cu: u64) { exit" ); - test_interpreter_and_jit_asm!(asm.as_str(), input, (), TestContextObject::new(cu)); + let mut config = Config::default(); + if v1 { + config.enable_sbpf_v2 = false; + } + test_interpreter_and_jit_asm!(asm.as_str(), config, input, (), TestContextObject::new(cu)); }