From 522d132198838e4ddc6596cc000d6e85525fb90c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?=
Date: Mon, 18 Dec 2023 13:33:33 +0100
Subject: [PATCH] Adds explicit sign extension of results in SBPF-v2.
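
SBPF-v1 implicitly sign extends the result of every 32-bit ALU
instruction to 64 bits. SBPF-v2 drops that implicit behavior: the
results of add32, sub32 and of the 32-bit PQR instructions (lmul32,
sdiv32, srem32) are zero extended, and sign extension becomes
explicit instead: "mov32 dst, src" now sign extends the lower 32 bits
of the source register, where SBPF-v1 truncated. The version gate is
exposed as SBPFVersion::implicit_sign_extension_of_results() and
honored by both the interpreter and the JIT.

A minimal sketch of the two result conventions, using a hypothetical
standalone helper that mirrors sign_extension() in src/interpreter.rs
(not part of this patch):

    // Hypothetical stand-in for the interpreter's sign_extension()
    // helper; `v1` plays the role of
    // implicit_sign_extension_of_results().
    fn alu32_result(value: i32, v1: bool) -> u64 {
        if v1 {
            value as i64 as u64 // SBPF-v1: implicit sign extension
        } else {
            value as u32 as u64 // SBPF-v2: zero extension
        }
    }

    fn main() {
        // A 32-bit result of -1 lands in a 64-bit register as:
        assert_eq!(alu32_result(-1, true), 0xffff_ffff_ffff_ffff);
        assert_eq!(alu32_result(-1, false), 0x0000_0000_ffff_ffff);
    }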
---
 src/interpreter.rs |  44 ++++++++----
 src/jit.rs         |  26 +++++--
 src/program.rs     |   5 ++
 src/x86.rs         |  13 ++++
 tests/execution.rs | 170 +++++++++++++++++++++++++++++++++++++++------
 5 files changed, 218 insertions(+), 40 deletions(-)

diff --git a/src/interpreter.rs b/src/interpreter.rs
index c854f886c..5c1889bdf 100644
--- a/src/interpreter.rs
+++ b/src/interpreter.rs
@@ -152,6 +152,18 @@ impl<'a, 'b, C: ContextObject> Interpreter<'a, 'b, C> {
         true
     }
 
+    fn sign_extension(&self, value: i32) -> u64 {
+        if self
+            .executable
+            .get_sbpf_version()
+            .implicit_sign_extension_of_results()
+        {
+            value as i64 as u64
+        } else {
+            value as u32 as u64
+        }
+    }
+
     /// Advances the interpreter state by one instruction
     ///
     /// Returns false if the program terminated or threw an error.
@@ -245,14 +257,14 @@ impl<'a, 'b, C: ContextObject> Interpreter<'a, 'b, C> {
             },
 
             // BPF_ALU class
-            ebpf::ADD32_IMM => self.reg[dst] = (self.reg[dst] as i32).wrapping_add(insn.imm as i32) as u64,
-            ebpf::ADD32_REG => self.reg[dst] = (self.reg[dst] as i32).wrapping_add(self.reg[src] as i32) as u64,
+            ebpf::ADD32_IMM => self.reg[dst] = self.sign_extension((self.reg[dst] as i32).wrapping_add(insn.imm as i32)),
+            ebpf::ADD32_REG => self.reg[dst] = self.sign_extension((self.reg[dst] as i32).wrapping_add(self.reg[src] as i32)),
             ebpf::SUB32_IMM => if self.executable.get_sbpf_version().swap_sub_reg_imm_operands() {
-                self.reg[dst] = (insn.imm as i32).wrapping_sub(self.reg[dst] as i32) as u64
+                self.reg[dst] = self.sign_extension((insn.imm as i32).wrapping_sub(self.reg[dst] as i32))
             } else {
-                self.reg[dst] = (self.reg[dst] as i32).wrapping_sub(insn.imm as i32) as u64
+                self.reg[dst] = self.sign_extension((self.reg[dst] as i32).wrapping_sub(insn.imm as i32))
             },
-            ebpf::SUB32_REG => self.reg[dst] = (self.reg[dst] as i32).wrapping_sub(self.reg[src] as i32) as u64,
+            ebpf::SUB32_REG => self.reg[dst] = self.sign_extension((self.reg[dst] as i32).wrapping_sub(self.reg[src] as i32)),
             ebpf::MUL32_IMM if !self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as i32).wrapping_mul(insn.imm as i32) as u64,
             ebpf::MUL32_REG if !self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as i32).wrapping_mul(self.reg[src] as i32) as u64,
             ebpf::DIV32_IMM if !self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as u32 / insn.imm as u32) as u64,
@@ -277,9 +289,13 @@ impl<'a, 'b, C: ContextObject> Interpreter<'a, 'b, C> {
             ebpf::XOR32_IMM => self.reg[dst] = (self.reg[dst] as u32 ^ insn.imm as u32) as u64,
             ebpf::XOR32_REG => self.reg[dst] = (self.reg[dst] as u32 ^ self.reg[src] as u32) as u64,
             ebpf::MOV32_IMM => self.reg[dst] = insn.imm as u32 as u64,
-            ebpf::MOV32_REG => self.reg[dst] = (self.reg[src] as u32) as u64,
-            ebpf::ARSH32_IMM => self.reg[dst] = (self.reg[dst] as i32).wrapping_shr(insn.imm as u32) as u64 & (u32::MAX as u64),
-            ebpf::ARSH32_REG => self.reg[dst] = (self.reg[dst] as i32).wrapping_shr(self.reg[src] as u32) as u64 & (u32::MAX as u64),
+            ebpf::MOV32_REG => self.reg[dst] = if self.executable.get_sbpf_version().implicit_sign_extension_of_results() {
+                self.reg[src] as u32 as u64
+            } else {
+                self.reg[src] as i32 as i64 as u64
+            },
+            ebpf::ARSH32_IMM => self.reg[dst] = (self.reg[dst] as i32).wrapping_shr(insn.imm as u32) as u32 as u64,
+            ebpf::ARSH32_REG => self.reg[dst] = (self.reg[dst] as i32).wrapping_shr(self.reg[src] as u32) as u32 as u64,
             ebpf::LE if self.executable.get_sbpf_version().enable_le() => {
                 self.reg[dst] = match insn.imm {
                     16 => (self.reg[dst] as u16).to_le() as u64,
@@ -342,8 +358,8 @@ impl<'a, 'b, C: ContextObject> Interpreter<'a, 'b, C> {
             }
 
             // BPF_PQR class
-            ebpf::LMUL32_IMM if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as i32).wrapping_mul(insn.imm as i32) as u64,
-            ebpf::LMUL32_REG if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as i32).wrapping_mul(self.reg[src] as i32) as u64,
+            ebpf::LMUL32_IMM if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as u32).wrapping_mul(insn.imm as u32) as u64,
+            ebpf::LMUL32_REG if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as u32).wrapping_mul(self.reg[src] as u32) as u64,
             ebpf::LMUL64_IMM if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = self.reg[dst].wrapping_mul(insn.imm as u64),
             ebpf::LMUL64_REG if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = self.reg[dst].wrapping_mul(self.reg[src]),
             ebpf::UHMUL64_IMM if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as u128).wrapping_mul(insn.imm as u64 as u128).wrapping_shr(64) as u64,
@@ -380,12 +396,12 @@ impl<'a, 'b, C: ContextObject> Interpreter<'a, 'b, C> {
             },
             ebpf::SDIV32_IMM if self.executable.get_sbpf_version().enable_pqr() => {
                 throw_error!(DivideOverflow; self, insn.imm, self.reg[dst], i32);
-                self.reg[dst] = (self.reg[dst] as i32 / insn.imm as i32) as u64;
+                self.reg[dst] = (self.reg[dst] as i32 / insn.imm as i32) as u32 as u64;
             }
             ebpf::SDIV32_REG if self.executable.get_sbpf_version().enable_pqr() => {
                 throw_error!(DivideByZero; self, self.reg[src], i32);
                 throw_error!(DivideOverflow; self, self.reg[src], self.reg[dst], i32);
-                self.reg[dst] = (self.reg[dst] as i32 / self.reg[src] as i32) as u64;
+                self.reg[dst] = (self.reg[dst] as i32 / self.reg[src] as i32) as u32 as u64;
             },
             ebpf::SDIV64_IMM if self.executable.get_sbpf_version().enable_pqr() => {
                 throw_error!(DivideOverflow; self, insn.imm, self.reg[dst], i64);
@@ -398,12 +414,12 @@ impl<'a, 'b, C: ContextObject> Interpreter<'a, 'b, C> {
             },
             ebpf::SREM32_IMM if self.executable.get_sbpf_version().enable_pqr() => {
                 throw_error!(DivideOverflow; self, insn.imm, self.reg[dst], i32);
-                self.reg[dst] = (self.reg[dst] as i32 % insn.imm as i32) as u64;
+                self.reg[dst] = (self.reg[dst] as i32 % insn.imm as i32) as u32 as u64;
             }
             ebpf::SREM32_REG if self.executable.get_sbpf_version().enable_pqr() => {
                 throw_error!(DivideByZero; self, self.reg[src], i32);
                 throw_error!(DivideOverflow; self, self.reg[src], self.reg[dst], i32);
-                self.reg[dst] = (self.reg[dst] as i32 % self.reg[src] as i32) as u64;
+                self.reg[dst] = (self.reg[dst] as i32 % self.reg[src] as i32) as u32 as u64;
             },
             ebpf::SREM64_IMM if self.executable.get_sbpf_version().enable_pqr() => {
                 throw_error!(DivideOverflow; self, insn.imm, self.reg[dst], i64);
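
Note on the 32-bit PQR arms above: lmul32, sdiv32 and srem32 still
compute in 32 bits, but their result is now always zero extended into
the destination register (PQR instructions exist only in SBPF-v2). A
minimal sketch of the sdiv32 convention, using a hypothetical
standalone helper (the interpreter's divide-by-zero and overflow
checks are elided):

    // Hypothetical stand-in for the interpreter's SDIV32 arm.
    fn sdiv32(dst: u64, src: u64) -> u64 {
        // Compute the quotient in i32, then zero extend it to 64 bits.
        (dst as i32 / src as i32) as u32 as u64
    }

    fn main() {
        // -13 / 4 == -3; only the low 32 bits of the register stay set:
        assert_eq!(sdiv32(-13i64 as u64, 4), 0x0000_0000_ffff_fffd);
    }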
diff --git a/src/jit.rs b/src/jit.rs
index c0b5b5681..d204ac7d7 100644
--- a/src/jit.rs
+++ b/src/jit.rs
@@ -477,11 +477,15 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> {
             // BPF_ALU class
             ebpf::ADD32_IMM => {
                 self.emit_sanitized_alu(OperandSize::S32, 0x01, 0, dst, insn.imm);
-                self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x63, dst, dst, 0, None)); // sign extend i32 to i64
+                if self.executable.get_sbpf_version().implicit_sign_extension_of_results() {
+                    self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x63, dst, dst, 0, None)); // sign extend i32 to i64
+                }
             },
             ebpf::ADD32_REG => {
                 self.emit_ins(X86Instruction::alu(OperandSize::S32, 0x01, src, dst, 0, None));
-                self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x63, dst, dst, 0, None)); // sign extend i32 to i64
+                if self.executable.get_sbpf_version().implicit_sign_extension_of_results() {
+                    self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x63, dst, dst, 0, None)); // sign extend i32 to i64
+                }
             },
             ebpf::SUB32_IMM => {
                 if self.executable.get_sbpf_version().swap_sub_reg_imm_operands() {
@@ -492,11 +496,15 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> {
                 } else {
                     self.emit_sanitized_alu(OperandSize::S32, 0x29, 5, dst, insn.imm);
                 }
-                self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x63, dst, dst, 0, None)); // sign extend i32 to i64
+                if self.executable.get_sbpf_version().implicit_sign_extension_of_results() {
+                    self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x63, dst, dst, 0, None)); // sign extend i32 to i64
+                }
             },
             ebpf::SUB32_REG => {
                 self.emit_ins(X86Instruction::alu(OperandSize::S32, 0x29, src, dst, 0, None));
-                self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x63, dst, dst, 0, None)); // sign extend i32 to i64
+                if self.executable.get_sbpf_version().implicit_sign_extension_of_results() {
+                    self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x63, dst, dst, 0, None)); // sign extend i32 to i64
+                }
             },
             ebpf::MUL32_IMM | ebpf::DIV32_IMM | ebpf::MOD32_IMM if !self.executable.get_sbpf_version().enable_pqr() => self.emit_product_quotient_remainder(OperandSize::S32, (insn.opc & ebpf::BPF_ALU_OP_MASK) == ebpf::BPF_MOD, (insn.opc & ebpf::BPF_ALU_OP_MASK) != ebpf::BPF_MUL, (insn.opc & ebpf::BPF_ALU_OP_MASK) == ebpf::BPF_MUL, dst, dst, Some(insn.imm)),
@@ -520,7 +528,13 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> {
                     self.emit_ins(X86Instruction::load_immediate(OperandSize::S32, dst, insn.imm));
                 }
             }
-            ebpf::MOV32_REG => self.emit_ins(X86Instruction::mov(OperandSize::S32, src, dst)),
+            ebpf::MOV32_REG => {
+                if self.executable.get_sbpf_version().implicit_sign_extension_of_results() {
+                    self.emit_ins(X86Instruction::mov(OperandSize::S32, src, dst));
+                } else {
+                    self.emit_ins(X86Instruction::mov_with_sign_extension(OperandSize::S64, src, dst));
+                }
+            }
             ebpf::ARSH32_IMM => self.emit_shift(OperandSize::S32, 7, REGISTER_SCRATCH, dst, Some(insn.imm)),
             ebpf::ARSH32_REG => self.emit_shift(OperandSize::S32, 7, src, dst, None),
             ebpf::LE if self.executable.get_sbpf_version().enable_le() => {
@@ -1283,7 +1297,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> {
             self.emit_ins(X86Instruction::pop(RAX));
         }
         if let OperandSize::S32 = size {
-            if signed {
+            if signed && self.executable.get_sbpf_version().implicit_sign_extension_of_results() {
                 self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x63, dst, dst, 0, None)); // sign extend i32 to i64
             }
         }
diff --git a/src/program.rs b/src/program.rs
index 0ef76bcc8..38f8a7af1 100644
--- a/src/program.rs
+++ b/src/program.rs
@@ -20,6 +20,11 @@ pub enum SBPFVersion {
 }
 
 impl SBPFVersion {
+    /// Implicitly perform sign extension of results
+    pub fn implicit_sign_extension_of_results(&self) -> bool {
+        self == &SBPFVersion::V1
+    }
+
     /// Enable the little-endian byte swap instructions
     pub fn enable_le(&self) -> bool {
         self == &SBPFVersion::V1
diff --git a/src/x86.rs b/src/x86.rs
index eaaa13849..b0b003601 100644
--- a/src/x86.rs
+++ b/src/x86.rs
@@ -218,6 +218,19 @@ impl X86Instruction {
         }
     }
 
+    /// Move source to destination with sign extension
+    #[inline]
+    pub const fn mov_with_sign_extension(size: OperandSize, source: u8, destination: u8) -> Self {
+        exclude_operand_sizes!(size, OperandSize::S0 | OperandSize::S8 | OperandSize::S16);
+        Self {
+            size,
+            opcode: 0x63,
+            first_operand: destination,
+            second_operand: source,
+            ..Self::DEFAULT
+        }
+    }
+
     /// Conditionally move source to destination
     #[inline]
     pub const fn cmov(size: OperandSize, condition: u8, source: u8, destination: u8) -> Self {
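
The mov tests below pin down the new register-to-register semantics:
mov32 sign extends under SBPF-v2 and truncates under SBPF-v1 (see
test_mov32_reg and test_mov32_reg_truncating). The JIT implements the
v2 path with opcode 0x63, which with a REX.W prefix is x86-64 MOVSXD
(sign extend doubleword to quadword); judging from the operand swap
relative to mov(), the destination goes in first_operand because
MOVSXD encodes the destination in the ModRM reg field. A minimal
sketch of the two behaviors, using a hypothetical helper that mirrors
the interpreter's MOV32_REG arm (not part of the patch):

    // Hypothetical stand-in for the interpreter's MOV32_REG arm; `v1`
    // plays the role of implicit_sign_extension_of_results().
    fn mov32_reg(src: u64, v1: bool) -> u64 {
        if v1 {
            src as u32 as u64 // SBPF-v1: truncate (zero extend)
        } else {
            src as i32 as i64 as u64 // SBPF-v2: explicit sign extension
        }
    }

    fn main() {
        assert_eq!(mov32_reg(u64::MAX, true), 0x0000_0000_ffff_ffff);
        assert_eq!(mov32_reg(u64::MAX, false), 0xffff_ffff_ffff_ffff);
    }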
diff --git a/tests/execution.rs b/tests/execution.rs
index 6d8f151fc..a41871598 100644
--- a/tests/execution.rs
+++ b/tests/execution.rs
@@ -184,7 +184,29 @@ macro_rules! test_interpreter_and_jit_elf {
 // BPF_ALU : Arithmetic and Logic
 
 #[test]
-fn test_mov() {
+fn test_mov32_imm() {
+    test_interpreter_and_jit_asm!(
+        "
+        mov32 r0, 1
+        exit",
+        [],
+        (),
+        TestContextObject::new(2),
+        ProgramResult::Ok(1),
+    );
+    test_interpreter_and_jit_asm!(
+        "
+        mov32 r0, -1
+        exit",
+        [],
+        (),
+        TestContextObject::new(2),
+        ProgramResult::Ok(0xffffffff),
+    );
+}
+
+#[test]
+fn test_mov32_reg() {
     test_interpreter_and_jit_asm!(
         "
         mov32 r1, 1
@@ -195,32 +217,61 @@ fn test_mov() {
         mov32 r0, r1
         exit",
         [],
         (),
         TestContextObject::new(3),
         ProgramResult::Ok(0x1),
     );
+    test_interpreter_and_jit_asm!(
+        "
+        mov32 r1, -1
+        mov32 r0, r1
+        exit",
+        [],
+        (),
+        TestContextObject::new(3),
+        ProgramResult::Ok(0xffffffffffffffff),
+    );
 }
 
 #[test]
-fn test_mov32_imm_large() {
+fn test_mov64_imm() {
     test_interpreter_and_jit_asm!(
         "
-        mov32 r0, -1
+        mov64 r0, 1
         exit",
         [],
         (),
         TestContextObject::new(2),
-        ProgramResult::Ok(0xffffffff),
+        ProgramResult::Ok(1),
+    );
+    test_interpreter_and_jit_asm!(
+        "
+        mov64 r0, -1
+        exit",
+        [],
+        (),
+        TestContextObject::new(2),
+        ProgramResult::Ok(0xffffffffffffffff),
     );
 }
 
 #[test]
-fn test_mov_large() {
+fn test_mov64_reg() {
     test_interpreter_and_jit_asm!(
         "
-        mov32 r1, -1
-        mov32 r0, r1
+        mov64 r1, 1
+        mov64 r0, r1
         exit",
         [],
         (),
         TestContextObject::new(3),
-        ProgramResult::Ok(0xffffffff),
+        ProgramResult::Ok(0x1),
+    );
+    test_interpreter_and_jit_asm!(
+        "
+        mov64 r1, -1
+        mov64 r0, r1
+        exit",
+        [],
+        (),
+        TestContextObject::new(3),
+        ProgramResult::Ok(0xffffffffffffffff),
+    );
 }
@@ -651,33 +702,93 @@ fn test_pqr() {
         (ebpf::UDIV64_IMM, u64::MAX, u64::MAX, 1u64),
         (ebpf::UREM32_IMM, u64::MAX, u64::MAX, 0u64),
         (ebpf::UREM64_IMM, u64::MAX, u64::MAX, 0u64),
-        (ebpf::LMUL32_IMM, 13i64 as u64, 4i32 as u64, 52i32 as u64),
+        (
+            ebpf::LMUL32_IMM,
+            13i64 as u64,
+            4i32 as u32 as u64,
+            52i32 as u32 as u64,
+        ),
         (ebpf::LMUL64_IMM, 13i64 as u64, 4i64 as u64, 52i64 as u64),
         (ebpf::SHMUL64_IMM, 13i64 as u64, 4i64 as u64, 0i64 as u64),
-        (ebpf::SDIV32_IMM, 13i64 as u64, 4i32 as u64, 3i32 as u64),
+        (
+            ebpf::SDIV32_IMM,
+            13i64 as u64,
+            4i32 as u32 as u64,
+            3i32 as u32 as u64,
+        ),
         (ebpf::SDIV64_IMM, 13i64 as u64, 4i64 as u64, 3i64 as u64),
-        (ebpf::SREM32_IMM, 13i64 as u64, 4i32 as u64, 1i64 as u64),
+        (
+            ebpf::SREM32_IMM,
+            13i64 as u64,
+            4i32 as u32 as u64,
+            1i64 as u64,
+        ),
         (ebpf::SREM64_IMM, 13i64 as u64, 4i64 as u64, 1i64 as u64),
-        (ebpf::LMUL32_IMM, 13i64 as u64, -4i32 as u64, -52i32 as u64),
+        (
+            ebpf::LMUL32_IMM,
+            13i64 as u64,
+            -4i32 as u32 as u64,
+            -52i32 as u32 as u64,
+        ),
         (ebpf::LMUL64_IMM, 13i64 as u64, -4i64 as u64, -52i64 as u64),
         (ebpf::SHMUL64_IMM, 13i64 as u64, -4i64 as u64, -1i64 as u64),
-        (ebpf::SDIV32_IMM, 13i64 as u64, -4i32 as u64, -3i32 as u64),
+        (
+            ebpf::SDIV32_IMM,
+            13i64 as u64,
+            -4i32 as u32 as u64,
+            -3i32 as u32 as u64,
+        ),
         (ebpf::SDIV64_IMM, 13i64 as u64, -4i64 as u64, -3i64 as u64),
-        (ebpf::SREM32_IMM, 13i64 as u64, -4i32 as u64, 1i64 as u64),
+        (
+            ebpf::SREM32_IMM,
+            13i64 as u64,
+            -4i32 as u32 as u64,
+            1i64 as u64,
+        ),
         (ebpf::SREM64_IMM, 13i64 as u64, -4i64 as u64, 1i64 as u64),
-        (ebpf::LMUL32_IMM, -13i64 as u64, 4i32 as u64, -52i32 as u64),
+        (
+            ebpf::LMUL32_IMM,
+            -13i64 as u64,
+            4i32 as u32 as u64,
+            -52i32 as u32 as u64,
+        ),
         (ebpf::LMUL64_IMM, -13i64 as u64, 4i64 as u64, -52i64 as u64),
         (ebpf::SHMUL64_IMM, -13i64 as u64, 4i64 as u64, -1i64 as u64),
-        (ebpf::SDIV32_IMM, -13i64 as u64, 4i32 as u64, -3i32 as u64),
+        (
+            ebpf::SDIV32_IMM,
+            -13i64 as u64,
+            4i32 as u32 as u64,
+            -3i32 as u32 as u64,
+        ),
         (ebpf::SDIV64_IMM, -13i64 as u64, 4i64 as u64, -3i64 as u64),
-        (ebpf::SREM32_IMM, -13i64 as u64, 4i32 as u64, -1i64 as u64),
+        (
+            ebpf::SREM32_IMM,
+            -13i64 as u64,
+            4i32 as u32 as u64,
+            -1i32 as u32 as u64,
+        ),
         (ebpf::SREM64_IMM, -13i64 as u64, 4i64 as u64, -1i64 as u64),
-        (ebpf::LMUL32_IMM, -13i64 as u64, -4i32 as u64, 52i32 as u64),
+        (
+            ebpf::LMUL32_IMM,
+            -13i64 as u64,
+            -4i32 as u32 as u64,
+            52i32 as u32 as u64,
+        ),
         (ebpf::LMUL64_IMM, -13i64 as u64, -4i64 as u64, 52i64 as u64),
         (ebpf::SHMUL64_IMM, -13i64 as u64, -4i64 as u64, 0i64 as u64),
-        (ebpf::SDIV32_IMM, -13i64 as u64, -4i32 as u64, 3i32 as u64),
+        (
+            ebpf::SDIV32_IMM,
+            -13i64 as u64,
+            -4i32 as u32 as u64,
+            3i32 as u32 as u64,
+        ),
         (ebpf::SDIV64_IMM, -13i64 as u64, -4i64 as u64, 3i64 as u64),
-        (ebpf::SREM32_IMM, -13i64 as u64, -4i32 as u64, -1i64 as u64),
+        (
+            ebpf::SREM32_IMM,
+            -13i64 as u64,
+            -4i32 as u32 as u64,
+            -1i32 as u32 as u64,
+        ),
         (ebpf::SREM64_IMM, -13i64 as u64, -4i64 as u64, -1i64 as u64),
     ] {
         LittleEndian::write_u32(&mut prog[4..], dst as u32);
@@ -3442,6 +3553,25 @@ fn test_err_fixed_stack_out_of_bound() {
     );
 }
 
+#[test]
+fn test_mov32_reg_truncating() {
+    let config = Config {
+        enable_sbpf_v2: false,
+        ..Config::default()
+    };
+    test_interpreter_and_jit_asm!(
+        "
+        mov64 r1, -1
+        mov32 r0, r1
+        exit",
+        config,
+        [],
+        (),
+        TestContextObject::new(3),
+        ProgramResult::Ok(0xffffffff),
+    );
+}
+
 #[test]
 fn test_lddw() {
     let config = Config {