Migrate codebase to use banderwagon (#39)
* remove ToBytes helper trait

* - swap bandersnatch for banderwagon
- point the ipa branch to the branch that has the banderwagon code

* migrate code to use banderwagon

* fix build

* modify test vectors to match python output for banderwagon

* modify chunking code to add an extra chunk if the last chunk had extra unprocessed push data
kevaundray authored Mar 16, 2022
1 parent 11149e0 commit 5812004
Showing 17 changed files with 172 additions and 171 deletions.
19 changes: 17 additions & 2 deletions verkle-spec/src/code.rs
@@ -54,14 +54,23 @@ pub fn chunkify_code(code: Vec<u8>) -> Vec<Bytes32> {
let mut leftover_push_data = 0usize;
remaining_pushdata_bytes.push(leftover_push_data);

for chunk in chunked_code31.clone() {
let last_chunk_index = chunked_code31.len()-1;
// Set this to true if the last chunk had a push-data instruction that
// needs another chunk
let mut last_chunk_push_data = false;
for (chunk_i, chunk) in chunked_code31.clone().enumerate() {
// Case 1: the leftover push data is larger than the chunk size.
//
// For example, if the last instruction was a PUSH32 and the chunk size is 31,
// the leftover push data for this chunk is 31 (the whole chunk), and the
// leftover push data for the next chunk is 32 - 31 = 1.
if leftover_push_data > chunk.len() {
if chunk_i == last_chunk_index {
last_chunk_push_data = true;
break
}

leftover_push_data = leftover_push_data - chunk.len();
remaining_pushdata_bytes.push(chunk.len());
continue;
@@ -92,6 +101,12 @@ pub fn chunkify_code(code: Vec<u8>) -> Vec<Bytes32> {
chunked_code32.push(chunk32)
}

if last_chunk_push_data {
// If the last chunk had remaining push data to be added,
// we add an extra chunk of 32 zeroes; the spilled push data
// lies past the end of the code, so its bytes are zero.
chunked_code32.push([0u8;32])
}

chunked_code32
}
// This function returns a number which indicates how much PUSHDATA
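For context, the full chunking rule that verkle-spec/src/code.rs now implements, as a minimal self-contained sketch (illustrative names and simplified control flow, not the crate's exact code): each 31-byte chunk is prefixed with the count of its leading bytes that are push data, and push data spilling past the final chunk now yields one extra zero chunk.

// Sketch only. PUSH1 (0x60) ..= PUSH32 (0x7f) carry 1..=32 bytes of
// immediate data; every other opcode carries none.
fn push_data_size(op: u8) -> usize {
    if (0x60..=0x7f).contains(&op) {
        (op - 0x5f) as usize
    } else {
        0
    }
}

fn chunkify(code: &[u8]) -> Vec<[u8; 32]> {
    let mut out = Vec::new();
    let mut carry = 0usize; // push data spilling in from the previous chunk
    for chunk in code.chunks(31) {
        let prefix = carry.min(chunk.len());
        carry -= prefix;
        if carry == 0 {
            // Scan the executable part to see how far the last PUSH overruns.
            let mut i = prefix;
            while i < chunk.len() {
                i += 1 + push_data_size(chunk[i]);
            }
            carry = i - chunk.len();
        }
        let mut chunk32 = [0u8; 32];
        chunk32[0] = prefix as u8; // count of leading push-data bytes
        chunk32[1..1 + chunk.len()].copy_from_slice(chunk);
        out.push(chunk32);
    }
    // The behaviour this commit adds: push data that spills past the last
    // chunk gets an extra all-zero chunk, since bytes past the end of the
    // code read as zero.
    if carry > 0 {
        out.push([0u8; 32]);
    }
    out
}

A PUSH32 as the final byte of the code, for example, leaves carry = 32 after the last chunk and triggers the extra chunk.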
11 changes: 6 additions & 5 deletions verkle-trie/Cargo.toml
@@ -7,8 +7,9 @@ edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
bandersnatch = "0.1.1"
ipa-multipoint = { git = "https://github.com/crate-crypto/ipa_multipoint", branch = "develop" }
tempfile = "3.2.0"
ipa-multipoint = { git = "https://github.com/crate-crypto/ipa_multipoint", branch = "banderwagon_migration" }
banderwagon = { git = "https://github.com/crate-crypto/banderwagon" }
ark-ff = { version = "^0.3.0", default-features = false }
ark-ec = { version = "^0.3.0", default-features = false }
ark-serialize = { version = "^0.3.0", default-features = false }
@@ -40,9 +41,9 @@ debug-assertions = true
incremental = true


[[bench]]
name = "benchmark_main"
harness = false
# [[bench]]
# name = "benchmark_main"
# harness = false


[profile.test]
2 changes: 1 addition & 1 deletion verkle-trie/benches/benchmarks/precompute_scalar_mul.rs
@@ -1,6 +1,6 @@
use crate::benchmarks::util::{generate_set_of_keys, KEYS_10K, PRECOMPUTED_TABLE};
use ark_ff::{Field, PrimeField};
use bandersnatch::{EdwardsProjective, Fr};
use banderwagon::Fr;
use criterion::BenchmarkId;
use criterion::{black_box, criterion_group, BatchSize, Criterion};
use verkle_db::BareMetalDiskDb;
10 changes: 5 additions & 5 deletions verkle-trie/src/committer.rs
@@ -1,4 +1,4 @@
use bandersnatch::{EdwardsProjective, Fr};
use banderwagon::{Element, Fr};

pub mod precompute;
pub mod test;
@@ -10,12 +10,12 @@ pub trait Committer {
// Commit to a Lagrange polynomial; evaluations.len() must equal the size of the SRS at the moment.
//TODO: We can make this &[Fr; 256] since we have committed to 256; this would force the caller
// to handle the size of the slice
fn commit_lagrange(&self, evaluations: &[Fr]) -> EdwardsProjective;
fn commit_lagrange(&self, evaluations: &[Fr]) -> Element;
// compute value * G for a specific generator in the SRS
fn scalar_mul(&self, value: Fr, lagrange_index: usize) -> EdwardsProjective;
fn scalar_mul(&self, value: Fr, lagrange_index: usize) -> Element;

fn commit_sparse(&self, val_indices: Vec<(Fr, usize)>) -> EdwardsProjective {
let mut result = EdwardsProjective::default();
fn commit_sparse(&self, val_indices: Vec<(Fr, usize)>) -> Element {
let mut result = Element::zero();

for (value, lagrange_index) in val_indices {
result += self.scalar_mul(value, lagrange_index)
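A hedged sketch of why commit_sparse exists (update_commitment is illustrative, not a crate function): changing one evaluation of an already-committed polynomial only needs the delta times a single basis point, not a fresh 256-term commitment.

use crate::committer::Committer;
use banderwagon::{Element, Fr};

// Sketch: C' = C + (new - old) * G_index. Assumes Element: Add<Element>,
// consistent with the += usage elsewhere in this diff.
fn update_commitment<C: Committer>(
    committer: &C,
    old_commitment: Element,
    index: usize,
    old_value: Fr,
    new_value: Fr,
) -> Element {
    old_commitment + committer.commit_sparse(vec![(new_value - old_value, index)])
}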
53 changes: 25 additions & 28 deletions verkle-trie/src/committer/precompute.rs
@@ -1,8 +1,9 @@
use crate::committer::Committer;
use ark_ec::AffineCurve;
use ark_ff::Zero;

use banderwagon::{Element, Fr};

use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Read, SerializationError, Write};
use bandersnatch::{EdwardsAffine, EdwardsProjective, Fr};

#[derive(Debug, Clone, CanonicalSerialize, CanonicalDeserialize, PartialEq, Eq)]
pub struct PrecomputeLagrange {
@@ -13,12 +14,12 @@ pub struct PrecomputeLagrange {
impl<'a> Committer for &'a PrecomputeLagrange {
// If we compute these points at compile time, we can
// dictate that evaluations should be an array
fn commit_lagrange(&self, evaluations: &[Fr]) -> EdwardsProjective {
fn commit_lagrange(&self, evaluations: &[Fr]) -> Element {
if evaluations.len() != self.num_points {
panic!("wrong number of points")
}

let mut result = EdwardsProjective::default();
let mut result = Element::zero();

let scalar_table = evaluations
.into_iter()
@@ -29,54 +30,54 @@ impl<'a> Committer for &'a PrecomputeLagrange {
// convert scalar to bytes in little endian
let bytes = ark_ff::to_bytes!(scalar).unwrap();

let partial_result: EdwardsProjective = bytes
let partial_result: Element = bytes
.into_iter()
.enumerate()
.map(|(row, byte)| {
let point = table.point(row, byte);
EdwardsProjective::from(*point)
Element::from(*point)
})
.sum();
result += partial_result;
}
result
}

fn scalar_mul(&self, value: Fr, lagrange_index: usize) -> EdwardsProjective {
fn scalar_mul(&self, value: Fr, lagrange_index: usize) -> Element {
let table = &self.inner[lagrange_index];

let bytes = ark_ff::to_bytes!(value).unwrap();
let result: EdwardsProjective = bytes
let result: Element = bytes
.into_iter()
.enumerate()
.map(|(row, byte)| {
let point = table.point(row, byte);
EdwardsProjective::from(*point)
Element::from(*point)
})
.sum();
result
}
}
impl Committer for PrecomputeLagrange {
fn commit_lagrange(&self, evaluations: &[Fr]) -> EdwardsProjective {
fn commit_lagrange(&self, evaluations: &[Fr]) -> Element {
(&self).commit_lagrange(evaluations)
}

fn scalar_mul(&self, value: Fr, lagrange_index: usize) -> EdwardsProjective {
fn scalar_mul(&self, value: Fr, lagrange_index: usize) -> Element {
(&self).scalar_mul(value, lagrange_index)
}
}

impl PrecomputeLagrange {
pub fn precompute(points: &[EdwardsAffine]) -> Self {
pub fn precompute(points: &[Element]) -> Self {
let lagrange_precomputed_points = PrecomputeLagrange::precompute_lagrange_points(points);
Self {
inner: lagrange_precomputed_points,
num_points: points.len(),
}
}

fn precompute_lagrange_points(lagrange_points: &[EdwardsAffine]) -> Vec<LagrangeTablePoints> {
fn precompute_lagrange_points(lagrange_points: &[Element]) -> Vec<LagrangeTablePoints> {
use rayon::prelude::*;
lagrange_points
.into_par_iter()
@@ -86,12 +87,12 @@ impl PrecomputeLagrange {
}
#[derive(Debug, Clone, CanonicalSerialize, CanonicalDeserialize, PartialEq, Eq)]
pub struct LagrangeTablePoints {
identity: EdwardsAffine,
matrix: Vec<EdwardsAffine>,
identity: Element,
matrix: Vec<Element>,
}

impl LagrangeTablePoints {
pub fn new(point: &EdwardsAffine) -> LagrangeTablePoints {
pub fn new(point: &Element) -> LagrangeTablePoints {
let num_rows = 32u64;
// We use base 256
let base_u128 = 256u128;
@@ -111,19 +112,19 @@ impl LagrangeTablePoints {
let flattened_rows: Vec<_> = rows.into_par_iter().flatten().collect();

LagrangeTablePoints {
identity: EdwardsAffine::default(),
identity: Element::zero(),
matrix: flattened_rows,
}
}
pub fn point(&self, index: usize, value: u8) -> &EdwardsAffine {
pub fn point(&self, index: usize, value: u8) -> &Element {
if value == 0 {
return &self.identity;
}
&self.matrix.as_slice()[(index * 255) + (value - 1) as usize]
}

// Computes [G_1, 2G_1, 3G_1, ... num_points * G_1]
fn compute_base_row(point: &EdwardsAffine, num_points: usize) -> Vec<EdwardsAffine> {
fn compute_base_row(point: &Element, num_points: usize) -> Vec<Element> {
let mut row = Vec::with_capacity(num_points);
row.push(*point);
for i in 1..num_points {
@@ -135,11 +136,8 @@ impl LagrangeTablePoints {

// Given [G_1, 2G_1, 3G_1, ... num_points * G_1] and a scalar `k`
// Returns [k * G_1, 2 * k * G_1, 3 * k * G_1, ... num_points * k * G_1]
fn scale_row(points: &[EdwardsAffine], scale: Fr) -> Vec<EdwardsAffine> {
let scaled_row: Vec<EdwardsAffine> = points
.into_iter()
.map(|element| element.mul(scale).into())
.collect();
fn scale_row(points: &[Element], scale: Fr) -> Vec<Element> {
let scaled_row: Vec<Element> = points.into_iter().map(|element| *element * scale).collect();

scaled_row
}
@@ -150,14 +148,13 @@ mod test {

use crate::committer::precompute::LagrangeTablePoints;
use crate::committer::Committer;
use ark_ec::AffineCurve;
use ark_ff::{ToBytes, Zero};
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
use bandersnatch::{EdwardsAffine, EdwardsProjective, Fr};
use banderwagon::{Element, Fr};

#[test]
fn read_write() {
let point: EdwardsAffine = EdwardsAffine::prime_subgroup_generator();
let point = Element::prime_subgroup_generator();

let mut serialized_lagrange_table: Vec<u8> = Vec::new();

@@ -188,7 +185,7 @@ mod test {
// let values: Vec<_> = (1..=degree + 1).map(|i| Fr::from(i as u128)).collect();

// let expected_comm = {
// let mut res = EdwardsProjective::zero();
// let mut res = Element::zero();
// for (val, point) in values.iter().zip(SRS.iter()) {
// res += point.mul(val.into_repr())
// }
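The table layout behind scalar_mul above: row r stores [1*B, 2*B, ..., 255*B] with B = 256^r * G, so multiplying a 32-byte little-endian scalar by G costs at most 32 lookups and 31 additions. A hedged, self-contained sketch of the same base-256 windowed technique, with u128 wrapping arithmetic standing in for the group so it runs without the crypto crates:

// Toy model of PrecomputeLagrange's tables: addition mod 2^128 plays the
// role of the group law, g plays the role of a Lagrange basis point.
fn precompute_rows(g: u128, num_rows: usize) -> Vec<Vec<u128>> {
    let mut rows = Vec::with_capacity(num_rows);
    let mut base = g; // base = 256^r * g for the current row r
    for _ in 0..num_rows {
        rows.push((1..=255u128).map(|k| k.wrapping_mul(base)).collect());
        base = base.wrapping_mul(256);
    }
    rows
}

fn windowed_mul(table: &[Vec<u128>], scalar_le_bytes: &[u8]) -> u128 {
    let mut acc = 0u128; // the group identity
    for (row, &byte) in scalar_le_bytes.iter().enumerate() {
        if byte != 0 {
            // A row stores multiples 1..=255, so byte value k lives at k - 1.
            acc = acc.wrapping_add(table[row][byte as usize - 1]);
        }
    }
    acc
}

fn main() {
    let g = 0x9e37_79b9_7f4a_7c15u128; // arbitrary toy "generator"
    let table = precompute_rows(g, 16);
    let scalar = 0xdead_beef_cafe_f00du128;
    let got = windowed_mul(&table, &scalar.to_le_bytes());
    assert_eq!(got, g.wrapping_mul(scalar));
}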
13 changes: 7 additions & 6 deletions verkle-trie/src/committer/test.rs
@@ -2,22 +2,23 @@ use crate::{committer::Committer, constants::CRS};
use ark_ec::ProjectiveCurve;
use ark_ff::PrimeField;
use ark_ff::Zero;
use bandersnatch::{EdwardsProjective, Fr};

use banderwagon::{Element, Fr};
// A basic Committer to be used in tests.
// In production, we will use the precomputed points
#[derive(Debug, Clone, Copy)]
pub struct TestCommitter;
impl Committer for TestCommitter {
fn commit_lagrange(&self, evaluations: &[Fr]) -> EdwardsProjective {
let mut res = EdwardsProjective::zero();
fn commit_lagrange(&self, evaluations: &[Fr]) -> Element {
let mut res = Element::zero();
for (val, point) in evaluations.iter().zip(CRS.G.iter()) {
res += point.mul(val.into_repr())
res += point * val;
}
res
}

fn scalar_mul(&self, value: Fr, lagrange_index: usize) -> EdwardsProjective {
CRS[lagrange_index].mul(value.into_repr())
fn scalar_mul(&self, value: Fr, lagrange_index: usize) -> Element {
CRS[lagrange_index] * value
}
}

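A hedged usage sketch (a hypothetical extra test, reusing the imports above): the commitment C = sum_i eval_i * G_i that TestCommitter computes is additively homomorphic, so committing to the sum of two evaluation vectors matches the sum of the commitments.

#[test]
fn commitments_are_homomorphic() {
    // Illustrative only; assumes Element: Add and Fr: From<u64> from ark-ff.
    let committer = TestCommitter;
    let a: Vec<Fr> = (0..256u64).map(Fr::from).collect();
    let b: Vec<Fr> = (0..256u64).map(|i| Fr::from(i * i)).collect();
    let sum: Vec<Fr> = a.iter().zip(&b).map(|(x, y)| *x + *y).collect();
    assert_eq!(
        committer.commit_lagrange(&a) + committer.commit_lagrange(&b),
        committer.commit_lagrange(&sum)
    );
}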
4 changes: 2 additions & 2 deletions verkle-trie/src/config.rs
@@ -31,8 +31,8 @@ impl<Storage> VerkleConfig<Storage> {

// File is not already precomputed, so we pre-compute the points and store them
let mut file = File::create(PRECOMPUTED_POINTS_PATH).unwrap();
let g_aff: Vec<_> = CRS.G.iter().map(|point| point.into_affine()).collect();
let committer = PrecomputeLagrange::precompute(&g_aff);

let committer = PrecomputeLagrange::precompute(&CRS.G);
committer.serialize_unchecked(&mut file).unwrap();
Ok(Config { db, committer })
}
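For context, a hedged sketch of the surrounding load-or-build flow (the cache-hit branch is elided above; deserialize_unchecked is ark-serialize 0.3's counterpart to the serialize_unchecked call shown, and error handling stays unwrap() to match the diff):

use std::fs::File;
use std::path::Path;
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};

// Sketch: reuse the on-disk table when present, otherwise build and cache it.
// PRECOMPUTED_POINTS_PATH, CRS and PrecomputeLagrange as in the diff.
fn load_or_precompute() -> PrecomputeLagrange {
    if Path::new(PRECOMPUTED_POINTS_PATH).exists() {
        let mut file = File::open(PRECOMPUTED_POINTS_PATH).unwrap();
        PrecomputeLagrange::deserialize_unchecked(&mut file).unwrap()
    } else {
        let mut file = File::create(PRECOMPUTED_POINTS_PATH).unwrap();
        let committer = PrecomputeLagrange::precompute(&CRS.G);
        committer.serialize_unchecked(&mut file).unwrap();
        committer
    }
}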
8 changes: 4 additions & 4 deletions verkle-trie/src/constants.rs
@@ -1,6 +1,7 @@
use ark_ff::BigInteger256;
pub use bandersnatch::Fr;
use ipa_multipoint::{lagrange_basis::PrecomputedWeights, multiproof::CRS};
pub use banderwagon::Fr;
use ipa_multipoint::{crs::CRS, lagrange_basis::PrecomputedWeights};
use once_cell::sync::Lazy;

pub const FLUSH_BATCH: u32 = 20_000;
// This library only works for a width of 256. It can be modified to work for other widths, but this is
@@ -17,7 +18,6 @@ pub(crate) const TWO_POW_128: Fr = Fr::new(BigInteger256([
1249884543737537366,
]));

use once_cell::sync::Lazy;
pub static CRS: Lazy<CRS> = Lazy::new(|| CRS::new(VERKLE_NODE_WIDTH, PEDERSEN_SEED));

pub static PRECOMPUTED_WEIGHTS: Lazy<PrecomputedWeights> =
@@ -27,7 +27,7 @@
mod tests {
use super::TWO_POW_128;
use ark_ff::PrimeField;
use bandersnatch::Fr;
use banderwagon::Fr;

#[test]
fn test_two_pow128_constant() {
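A hedged sketch of what the elided test body presumably checks: squaring Fr::from(2) seven times walks the exponent 1 -> 2 -> 4 -> ... -> 128, which should land exactly on the hard-coded limbs.

#[test]
fn two_pow_128_squaring_check() {
    use ark_ff::Field;
    // Illustrative; the crate's actual test body is not shown above.
    let mut x = Fr::from(2u64);
    for _ in 0..7 {
        x.square_in_place(); // exponent doubles each iteration
    }
    assert_eq!(x, TWO_POW_128);
}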