fd_shredder: fix bug with entry batch sizing
ptaffet-jump committed Mar 25, 2024
1 parent 2478a0e, commit efa9d87
Showing 3 changed files with 43 additions and 6 deletions.
src/disco/shred/fd_shredder.c: 3 additions & 1 deletion
@@ -125,7 +125,9 @@ fd_shredder_next_fec_set( fd_shredder_t * shredder,

ulong entry_bytes_remaining = entry_sz - offset;
/* how many total payload bytes in this FEC set? */
-ulong chunk_size = fd_ulong_if( entry_bytes_remaining>=2UL*31200UL, 31200UL, entry_bytes_remaining );
+ulong chunk_size = fd_ulong_if( entry_bytes_remaining>=2UL*FD_SHREDDER_NORMAL_FEC_SET_PAYLOAD_SZ,
+FD_SHREDDER_NORMAL_FEC_SET_PAYLOAD_SZ,
+entry_bytes_remaining );
ulong data_shred_cnt = fd_shredder_count_data_shreds( chunk_size );
ulong parity_shred_cnt = fd_shredder_count_parity_shreds( chunk_size );
/* Our notion of tree depth counts the root, while the shred version
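
The removed line sized each chunk against 31200 bytes, while fd_shredder.h counts FEC sets in units of FD_SHREDDER_NORMAL_FEC_SET_PAYLOAD_SZ = 31840 bytes, so for some batch sizes the shredder emitted more FEC sets than fd_shredder_count_fec_sets predicts (for example, a 63000-byte batch was split into a 31200-byte set plus a 31800-byte set even though the count functions report a single set). Below is a minimal standalone sketch of the chunking rule the corrected line implements; it is not code from the commit, and PAYLOAD_SZ is a stand-in for the real constant.

/* Sketch of the corrected chunking rule (illustration only;
   PAYLOAD_SZ stands in for FD_SHREDDER_NORMAL_FEC_SET_PAYLOAD_SZ). */
#include <stdio.h>

#define PAYLOAD_SZ 31840UL

int
main( void ) {
  unsigned long entry_sz = 63000UL; /* example batch size */
  unsigned long offset   = 0UL;
  unsigned long sets     = 0UL;
  while( offset < entry_sz ) {
    unsigned long remaining = entry_sz - offset;
    /* Take one full normal payload only if at least two full payloads
       remain; otherwise the final set absorbs everything that is left
       (up to 2*PAYLOAD_SZ - 1 bytes). */
    unsigned long chunk = remaining>=2UL*PAYLOAD_SZ ? PAYLOAD_SZ : remaining;
    printf( "FEC set %lu: %lu payload bytes\n", sets++, chunk );
    offset += chunk;
  }
  /* Prints a single 63000-byte set, matching fd_shredder_count_fec_sets(63000)==1.
     With the old 31200-byte threshold the same batch produced two sets. */
  return 0;
}
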
src/disco/shred/fd_shredder.h: 4 additions & 5 deletions
@@ -111,20 +111,20 @@ void * fd_shredder_delete( void * mem );
There are two legitimate ways to send certain payload sizes. We
always pick the larger value of payload_bytes_per_shred. */

-#define NORMAL_FEC_SET_PAYLOAD_SZ (31840UL)
+#define FD_SHREDDER_NORMAL_FEC_SET_PAYLOAD_SZ (31840UL)

FD_FN_CONST static inline ulong
fd_shredder_count_fec_sets( ulong sz_bytes ) {
/* if sz_bytes < 2*31840, we make 1 FEC set. If sz_bytes is a
multiple of 31840, we make exactly sz_bytes/31840 sets. Otherwise,
we make floor(sz_bytes/31840)-1 normal set + one odd-sized set.
These cases can be simplified to make it branchless: */
-return fd_ulong_max( sz_bytes, 2UL*NORMAL_FEC_SET_PAYLOAD_SZ - 1UL ) / NORMAL_FEC_SET_PAYLOAD_SZ;
+return fd_ulong_max( sz_bytes, 2UL*FD_SHREDDER_NORMAL_FEC_SET_PAYLOAD_SZ - 1UL ) / FD_SHREDDER_NORMAL_FEC_SET_PAYLOAD_SZ;
}
FD_FN_CONST static inline ulong
fd_shredder_count_data_shreds( ulong sz_bytes ) {
ulong normal_sets = fd_shredder_count_fec_sets( sz_bytes ) - 1UL;
-ulong remaining_bytes = sz_bytes - normal_sets * NORMAL_FEC_SET_PAYLOAD_SZ;
+ulong remaining_bytes = sz_bytes - normal_sets * FD_SHREDDER_NORMAL_FEC_SET_PAYLOAD_SZ;
ulong shreds = normal_sets * 32UL;
if( FD_UNLIKELY( remaining_bytes <= 9135UL ) ) shreds += fd_ulong_max( 1UL, (remaining_bytes + 1014UL)/1015UL );
else if( FD_LIKELY( remaining_bytes <= 31840UL ) ) shreds += (remaining_bytes + 994UL)/ 995UL;
@@ -135,15 +135,14 @@ fd_shredder_count_data_shreds( ulong sz_bytes ) {
FD_FN_CONST static inline ulong
fd_shredder_count_parity_shreds( ulong sz_bytes ) {
ulong normal_sets = fd_shredder_count_fec_sets( sz_bytes ) - 1UL;
-ulong remaining_bytes = sz_bytes - normal_sets * NORMAL_FEC_SET_PAYLOAD_SZ;
+ulong remaining_bytes = sz_bytes - normal_sets * FD_SHREDDER_NORMAL_FEC_SET_PAYLOAD_SZ;
ulong shreds = normal_sets * 32UL;
if( FD_UNLIKELY( remaining_bytes <= 9135UL ) ) shreds += fd_shredder_data_to_parity_cnt[ fd_ulong_max( 1UL, (remaining_bytes + 1014UL)/1015UL ) ];
else if( FD_LIKELY( remaining_bytes <= 31840UL ) ) shreds += fd_shredder_data_to_parity_cnt[ (remaining_bytes + 994UL)/ 995UL ];
else if( FD_LIKELY( remaining_bytes <= 62400UL ) ) shreds += (remaining_bytes + 974UL)/ 975UL;
else shreds += (remaining_bytes + 954UL)/ 955UL;
return shreds;
}
-#undef NORMAL_FEC_SET_PAYLOAD_SZ

/* fd_shredder_init_batch begins the computation of shreds for an entry
batch. shredder must be a valid local join. entry_batch points to
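
The three-case description in the comment above fd_shredder_count_fec_sets and the arithmetic in the shred-count helpers are easiest to sanity-check numerically. The sketch below is standalone and for illustration only (PAYLOAD_SZ stands in for FD_SHREDDER_NORMAL_FEC_SET_PAYLOAD_SZ): it compares the branchless expression against the cased description and works two data-shred counts by hand. For sz = 31840 there is no full normal set ahead of the remainder, so (31840+994)/995 = 32 data shreds; for sz = 63680 one full normal set contributes 32 and the remaining 31840 bytes contribute another 32, for 64 in total.

/* Standalone sketch mirroring the FEC-set counting arithmetic above
   (illustration only; PAYLOAD_SZ stands in for
   FD_SHREDDER_NORMAL_FEC_SET_PAYLOAD_SZ). */
#include <assert.h>
#include <stdio.h>

#define PAYLOAD_SZ 31840UL

/* Branchless form used in fd_shredder_count_fec_sets. */
static unsigned long
count_fec_sets( unsigned long sz ) {
  unsigned long clamped = sz>2UL*PAYLOAD_SZ-1UL ? sz : 2UL*PAYLOAD_SZ-1UL;
  return clamped / PAYLOAD_SZ;
}

/* The three cases spelled out in the comment above it. */
static unsigned long
count_fec_sets_cased( unsigned long sz ) {
  if( sz<2UL*PAYLOAD_SZ ) return 1UL;            /* one (possibly oversized) set  */
  if( !(sz%PAYLOAD_SZ)  ) return sz/PAYLOAD_SZ;  /* exact multiple of PAYLOAD_SZ  */
  return (sz/PAYLOAD_SZ-1UL) + 1UL;              /* floor-1 normal sets + odd set */
}

int
main( void ) {
  for( unsigned long sz=1UL; sz<10UL*PAYLOAD_SZ; sz++ )
    assert( count_fec_sets( sz )==count_fec_sets_cased( sz ) );

  /* Worked data-shred counts, following fd_shredder_count_data_shreds:
     sz = 31840: 0 normal sets, 31840 bytes remain, (31840+994)/995 = 32 shreds.
     sz = 63680: 1 normal set (32 shreds) + 31840 bytes remaining -> 64 shreds. */
  printf( "fec sets: %lu %lu\n", count_fec_sets( 31840UL ), count_fec_sets( 63680UL ) ); /* 1 2 */
  return 0;
}
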
src/disco/shred/test_shredder.c: 36 additions & 0 deletions
@@ -180,6 +180,42 @@ test_shredder_count( void ) {
FD_TEST( fd_shredder_count_parity_shreds( data_sz ) == parity_shreds );
FD_TEST( fd_shredder_count_fec_sets( data_sz ) == fec_sets );
}

+/* Now check to make sure the shredder always produces that many
+shreds. */
+
+fd_entry_batch_meta_t meta[1];
+fd_memset( meta, 0, sizeof(fd_entry_batch_meta_t) );
+signer_ctx_t signer_ctx[ 1 ];
+signer_ctx_init( signer_ctx, test_private_key );
+
+FD_TEST( _shredder==fd_shredder_new( _shredder, test_signer, signer_ctx, (ushort)0 ) );
+fd_shredder_t * shredder = fd_shredder_join( _shredder ); FD_TEST( shredder );
+
+fd_fec_set_t _set[ 1 ];
+for( ulong j=0UL; j<FD_REEDSOL_DATA_SHREDS_MAX; j++ ) _set->data_shreds[ j ] = fec_set_memory_1 + 2048UL*j;
+for( ulong j=0UL; j<FD_REEDSOL_PARITY_SHREDS_MAX; j++ ) _set->parity_shreds[ j ] = fec_set_memory_2 + 2048UL*j;
+
+ulong slot=0UL;
+for( ulong sz=1UL; sz<100000UL; sz++ ) {
+fd_shredder_init_batch( shredder, perf_test_entry_batch, sz, slot++, meta );
+
+ulong data_shred_cnt = 0UL;
+ulong parity_shred_cnt = 0UL;
+ulong sets_cnt = fd_shredder_count_fec_sets( sz );
+for( ulong j=0UL; j<sets_cnt; j++ ) {
+fd_fec_set_t * set = fd_shredder_next_fec_set( shredder, _set );
+FD_TEST( set );
+
+data_shred_cnt += set->data_shred_cnt;
+parity_shred_cnt += set->parity_shred_cnt;
+}
+FD_TEST( !fd_shredder_next_fec_set( shredder, _set ) );
+fd_shredder_fini_batch( shredder );
+
+FD_TEST( data_shred_cnt ==fd_shredder_count_data_shreds ( sz ) );
+FD_TEST( parity_shred_cnt==fd_shredder_count_parity_shreds( sz ) );
+}
}

static void
