Skip to content

Commit

Permalink
util: add deep ASan feature
Browse files Browse the repository at this point in the history
  • Loading branch information
ibhatt-jumptrading authored and riptl committed Apr 22, 2024
1 parent a18fe01 commit 0facf7e
Show file tree
Hide file tree
Showing 13 changed files with 282 additions and 47 deletions.
8 changes: 8 additions & 0 deletions config/extra/with-deepasan.mk
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
# Build configuration for "deep ASan": AddressSanitizer plus manual memory
# poisoning of fd_alloc/fd_wksp/fd_scratch regions (gated by FD_HAS_DEEPASAN).
FD_HAS_ASAN:=1
FD_HAS_DEEPASAN:=1
CPPFLAGS+=-DFD_HAS_ASAN=1
# NOTE(review): -DFD_HAS_DEEPASAN goes into CFLAGS while -DFD_HAS_ASAN goes
# into CPPFLAGS above — presumably both should be visible to every translation
# unit; confirm whether C++/preprocessor-only builds also need this define.
CFLAGS+=-DFD_HAS_DEEPASAN=1

# Instrument loads/stores and keep frame pointers so ASan/LSan reports have
# usable stack traces.
CPPFLAGS+=-fsanitize=address,leak -fno-omit-frame-pointer

# The same sanitizer runtimes must be linked in.
LDFLAGS+=-fsanitize=address,leak
110 changes: 74 additions & 36 deletions src/util/alloc/fd_alloc.c
Original file line number Diff line number Diff line change
Expand Up @@ -518,9 +518,6 @@ fd_alloc_join( void * shalloc,
FD_LOG_WARNING(( "bad magic" ));
return NULL;
}
#ifdef FD_WKSP_ASAN
fd_asan_poison(alloc, fd_alloc_footprint());
#endif
return fd_alloc_join_cgroup_hint_set( alloc, cgroup_hint );
}

Expand All @@ -531,9 +528,6 @@ fd_alloc_leave( fd_alloc_t * join ) {
FD_LOG_WARNING(( "NULL join" ));
return NULL;
}
#ifdef FD_WKSP_ASAN
fd_asan_unpoison(join, fd_alloc_footprint());
#endif
return fd_alloc_private_join_alloc( join );
}

Expand Down Expand Up @@ -620,18 +614,29 @@ fd_alloc_malloc_at_least( fd_alloc_t * join,
need to do elaborate overflow checking. */

fd_alloc_t * alloc = fd_alloc_private_join_alloc( join );

align = fd_ulong_if( !align, FD_ALLOC_MALLOC_ALIGN_DEFAULT, align );

ulong footprint = sz + sizeof(fd_alloc_hdr_t) + align - 1UL;
#if FD_HAS_DEEPASAN
/* The header is prepended and needs to be unpoisoned. Ensure that
there is padding for the alloc_hdr to be properly aligned. We
want to exit silently if the sz passed in is 0. The alignment must be
at least 8. */
ulong fd_alloc_hdr_footprint = fd_ulong_align_up( sizeof(fd_alloc_hdr_t), FD_ASAN_ALIGN );
if ( sz && sz < ULONG_MAX )
sz = fd_ulong_align_up( sz, FD_ASAN_ALIGN );
align = fd_ulong_if( align < FD_ASAN_ALIGN, FD_ASAN_ALIGN, align );
#else
ulong fd_alloc_hdr_footprint = sizeof(fd_alloc_hdr_t);
#endif

ulong footprint = sz + fd_alloc_hdr_footprint + align - 1UL;

if( FD_UNLIKELY( (!alloc) | (!fd_ulong_is_pow2( align )) | (!sz) | (footprint<=sz) ) ) {
*max = 0UL;
return NULL;
}
#ifdef FD_WKSP_ASAN
fd_asan_unpoison(alloc,footprint);
#endif

fd_wksp_t * wksp = fd_alloc_private_wksp( alloc );

/* At this point, alloc is non-NULL and backed by wksp, align is a
Expand Down Expand Up @@ -707,19 +712,34 @@ fd_alloc_malloc_at_least( fd_alloc_t * join,
ulong superblock_footprint = (ulong)fd_alloc_sizeclass_cfg[ sizeclass ].superblock_footprint;
if( FD_UNLIKELY( superblock_footprint > FD_ALLOC_FOOTPRINT_SMALL_THRESH ) ) {

ulong wksp_footprint = superblock_footprint + sizeof(fd_alloc_hdr_t) + FD_ALLOC_SUPERBLOCK_ALIGN - 1UL;
ulong wksp_footprint = superblock_footprint + fd_alloc_hdr_footprint + FD_ALLOC_SUPERBLOCK_ALIGN - 1UL;
ulong wksp_gaddr = fd_wksp_alloc( wksp, 1UL, wksp_footprint, alloc->tag );
if( FD_UNLIKELY( !wksp_gaddr ) ) {
*max = 0UL;
return NULL;
}
superblock_gaddr = fd_ulong_align_up( wksp_gaddr + sizeof(fd_alloc_hdr_t), FD_ALLOC_SUPERBLOCK_ALIGN );
superblock = (fd_alloc_superblock_t *)
fd_alloc_hdr_store_large( fd_wksp_laddr_fast( wksp, superblock_gaddr ), 1 /* sb */ );
#ifdef FD_WKSP_ASAN
fd_asan_unpoison(superblock,superblock_footprint);

superblock_gaddr = fd_ulong_align_up( wksp_gaddr + fd_alloc_hdr_footprint, FD_ALLOC_SUPERBLOCK_ALIGN );
#if FD_HAS_DEEPASAN
/* At this point, a new superblock is allocated from the wksp and the header
is prepended. The alignment needs to be taken into account: the padding
should also be unpoisoned.
Due to ASan requiring 8 byte word alignment for poisoning regions, must
guarantee 8 bytes for the header instead of just sizeof(fd_alloc_hdr_t).
We have a worst case padding of 15 bytes. Due to forced alignment in
fd_wksp_alloc of at least 8 bytes, in the worst case we will use 8 bytes
to align up the superblock_gaddr. The remaining 7 padding bytes will be
used to safely allow for the superblock_footprint to be aligned up to
an 8 byte multiple. */
void * unpoison_laddr = fd_wksp_laddr_fast( wksp, superblock_gaddr - fd_alloc_hdr_footprint );
ulong aligned_superblock_footprint = fd_ulong_align_up( superblock_footprint, FD_ASAN_ALIGN );
fd_asan_unpoison( unpoison_laddr, aligned_superblock_footprint + fd_alloc_hdr_footprint );
#endif

superblock = (fd_alloc_superblock_t *)
fd_alloc_hdr_store_large( fd_wksp_laddr_fast( wksp, superblock_gaddr ), 1 /* sb */ );

} else {

/* TODO: consider having user facing API wrap an internal API so
Expand Down Expand Up @@ -839,12 +859,28 @@ fd_alloc_malloc_at_least( fd_alloc_t * join,

ulong block_footprint = (ulong)fd_alloc_sizeclass_cfg[ sizeclass ].block_footprint;
ulong block_laddr = (ulong)superblock + sizeof(fd_alloc_superblock_t) + block_idx*block_footprint;
ulong alloc_laddr = fd_ulong_align_up( block_laddr + sizeof(fd_alloc_hdr_t), align );
ulong alloc_laddr = fd_ulong_align_up( block_laddr + fd_alloc_hdr_footprint, align );

#if FD_HAS_DEEPASAN
/* The block and the header must be unpoisoned to accommodate the block
footprint. The block footprint is determined by the sizeclass which
provides the minimum size that accommodates the footprint which is the
sz that's passed in, the padded fd_alloc_hdr, and the worst case amount
of alignment bytes. Because sz % FD_ASAN_ALIGN == 0, it is known that
we will have unused bytes at the end of the block since alloc_laddr %
FD_ASAN_ALIGN == 0. To ensure ASAN alignment, the range of bytes used
in the block can be safely rounded down.
*/

void* laddr = (void*) ( alloc_laddr - fd_alloc_hdr_footprint );

ulong block_hi_addr = block_laddr + block_footprint;
ulong block_unpoison_sz = fd_ulong_align_dn( block_hi_addr - alloc_laddr, FD_ASAN_ALIGN );
fd_asan_unpoison( laddr, block_unpoison_sz + fd_alloc_hdr_footprint );
#endif

*max = block_footprint - (alloc_laddr - block_laddr);
#ifdef FD_WKSP_ASAN
fd_asan_poison(superblock,fd_alloc_superblock_footprint());
#endif

return fd_alloc_hdr_store( (void *)alloc_laddr, superblock, block_idx, sizeclass );
}

Expand All @@ -869,9 +905,6 @@ fd_alloc_free( fd_alloc_t * join,
if( FD_UNLIKELY( sizeclass==FD_ALLOC_SIZECLASS_LARGE ) ) {
fd_wksp_t * wksp = fd_alloc_private_wksp( alloc );
fd_wksp_free( wksp, fd_wksp_gaddr_fast( wksp, laddr ) );
#ifdef FD_WKSP_ASAN
fd_asan_poison(alloc,fd_alloc_footprint());
#endif
return;
}

Expand All @@ -882,9 +915,23 @@ fd_alloc_free( fd_alloc_t * join,
ulong block_idx = fd_alloc_hdr_block_idx( hdr );
fd_alloc_block_set_t block = fd_alloc_block_set_ele( block_idx );
fd_alloc_block_set_t free_blocks = fd_alloc_block_set_add( &superblock->free_blocks, block );

#ifdef FD_WKSP_ASAN
fd_asan_unpoison(superblock,fd_alloc_superblock_footprint());


#if FD_HAS_DEEPASAN
/* The portion of the block which is used for the header and the allocation
should get poisoned. The alloc's laddr is already at least 8 byte aligned.
The 8 bytes prior to the start of the laddr are used by the fd_alloc_hdr_t.
These should get poisoned as the block is freed again. The region used by
the allocation should also get poisoned: [laddr,block_laddr+block_footprint].
However, we know that the size of the initial allocation was also 8 byte
aligned so we align down the size of the range to poison safely. */
ulong block_footprint = (ulong)fd_alloc_sizeclass_cfg[ sizeclass ].block_footprint;
ulong block_laddr = (ulong)superblock + sizeof(fd_alloc_superblock_t) + block_idx*block_footprint;
ulong block_hi_addr = fd_ulong_align_dn( block_laddr + block_footprint, FD_ASAN_ALIGN );
ulong fd_alloc_hdr_footprint = fd_ulong_align_up( sizeof(fd_alloc_hdr_t), FD_ASAN_ALIGN );
ulong fd_alloc_hdr_laddr = (ulong)laddr - fd_alloc_hdr_footprint;
ulong sz = block_hi_addr - (ulong)laddr + fd_alloc_hdr_footprint;
fd_asan_poison( (void*) fd_alloc_hdr_laddr, sz );
#endif

/* At this point, free_blocks is the set of free blocks just before
Expand Down Expand Up @@ -960,11 +1007,6 @@ fd_alloc_free( fd_alloc_t * join,

if( FD_UNLIKELY( displaced_superblock_gaddr ) )
fd_alloc_private_inactive_stack_push( alloc->inactive_stack + sizeclass, wksp, displaced_superblock_gaddr );

#ifdef FD_WKSP_ASAN
fd_asan_poison(alloc,fd_alloc_footprint());
fd_asan_poison(superblock,fd_alloc_superblock_footprint());
#endif
return;
}

Expand Down Expand Up @@ -1026,10 +1068,6 @@ fd_alloc_free( fd_alloc_t * join,
is wide enough to make that risk zero on any practical
timescale.) */

#ifdef FD_WKSP_ASAN
fd_asan_poison(alloc,fd_alloc_footprint());
fd_asan_poison(superblock,fd_alloc_superblock_footprint());
#endif
fd_wksp_t * wksp = fd_alloc_private_wksp( alloc );
ulong deletion_candidate_gaddr = fd_alloc_private_inactive_stack_pop( alloc->inactive_stack + sizeclass, wksp );

Expand Down
22 changes: 19 additions & 3 deletions src/util/alloc/test_alloc.c
Original file line number Diff line number Diff line change
Expand Up @@ -39,8 +39,8 @@ test_main( int argc,
ulong align_max = FD_VOLATILE_CONST( _align_max );
ulong sz_max = FD_VOLATILE_CONST( _sz_max );

ulong print_interval = (1UL<<fd_ulong_find_msb_w_default( alloc_cnt>>2, 1 ));
ulong print_mask = (print_interval<<1)-1UL;
ulong print_interval = (1UL<<fd_ulong_find_msb_w_default( alloc_cnt>>2, 1 ));
ulong FD_FN_UNUSED print_mask = (print_interval<<1)-1UL;

fd_rng_t _rng[1]; fd_rng_t * rng = fd_rng_join( fd_rng_new( _rng, (uint)tile_idx, 0UL ) );

Expand All @@ -62,6 +62,7 @@ test_main( int argc,

for( ulong i=0UL; i<2UL*alloc_cnt; i++ ) {

#if !FD_HAS_DEEPASAN
if( (i & print_mask)==print_interval ) {
char * info = NULL;
ulong info_sz = 0UL;
Expand All @@ -74,6 +75,7 @@ test_main( int argc,
FD_LOG_DEBUG(( "fd_alloc_fprintf said:\n%*s", (int)(info_sz&INT_MAX), info ));
free( info );
}
#endif

/* Determine if we should alloc or free this iteration. If j==0,
there are no outstanding allocs to free so we must alloc. If
Expand All @@ -94,12 +96,22 @@ test_main( int argc,
ulong align = fd_ulong_if( lg_align==lg_align_max+1, 0UL, 1UL<<lg_align );

sz[j] = fd_rng_ulong_roll( rng, sz_max+1UL );
#if FD_HAS_DEEPASAN
/* Enforce 8 byte alignment requirements */
align = fd_ulong_if( align < FD_ASAN_ALIGN, FD_ASAN_ALIGN, align );
sz[j] = fd_ulong_if( sz[j] < FD_ASAN_ALIGN, FD_ASAN_ALIGN, sz[j] );
#endif

/* Allocate it */

ulong max;
mem[j] = (uchar *)fd_alloc_malloc_at_least( alloc, align, sz[j], &max );

#if FD_HAS_DEEPASAN
if ( mem[j] && sz[j] )
FD_TEST( fd_asan_query( mem[j], sz[j] ) == NULL );
#endif

/* Check if the value is sane */

if( !sz[j] && mem[j] )
Expand Down Expand Up @@ -142,6 +154,11 @@ test_main( int argc,

fd_alloc_free( alloc, mem[k] );

#if FD_HAS_DEEPASAN
if ( mem[k] && sz[k] )
FD_TEST( fd_asan_query( mem[k], sz[k] ) != NULL );
#endif

/* Remove from outstanding allocations */

j--;
Expand Down Expand Up @@ -319,4 +336,3 @@ main( int argc,
}

#endif

11 changes: 9 additions & 2 deletions src/util/fd_util_base.h
Original file line number Diff line number Diff line change
Expand Up @@ -208,6 +208,13 @@
#define FD_HAS_UBSAN 0
#endif

/* FD_HAS_DEEPASAN indicates that the build target is using ASAN with manual
memory poisoning for fd_alloc, fd_wksp, and fd_scratch. */

#ifndef FD_HAS_DEEPASAN
#define FD_HAS_DEEPASAN 0
#endif

/* Base development environment ***************************************/

/* The functionality provided by these vanilla headers are always
Expand Down Expand Up @@ -935,7 +942,7 @@ FD_PROTOTYPES_BEGIN
#define FD_USE_ARCH_MEMCPY 0
#endif

#if FD_HAS_X86 && FD_USE_ARCH_MEMCPY && !defined(CBMC) && !FD_HAS_MSAN
#if FD_HAS_X86 && FD_USE_ARCH_MEMCPY && !defined(CBMC) && !FD_HAS_DEEPASAN && !FD_HAS_MSAN

static inline void *
fd_memcpy( void * FD_RESTRICT d,
Expand Down Expand Up @@ -969,7 +976,7 @@ fd_memcpy( void * FD_RESTRICT d,
#define FD_USE_ARCH_MEMSET 0
#endif

#if FD_HAS_X86 && FD_USE_ARCH_MEMSET && !defined(CBMC) && !FD_HAS_MSAN
#if FD_HAS_X86 && FD_USE_ARCH_MEMSET && !defined(CBMC) && !FD_HAS_DEEPASAN && !FD_HAS_MSAN

static inline void *
fd_memset( void * d,
Expand Down
2 changes: 1 addition & 1 deletion src/util/sandbox/fd_sandbox.c
Original file line number Diff line number Diff line change
Expand Up @@ -187,7 +187,7 @@ switch_user( uint uid, uint gid ) {
There's a problem with this: POSIX states that all threads in a
process must have the same uid and gid, so glibc does some wacky
stuff... from man 2 setresgid
C library/kernel differences
At the kernel level, user IDs and group IDs are a per-thread
attribute. However, POSIX requires that all threads in a
Expand Down
2 changes: 1 addition & 1 deletion src/util/sandbox/fd_sandbox.h
Original file line number Diff line number Diff line change
Expand Up @@ -60,7 +60,7 @@
already mapped code pages.
Typically the only things a process will need to do while
privileged are read files and map memory.
privileged are read files and map memory.
Calling fd_sandbox will do each of the following, in order,
Expand Down
Loading

0 comments on commit 0facf7e

Please sign in to comment.