From 9b703dbec74cfc4b245c8c073aecb0021052fad3 Mon Sep 17 00:00:00 2001 From: Fire30 Date: Thu, 16 Nov 2023 15:51:48 -0500 Subject: [PATCH] shmem: randomize address of shared memory mappings in tile processes Due to the behavior of `mmap` in the Linux kernel the shared memory mappings in each tile process will most likely be adjacent to each other. Relevant output from `/proc/pid/maps` is below: ``` 7f64c0000000-7f6500000000 rw-s ... 6736764 /mnt/.fd/.gigantic/fd1_verify_dedup.wksp 7f6500000000-7f6540000000 rw-s ... 6736763 /mnt/.fd/.gigantic/fd1_quic_verify.wksp 7f6577200000-7f6577400000 rw-s ... 6736773 /mnt/.fd/.huge/fd1_verify.wksp ``` This makes it such that out-of-bounds access vulnerabilities can possibly corrupt other shared memory regions, causing further unintended behavior. To mitigate this possible situation we introduce a function `fd_shmem_private_get_random_mappable_addr` which will return a random mappable address for a passed size that can be used with `MAP_FIXED`. Shared memory regions will now no longer be adjacent to each other. 
--- src/util/shmem/fd_shmem_private.h | 4 ++ src/util/shmem/fd_shmem_user.c | 67 ++++++++++++++++++++++++++++++- 2 files changed, 70 insertions(+), 1 deletion(-) diff --git a/src/util/shmem/fd_shmem_private.h b/src/util/shmem/fd_shmem_private.h index 0a80894e1e..9bccf898a3 100644 --- a/src/util/shmem/fd_shmem_private.h +++ b/src/util/shmem/fd_shmem_private.h @@ -14,6 +14,10 @@ #define FD_SHMEM_PRIVATE_PATH_BUF_MAX (256UL) #define FD_SHMEM_PRIVATE_BASE_MAX (FD_SHMEM_PRIVATE_PATH_BUF_MAX-FD_SHMEM_NAME_MAX-FD_SHMEM_PAGE_SZ_CSTR_MAX-1UL) +#define FD_SHMEM_PRIVATE_MMAP_NORMAL_MASK 0x7ffffffff000 +#define FD_SHMEM_PRIVATE_MMAP_HUGE_MASK 0x7fffffc00000 +#define FD_SHMEM_PRIVATE_MMAP_GIGANTIC_MASK 0x7fffc0000000 + #if FD_HAS_THREADS #define FD_SHMEM_LOCK pthread_mutex_lock( fd_shmem_private_lock ) #define FD_SHMEM_UNLOCK pthread_mutex_unlock( fd_shmem_private_lock ) diff --git a/src/util/shmem/fd_shmem_user.c b/src/util/shmem/fd_shmem_user.c index 6b4fdf97a0..dbc0dd0481 100644 --- a/src/util/shmem/fd_shmem_user.c +++ b/src/util/shmem/fd_shmem_user.c @@ -10,6 +10,7 @@ #include #include #include +#include /* fd_shmem_private_key converts the cstr pointed to by name into a valid key and stores it at the location pointed to by key assumed @@ -74,6 +75,65 @@ fd_shmem_private_map_query_by_addr( fd_shmem_join_info_t * map, return def; } +/* + * fd_shmem_private_grab_region will attempt to map a region at the passed + * address with the passed size. If the return value of `mmap` equals the + * passed address this means the area of memory was unmapped previously and + * we have successfully "grabbed" the region. We can then call `mmap` with + * MAP_FIXED over the region and be certain no corruption occurs. If the + * return value of `mmap` does not equal the passed address this means that + * the passed region is already at least partially mapped and we cannot grab it. 
+ */ +static int +fd_shmem_private_grab_region( ulong addr, + ulong size ) { + void *mmap_ret; + int err; + + mmap_ret = mmap( (void*)addr, size, PROT_READ, MAP_ANON|MAP_PRIVATE, -1, 0 ); + + if( mmap_ret == MAP_FAILED ) + return 0; + + /* Only call munmap on failure case. On success we want to keep the mapping */ + if( (ulong)mmap_ret != addr ) { + err = munmap( mmap_ret, size ); + if ( err == -1 ) { + FD_LOG_ERR(( "failed to unmap temporary mapping, munmap() failed (%i-%s)", errno, fd_io_strerror( errno ) )); + } + return 0; + } + + return 1; +} + +static ulong +fd_shmem_private_get_random_mappable_addr( ulong size, + ulong page_size ) { + ulong ret_addr = 0; + + /* Failure is unlikely, 1000 iterations should guarantee success */ + for( ulong i = 0; i < 1000; i++ ) { + long n = getrandom( &ret_addr, sizeof(ret_addr), 0 ); + if( FD_UNLIKELY( n!=sizeof(ret_addr) ) ) FD_LOG_ERR(( "could not generate random address, getrandom() failed (%i-%s)", errno, fd_io_strerror( errno ) )); + + /* The type of region determines the alignment we need for the region */ + if( page_size == FD_SHMEM_GIGANTIC_PAGE_SZ ) + ret_addr &= FD_SHMEM_PRIVATE_MMAP_GIGANTIC_MASK; + else if( page_size == FD_SHMEM_HUGE_PAGE_SZ ) + ret_addr &= FD_SHMEM_PRIVATE_MMAP_HUGE_MASK; + else + ret_addr &= FD_SHMEM_PRIVATE_MMAP_NORMAL_MASK; + + if( fd_shmem_private_grab_region( ret_addr, size ) ) { + return ret_addr; + } + } + + FD_LOG_ERR(( "unable to find random address for memory map after 1000 attempts" )); + return (ulong)MAP_FAILED; +} + static fd_shmem_join_info_t fd_shmem_private_map[ FD_SHMEM_PRIVATE_MAP_SLOT_CNT ]; /* Empty on thread group start */ static ulong fd_shmem_private_map_cnt; /* 0 on thread group start */ @@ -145,8 +205,13 @@ fd_shmem_join( char const * name, FD_LOG_WARNING(( "open(\"%s\",%s,0) failed (%i-%s)", path, rw ? 
"O_RDWR" : "O_RDONLY", errno, fd_io_strerror( errno ) )); return NULL; } + + /* Generate a random address that we are guaranteed to be able to map */ + ulong rand_addr = fd_shmem_private_get_random_mappable_addr( sz, page_sz ); + /* Note that MAP_HUGETLB and MAP_HUGE_* are implied by the mount point */ - void * shmem = mmap( NULL, sz, rw ? (PROT_READ|PROT_WRITE) : PROT_READ, MAP_SHARED, fd, (off_t)0 ); + void * shmem = mmap( (void*)rand_addr, sz, rw ? (PROT_READ|PROT_WRITE) : PROT_READ, MAP_SHARED | MAP_FIXED, fd, (off_t)0 ); + int mmap_errno = errno; if( FD_UNLIKELY( close( fd ) ) ) FD_LOG_WARNING(( "close(\"%s\") failed (%i-%s); attempting to continue", path, errno, fd_io_strerror( errno ) ));