Merge branch 'master' into das_http_limits
Tristan-Wilson authored May 25, 2024
2 parents 6ebcf74 + 7cda2b3 commit cf8c4a4
Showing 40 changed files with 723 additions and 225 deletions.
40 changes: 21 additions & 19 deletions Dockerfile
@@ -41,7 +41,8 @@ RUN apt-get update && apt-get install -y curl build-essential=12.9

FROM wasm-base as wasm-libs-builder
# clang / lld used by soft-float wasm
RUN apt-get install -y clang=1:14.0-55.7~deb12u1 lld=1:14.0-55.7~deb12u1 wabt
RUN apt-get update && \
apt-get install -y clang=1:14.0-55.7~deb12u1 lld=1:14.0-55.7~deb12u1 wabt
# pinned rust 1.75.0
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain 1.75.0 --target x86_64-unknown-linux-gnu wasm32-unknown-unknown wasm32-wasi
COPY ./Makefile ./
@@ -203,6 +204,7 @@ COPY ./scripts/download-machine.sh .
#RUN ./download-machine.sh consensus-v11 0xf4389b835497a910d7ba3ebfb77aa93da985634f3c052de1290360635be40c4a
#RUN ./download-machine.sh consensus-v11.1 0x68e4fe5023f792d4ef584796c84d710303a5e12ea02d6e37e2b5e9c4332507c4
#RUN ./download-machine.sh consensus-v20 0x8b104a2e80ac6165dc58b9048de12f301d70b02a0ab51396c22b4b4b802a16a4
RUN ./download-machine.sh consensus-v30-rc.2 0xb0de9cb89e4d944ae6023a3b62276e54804c242fd8c4c2d8e6cc4450f5fa8b1b

FROM golang:1.21-bookworm as node-builder
WORKDIR /workspace
@@ -268,11 +270,15 @@ USER user
WORKDIR /home/user/
ENTRYPOINT [ "/usr/local/bin/nitro" ]

FROM offchainlabs/nitro-node:v2.3.4-rc.5-b4cc111 as nitro-legacy

FROM nitro-node-slim as nitro-node
USER root
COPY --from=prover-export /bin/jit /usr/local/bin/
COPY --from=node-builder /workspace/target/bin/daserver /usr/local/bin/
COPY --from=node-builder /workspace/target/bin/datool /usr/local/bin/
COPY --from=nitro-legacy /home/user/target/machines /home/user/nitro-legacy/machines
RUN rm -rf /workspace/target/legacy-machines/latest
RUN export DEBIAN_FRONTEND=noninteractive && \
apt-get update && \
apt-get install -y \
@@ -282,10 +288,23 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* /usr/share/doc/* /var/cache/ldconfig/aux-cache /usr/lib/python3.9/__pycache__/ /usr/lib/python3.9/*/__pycache__/ /var/log/* && \
nitro --version
ENTRYPOINT [ "/usr/local/bin/nitro" , "--validation.wasm.allowed-wasm-module-roots", "/home/user/nitro-legacy/machines,/home/user/target/machines"]

USER user

FROM nitro-node as nitro-node-validator
USER root
COPY --from=nitro-legacy /usr/local/bin/nitro-val /home/user/nitro-legacy/bin/nitro-val
COPY --from=nitro-legacy /usr/local/bin/jit /home/user/nitro-legacy/bin/jit
RUN export DEBIAN_FRONTEND=noninteractive && \
apt-get update && \
apt-get install -y xxd netcat-traditional && \
rm -rf /var/lib/apt/lists/* /usr/share/doc/* /var/cache/ldconfig/aux-cache /usr/lib/python3.9/__pycache__/ /usr/lib/python3.9/*/__pycache__/ /var/log/*
COPY scripts/split-val-entry.sh /usr/local/bin
ENTRYPOINT [ "/usr/local/bin/split-val-entry.sh" ]
USER user

FROM nitro-node as nitro-node-dev-base
FROM nitro-node-validator as nitro-node-dev
USER root
# Copy in latest WASM module root
RUN rm -f /home/user/target/machines/latest
@@ -309,22 +328,5 @@ RUN export DEBIAN_FRONTEND=noninteractive && \

USER user

FROM offchainlabs/nitro-node:v2.3.4-rc.5-b4cc111 as nitro-legacy

FROM nitro-node-dev-base as nitro-node-dev
USER root

RUN export DEBIAN_FRONTEND=noninteractive && \
apt-get update && \
apt-get install -y xxd netcat-traditional && \
rm -rf /var/lib/apt/lists/* /usr/share/doc/* /var/cache/ldconfig/aux-cache /usr/lib/python3.9/__pycache__/ /usr/lib/python3.9/*/__pycache__/ /var/log/*
COPY scripts/split-val-entry.sh /usr/local/bin
COPY --from=nitro-legacy /home/user/target/machines /home/user/nitro-legacy/machines
RUN rm -rf /workspace/target/legacy-machines/latest
COPY --from=nitro-legacy /usr/local/bin/nitro-val /home/user/nitro-legacy/bin/nitro-val
COPY --from=nitro-legacy /usr/local/bin/jit /home/user/nitro-legacy/bin/jit
ENTRYPOINT [ "/usr/local/bin/split-val-entry.sh" ]
USER user

FROM nitro-node as nitro-node-default
# Just to ensure nitro-node-dist is default
69 changes: 43 additions & 26 deletions arbitrator/stylus/src/cache.rs
@@ -21,7 +21,7 @@ macro_rules! cache {
}

pub struct InitCache {
arbos: HashMap<CacheKey, CacheItem>,
long_term: HashMap<CacheKey, CacheItem>,
lru: LruCache<CacheKey, CacheItem>,
}

@@ -59,20 +59,31 @@ impl CacheItem {
}

impl InitCache {
// current implementation only has one tag that stores to the long_term
// future implementations might have more, but 0 is a reserved tag
// that will never modify long_term state
const ARBOS_TAG: u32 = 1;

fn new(size: usize) -> Self {
Self {
arbos: HashMap::new(),
long_term: HashMap::new(),
lru: LruCache::new(NonZeroUsize::new(size).unwrap()),
}
}

pub fn set_lru_size(size: u32) {
cache!()
.lru
.resize(NonZeroUsize::new(size.try_into().unwrap()).unwrap())
}

/// Retrieves a cached value, updating items as necessary.
pub fn get(module_hash: Bytes32, version: u16, debug: bool) -> Option<(Module, Store)> {
let mut cache = cache!();
let key = CacheKey::new(module_hash, version, debug);

// See if the item is in the long term cache
if let Some(item) = cache.arbos.get(&key) {
if let Some(item) = cache.long_term.get(&key) {
return Some(item.data());
}

@@ -84,18 +95,27 @@ impl InitCache {
}

/// Inserts an item into the long term cache, cloning from the LRU cache if able.
/// If long_term_tag is 0 will only insert to LRU
pub fn insert(
module_hash: Bytes32,
module: &[u8],
version: u16,
long_term_tag: u32,
debug: bool,
) -> Result<(Module, Store)> {
let key = CacheKey::new(module_hash, version, debug);

// if in LRU, add to ArbOS
let mut cache = cache!();
if let Some(item) = cache.long_term.get(&key) {
return Ok(item.data());
}
if let Some(item) = cache.lru.peek(&key).cloned() {
cache.arbos.insert(key, item.clone());
if long_term_tag == Self::ARBOS_TAG {
cache.long_term.insert(key, item.clone());
} else {
cache.lru.promote(&key)
}
return Ok(item.data());
}
drop(cache);
@@ -105,37 +125,34 @@

let item = CacheItem::new(module, engine);
let data = item.data();
cache!().arbos.insert(key, item);
let mut cache = cache!();
if long_term_tag != Self::ARBOS_TAG {
cache.lru.put(key, item);
} else {
cache.long_term.insert(key, item);
}
Ok(data)
}

/// Inserts an item into the short-lived LRU cache.
pub fn insert_lru(
module_hash: Bytes32,
module: &[u8],
version: u16,
debug: bool,
) -> Result<(Module, Store)> {
let engine = CompileConfig::version(version, debug).engine();
let module = unsafe { Module::deserialize_unchecked(&engine, module)? };

let key = CacheKey::new(module_hash, version, debug);
let item = CacheItem::new(module, engine);
cache!().lru.put(key, item.clone());
Ok(item.data())
}

/// Evicts an item in the long-term cache.
pub fn evict(module_hash: Bytes32, version: u16, debug: bool) {
pub fn evict(module_hash: Bytes32, version: u16, long_term_tag: u32, debug: bool) {
if long_term_tag != Self::ARBOS_TAG {
return;
}
let key = CacheKey::new(module_hash, version, debug);
cache!().arbos.remove(&key);
let mut cache = cache!();
if let Some(item) = cache.long_term.remove(&key) {
cache.lru.put(key, item);
}
}

/// Modifies the cache for reorg, dropping the long-term cache.
pub fn reorg(_block: u64) {
pub fn clear_long_term(long_term_tag: u32) {
if long_term_tag != Self::ARBOS_TAG {
return;
}
let mut cache = cache!();
let cache = &mut *cache;
for (key, item) in cache.arbos.drain() {
for (key, item) in cache.long_term.drain() {
cache.lru.put(key, item); // not all will fit, just a heuristic
}
}
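
The renamed long_term map and the new tag parameter change how entries move between the two tiers. As a rough, self-contained sketch of that routing (simplified key/value types and made-up names, not the real arbitrator code; it uses the same lru crate already used in this file):

```rust
use std::collections::HashMap;
use std::num::NonZeroUsize;

use lru::LruCache;

const LONG_TERM_TAG: u32 = 1;

struct TwoTierCache {
    long_term: HashMap<u64, String>,
    lru: LruCache<u64, String>,
}

impl TwoTierCache {
    fn new(lru_size: usize) -> Self {
        Self {
            long_term: HashMap::new(),
            lru: LruCache::new(NonZeroUsize::new(lru_size).unwrap()),
        }
    }

    /// Tag 1 writes through to the long-term map; any other tag only touches the LRU.
    fn insert(&mut self, key: u64, value: String, tag: u32) {
        if tag == LONG_TERM_TAG {
            self.long_term.insert(key, value);
        } else {
            self.lru.put(key, value);
        }
    }

    /// Eviction demotes a long-term entry into the LRU instead of dropping it.
    fn evict(&mut self, key: u64, tag: u32) {
        if tag != LONG_TERM_TAG {
            return;
        }
        if let Some(value) = self.long_term.remove(&key) {
            self.lru.put(key, value);
        }
    }

    /// Clearing the long-term tier (e.g. on reorg) spills its entries into the LRU;
    /// not everything will fit, which mirrors the "just a heuristic" comment above.
    fn clear_long_term(&mut self, tag: u32) {
        if tag != LONG_TERM_TAG {
            return;
        }
        for (key, value) in self.long_term.drain() {
            self.lru.put(key, value);
        }
    }
}

fn main() {
    let mut cache = TwoTierCache::new(2);
    cache.insert(1, "kept long term".into(), 1); // tag 1: long-term tier
    cache.insert(2, "lru only".into(), 0);       // tag 0: LRU only
    cache.evict(1, 1);                           // demoted into the LRU
    cache.clear_long_term(1);                    // nothing left to spill
}
```

In the actual code the values are compiled Stylus modules keyed by module hash, version, and debug flag, and, per the comment above, tag 0 is reserved and never modifies the long-term state.
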
34 changes: 28 additions & 6 deletions arbitrator/stylus/src/lib.rs
@@ -183,6 +183,7 @@ pub unsafe extern "C" fn stylus_call(
debug_chain: bool,
output: *mut RustBytes,
gas: *mut u64,
long_term_tag: u32,
) -> UserOutcomeKind {
let module = module.slice();
let calldata = calldata.slice().to_vec();
@@ -193,7 +194,14 @@

// Safety: module came from compile_user_wasm and we've paid for memory expansion
let instance = unsafe {
NativeInstance::deserialize_cached(module, config.version, evm_api, evm_data, debug_chain)
NativeInstance::deserialize_cached(
module,
config.version,
evm_api,
evm_data,
long_term_tag,
debug_chain,
)
};
let mut instance = match instance {
Ok(instance) => instance,
@@ -212,33 +220,47 @@
status
}

/// resize lru
#[no_mangle]
pub extern "C" fn stylus_cache_lru_resize(size: u32) {
InitCache::set_lru_size(size);
}

/// Caches an activated user program.
///
/// # Safety
///
/// `module` must represent a valid module produced from `stylus_activate`.
/// arbos_tag: a tag for arbos cache. 0 won't affect real caching
/// currently only if tag==1 caching will be affected
#[no_mangle]
pub unsafe extern "C" fn stylus_cache_module(
module: GoSliceData,
module_hash: Bytes32,
version: u16,
arbos_tag: u32,
debug: bool,
) {
if let Err(error) = InitCache::insert(module_hash, module.slice(), version, debug) {
if let Err(error) = InitCache::insert(module_hash, module.slice(), version, arbos_tag, debug) {
panic!("tried to cache invalid asm!: {error}");
}
}

/// Evicts an activated user program from the init cache.
#[no_mangle]
pub extern "C" fn stylus_evict_module(module_hash: Bytes32, version: u16, debug: bool) {
InitCache::evict(module_hash, version, debug);
pub extern "C" fn stylus_evict_module(
module_hash: Bytes32,
version: u16,
arbos_tag: u32,
debug: bool,
) {
InitCache::evict(module_hash, version, arbos_tag, debug);
}

/// Reorgs the init cache. This will likely never happen.
#[no_mangle]
pub extern "C" fn stylus_reorg_vm(block: u64) {
InitCache::reorg(block);
pub extern "C" fn stylus_reorg_vm(_block: u64, arbos_tag: u32) {
InitCache::clear_long_term(arbos_tag);
}

/// Frees the vector. Does nothing when the vector is null.
10 changes: 6 additions & 4 deletions arbitrator/stylus/src/native.rs
@@ -113,6 +113,7 @@ impl<D: DataReader, E: EvmApi<D>> NativeInstance<D, E> {
version: u16,
evm: E,
evm_data: EvmData,
mut long_term_tag: u32,
debug: bool,
) -> Result<Self> {
let compile = CompileConfig::version(version, debug);
@@ -122,10 +123,11 @@ impl<D: DataReader, E: EvmApi<D>> NativeInstance<D, E> {
if let Some((module, store)) = InitCache::get(module_hash, version, debug) {
return Self::from_module(module, store, env);
}
let (module, store) = match env.evm_data.cached {
true => InitCache::insert(module_hash, module, version, debug)?,
false => InitCache::insert_lru(module_hash, module, version, debug)?,
};
if !env.evm_data.cached {
long_term_tag = 0;
}
let (module, store) =
InitCache::insert(module_hash, module, version, long_term_tag, debug)?;
Self::from_module(module, store, env)
}

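
The notable change here is that a caller-supplied long-term tag is only honored when the EVM data marks the program as cached; otherwise it is forced to 0 so the module only lands in the LRU. A minimal sketch of that rule (the helper name is made up for illustration and does not exist in the source):

```rust
// Hypothetical helper mirroring the demotion above, where `long_term_tag`
// is zeroed whenever `env.evm_data.cached` is false.
fn effective_tag(requested_tag: u32, program_is_cached: bool) -> u32 {
    if program_is_cached {
        requested_tag
    } else {
        0 // tag 0 never reaches the long-term tier
    }
}

fn main() {
    assert_eq!(effective_tag(1, true), 1);  // cached program: eligible for long-term storage
    assert_eq!(effective_tag(1, false), 0); // uncached program: LRU only
}
```
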
14 changes: 7 additions & 7 deletions arbnode/dataposter/data_poster.go
@@ -851,31 +851,31 @@ func (p *DataPoster) sendTx(ctx context.Context, prevTx *storage.QueuedTransacti
// different type with a lower nonce.
// If we decide not to send this tx yet, just leave it queued and with Sent set to false.
// The resending/repricing loop in DataPoster.Start will keep trying.
if !newTx.Sent && newTx.FullTx.Nonce() > 0 {
previouslySent := newTx.Sent || (prevTx != nil && prevTx.Sent) // if we've previously sent this nonce
if !previouslySent && newTx.FullTx.Nonce() > 0 {
precedingTx, err := p.queue.Get(ctx, arbmath.SaturatingUSub(newTx.FullTx.Nonce(), 1))
if err != nil {
return fmt.Errorf("couldn't get preceding tx in DataPoster to check if should send tx with nonce %d: %w", newTx.FullTx.Nonce(), err)
}
if precedingTx != nil { // precedingTx == nil -> the actual preceding tx was already confirmed
var latestBlockNumber, prevBlockNumber, reorgResistantNonce uint64
var latestBlockNumber, prevBlockNumber, reorgResistantTxCount uint64
if precedingTx.FullTx.Type() != newTx.FullTx.Type() || !precedingTx.Sent {
latestBlockNumber, err = p.client.BlockNumber(ctx)
if err != nil {
return fmt.Errorf("couldn't get block number in DataPoster to check if should send tx with nonce %d: %w", newTx.FullTx.Nonce(), err)
}
prevBlockNumber = arbmath.SaturatingUSub(latestBlockNumber, 1)
reorgResistantNonce, err = p.client.NonceAt(ctx, p.Sender(), new(big.Int).SetUint64(prevBlockNumber))
reorgResistantTxCount, err = p.client.NonceAt(ctx, p.Sender(), new(big.Int).SetUint64(prevBlockNumber))
if err != nil {
return fmt.Errorf("couldn't determine reorg resistant nonce in DataPoster to check if should send tx with nonce %d: %w", newTx.FullTx.Nonce(), err)
}

if precedingTx.FullTx.Nonce() > reorgResistantNonce {
log.Info("DataPoster is avoiding creating a mempool nonce gap (the tx remains queued and will be retried)", "nonce", newTx.FullTx.Nonce(), "prevType", precedingTx.FullTx.Type(), "type", newTx.FullTx.Type(), "prevSent", precedingTx.Sent)
if newTx.FullTx.Nonce() > reorgResistantTxCount {
log.Info("DataPoster is avoiding creating a mempool nonce gap (the tx remains queued and will be retried)", "nonce", newTx.FullTx.Nonce(), "prevType", precedingTx.FullTx.Type(), "type", newTx.FullTx.Type(), "prevSent", precedingTx.Sent, "latestBlockNumber", latestBlockNumber, "prevBlockNumber", prevBlockNumber, "reorgResistantTxCount", reorgResistantTxCount)
return nil
}
} else {
log.Info("DataPoster will send previously unsent batch tx", "nonce", newTx.FullTx.Nonce(), "prevType", precedingTx.FullTx.Type(), "type", newTx.FullTx.Type(), "prevSent", precedingTx.Sent, "latestBlockNumber", latestBlockNumber, "prevBlockNumber", prevBlockNumber, "reorgResistantNonce", reorgResistantNonce)
}
log.Debug("DataPoster will send previously unsent batch tx", "nonce", newTx.FullTx.Nonce(), "prevType", precedingTx.FullTx.Type(), "type", newTx.FullTx.Type(), "prevSent", precedingTx.Sent, "latestBlockNumber", latestBlockNumber, "prevBlockNumber", prevBlockNumber, "reorgResistantTxCount", reorgResistantTxCount)
}
}

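
The reworked check compares the queued transaction's own nonce against the sender's transaction count at the previous block (the reorg-resistant count) rather than against the preceding transaction's nonce. Since an account's transaction count equals its next expected nonce, the rule the hunk enforces can be illustrated as below; this is illustrative only (the real check lives in the Go code above) and is kept in Rust for consistency with the other sketches on this page:

```rust
// A queued tx whose nonce exceeds the reorg-resistant transaction count cannot
// be sent yet without leaving a nonce gap in the mempool, so the data poster
// keeps it queued and retries later.
fn would_create_nonce_gap(new_tx_nonce: u64, reorg_resistant_tx_count: u64) -> bool {
    new_tx_nonce > reorg_resistant_tx_count
}

fn main() {
    // Example: nonces 0..=4 are included as of the previous block, so the
    // count is 5 and nonce 5 is the next one the network will accept.
    assert!(!would_create_nonce_gap(5, 5)); // in order: safe to send
    assert!(would_create_nonce_gap(7, 5));  // nonces 5 and 6 missing: hold it
}
```
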
3 changes: 2 additions & 1 deletion arbnode/dataposter/storage_test.go
@@ -19,6 +19,7 @@ import (
"github.com/offchainlabs/nitro/arbnode/dataposter/redis"
"github.com/offchainlabs/nitro/arbnode/dataposter/slice"
"github.com/offchainlabs/nitro/arbnode/dataposter/storage"
"github.com/offchainlabs/nitro/cmd/conf"
"github.com/offchainlabs/nitro/util/arbmath"
"github.com/offchainlabs/nitro/util/redisutil"
"github.com/offchainlabs/nitro/util/signature"
@@ -44,7 +45,7 @@ func newLevelDBStorage(t *testing.T, encF storage.EncoderDecoderF) *dbstorage.St

func newPebbleDBStorage(t *testing.T, encF storage.EncoderDecoderF) *dbstorage.Storage {
t.Helper()
db, err := rawdb.NewPebbleDBDatabase(path.Join(t.TempDir(), "pebble.db"), 0, 0, "default", false, true)
db, err := rawdb.NewPebbleDBDatabase(path.Join(t.TempDir(), "pebble.db"), 0, 0, "default", false, true, conf.PersistentConfigDefault.Pebble.ExtraOptions("pebble"))
if err != nil {
t.Fatalf("NewPebbleDBDatabase() unexpected error: %v", err)
}
1 change: 1 addition & 0 deletions arbnode/inbox_test.go
@@ -65,6 +65,7 @@ func NewTransactionStreamerForTest(t *testing.T, ownerAddress common.Address) (*
if err != nil {
Fail(t, err)
}
execEngine.Initialize(gethexec.DefaultCachingConfig.StylusLRUCache)
execSeq := &execClientWrapper{execEngine, t}
inbox, err := NewTransactionStreamer(arbDb, bc.Config(), execSeq, nil, make(chan error, 1), transactionStreamerConfigFetcher)
if err != nil {