From 09c747a5761cd430bd4eb31ac57a515d72f46155 Mon Sep 17 00:00:00 2001 From: Sergiy Lavrynenko Date: Tue, 7 Jul 2020 11:22:15 +0300 Subject: [PATCH 1/3] SKALE-2560 updated reference to latest consensus and BLS with improved deps build script --- libconsensus | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libconsensus b/libconsensus index 44849ca26..cb229ce9b 160000 --- a/libconsensus +++ b/libconsensus @@ -1 +1 @@ -Subproject commit 44849ca26687c2d92cadb4946a44f293cf4dcab3 +Subproject commit cb229ce9b3599381c33ad5ff83d27ed36753136d From 8b15df365dcb05ad543a5fa2857d17e9f1d5dbd0 Mon Sep 17 00:00:00 2001 From: Sergiy Lavrynenko Date: Tue, 7 Jul 2020 13:22:21 +0300 Subject: [PATCH 2/3] SKALE-2560 updated reference to latest consensus and BLS with improved deps build script --- libconsensus | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libconsensus b/libconsensus index cb229ce9b..8211f4c3d 160000 --- a/libconsensus +++ b/libconsensus @@ -1 +1 @@ -Subproject commit cb229ce9b3599381c33ad5ff83d27ed36753136d +Subproject commit 8211f4c3d63f1f84ce4ae32fab1b0370292ff88d From f86ad58c58a22da4624df2160f657b7982a0f283 Mon Sep 17 00:00:00 2001 From: Dima Litvinov Date: Wed, 8 Jul 2020 16:22:54 +0300 Subject: [PATCH 3/3] SKALE-2560 Fix broken block import (if totalDifficulty > 0...) --- libethereum/BlockChain.cpp | 185 +++++++++++++++++-------------------- 1 file changed, 86 insertions(+), 99 deletions(-) diff --git a/libethereum/BlockChain.cpp b/libethereum/BlockChain.cpp index 668a30263..69a0f1ce8 100644 --- a/libethereum/BlockChain.cpp +++ b/libethereum/BlockChain.cpp @@ -743,122 +743,109 @@ ImportRoute BlockChain::insertBlockAndExtras( VerifiedBlockRef const& _block, // This might be the new best block... 
h256 last = currentHash(); // we import every block even if it's difficulty is not enough - if ( _totalDifficulty > 0 /* in ethereum there is details( last ).totalDifficulty but not 0 - */ - || ( m_sealEngine->chainParams().tieBreakingGas && - _totalDifficulty == details( last ).totalDifficulty && - _block.info.gasUsed() > info( last ).gasUsed() ) ) { - MICROPROFILE_SCOPEI( "insertBlockAndExtras", "difficulty", MP_HOTPINK ); - - // don't include bi.hash() in treeRoute, since it's not yet in details DB... - // just tack it on afterwards. - unsigned commonIndex; - tie( route, common, commonIndex ) = treeRoute( last, _block.info.parentHash() ); - route.push_back( _block.info.hash() ); - - // Most of the time these two will be equal - only when we're doing a chain revert - // will they not be - if ( common != last ) - DEV_READ_GUARDED( x_lastBlockHash ) - clearCachesDuringChainReversion( number( common ) + 1 ); - - // TODO Understand and remove this trash with "routes" - - // Go through ret backwards (i.e. from new head to common) until hash != - // last.parent and update m_transactionAddresses, m_blockHashes - for ( auto i = route.rbegin(); i != route.rend() && *i != common; ++i ) { - MICROPROFILE_SCOPEI( "insertBlockAndExtras", "for", MP_PEACHPUFF1 ); - - BlockHeader tbi; - if ( *i == _block.info.hash() ) - tbi = _block.info; - else - tbi = BlockHeader( block( *i ) ); + MICROPROFILE_SCOPEI( "insertBlockAndExtras", "difficulty", MP_HOTPINK ); + + // don't include bi.hash() in treeRoute, since it's not yet in details DB... + // just tack it on afterwards. 
+ unsigned commonIndex; + tie( route, common, commonIndex ) = treeRoute( last, _block.info.parentHash() ); + route.push_back( _block.info.hash() ); + + // Most of the time these two will be equal - only when we're doing a chain revert + // will they not be + if ( common != last ) + DEV_READ_GUARDED( x_lastBlockHash ) + clearCachesDuringChainReversion( number( common ) + 1 ); + + // TODO Understand and remove this trash with "routes" + + // Go through ret backwards (i.e. from new head to common) until hash != + // last.parent and update m_transactionAddresses, m_blockHashes + for ( auto i = route.rbegin(); i != route.rend() && *i != common; ++i ) { + MICROPROFILE_SCOPEI( "insertBlockAndExtras", "for", MP_PEACHPUFF1 ); + + BlockHeader tbi; + if ( *i == _block.info.hash() ) + tbi = _block.info; + else + tbi = BlockHeader( block( *i ) ); - // Collate logs into blooms. - h256s alteredBlooms; - { - MICROPROFILE_SCOPEI( "insertBlockAndExtras", "collate_logs", MP_PALETURQUOISE ); - - LogBloom blockBloom = tbi.logBloom(); - blockBloom.shiftBloom< 3 >( sha3( tbi.author().ref() ) ); - - // Pre-memoize everything we need before locking x_blocksBlooms - for ( unsigned level = 0, index = ( unsigned ) tbi.number(); - level < c_bloomIndexLevels; level++, index /= c_bloomIndexSize ) - blocksBlooms( chunkId( level, index / c_bloomIndexSize ) ); - - WriteGuard l( x_blocksBlooms ); - for ( unsigned level = 0, index = ( unsigned ) tbi.number(); - level < c_bloomIndexLevels; level++, index /= c_bloomIndexSize ) { - unsigned i = index / c_bloomIndexSize; - unsigned o = index % c_bloomIndexSize; - alteredBlooms.push_back( chunkId( level, i ) ); - m_blocksBlooms[alteredBlooms.back()].blooms[o] |= blockBloom; - } - } + // Collate logs into blooms. 
+ h256s alteredBlooms; + { + MICROPROFILE_SCOPEI( "insertBlockAndExtras", "collate_logs", MP_PALETURQUOISE ); - for ( auto const& h : alteredBlooms ) - noteUsed( h, ExtraBlocksBlooms ); + LogBloom blockBloom = tbi.logBloom(); + blockBloom.shiftBloom< 3 >( sha3( tbi.author().ref() ) ); - // Collate transaction hashes and remember who they were. - // h256s newTransactionAddresses; - { - MICROPROFILE_SCOPEI( "insertBlockAndExtras", "collate_txns", MP_LAVENDERBLUSH ); + // Pre-memoize everything we need before locking x_blocksBlooms + for ( unsigned level = 0, index = ( unsigned ) tbi.number(); level < c_bloomIndexLevels; + level++, index /= c_bloomIndexSize ) + blocksBlooms( chunkId( level, index / c_bloomIndexSize ) ); - bytes blockBytes; - RLP blockRLP( - *i == _block.info.hash() ? _block.block : &( blockBytes = block( *i ) ) ); - TransactionAddress ta; - ta.blockHash = tbi.hash(); - ta.index = 0; + WriteGuard l( x_blocksBlooms ); + for ( unsigned level = 0, index = ( unsigned ) tbi.number(); level < c_bloomIndexLevels; + level++, index /= c_bloomIndexSize ) { + unsigned i = index / c_bloomIndexSize; + unsigned o = index % c_bloomIndexSize; + alteredBlooms.push_back( chunkId( level, i ) ); + m_blocksBlooms[alteredBlooms.back()].blooms[o] |= blockBloom; + } + } - RLP txns_rlp = blockRLP[1]; + for ( auto const& h : alteredBlooms ) + noteUsed( h, ExtraBlocksBlooms ); - for ( RLP::iterator it = txns_rlp.begin(); it != txns_rlp.end(); ++it ) { - MICROPROFILE_SCOPEI( "insertBlockAndExtras", "for2", MP_HONEYDEW ); + // Collate transaction hashes and remember who they were. + // h256s newTransactionAddresses; + { + MICROPROFILE_SCOPEI( "insertBlockAndExtras", "collate_txns", MP_LAVENDERBLUSH ); - extrasWriteBatch->insert( - toSlice( sha3( ( *it ).data() ), ExtraTransactionAddress ), - ( db::Slice ) dev::ref( ta.rlp() ) ); - ++ta.index; - } - } + bytes blockBytes; + RLP blockRLP( *i == _block.info.hash() ? 
_block.block : &( blockBytes = block( *i ) ) ); + TransactionAddress ta; + ta.blockHash = tbi.hash(); + ta.index = 0; - // Update database with them. - // ReadGuard l1( x_blocksBlooms ); - WriteGuard l1( x_blocksBlooms ); - { - MICROPROFILE_SCOPEI( "insertBlockAndExtras", "insert_to_extras", MP_LIGHTSKYBLUE ); + RLP txns_rlp = blockRLP[1]; + + for ( RLP::iterator it = txns_rlp.begin(); it != txns_rlp.end(); ++it ) { + MICROPROFILE_SCOPEI( "insertBlockAndExtras", "for2", MP_HONEYDEW ); - for ( auto const& h : alteredBlooms ) - extrasWriteBatch->insert( toSlice( h, ExtraBlocksBlooms ), - ( db::Slice ) dev::ref( m_blocksBlooms[h].rlp() ) ); - extrasWriteBatch->insert( toSlice( h256( tbi.number() ), ExtraBlockHash ), - ( db::Slice ) dev::ref( BlockHash( tbi.hash() ).rlp() ) ); + extrasWriteBatch->insert( + toSlice( sha3( ( *it ).data() ), ExtraTransactionAddress ), + ( db::Slice ) dev::ref( ta.rlp() ) ); + ++ta.index; } } - // FINALLY! change our best hash. + // Update database with them. + // ReadGuard l1( x_blocksBlooms ); + WriteGuard l1( x_blocksBlooms ); { - newLastBlockHash = _block.info.hash(); - newLastBlockNumber = ( unsigned ) _block.info.number(); - isImportedAndBest = true; + MICROPROFILE_SCOPEI( "insertBlockAndExtras", "insert_to_extras", MP_LIGHTSKYBLUE ); + + for ( auto const& h : alteredBlooms ) + extrasWriteBatch->insert( toSlice( h, ExtraBlocksBlooms ), + ( db::Slice ) dev::ref( m_blocksBlooms[h].rlp() ) ); + extrasWriteBatch->insert( toSlice( h256( tbi.number() ), ExtraBlockHash ), + ( db::Slice ) dev::ref( BlockHash( tbi.hash() ).rlp() ) ); } + } - LOG( m_logger ) << cc::debug( " Imported and best " ) << _totalDifficulty - << cc::debug( " (" ) << cc::warn( "#" ) << cc::num10( _block.info.number() ) - << cc::debug( "). Has " ) - << ( details( _block.info.parentHash() ).children.size() - 1 ) - << cc::debug( " siblings. 
Route: " ) << route; - } else { - LOG( m_loggerDetail ) << cc::debug( " Imported but not best (oTD: " ) - << details( last ).totalDifficulty << cc::debug( " > TD: " ) - << _totalDifficulty << cc::debug( "; " ) - << cc::num10( details( last ).number ) << cc::debug( ".." ) - << _block.info.number() << cc::debug( ")" ); + // FINALLY! change our best hash. + { + newLastBlockHash = _block.info.hash(); + newLastBlockNumber = ( unsigned ) _block.info.number(); + isImportedAndBest = true; } + LOG( m_logger ) << cc::debug( " Imported and best " ) << _totalDifficulty << cc::debug( " (" ) + << cc::warn( "#" ) << cc::num10( _block.info.number() ) + << cc::debug( "). Has " ) + << ( details( _block.info.parentHash() ).children.size() - 1 ) + << cc::debug( " siblings. Route: " ) << route; + try { MICROPROFILE_SCOPEI( "m_blocksDB", "commit", MP_PLUM ); m_blocksDB->commit( std::move( blocksWriteBatch ) );