Skip to content

Commit

Permalink
Merge pull request #301 from skalenetwork/bug/SKALE-2560-bad-env-management-deps-build
Browse files Browse the repository at this point in the history

SKALE-2560 updated reference to latest consensus and BLS with improved deps build script
  • Loading branch information
sergiy-skalelabs authored Jul 9, 2020
2 parents 852fc0b + b2f3c4e commit a0fd925
Showing 1 changed file with 86 additions and 99 deletions.
185 changes: 86 additions & 99 deletions libethereum/BlockChain.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -743,122 +743,109 @@ ImportRoute BlockChain::insertBlockAndExtras( VerifiedBlockRef const& _block,
// This might be the new best block...
h256 last = currentHash();
// we import every block even if it's difficulty is not enough
if ( _totalDifficulty > 0 /* in ethereum there is details( last ).totalDifficulty but not 0
*/
|| ( m_sealEngine->chainParams().tieBreakingGas &&
_totalDifficulty == details( last ).totalDifficulty &&
_block.info.gasUsed() > info( last ).gasUsed() ) ) {
MICROPROFILE_SCOPEI( "insertBlockAndExtras", "difficulty", MP_HOTPINK );

// don't include bi.hash() in treeRoute, since it's not yet in details DB...
// just tack it on afterwards.
unsigned commonIndex;
tie( route, common, commonIndex ) = treeRoute( last, _block.info.parentHash() );
route.push_back( _block.info.hash() );

// Most of the time these two will be equal - only when we're doing a chain revert
// will they not be
if ( common != last )
DEV_READ_GUARDED( x_lastBlockHash )
clearCachesDuringChainReversion( number( common ) + 1 );

// TODO Understand and remove this trash with "routes"

// Go through ret backwards (i.e. from new head to common) until hash !=
// last.parent and update m_transactionAddresses, m_blockHashes
for ( auto i = route.rbegin(); i != route.rend() && *i != common; ++i ) {
MICROPROFILE_SCOPEI( "insertBlockAndExtras", "for", MP_PEACHPUFF1 );

BlockHeader tbi;
if ( *i == _block.info.hash() )
tbi = _block.info;
else
tbi = BlockHeader( block( *i ) );
MICROPROFILE_SCOPEI( "insertBlockAndExtras", "difficulty", MP_HOTPINK );

// don't include bi.hash() in treeRoute, since it's not yet in details DB...
// just tack it on afterwards.
unsigned commonIndex;
tie( route, common, commonIndex ) = treeRoute( last, _block.info.parentHash() );
route.push_back( _block.info.hash() );

// Most of the time these two will be equal - only when we're doing a chain revert
// will they not be
if ( common != last )
DEV_READ_GUARDED( x_lastBlockHash )
clearCachesDuringChainReversion( number( common ) + 1 );

// TODO Understand and remove this trash with "routes"

// Go through ret backwards (i.e. from new head to common) until hash !=
// last.parent and update m_transactionAddresses, m_blockHashes
for ( auto i = route.rbegin(); i != route.rend() && *i != common; ++i ) {
MICROPROFILE_SCOPEI( "insertBlockAndExtras", "for", MP_PEACHPUFF1 );

BlockHeader tbi;
if ( *i == _block.info.hash() )
tbi = _block.info;
else
tbi = BlockHeader( block( *i ) );

// Collate logs into blooms.
h256s alteredBlooms;
{
MICROPROFILE_SCOPEI( "insertBlockAndExtras", "collate_logs", MP_PALETURQUOISE );

LogBloom blockBloom = tbi.logBloom();
blockBloom.shiftBloom< 3 >( sha3( tbi.author().ref() ) );

// Pre-memoize everything we need before locking x_blocksBlooms
for ( unsigned level = 0, index = ( unsigned ) tbi.number();
level < c_bloomIndexLevels; level++, index /= c_bloomIndexSize )
blocksBlooms( chunkId( level, index / c_bloomIndexSize ) );

WriteGuard l( x_blocksBlooms );
for ( unsigned level = 0, index = ( unsigned ) tbi.number();
level < c_bloomIndexLevels; level++, index /= c_bloomIndexSize ) {
unsigned i = index / c_bloomIndexSize;
unsigned o = index % c_bloomIndexSize;
alteredBlooms.push_back( chunkId( level, i ) );
m_blocksBlooms[alteredBlooms.back()].blooms[o] |= blockBloom;
}
}
// Collate logs into blooms.
h256s alteredBlooms;
{
MICROPROFILE_SCOPEI( "insertBlockAndExtras", "collate_logs", MP_PALETURQUOISE );

for ( auto const& h : alteredBlooms )
noteUsed( h, ExtraBlocksBlooms );
LogBloom blockBloom = tbi.logBloom();
blockBloom.shiftBloom< 3 >( sha3( tbi.author().ref() ) );

// Collate transaction hashes and remember who they were.
// h256s newTransactionAddresses;
{
MICROPROFILE_SCOPEI( "insertBlockAndExtras", "collate_txns", MP_LAVENDERBLUSH );
// Pre-memoize everything we need before locking x_blocksBlooms
for ( unsigned level = 0, index = ( unsigned ) tbi.number(); level < c_bloomIndexLevels;
level++, index /= c_bloomIndexSize )
blocksBlooms( chunkId( level, index / c_bloomIndexSize ) );

bytes blockBytes;
RLP blockRLP(
*i == _block.info.hash() ? _block.block : &( blockBytes = block( *i ) ) );
TransactionAddress ta;
ta.blockHash = tbi.hash();
ta.index = 0;
WriteGuard l( x_blocksBlooms );
for ( unsigned level = 0, index = ( unsigned ) tbi.number(); level < c_bloomIndexLevels;
level++, index /= c_bloomIndexSize ) {
unsigned i = index / c_bloomIndexSize;
unsigned o = index % c_bloomIndexSize;
alteredBlooms.push_back( chunkId( level, i ) );
m_blocksBlooms[alteredBlooms.back()].blooms[o] |= blockBloom;
}
}

RLP txns_rlp = blockRLP[1];
for ( auto const& h : alteredBlooms )
noteUsed( h, ExtraBlocksBlooms );

for ( RLP::iterator it = txns_rlp.begin(); it != txns_rlp.end(); ++it ) {
MICROPROFILE_SCOPEI( "insertBlockAndExtras", "for2", MP_HONEYDEW );
// Collate transaction hashes and remember who they were.
// h256s newTransactionAddresses;
{
MICROPROFILE_SCOPEI( "insertBlockAndExtras", "collate_txns", MP_LAVENDERBLUSH );

extrasWriteBatch->insert(
toSlice( sha3( ( *it ).data() ), ExtraTransactionAddress ),
( db::Slice ) dev::ref( ta.rlp() ) );
++ta.index;
}
}
bytes blockBytes;
RLP blockRLP( *i == _block.info.hash() ? _block.block : &( blockBytes = block( *i ) ) );
TransactionAddress ta;
ta.blockHash = tbi.hash();
ta.index = 0;

// Update database with them.
// ReadGuard l1( x_blocksBlooms );
WriteGuard l1( x_blocksBlooms );
{
MICROPROFILE_SCOPEI( "insertBlockAndExtras", "insert_to_extras", MP_LIGHTSKYBLUE );
RLP txns_rlp = blockRLP[1];

for ( RLP::iterator it = txns_rlp.begin(); it != txns_rlp.end(); ++it ) {
MICROPROFILE_SCOPEI( "insertBlockAndExtras", "for2", MP_HONEYDEW );

for ( auto const& h : alteredBlooms )
extrasWriteBatch->insert( toSlice( h, ExtraBlocksBlooms ),
( db::Slice ) dev::ref( m_blocksBlooms[h].rlp() ) );
extrasWriteBatch->insert( toSlice( h256( tbi.number() ), ExtraBlockHash ),
( db::Slice ) dev::ref( BlockHash( tbi.hash() ).rlp() ) );
extrasWriteBatch->insert(
toSlice( sha3( ( *it ).data() ), ExtraTransactionAddress ),
( db::Slice ) dev::ref( ta.rlp() ) );
++ta.index;
}
}

// FINALLY! change our best hash.
// Update database with them.
// ReadGuard l1( x_blocksBlooms );
WriteGuard l1( x_blocksBlooms );
{
newLastBlockHash = _block.info.hash();
newLastBlockNumber = ( unsigned ) _block.info.number();
isImportedAndBest = true;
MICROPROFILE_SCOPEI( "insertBlockAndExtras", "insert_to_extras", MP_LIGHTSKYBLUE );

for ( auto const& h : alteredBlooms )
extrasWriteBatch->insert( toSlice( h, ExtraBlocksBlooms ),
( db::Slice ) dev::ref( m_blocksBlooms[h].rlp() ) );
extrasWriteBatch->insert( toSlice( h256( tbi.number() ), ExtraBlockHash ),
( db::Slice ) dev::ref( BlockHash( tbi.hash() ).rlp() ) );
}
}

LOG( m_logger ) << cc::debug( " Imported and best " ) << _totalDifficulty
<< cc::debug( " (" ) << cc::warn( "#" ) << cc::num10( _block.info.number() )
<< cc::debug( "). Has " )
<< ( details( _block.info.parentHash() ).children.size() - 1 )
<< cc::debug( " siblings. Route: " ) << route;
} else {
LOG( m_loggerDetail ) << cc::debug( " Imported but not best (oTD: " )
<< details( last ).totalDifficulty << cc::debug( " > TD: " )
<< _totalDifficulty << cc::debug( "; " )
<< cc::num10( details( last ).number ) << cc::debug( ".." )
<< _block.info.number() << cc::debug( ")" );
// FINALLY! change our best hash.
{
newLastBlockHash = _block.info.hash();
newLastBlockNumber = ( unsigned ) _block.info.number();
isImportedAndBest = true;
}

LOG( m_logger ) << cc::debug( " Imported and best " ) << _totalDifficulty << cc::debug( " (" )
<< cc::warn( "#" ) << cc::num10( _block.info.number() )
<< cc::debug( "). Has " )
<< ( details( _block.info.parentHash() ).children.size() - 1 )
<< cc::debug( " siblings. Route: " ) << route;

try {
MICROPROFILE_SCOPEI( "m_blocksDB", "commit", MP_PLUM );
m_blocksDB->commit( std::move( blocksWriteBatch ) );
Expand Down

0 comments on commit a0fd925

Please sign in to comment.