use crate::{
bootloader_debug::{BootloaderDebug, BootloaderDebugTracer},
console_log::ConsoleLogHandler,
deps::InMemoryStorage,
filters::{EthFilters, FilterType, LogFilter},
fork::{ForkDetails, ForkSource, ForkStorage},
formatter,
system_contracts::{self, Options, SystemContracts},
utils::{
self, adjust_l1_gas_price_for_tx, bytecode_to_factory_dep, create_debug_output,
not_implemented, to_human_size, IntoBoxedFuture,
},
};
use clap::Parser;
use colored::Colorize;
use core::fmt::Display;
use futures::FutureExt;
use indexmap::IndexMap;
use itertools::Itertools;
use jsonrpc_core::BoxFuture;
use once_cell::sync::OnceCell;
use std::{
cmp::{self},
collections::{HashMap, HashSet},
str::FromStr,
sync::{Arc, RwLock},
};
use vm::{
constants::{
BLOCK_GAS_LIMIT, BLOCK_OVERHEAD_PUBDATA, ETH_CALL_GAS_LIMIT, MAX_PUBDATA_PER_BLOCK,
},
utils::{
fee::derive_base_fee_and_gas_per_pubdata,
l2_blocks::load_last_l2_block,
overhead::{derive_overhead, OverheadCoeficients},
},
CallTracer, ExecutionResult, HistoryDisabled, L1BatchEnv, SystemEnv, TxExecutionMode, Vm,
VmExecutionResultAndLogs, VmTracer,
};
use zksync_basic_types::{
web3::{self, signing::keccak256},
AccountTreeId, Address, Bytes, L1BatchNumber, MiniblockNumber, H160, H256, U256, U64,
};
use zksync_contracts::BaseSystemContracts;
use zksync_core::api_server::web3::backend_jsonrpc::{
error::into_jsrpc_error, namespaces::eth::EthNamespaceT,
};
use zksync_state::{ReadStorage, StoragePtr, StorageView, WriteStorage};
use zksync_types::vm_trace::Call;
use zksync_types::{
api::{Block, DebugCall, Log, TransactionReceipt, TransactionVariant},
block::legacy_miniblock_hash,
fee::Fee,
get_code_key, get_nonce_key,
l2::L2Tx,
transaction_request::TransactionRequest,
utils::{
decompose_full_nonce, nonces_to_full_nonce, storage_key_for_eth_balance,
storage_key_for_standard_token_balance,
},
PackedEthSignature, StorageKey, StorageLogQueryType, StorageValue, Transaction,
ACCOUNT_CODE_STORAGE_ADDRESS, EIP_712_TX_TYPE, L2_ETH_TOKEN_ADDRESS, MAX_GAS_PER_PUBDATA_BYTE,
MAX_L2_TX_GAS_LIMIT,
};
use zksync_utils::{
bytecode::{compress_bytecode, hash_bytecode},
h256_to_account_address, h256_to_u256, h256_to_u64, u256_to_h256,
};
use zksync_web3_decl::{
error::Web3Error,
types::{FeeHistory, Filter, FilterChanges},
};
/// Maximum allowed byte size of an incoming serialized transaction.
pub const MAX_TX_SIZE: usize = 1_000_000;
/// Timestamp given to the genesis block when the node starts without a fork.
pub const NON_FORK_FIRST_BLOCK_TIMESTAMP: u64 = 1_000;
/// Network ID the test node reports to clients.
pub const TEST_NODE_NETWORK_ID: u16 = 260;
/// Default L1 gas price assumed when not forking (50 gwei).
pub const L1_GAS_PRICE: u64 = 50_000_000_000;
/// Fixed fair L2 gas price used for batch envs and fee derivation (0.25 gwei).
pub const L2_GAS_PRICE: u64 = 250_000_000;
/// Multiplier applied to the current L1 gas price during gas estimation,
/// padding the quote against L1 price movement.
pub const ESTIMATE_GAS_L1_GAS_PRICE_SCALE_FACTOR: f64 = 1.2;
/// Extra pubdata bytes charged per published factory-dep bytecode.
pub const ESTIMATE_GAS_PUBLISH_BYTE_OVERHEAD: u32 = 100;
/// The gas-estimation binary search stops once the bounds are within this
/// many gas units of each other.
pub const ESTIMATE_GAS_ACCEPTABLE_OVERESTIMATION: u32 = 1_000;
/// Safety multiplier applied to the gas limit found by the binary search.
pub const ESTIMATE_GAS_SCALE_FACTOR: f32 = 1.3;
/// Cap on the number of archived previous block states kept in memory.
pub const MAX_PREVIOUS_STATES: u16 = 128;
/// Protocol version string reported over the API.
pub const PROTOCOL_VERSION: &str = "zks/1";
/// Derives a deterministic block hash by keccak-hashing the big-endian block
/// number concatenated with the given transaction hash.
pub fn compute_hash(block_number: u64, tx_hash: H256) -> H256 {
    // 8 bytes of block number + 32 bytes of tx hash.
    let mut digest = Vec::with_capacity(8 + 32);
    digest.extend_from_slice(&block_number.to_be_bytes());
    digest.extend_from_slice(tx_hash.as_bytes());
    H256(keccak256(&digest))
}
/// Builds a block carrying no transactions for the given number, timestamp
/// and batch; every remaining field takes its `Default` value.
pub fn create_empty_block<TX>(block_number: u64, timestamp: u64, batch: u32) -> Block<TX> {
    Block {
        // Hash derived from the block number alone (zero tx hash).
        hash: compute_hash(block_number, H256::zero()),
        number: U64::from(block_number),
        timestamp: U256::from(timestamp),
        l1_batch_number: Some(U64::from(batch)),
        transactions: Vec::new(),
        gas_used: U256::zero(),
        gas_limit: U256::from(BLOCK_GAS_LIMIT),
        ..Default::default()
    }
}
/// Execution record for a single L2 transaction.
#[derive(Debug, Clone)]
pub struct TxExecutionInfo {
    // The transaction that was executed.
    pub tx: L2Tx,
    // Batch the transaction was included in.
    pub batch_number: u32,
    // Miniblock (L2 block) the transaction was included in.
    pub miniblock_number: u64,
    // Full VM result including logs and statistics.
    pub result: VmExecutionResultAndLogs,
}
/// Verbosity level for printing call traces after a transaction
/// (consumed by `formatter::print_call`).
#[derive(Debug, clap::Parser, Clone, clap::ValueEnum, PartialEq, Eq)]
pub enum ShowCalls {
    None,
    User,
    System,
    All,
}
impl FromStr for ShowCalls {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s.to_lowercase().as_ref() {
"none" => Ok(ShowCalls::None),
"user" => Ok(ShowCalls::User),
"system" => Ok(ShowCalls::System),
"all" => Ok(ShowCalls::All),
_ => Err(format!(
"Unknown ShowCalls value {} - expected one of none|user|system|all.",
s
)),
}
}
}
impl Display for ShowCalls {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
        // The derived `Debug` name (e.g. "User") doubles as display text.
        std::fmt::Debug::fmt(self, f)
    }
}
/// Which storage log entries (reads and/or writes) to print after each
/// transaction (consumed by `formatter::print_logs`).
#[derive(Debug, Parser, Clone, clap::ValueEnum, PartialEq, Eq)]
pub enum ShowStorageLogs {
    None,
    Read,
    Write,
    All,
}
impl FromStr for ShowStorageLogs {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s.to_lowercase().as_ref() {
"none" => Ok(ShowStorageLogs::None),
"read" => Ok(ShowStorageLogs::Read),
"write" => Ok(ShowStorageLogs::Write),
"all" => Ok(ShowStorageLogs::All),
_ => Err(format!(
"Unknown ShowStorageLogs value {} - expected one of none|read|write|all.",
s
)),
}
}
}
impl Display for ShowStorageLogs {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
        // The derived `Debug` name doubles as display text.
        std::fmt::Debug::fmt(self, f)
    }
}
/// Whether to print VM execution details after each transaction
/// (consumed by `formatter::print_vm_details`).
#[derive(Debug, Parser, Clone, clap::ValueEnum, PartialEq, Eq)]
pub enum ShowVMDetails {
    None,
    All,
}
impl FromStr for ShowVMDetails {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s.to_lowercase().as_ref() {
"none" => Ok(ShowVMDetails::None),
"all" => Ok(ShowVMDetails::All),
_ => Err(format!(
"Unknown ShowVMDetails value {} - expected one of none|all.",
s
)),
}
}
}
impl Display for ShowVMDetails {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
        // The derived `Debug` name doubles as display text.
        std::fmt::Debug::fmt(self, f)
    }
}
/// Whether to print the detailed gas breakdown after each transaction
/// (see `display_detailed_gas_info`).
#[derive(Debug, Parser, Clone, clap::ValueEnum, PartialEq, Eq)]
pub enum ShowGasDetails {
    None,
    All,
}
impl FromStr for ShowGasDetails {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s.to_lowercase().as_ref() {
"none" => Ok(ShowGasDetails::None),
"all" => Ok(ShowGasDetails::All),
_ => Err(format!(
"Unknown ShowGasDetails value {} - expected one of none|all.",
s
)),
}
}
}
impl Display for ShowGasDetails {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
        // The derived `Debug` name doubles as display text.
        std::fmt::Debug::fmt(self, f)
    }
}
/// Everything the node retains about an executed transaction: execution
/// info, the API-level receipt, and the debug call trace.
#[derive(Debug, Clone)]
pub struct TransactionResult {
    pub info: TxExecutionInfo,
    pub receipt: TransactionReceipt,
    pub debug: DebugCall,
}
impl TransactionResult {
pub fn debug_info(&self, only_top: bool) -> DebugCall {
let calls = if only_top {
vec![]
} else {
self.debug.calls.clone()
};
DebugCall {
calls,
..self.debug.clone()
}
}
}
/// Mutable state of the in-memory node, shared behind `Arc<RwLock<..>>` by
/// [`InMemoryNode`].
#[derive(Clone)]
pub struct InMemoryNodeInner<S> {
    // Timestamp of the current chain head; read when building the next batch env.
    pub current_timestamp: u64,
    // Current L1 batch number.
    pub current_batch: u32,
    // Current miniblock (L2 block) number.
    pub current_miniblock: u64,
    // Hash of the current miniblock; also the key used by `archive_state`.
    pub current_miniblock_hash: H256,
    // L1 gas price used for batch envs and fee estimation.
    pub l1_gas_price: u64,
    // Executed transactions, keyed by tx hash.
    pub tx_results: HashMap<H256, TransactionResult>,
    // Produced blocks, keyed by block hash.
    pub blocks: HashMap<H256, Block<TransactionVariant>>,
    // Block number -> block hash index into `blocks`.
    pub block_hashes: HashMap<u64, H256>,
    // Installed eth_newFilter-style filters.
    pub filters: EthFilters,
    // Backing storage, optionally forked from a remote network.
    pub fork_storage: ForkStorage<S>,
    // Tracing verbosity knobs (settable via CLI/config).
    pub show_calls: ShowCalls,
    pub show_storage_logs: ShowStorageLogs,
    pub show_vm_details: ShowVMDetails,
    pub show_gas_details: ShowGasDetails,
    // Whether to resolve selector hashes to human-readable names in traces.
    pub resolve_hashes: bool,
    pub console_log_handler: ConsoleLogHandler,
    // System contract bytecodes selected at startup.
    pub system_contracts: SystemContracts,
    // Accounts whose transactions may be executed without signature verification.
    pub impersonated_accounts: HashSet<Address>,
    // Accounts that were credited with a large ETH balance.
    pub rich_accounts: HashSet<H160>,
    // Archived storage states keyed by block hash, capped at MAX_PREVIOUS_STATES.
    pub previous_states: IndexMap<H256, HashMap<StorageKey, StorageValue>>,
}
// Aggregated output of executing one L2 transaction:
// (modified storage slots, VM execution result, call traces, the block the
// tx was sealed into, a hash-keyed map of bytecode words — presumably new
// factory deps, confirm at the use site — and the block context used).
type L2TxResult = (
    HashMap<StorageKey, H256>,
    VmExecutionResultAndLogs,
    Vec<Call>,
    Block<TransactionVariant>,
    HashMap<U256, Vec<U256>>,
    BlockContext,
);
impl<S: std::fmt::Debug + ForkSource> InMemoryNodeInner<S> {
    /// Builds the `L1BatchEnv` (plus the `BlockContext` it was derived from)
    /// for executing the next batch on top of the current chain head.
    ///
    /// The previous L2 block hash is read from `storage` when available,
    /// otherwise it falls back to the legacy hash of the current miniblock.
    pub fn create_l1_batch_env<ST: ReadStorage>(
        &self,
        storage: StoragePtr<ST>,
    ) -> (L1BatchEnv, BlockContext) {
        let last_l2_block_hash = if let Some(last_l2_block) = load_last_l2_block(storage) {
            last_l2_block.hash
        } else {
            // No block in storage yet — use the legacy hash formula.
            legacy_miniblock_hash(MiniblockNumber(self.current_miniblock as u32))
        };
        let block_ctx = BlockContext::from_current(
            self.current_batch,
            self.current_miniblock,
            self.current_timestamp,
        );
        // NOTE(review): `new_batch` presumably advances the batch/miniblock
        // numbers past the current head — confirm in the BlockContext impl.
        let block_ctx = block_ctx.new_batch();
        let batch_env = L1BatchEnv {
            previous_batch_hash: None,
            number: L1BatchNumber::from(block_ctx.batch),
            timestamp: block_ctx.timestamp,
            l1_gas_price: self.l1_gas_price,
            fair_l2_gas_price: L2_GAS_PRICE,
            fee_account: H160::zero(),
            enforced_base_fee: None,
            first_l2_block: vm::L2BlockEnv {
                number: block_ctx.miniblock as u32,
                timestamp: block_ctx.timestamp,
                prev_block_hash: last_l2_block_hash,
                max_virtual_blocks_to_create: 1,
            },
        };
        (batch_env, block_ctx)
    }
pub fn create_system_env(
&self,
base_system_contracts: BaseSystemContracts,
execution_mode: TxExecutionMode,
) -> SystemEnv {
SystemEnv {
zk_porter_available: false,
version: zksync_types::ProtocolVersionId::latest(),
base_system_smart_contracts: base_system_contracts,
gas_limit: BLOCK_GAS_LIMIT,
execution_mode,
default_validation_computational_gas_limit: BLOCK_GAS_LIMIT,
chain_id: self.fork_storage.chain_id,
}
}
    /// Estimates the fee for executing `req`: pads the L1 gas price, charges
    /// pubdata for unpublished factory deps, binary-searches the smallest
    /// working tx gas limit, then pads and verifies the result.
    ///
    /// Returns a JSON-RPC error if the request cannot be deserialized, the
    /// factory deps exceed the per-block pubdata limit, the verification run
    /// reverts/halts, or the total limit overflows.
    pub fn estimate_gas_impl(
        &self,
        req: zksync_types::transaction_request::CallRequest,
    ) -> jsonrpc_core::Result<Fee> {
        let mut l2_tx = match L2Tx::from_request(req.into(), MAX_TX_SIZE) {
            Ok(tx) => tx,
            Err(e) => {
                let error = Web3Error::SerializationError(e);
                return Err(into_jsrpc_error(error));
            }
        };
        let tx: Transaction = l2_tx.clone().into();
        let fair_l2_gas_price = L2_GAS_PRICE;
        // Pad the L1 gas price so the quote stays valid if L1 prices move.
        let l1_gas_price = {
            let current_l1_gas_price =
                ((self.l1_gas_price as f64) * ESTIMATE_GAS_L1_GAS_PRICE_SCALE_FACTOR) as u64;
            adjust_l1_gas_price_for_tx(
                current_l1_gas_price,
                L2_GAS_PRICE,
                tx.gas_per_pubdata_byte_limit(),
            )
        };
        let (base_fee, gas_per_pubdata_byte) =
            derive_base_fee_and_gas_per_pubdata(l1_gas_price, fair_l2_gas_price);
        // Unsigned requests get a dummy 65-byte secp256k1 signature (v = 27)
        // so signature-dependent costs are still accounted for.
        if l2_tx.common_data.signature.is_empty() {
            l2_tx.common_data.signature = vec![0u8; 65];
            l2_tx.common_data.signature[64] = 27;
        }
        l2_tx.common_data.fee.gas_per_pubdata_limit = MAX_GAS_PER_PUBDATA_BYTE.into();
        l2_tx.common_data.fee.max_fee_per_gas = base_fee.into();
        l2_tx.common_data.fee.max_priority_fee_per_gas = base_fee.into();
        let mut storage_view = StorageView::new(&self.fork_storage);
        // Pubdata bytes needed to publish factory deps not already known on
        // chain; the compressed size is used when compression succeeds.
        let pubdata_for_factory_deps = l2_tx
            .execute
            .factory_deps
            .as_deref()
            .unwrap_or_default()
            .iter()
            .map(|bytecode| {
                if storage_view.is_bytecode_known(&hash_bytecode(bytecode)) {
                    return 0;
                }
                let length = if let Ok(compressed) = compress_bytecode(bytecode) {
                    compressed.len()
                } else {
                    bytecode.len()
                };
                length as u32 + ESTIMATE_GAS_PUBLISH_BYTE_OVERHEAD
            })
            .sum::<u32>();
        if pubdata_for_factory_deps > MAX_PUBDATA_PER_BLOCK {
            return Err(into_jsrpc_error(Web3Error::SubmitTransactionError(
                "exceeds limit for published pubdata".into(),
                Default::default(),
            )));
        }
        let gas_for_bytecodes_pubdata: u32 =
            pubdata_for_factory_deps * (gas_per_pubdata_byte as u32);
        let storage = storage_view.to_rc_ptr();
        let execution_mode = TxExecutionMode::EstimateFee;
        let (mut batch_env, _) = self.create_l1_batch_env(storage.clone());
        batch_env.l1_gas_price = l1_gas_price;
        let system_env = self.create_system_env(
            self.system_contracts.contracts_for_fee_estimate().clone(),
            execution_mode,
        );
        // Binary-search the smallest tx-body gas limit that lets the tx
        // succeed, to within ESTIMATE_GAS_ACCEPTABLE_OVERESTIMATION gas.
        let mut lower_bound = 0;
        let mut upper_bound = MAX_L2_TX_GAS_LIMIT as u32;
        let mut attempt_count = 1;
        log::trace!("Starting gas estimation loop");
        while lower_bound + ESTIMATE_GAS_ACCEPTABLE_OVERESTIMATION < upper_bound {
            let mid = (lower_bound + upper_bound) / 2;
            log::trace!(
                "Attempt {} (lower_bound: {}, upper_bound: {}, mid: {})",
                attempt_count,
                lower_bound,
                upper_bound,
                mid
            );
            let try_gas_limit = gas_for_bytecodes_pubdata + mid;
            let estimate_gas_result = InMemoryNodeInner::estimate_gas_step(
                l2_tx.clone(),
                gas_per_pubdata_byte,
                try_gas_limit,
                l1_gas_price,
                batch_env.clone(),
                system_env.clone(),
                &self.fork_storage,
            );
            if estimate_gas_result.result.is_failed() {
                log::trace!("Attempt {} FAILED", attempt_count);
                lower_bound = mid + 1;
            } else {
                log::trace!("Attempt {} SUCCEEDED", attempt_count);
                upper_bound = mid;
            }
            attempt_count += 1;
        }
        log::trace!("Gas Estimation Values:");
        log::trace!(" Final upper_bound: {}", upper_bound);
        log::trace!(" ESTIMATE_GAS_SCALE_FACTOR: {}", ESTIMATE_GAS_SCALE_FACTOR);
        log::trace!(" MAX_L2_TX_GAS_LIMIT: {}", MAX_L2_TX_GAS_LIMIT);
        // Pad the found limit by the scale factor, capped at the protocol max.
        let tx_body_gas_limit = cmp::min(
            MAX_L2_TX_GAS_LIMIT as u32,
            (upper_bound as f32 * ESTIMATE_GAS_SCALE_FACTOR) as u32,
        );
        let suggested_gas_limit = tx_body_gas_limit + gas_for_bytecodes_pubdata;
        // Verification run with the padded limit.
        let estimate_gas_result = InMemoryNodeInner::estimate_gas_step(
            l2_tx.clone(),
            gas_per_pubdata_byte,
            suggested_gas_limit,
            l1_gas_price,
            batch_env,
            system_env,
            &self.fork_storage,
        );
        let coefficients = OverheadCoeficients::from_tx_type(EIP_712_TX_TYPE);
        let overhead: u32 = derive_overhead(
            suggested_gas_limit,
            gas_per_pubdata_byte as u32,
            tx.encoding_len(),
            coefficients,
        );
        match estimate_gas_result.result {
            ExecutionResult::Revert { output } => {
                log::info!("{}", format!("Unable to estimate gas for the request with our suggested gas limit of {}. The transaction is most likely unexecutable. Breakdown of estimation:", suggested_gas_limit + overhead).red());
                log::info!(
                    "{}",
                    format!(
                        "\tEstimated transaction body gas cost: {}",
                        tx_body_gas_limit
                    )
                    .red()
                );
                log::info!(
                    "{}",
                    format!("\tGas for pubdata: {}", gas_for_bytecodes_pubdata).red()
                );
                log::info!("{}", format!("\tOverhead: {}", overhead).red());
                let message = output.to_string();
                let pretty_message = format!(
                    "execution reverted{}{}",
                    if message.is_empty() { "" } else { ": " },
                    message
                );
                // Revert carries returndata; include it in the error.
                let data = output.encoded_data();
                log::info!("{}", pretty_message.on_red());
                Err(into_jsrpc_error(Web3Error::SubmitTransactionError(
                    pretty_message,
                    data,
                )))
            }
            ExecutionResult::Halt { reason } => {
                log::info!("{}", format!("Unable to estimate gas for the request with our suggested gas limit of {}. The transaction is most likely unexecutable. Breakdown of estimation:", suggested_gas_limit + overhead).red());
                log::info!(
                    "{}",
                    format!(
                        "\tEstimated transaction body gas cost: {}",
                        tx_body_gas_limit
                    )
                    .red()
                );
                log::info!(
                    "{}",
                    format!("\tGas for pubdata: {}", gas_for_bytecodes_pubdata).red()
                );
                log::info!("{}", format!("\tOverhead: {}", overhead).red());
                let message = reason.to_string();
                let pretty_message = format!(
                    "execution reverted{}{}",
                    if message.is_empty() { "" } else { ": " },
                    message
                );
                // A halt has no returndata, hence the empty payload.
                log::info!("{}", pretty_message.on_red());
                Err(into_jsrpc_error(Web3Error::SubmitTransactionError(
                    pretty_message,
                    vec![],
                )))
            }
            ExecutionResult::Success { .. } => {
                // Total limit = body + pubdata + overhead; guard the addition.
                let full_gas_limit = match tx_body_gas_limit
                    .overflowing_add(gas_for_bytecodes_pubdata + overhead)
                {
                    (value, false) => value,
                    (_, true) => {
                        log::info!("{}", "Overflow when calculating gas estimation. We've exceeded the block gas limit by summing the following values:".red());
                        log::info!(
                            "{}",
                            format!(
                                "\tEstimated transaction body gas cost: {}",
                                tx_body_gas_limit
                            )
                            .red()
                        );
                        log::info!(
                            "{}",
                            format!("\tGas for pubdata: {}", gas_for_bytecodes_pubdata).red()
                        );
                        log::info!("{}", format!("\tOverhead: {}", overhead).red());
                        return Err(into_jsrpc_error(Web3Error::SubmitTransactionError(
                            "exceeds block gas limit".into(),
                            Default::default(),
                        )));
                    }
                };
                log::trace!("Gas Estimation Results");
                log::trace!(" tx_body_gas_limit: {}", tx_body_gas_limit);
                log::trace!(" gas_for_bytecodes_pubdata: {}", gas_for_bytecodes_pubdata);
                log::trace!(" overhead: {}", overhead);
                log::trace!(" full_gas_limit: {}", full_gas_limit);
                let fee = Fee {
                    max_fee_per_gas: base_fee.into(),
                    max_priority_fee_per_gas: 0u32.into(),
                    gas_limit: full_gas_limit.into(),
                    gas_per_pubdata_limit: gas_per_pubdata_byte.into(),
                };
                Ok(fee)
            }
        }
    }
    /// Runs `l2_tx` once with the given gas limit and returns the raw VM
    /// result; used as the probe inside the estimation binary search.
    ///
    /// The account's nonce and fee balance are patched directly in a
    /// throwaway storage view so the probe cannot fail on preconditions
    /// unrelated to gas.
    #[allow(clippy::too_many_arguments)]
    fn estimate_gas_step(
        mut l2_tx: L2Tx,
        gas_per_pubdata_byte: u64,
        tx_gas_limit: u32,
        l1_gas_price: u64,
        mut batch_env: L1BatchEnv,
        system_env: SystemEnv,
        fork_storage: &ForkStorage<S>,
    ) -> VmExecutionResultAndLogs {
        let tx: Transaction = l2_tx.clone().into();
        let l1_gas_price =
            adjust_l1_gas_price_for_tx(l1_gas_price, L2_GAS_PRICE, tx.gas_per_pubdata_byte_limit());
        let coefficients = OverheadCoeficients::from_tx_type(EIP_712_TX_TYPE);
        // The limit handed to the VM must include the batch overhead.
        let gas_limit_with_overhead = tx_gas_limit
            + derive_overhead(
                tx_gas_limit,
                gas_per_pubdata_byte as u32,
                tx.encoding_len(),
                coefficients,
            );
        l2_tx.common_data.fee.gas_limit = gas_limit_with_overhead.into();
        let storage = StorageView::new(fork_storage).to_rc_ptr();
        // Force the account nonce to match the transaction's nonce, keeping
        // the deployment half of the full nonce intact.
        let nonce = l2_tx.nonce();
        let nonce_key = get_nonce_key(&l2_tx.initiator_account());
        let full_nonce = storage.borrow_mut().read_value(&nonce_key);
        let (_, deployment_nonce) = decompose_full_nonce(h256_to_u256(full_nonce));
        let enforced_full_nonce = nonces_to_full_nonce(U256::from(nonce.0), deployment_nonce);
        storage
            .borrow_mut()
            .set_value(nonce_key, u256_to_h256(enforced_full_nonce));
        // Top up the payer with the worst-case fee so balance checks pass.
        let payer = l2_tx.payer();
        let balance_key = storage_key_for_eth_balance(&payer);
        let mut current_balance = h256_to_u256(storage.borrow_mut().read_value(&balance_key));
        let added_balance = l2_tx.common_data.fee.gas_limit * l2_tx.common_data.fee.max_fee_per_gas;
        current_balance += added_balance;
        storage
            .borrow_mut()
            .set_value(balance_key, u256_to_h256(current_balance));
        batch_env.l1_gas_price = l1_gas_price;
        let mut vm = Vm::new(batch_env, system_env, storage, HistoryDisabled);
        let tx: Transaction = l2_tx.into();
        vm.push_transaction(tx);
        vm.execute(vm::VmExecutionMode::OneTx)
    }
    /// Marks `address` as impersonated so its transactions run without
    /// signature verification; returns `true` if it was not already marked.
    pub fn set_impersonated_account(&mut self, address: Address) -> bool {
        self.impersonated_accounts.insert(address)
    }
    /// Stops impersonating `address`; returns `true` if it was impersonated.
    pub fn stop_impersonating_account(&mut self, address: Address) -> bool {
        self.impersonated_accounts.remove(&address)
    }
pub fn archive_state(&mut self) -> Result<(), String> {
if self.previous_states.len() > MAX_PREVIOUS_STATES as usize {
if let Some(entry) = self.previous_states.shift_remove_index(0) {
log::debug!("removing archived state for previous block {:#x}", entry.0);
}
}
log::debug!(
"archiving state for {:#x} #{}",
self.current_miniblock_hash,
self.current_miniblock
);
self.previous_states.insert(
self.current_miniblock_hash,
self.fork_storage
.inner
.read()
.map_err(|err| err.to_string())?
.raw_storage
.state
.clone(),
);
Ok(())
}
    /// Captures the full node state — chain head, bookkeeping maps, and the
    /// raw fork storage with its caches — into a [`Snapshot`].
    ///
    /// Returns an error if the storage read lock cannot be acquired.
    pub fn snapshot(&self) -> Result<Snapshot, String> {
        let storage = self
            .fork_storage
            .inner
            .read()
            .map_err(|err| format!("failed acquiring read lock on storage: {:?}", err))?;
        Ok(Snapshot {
            current_timestamp: self.current_timestamp,
            current_batch: self.current_batch,
            current_miniblock: self.current_miniblock,
            current_miniblock_hash: self.current_miniblock_hash,
            l1_gas_price: self.l1_gas_price,
            tx_results: self.tx_results.clone(),
            blocks: self.blocks.clone(),
            block_hashes: self.block_hashes.clone(),
            filters: self.filters.clone(),
            impersonated_accounts: self.impersonated_accounts.clone(),
            rich_accounts: self.rich_accounts.clone(),
            previous_states: self.previous_states.clone(),
            raw_storage: storage.raw_storage.clone(),
            value_read_cache: storage.value_read_cache.clone(),
            factory_dep_cache: storage.factory_dep_cache.clone(),
        })
    }
    /// Restores node state from a previously captured [`Snapshot`],
    /// overwriting the chain head, bookkeeping maps, and storage contents.
    ///
    /// Returns an error if the storage write lock cannot be acquired.
    pub fn restore_snapshot(&mut self, snapshot: Snapshot) -> Result<(), String> {
        // Take the storage lock first so a lock failure leaves `self` untouched.
        let mut storage = self
            .fork_storage
            .inner
            .write()
            .map_err(|err| format!("failed acquiring write lock on storage: {:?}", err))?;
        self.current_timestamp = snapshot.current_timestamp;
        self.current_batch = snapshot.current_batch;
        self.current_miniblock = snapshot.current_miniblock;
        self.current_miniblock_hash = snapshot.current_miniblock_hash;
        self.l1_gas_price = snapshot.l1_gas_price;
        self.tx_results = snapshot.tx_results;
        self.blocks = snapshot.blocks;
        self.block_hashes = snapshot.block_hashes;
        self.filters = snapshot.filters;
        self.impersonated_accounts = snapshot.impersonated_accounts;
        self.rich_accounts = snapshot.rich_accounts;
        self.previous_states = snapshot.previous_states;
        storage.raw_storage = snapshot.raw_storage;
        storage.value_read_cache = snapshot.value_read_cache;
        storage.factory_dep_cache = snapshot.factory_dep_cache;
        Ok(())
    }
}
/// Point-in-time copy of the node state, produced by
/// `InMemoryNodeInner::snapshot` and consumed by `restore_snapshot`.
#[derive(Debug, Clone)]
pub struct Snapshot {
    pub(crate) current_timestamp: u64,
    pub(crate) current_batch: u32,
    pub(crate) current_miniblock: u64,
    pub(crate) current_miniblock_hash: H256,
    pub(crate) l1_gas_price: u64,
    pub(crate) tx_results: HashMap<H256, TransactionResult>,
    pub(crate) blocks: HashMap<H256, Block<TransactionVariant>>,
    pub(crate) block_hashes: HashMap<u64, H256>,
    pub(crate) filters: EthFilters,
    pub(crate) impersonated_accounts: HashSet<Address>,
    pub(crate) rich_accounts: HashSet<H160>,
    pub(crate) previous_states: IndexMap<H256, HashMap<StorageKey, StorageValue>>,
    // Raw storage plus the fork read caches, copied from `ForkStorage`.
    pub(crate) raw_storage: InMemoryStorage,
    pub(crate) value_read_cache: HashMap<StorageKey, H256>,
    pub(crate) factory_dep_cache: HashMap<H256, Option<Vec<u8>>>,
}
/// Thread-safe handle to the in-memory node; all API namespaces share the
/// same inner state through this `Arc<RwLock<..>>`.
pub struct InMemoryNode<S> {
    inner: Arc<RwLock<InMemoryNodeInner<S>>>,
}
/// Scans the storage logs (newest first) for the initial write into the
/// account-code-storage system contract; the written key is the address of
/// the freshly deployed contract, if any.
fn contract_address_from_tx_result(execution_result: &VmExecutionResultAndLogs) -> Option<H160> {
    execution_result
        .logs
        .storage_logs
        .iter()
        .rev()
        .find(|query| {
            query.log_type == StorageLogQueryType::InitialWrite
                && query.log_query.address == ACCOUNT_CODE_STORAGE_ADDRESS
        })
        .map(|query| h256_to_account_address(&u256_to_h256(query.log_query.key)))
}
impl<S: ForkSource + std::fmt::Debug> Default for InMemoryNode<S> {
fn default() -> Self {
InMemoryNode::new(
None,
crate::node::ShowCalls::None,
ShowStorageLogs::None,
ShowVMDetails::None,
ShowGasDetails::None,
false,
&system_contracts::Options::BuiltIn,
)
}
}
impl<S: ForkSource + std::fmt::Debug> InMemoryNode<S> {
pub fn new(
fork: Option<ForkDetails<S>>,
show_calls: ShowCalls,
show_storage_logs: ShowStorageLogs,
show_vm_details: ShowVMDetails,
show_gas_details: ShowGasDetails,
resolve_hashes: bool,
system_contracts_options: &system_contracts::Options,
) -> Self {
let inner = if let Some(f) = &fork {
let mut block_hashes = HashMap::<u64, H256>::new();
block_hashes.insert(f.l2_block.number.as_u64(), f.l2_block.hash);
let mut blocks = HashMap::<H256, Block<TransactionVariant>>::new();
blocks.insert(f.l2_block.hash, f.l2_block.clone());
InMemoryNodeInner {
current_timestamp: f.block_timestamp,
current_batch: f.l1_block.0,
current_miniblock: f.l2_miniblock,
current_miniblock_hash: f.l2_miniblock_hash,
l1_gas_price: f.l1_gas_price,
tx_results: Default::default(),
blocks,
block_hashes,
filters: Default::default(),
fork_storage: ForkStorage::new(fork, system_contracts_options),
show_calls,
show_storage_logs,
show_vm_details,
show_gas_details,
resolve_hashes,
console_log_handler: ConsoleLogHandler::default(),
system_contracts: SystemContracts::from_options(system_contracts_options),
impersonated_accounts: Default::default(),
rich_accounts: HashSet::new(),
previous_states: Default::default(),
}
} else {
let mut block_hashes = HashMap::<u64, H256>::new();
block_hashes.insert(0, H256::zero());
let mut blocks = HashMap::<H256, Block<TransactionVariant>>::new();
blocks.insert(
H256::zero(),
create_empty_block(0, NON_FORK_FIRST_BLOCK_TIMESTAMP, 0),
);
InMemoryNodeInner {
current_timestamp: NON_FORK_FIRST_BLOCK_TIMESTAMP,
current_batch: 0,
current_miniblock: 0,
current_miniblock_hash: H256::zero(),
l1_gas_price: L1_GAS_PRICE,
tx_results: Default::default(),
blocks,
block_hashes,
filters: Default::default(),
fork_storage: ForkStorage::new(fork, system_contracts_options),
show_calls,
show_storage_logs,
show_vm_details,
show_gas_details,
resolve_hashes,
console_log_handler: ConsoleLogHandler::default(),
system_contracts: SystemContracts::from_options(system_contracts_options),
impersonated_accounts: Default::default(),
rich_accounts: HashSet::new(),
previous_states: Default::default(),
}
};
InMemoryNode {
inner: Arc::new(RwLock::new(inner)),
}
}
pub fn get_inner(&self) -> Arc<RwLock<InMemoryNodeInner<S>>> {
self.inner.clone()
}
    /// Executes each transaction in `txs` in its own batch via `run_l2_tx`,
    /// stopping at the first failure.
    pub fn apply_txs(&self, txs: Vec<L2Tx>) -> Result<(), String> {
        log::info!("Running {:?} transactions (one per batch)", txs.len());
        for tx in txs {
            self.run_l2_tx(tx, TxExecutionMode::VerifyExecute)?;
        }
        Ok(())
    }
    /// Credits `address` with 10^30 wei of ETH and records it as a rich
    /// account. A failure to take the write lock is logged and ignored.
    pub fn set_rich_account(&self, address: H160) {
        let key = storage_key_for_eth_balance(&address);
        let mut inner = match self.inner.write() {
            Ok(guard) => guard,
            Err(e) => {
                log::info!("Failed to acquire write lock: {}", e);
                return;
            }
        };
        // Route the write through a StorageView so every slot it touches
        // (not just the balance key) is captured and applied below.
        let keys = {
            let mut storage_view = StorageView::new(&inner.fork_storage);
            storage_view.set_value(key, u256_to_h256(U256::from(10u128.pow(30))));
            storage_view.modified_storage_keys().clone()
        };
        for (key, value) in keys.iter() {
            inner.fork_storage.set_value(*key, *value);
        }
        inner.rich_accounts.insert(address);
    }
    /// Executes `l2_tx` as an `eth_call` (no state commitment) and returns
    /// the raw execution result, printing call traces and console logs
    /// according to the node's verbosity settings.
    fn run_l2_call(&self, mut l2_tx: L2Tx) -> Result<ExecutionResult, String> {
        let execution_mode = TxExecutionMode::EthCall;
        let inner = self
            .inner
            .write()
            .map_err(|e| format!("Failed to acquire write lock: {}", e))?;
        let storage = StorageView::new(&inner.fork_storage).to_rc_ptr();
        let bootloader_code = inner.system_contracts.contracts_for_l2_call();
        let (batch_env, _) = inner.create_l1_batch_env(storage.clone());
        let system_env = inner.create_system_env(bootloader_code.clone(), execution_mode);
        let mut vm = Vm::new(batch_env, system_env, storage, HistoryDisabled);
        // Calls need not be signed; supply a default packed signature so the
        // transaction is well-formed.
        if l2_tx.common_data.signature.is_empty() {
            l2_tx.common_data.signature = PackedEthSignature::default().serialize_packed().into();
        }
        let tx: Transaction = l2_tx.into();
        vm.push_transaction(tx);
        // Collect the call tree via the CallTracer while executing.
        let call_tracer_result = Arc::new(OnceCell::default());
        let custom_tracers =
            vec![
                Box::new(CallTracer::new(call_tracer_result.clone(), HistoryDisabled))
                    as Box<dyn VmTracer<StorageView<&ForkStorage<S>>, HistoryDisabled>>,
            ];
        let tx_result = vm.inspect(custom_tracers, vm::VmExecutionMode::OneTx);
        // The tracer's Arc is only shared with the (now dropped) tracer, so
        // try_unwrap succeeds; missing traces default to empty.
        let call_traces = Arc::try_unwrap(call_tracer_result)
            .unwrap()
            .take()
            .unwrap_or_default();
        match &tx_result.result {
            ExecutionResult::Success { output } => {
                log::info!("Call: {}", "SUCCESS".green());
                let output_bytes = zksync_basic_types::Bytes::from(output.clone());
                log::info!("Output: {}", serde_json::to_string(&output_bytes).unwrap());
            }
            ExecutionResult::Revert { output } => {
                log::info!("Call: {}: {}", "FAILED".red(), output);
            }
            ExecutionResult::Halt { reason } => log::info!("Call: {} {}", "HALTED".red(), reason),
        };
        log::info!("=== Console Logs: ");
        for call in &call_traces {
            inner.console_log_handler.handle_call_recursive(call);
        }
        log::info!("=== Call traces:");
        for call in &call_traces {
            formatter::print_call(call, 0, &inner.show_calls, inner.resolve_hashes);
        }
        Ok(tx_result.result)
    }
fn display_detailed_gas_info(
&self,
bootloader_debug_result: Option<&eyre::Result<BootloaderDebug, String>>,
spent_on_pubdata: u32,
) -> eyre::Result<(), String> {
if let Some(bootloader_result) = bootloader_debug_result {
let debug = bootloader_result.clone()?;
log::info!("┌─────────────────────────┐");
log::info!("│ GAS DETAILS │");
log::info!("└─────────────────────────┘");
let total_gas_limit = debug
.total_gas_limit_from_user
.saturating_sub(debug.reserved_gas);
let intrinsic_gas = total_gas_limit - debug.gas_limit_after_intrinsic;
let gas_for_validation = debug.gas_limit_after_intrinsic - debug.gas_after_validation;
let gas_spent_on_compute =
debug.gas_spent_on_execution - debug.gas_spent_on_bytecode_preparation;
let gas_used = intrinsic_gas
+ gas_for_validation
+ debug.gas_spent_on_bytecode_preparation
+ gas_spent_on_compute;
log::info!(
"Gas - Limit: {} | Used: {} | Refunded: {}",
to_human_size(total_gas_limit),
to_human_size(gas_used),
to_human_size(debug.refund_by_operator)
);
if debug.total_gas_limit_from_user != total_gas_limit {
log::info!(
"{}",
format!(
" WARNING: user actually provided more gas {}, but system had a lower max limit.",
to_human_size(debug.total_gas_limit_from_user)
)
.yellow()
);
}
if debug.refund_computed != debug.refund_by_operator {
log::info!(
"{}",
format!(
" WARNING: Refund by VM: {}, but operator refunded more: {}",
to_human_size(debug.refund_computed),
to_human_size(debug.refund_by_operator)
)
.yellow()
);
}
if debug.refund_computed + gas_used != total_gas_limit {
log::info!(
"{}",
format!(
" WARNING: Gas totals don't match. {} != {} , delta: {}",
to_human_size(debug.refund_computed + gas_used),
to_human_size(total_gas_limit),
to_human_size(total_gas_limit.abs_diff(debug.refund_computed + gas_used))
)
.yellow()
);
}
let bytes_published = spent_on_pubdata / debug.gas_per_pubdata.as_u32();
log::info!(
"During execution published {} bytes to L1, @{} each - in total {} gas",
to_human_size(bytes_published.into()),
to_human_size(debug.gas_per_pubdata),
to_human_size(spent_on_pubdata.into())
);
log::info!("Out of {} gas used, we spent:", to_human_size(gas_used));
log::info!(
" {:>15} gas ({:>2}%) for transaction setup",
to_human_size(intrinsic_gas),
to_human_size(intrinsic_gas * 100 / gas_used)
);
log::info!(
" {:>15} gas ({:>2}%) for bytecode preparation (decompression etc)",
to_human_size(debug.gas_spent_on_bytecode_preparation),
to_human_size(debug.gas_spent_on_bytecode_preparation * 100 / gas_used)
);
log::info!(
" {:>15} gas ({:>2}%) for account validation",
to_human_size(gas_for_validation),
to_human_size(gas_for_validation * 100 / gas_used)
);
log::info!(
" {:>15} gas ({:>2}%) for computations (opcodes)",
to_human_size(gas_spent_on_compute),
to_human_size(gas_spent_on_compute * 100 / gas_used)
);
log::info!("");
log::info!("");
log::info!(
"{}",
"=== Transaction setup cost breakdown ===".to_owned().bold(),
);
log::info!("Total cost: {}", to_human_size(intrinsic_gas).bold());
log::info!(
" {:>15} gas ({:>2}%) fixed cost",
to_human_size(debug.intrinsic_overhead),
to_human_size(debug.intrinsic_overhead * 100 / intrinsic_gas)
);
log::info!(
" {:>15} gas ({:>2}%) operator cost",
to_human_size(debug.operator_overhead),
to_human_size(debug.operator_overhead * 100 / intrinsic_gas)
);
log::info!("");
log::info!(
" FYI: operator could have charged up to: {}, so you got {}% discount",
to_human_size(debug.required_overhead),
to_human_size(
(debug.required_overhead - debug.operator_overhead) * 100
/ debug.required_overhead
)
);
let publish_block_l1_bytes = BLOCK_OVERHEAD_PUBDATA;
log::info!(
"Publishing full block costs the operator up to: {}, where {} is due to {} bytes published to L1",
to_human_size(debug.total_overhead_for_block),
to_human_size(debug.gas_per_pubdata * publish_block_l1_bytes),
to_human_size(publish_block_l1_bytes.into())
);
log::info!("Your transaction has contributed to filling up the block in the following way (we take the max contribution as the cost):");
log::info!(
" Circuits overhead:{:>15} ({}% of the full block: {})",
to_human_size(debug.overhead_for_circuits),
to_human_size(debug.overhead_for_circuits * 100 / debug.total_overhead_for_block),
to_human_size(debug.total_overhead_for_block)
);
log::info!(
" Length overhead: {:>15}",
to_human_size(debug.overhead_for_length)
);
log::info!(
" Slot overhead: {:>15}",
to_human_size(debug.overhead_for_slot)
);
Ok(())
} else {
Err("Booloader tracer didn't finish.".to_owned())
}
}
/// Executes an L2 transaction in a fresh VM and collects everything needed to
/// persist it. Nothing is committed to `self.inner` here — persisting the
/// modified keys, bytecodes and block is the caller's job (see `run_l2_tx`).
///
/// Returns a tuple of:
/// - storage keys modified during execution,
/// - the VM execution result and logs,
/// - the recorded call traces,
/// - the API `Block` produced for this transaction,
/// - factory-dep bytecodes published by the transaction,
/// - the `BlockContext` used for this block.
pub fn run_l2_tx_inner(
    &self,
    l2_tx: L2Tx,
    execution_mode: TxExecutionMode,
) -> Result<L2TxResult, String> {
    let inner = self
        .inner
        .write()
        .map_err(|e| format!("Failed to acquire write lock: {}", e))?;
    // Temporary storage view over the fork storage; modified keys are read
    // back from it at the end rather than written through immediately.
    let storage = StorageView::new(&inner.fork_storage).to_rc_ptr();
    let (batch_env, block_ctx) = inner.create_l1_batch_env(storage.clone());
    // If the initiator is impersonated, use system contracts without security
    // checks so the unsigned transaction can execute.
    let nonverifying_contracts;
    let bootloader_code = {
        if inner
            .impersonated_accounts
            .contains(&l2_tx.common_data.initiator_address)
        {
            tracing::info!(
                "🕵️ Executing tx from impersonated account {:?}",
                l2_tx.common_data.initiator_address
            );
            nonverifying_contracts =
                SystemContracts::from_options(&Options::BuiltInWithoutSecurity);
            nonverifying_contracts.contracts(execution_mode)
        } else {
            inner.system_contracts.contracts(execution_mode)
        }
    };
    let system_env = inner.create_system_env(bootloader_code.clone(), execution_mode);
    let mut vm = Vm::new(
        batch_env.clone(),
        system_env,
        storage.clone(),
        HistoryDisabled,
    );
    let tx: Transaction = l2_tx.clone().into();
    vm.push_transaction(tx.clone());
    // The tracers publish their output into these cells during `inspect`.
    let call_tracer_result = Arc::new(OnceCell::default());
    let bootloader_debug_result = Arc::new(OnceCell::default());
    let custom_tracers = vec![
        Box::new(CallTracer::new(call_tracer_result.clone(), HistoryDisabled))
            as Box<dyn VmTracer<StorageView<&ForkStorage<S>>, HistoryDisabled>>,
        Box::new(BootloaderDebugTracer {
            result: bootloader_debug_result.clone(),
        }) as Box<dyn VmTracer<StorageView<&ForkStorage<S>>, HistoryDisabled>>,
    ];
    let tx_result = vm.inspect(custom_tracers, vm::VmExecutionMode::OneTx);
    let call_traces = call_tracer_result.get().unwrap();
    // Gas spent on pubdata = total gas used minus the computational part.
    let spent_on_pubdata =
        tx_result.statistics.gas_used - tx_result.statistics.computational_gas_used;
    log::info!("┌─────────────────────────┐");
    log::info!("│   TRANSACTION SUMMARY   │");
    log::info!("└─────────────────────────┘");
    match &tx_result.result {
        ExecutionResult::Success { .. } => log::info!("Transaction: {}", "SUCCESS".green()),
        ExecutionResult::Revert { .. } => log::info!("Transaction: {}", "FAILED".red()),
        ExecutionResult::Halt { .. } => log::info!("Transaction: {}", "HALTED".red()),
    }
    log::info!("Initiator: {:?}", tx.initiator_account());
    log::info!("Payer: {:?}", tx.payer());
    log::info!(
        "Gas - Limit: {} | Used: {} | Refunded: {}",
        to_human_size(tx.gas_limit()),
        to_human_size(tx.gas_limit() - tx_result.refunds.gas_refunded),
        to_human_size(tx_result.refunds.gas_refunded.into())
    );
    // Optional detailed gas breakdown, driven by node configuration.
    match inner.show_gas_details {
        ShowGasDetails::None => log::info!(
            "Use --show-gas-details flag or call config_setShowGasDetails to display more info"
        ),
        ShowGasDetails::All => {
            if self
                .display_detailed_gas_info(bootloader_debug_result.get(), spent_on_pubdata)
                .is_err()
            {
                log::info!(
                    "{}",
                    "!!! FAILED TO GET DETAILED GAS INFO !!!".to_owned().red()
                );
            }
        }
    }
    if inner.show_storage_logs != ShowStorageLogs::None {
        log::info!("");
        log::info!("┌──────────────────┐");
        log::info!("│   STORAGE LOGS   │");
        log::info!("└──────────────────┘");
    }
    // Print storage logs filtered by the configured verbosity.
    for log_query in &tx_result.logs.storage_logs {
        match inner.show_storage_logs {
            ShowStorageLogs::Write => {
                if matches!(
                    log_query.log_type,
                    StorageLogQueryType::RepeatedWrite | StorageLogQueryType::InitialWrite
                ) {
                    formatter::print_logs(log_query);
                }
            }
            ShowStorageLogs::Read => {
                if log_query.log_type == StorageLogQueryType::Read {
                    formatter::print_logs(log_query);
                }
            }
            ShowStorageLogs::All => {
                formatter::print_logs(log_query);
            }
            _ => {}
        }
    }
    if inner.show_vm_details != ShowVMDetails::None {
        formatter::print_vm_details(&tx_result);
    }
    log::info!("");
    log::info!("==== Console logs: ");
    for call in call_traces {
        inner.console_log_handler.handle_call_recursive(call);
    }
    log::info!("");
    // Top-level call trace count (children of the bootloader's root call).
    let call_traces_count = if !call_traces.is_empty() {
        call_traces[0].calls.len()
    } else {
        0
    };
    log::info!(
        "==== {} Use --show-calls flag or call config_setShowCalls to display more info.",
        format!("{:?} call traces. ", call_traces_count).bold()
    );
    if inner.show_calls != ShowCalls::None {
        for call in call_traces {
            formatter::print_call(call, 0, &inner.show_calls, inner.resolve_hashes);
        }
    }
    log::info!("");
    log::info!(
        "==== {}",
        format!("{} events", tx_result.logs.events.len()).bold()
    );
    for event in &tx_result.logs.events {
        formatter::print_event(event, inner.resolve_hashes);
    }
    // Block hash is derived from the miniblock number and the tx hash.
    let hash = compute_hash(block_ctx.miniblock, l2_tx.hash());
    let mut transaction = zksync_types::api::Transaction::from(l2_tx);
    let block_hash = inner
        .block_hashes
        .get(&inner.current_miniblock)
        .ok_or(format!(
            "Block hash not found for block: {}",
            inner.current_miniblock
        ))?;
    transaction.block_hash = Some(*block_hash);
    transaction.block_number = Some(U64::from(inner.current_miniblock));
    let block = Block {
        hash,
        number: U64::from(block_ctx.miniblock),
        timestamp: U256::from(batch_env.timestamp),
        l1_batch_number: Some(U64::from(batch_env.number.0)),
        transactions: vec![TransactionVariant::Full(transaction)],
        gas_used: U256::from(tx_result.statistics.gas_used),
        gas_limit: U256::from(BLOCK_GAS_LIMIT),
        ..Default::default()
    };
    log::info!("");
    log::info!("");
    // Factory deps published by this transaction, as (hash, words) pairs.
    let bytecodes = vm
        .get_last_tx_compressed_bytecodes()
        .iter()
        .map(|b| bytecode_to_factory_dep(b.original.clone()))
        .collect();
    // Run the bootloader to completion so the batch is properly sealed.
    vm.execute(vm::VmExecutionMode::Bootloader);
    let modified_keys = storage.borrow().modified_storage_keys().clone();
    Ok((
        modified_keys,
        tx_result,
        call_traces.clone(),
        block,
        bytecodes,
        block_ctx,
    ))
}
/// Runs an L2 transaction end-to-end: executes it via `run_l2_tx_inner`, then
/// commits the results — storage writes, factory deps, filter notifications,
/// the receipt, and the produced blocks — into `self.inner`.
fn run_l2_tx(&self, l2_tx: L2Tx, execution_mode: TxExecutionMode) -> Result<(), String> {
    let tx_hash = l2_tx.hash();
    log::info!("");
    log::info!("Executing {}", format!("{:?}", tx_hash).bold());
    {
        // Scoped lock: notify pending-tx filters, then release before the
        // inner run (which acquires the lock itself).
        let mut inner = self
            .inner
            .write()
            .map_err(|e| format!("Failed to acquire write lock: {}", e))?;
        inner.filters.notify_new_pending_transaction(tx_hash);
    }
    let (keys, result, call_traces, block, bytecodes, block_ctx) =
        self.run_l2_tx_inner(l2_tx.clone(), execution_mode)?;
    // Halted transactions are rejected outright; nothing gets committed.
    if let ExecutionResult::Halt { reason } = result.result {
        return Err(format!("Transaction HALT: {}", reason));
    }
    let mut inner = self
        .inner
        .write()
        .map_err(|e| format!("Failed to acquire write lock: {}", e))?;
    // Commit the storage writes produced by the VM run.
    for (key, value) in keys.iter() {
        inner.fork_storage.set_value(*key, *value);
    }
    // Persist the published factory deps, re-assembling each bytecode from
    // its 32-byte big-endian words.
    for (hash, code) in bytecodes.iter() {
        inner.fork_storage.store_factory_dep(
            u256_to_h256(*hash),
            code.iter()
                .flat_map(|entry| {
                    let mut bytes = vec![0u8; 32];
                    entry.to_big_endian(&mut bytes);
                    bytes.to_vec()
                })
                .collect(),
        )
    }
    // Feed every emitted event into the installed log filters.
    for (log_idx, event) in result.logs.events.iter().enumerate() {
        inner.filters.notify_new_log(
            &Log {
                address: event.address,
                topics: event.indexed_topics.clone(),
                data: Bytes(event.value.clone()),
                block_hash: Some(block.hash),
                block_number: Some(block.number),
                l1_batch_number: block.l1_batch_number,
                transaction_hash: Some(tx_hash),
                transaction_index: Some(U64::zero()),
                log_index: Some(U256::from(log_idx)),
                transaction_log_index: Some(U256::from(log_idx)),
                log_type: None,
                removed: None,
            },
            block.number,
        );
    }
    // Build the receipt; the tx is always at index 0 of its (single-tx) block.
    let tx_receipt = TransactionReceipt {
        transaction_hash: tx_hash,
        transaction_index: U64::from(0),
        block_hash: Some(block.hash),
        block_number: Some(block.number),
        l1_batch_tx_index: None,
        l1_batch_number: block.l1_batch_number,
        from: l2_tx.initiator_account(),
        to: Some(l2_tx.recipient_account()),
        cumulative_gas_used: Default::default(),
        gas_used: Some(l2_tx.common_data.fee.gas_limit - result.refunds.gas_refunded),
        contract_address: contract_address_from_tx_result(&result),
        logs: result
            .logs
            .events
            .iter()
            .enumerate()
            .map(|(log_idx, log)| Log {
                address: log.address,
                topics: log.indexed_topics.clone(),
                data: Bytes(log.value.clone()),
                block_hash: Some(block.hash),
                block_number: Some(block.number),
                l1_batch_number: block.l1_batch_number,
                transaction_hash: Some(tx_hash),
                transaction_index: Some(U64::zero()),
                log_index: Some(U256::from(log_idx)),
                transaction_log_index: Some(U256::from(log_idx)),
                log_type: None,
                removed: None,
            })
            .collect(),
        l2_to_l1_logs: vec![],
        status: Some(if result.result.is_failed() {
            U64::from(0)
        } else {
            U64::from(1)
        }),
        effective_gas_price: Some(L2_GAS_PRICE.into()),
        ..Default::default()
    };
    let debug = create_debug_output(&l2_tx, &result, call_traces).expect("create debug output");
    inner.tx_results.insert(
        tx_hash,
        TransactionResult {
            info: TxExecutionInfo {
                tx: l2_tx,
                batch_number: block.l1_batch_number.unwrap_or_default().as_u32(),
                miniblock_number: block.number.as_u64(),
                result,
            },
            receipt: tx_receipt,
            debug,
        },
    );
    // The batch is closed with an additional empty block; advance the batch
    // counter once, then commit both blocks below.
    let block_ctx = block_ctx.new_block();
    let empty_block_at_end_of_batch =
        create_empty_block(block_ctx.miniblock, block_ctx.timestamp, block_ctx.batch);
    inner.current_batch = inner.current_batch.saturating_add(1);
    for block in vec![block, empty_block_at_end_of_batch] {
        // Snapshot the current state before advancing to the next miniblock.
        if let Err(err) = inner.archive_state() {
            log::error!(
                "failed archiving state for block {}: {}",
                inner.current_miniblock,
                err
            );
        }
        inner.current_miniblock = inner.current_miniblock.saturating_add(1);
        inner.current_timestamp = inner.current_timestamp.saturating_add(1);
        // Sanity checks: produced blocks must line up exactly with the node's
        // running batch/miniblock/timestamp counters.
        let actual_l1_batch_number = block
            .l1_batch_number
            .expect("block must have a l1_batch_number");
        if actual_l1_batch_number.as_u32() != inner.current_batch {
            panic!(
                "expected next block to have batch_number {}, got {}",
                inner.current_batch,
                actual_l1_batch_number.as_u32()
            );
        }
        if block.number.as_u64() != inner.current_miniblock {
            panic!(
                "expected next block to have miniblock {}, got {}",
                inner.current_miniblock,
                block.number.as_u64()
            );
        }
        if block.timestamp.as_u64() != inner.current_timestamp {
            panic!(
                "expected next block to have timestamp {}, got {}",
                inner.current_timestamp,
                block.timestamp.as_u64()
            );
        }
        let block_hash = block.hash;
        inner.current_miniblock_hash = block_hash;
        inner.block_hashes.insert(block.number.as_u64(), block.hash);
        inner.blocks.insert(block.hash, block);
        inner.filters.notify_new_block(block_hash);
    }
    Ok(())
}
}
/// Book-keeping for the block currently being produced: the L1 batch number,
/// the L2 block (miniblock) number, and the block timestamp.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct BlockContext {
    pub batch: u32,
    pub miniblock: u64,
    pub timestamp: u64,
}

impl BlockContext {
    /// Creates a context from the node's current batch/miniblock/timestamp.
    pub fn from_current(batch: u32, miniblock: u64, timestamp: u64) -> Self {
        Self {
            batch,
            miniblock,
            timestamp,
        }
    }

    /// Context for the first block of the *next* batch: batch, miniblock and
    /// timestamp all advance by one (saturating to avoid overflow).
    pub fn new_batch(&self) -> Self {
        Self {
            batch: self.batch.saturating_add(1),
            miniblock: self.miniblock.saturating_add(1),
            timestamp: self.timestamp.saturating_add(1),
        }
    }

    /// Context for the next block within the *same* batch: only the
    /// miniblock and timestamp advance.
    // Consistency fix: return `Self` like the other constructors.
    pub fn new_block(&self) -> Self {
        Self {
            batch: self.batch,
            miniblock: self.miniblock.saturating_add(1),
            timestamp: self.timestamp.saturating_add(1),
        }
    }
}
impl<S: Send + Sync + 'static + ForkSource + std::fmt::Debug> EthNamespaceT for InMemoryNode<S> {
/// Returns the chain id reported by the fork storage.
fn chain_id(&self) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<zksync_basic_types::U64>> {
    self.inner
        .read()
        .map(|reader| U64::from(reader.fork_storage.chain_id.0 as u64))
        .map_err(|_| into_jsrpc_error(Web3Error::InternalError))
        .into_boxed_future()
}
/// `eth_call`: executes the request against the current state without
/// committing anything. The block argument is ignored — calls always run
/// against the latest state.
fn call(
    &self,
    req: zksync_types::transaction_request::CallRequest,
    _block: Option<zksync_types::api::BlockIdVariant>,
) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<zksync_basic_types::Bytes>> {
    match L2Tx::from_request(req.into(), MAX_TX_SIZE) {
        Ok(mut tx) => {
            // Calls use a fixed gas limit rather than a caller-supplied one.
            tx.common_data.fee.gas_limit = ETH_CALL_GAS_LIMIT.into();
            let result = self.run_l2_call(tx);
            match result {
                Ok(execution_result) => match execution_result {
                    ExecutionResult::Success { output } => {
                        Ok(output.into()).into_boxed_future()
                    }
                    ExecutionResult::Revert { output } => {
                        // Surface the revert reason (if any) and the encoded
                        // revert data to the caller.
                        let message = output.to_user_friendly_string();
                        let pretty_message = format!(
                            "execution reverted{}{}",
                            if message.is_empty() { "" } else { ": " },
                            message
                        );
                        log::info!("{}", pretty_message.on_red());
                        Err(into_jsrpc_error(Web3Error::SubmitTransactionError(
                            pretty_message,
                            output.encoded_data(),
                        )))
                        .into_boxed_future()
                    }
                    ExecutionResult::Halt { reason } => {
                        // Halts carry no return data.
                        let message = reason.to_string();
                        let pretty_message = format!(
                            "execution halted {}{}",
                            if message.is_empty() { "" } else { ": " },
                            message
                        );
                        log::info!("{}", pretty_message.on_red());
                        Err(into_jsrpc_error(Web3Error::SubmitTransactionError(
                            pretty_message,
                            vec![],
                        )))
                        .into_boxed_future()
                    }
                },
                Err(e) => {
                    let error = Web3Error::InvalidTransactionData(
                        zksync_types::ethabi::Error::InvalidName(e),
                    );
                    Err(into_jsrpc_error(error)).into_boxed_future()
                }
            }
        }
        Err(e) => {
            let error = Web3Error::SerializationError(e);
            Err(into_jsrpc_error(error)).into_boxed_future()
        }
    }
}
/// `eth_getBalance`: returns the ETH balance of the given account from the
/// standard token-balance slot of the L2 ETH token contract. The block
/// argument is ignored — the latest state is always used.
fn get_balance(
    &self,
    address: zksync_basic_types::Address,
    _block: Option<zksync_types::api::BlockIdVariant>,
) -> BoxFuture<Result<U256, jsonrpc_core::Error>> {
    let inner = Arc::clone(&self.inner);
    Box::pin(async move {
        let balance_key = storage_key_for_standard_token_balance(
            AccountTreeId::new(L2_ETH_TOKEN_ADDRESS),
            &address,
        );
        // A write guard is taken because reading may populate the fork cache.
        let mut guard = inner
            .write()
            .map_err(|_| into_jsrpc_error(Web3Error::InternalError))?;
        let balance = guard.fork_storage.read_value(&balance_key);
        Ok(h256_to_u256(balance))
    })
}
/// `eth_getBlockByNumber`: resolves the block locally first, falling back to
/// the fork source (when forking) for unknown numbers.
fn get_block_by_number(
    &self,
    block_number: zksync_types::api::BlockNumber,
    full_transactions: bool,
) -> BoxFuture<
    jsonrpc_core::Result<
        Option<zksync_types::api::Block<zksync_types::api::TransactionVariant>>,
    >,
> {
    let inner = Arc::clone(&self.inner);
    Box::pin(async move {
        let maybe_block = {
            let reader = match inner.read() {
                Ok(r) => r,
                Err(_) => return Err(into_jsrpc_error(Web3Error::InternalError)),
            };
            // Map symbolic tags (Latest/Pending/...) to a concrete number.
            let number =
                utils::to_real_block_number(block_number, U64::from(reader.current_miniblock))
                    .as_u64();
            reader
                .block_hashes
                .get(&number)
                .and_then(|hash| reader.blocks.get(hash))
                .cloned()
                .or_else(|| {
                    // Fork fallback always requests full transactions.
                    reader
                        .fork_storage
                        .inner
                        .read()
                        .expect("failed reading fork storage")
                        .fork
                        .as_ref()
                        .and_then(|fork| {
                            fork.fork_source
                                .get_block_by_number(block_number, true)
                                .ok()
                                .flatten()
                        })
                })
        };
        match maybe_block {
            Some(mut block) => {
                let block_hash = block.hash;
                // Strip full transactions down to hashes unless full objects
                // were requested. A Hash variant when `full_transactions` is
                // set is unexpected since both sources provide full txs.
                block.transactions = block
                    .transactions
                    .into_iter()
                    .map(|transaction| match &transaction {
                        TransactionVariant::Full(inner) => {
                            if full_transactions {
                                transaction
                            } else {
                                TransactionVariant::Hash(inner.hash)
                            }
                        }
                        TransactionVariant::Hash(_) => {
                            if full_transactions {
                                panic!(
                                    "unexpected non full transaction for block {}",
                                    block_hash
                                )
                            } else {
                                transaction
                            }
                        }
                    })
                    .collect();
                Ok(Some(block))
            }
            None => Err(into_jsrpc_error(Web3Error::NoBlock)),
        }
    })
}
/// `eth_getCode`: returns the deployed bytecode at `address`, or empty bytes
/// when none is found. The block argument is ignored.
fn get_code(
    &self,
    address: zksync_basic_types::Address,
    _block: Option<zksync_types::api::BlockIdVariant>,
) -> BoxFuture<jsonrpc_core::Result<zksync_basic_types::Bytes>> {
    let inner = Arc::clone(&self.inner);
    Box::pin(async move {
        let code_key = get_code_key(&address);
        // Write guard: reading may populate the fork cache.
        let mut guard = inner
            .write()
            .map_err(|_| into_jsrpc_error(Web3Error::InternalError))?;
        let code_hash = guard.fork_storage.read_value(&code_key);
        let bytecode = guard
            .fork_storage
            .load_factory_dep_internal(code_hash)
            .unwrap_or_default();
        Ok(Bytes::from(bytecode))
    })
}
/// `eth_getTransactionCount`: returns the account nonce. The block argument
/// is ignored — the latest state is always used.
fn get_transaction_count(
    &self,
    address: zksync_basic_types::Address,
    _block: Option<zksync_types::api::BlockIdVariant>,
) -> BoxFuture<jsonrpc_core::Result<U256>> {
    let inner = Arc::clone(&self.inner);
    Box::pin(async move {
        let nonce_key = get_nonce_key(&address);
        // Write guard: reading may populate the fork cache.
        let mut guard = inner
            .write()
            .map_err(|_| into_jsrpc_error(Web3Error::InternalError))?;
        let nonce_value = guard.fork_storage.read_value(&nonce_key);
        Ok(h256_to_u64(nonce_value).into())
    })
}
/// `eth_getTransactionReceipt`: returns the locally stored receipt for the
/// transaction, or `None` when the hash is unknown to this node.
fn get_transaction_receipt(
    &self,
    hash: zksync_basic_types::H256,
) -> BoxFuture<jsonrpc_core::Result<Option<zksync_types::api::TransactionReceipt>>> {
    let inner = Arc::clone(&self.inner);
    Box::pin(async move {
        let reader = inner
            .read()
            .map_err(|_| into_jsrpc_error(Web3Error::InternalError))?;
        Ok(reader
            .tx_results
            .get(&hash)
            .map(|result| result.receipt.clone()))
    })
}
/// `eth_sendRawTransaction`: decodes the raw bytes, verifies the hash and
/// executes the transaction with full verification.
fn send_raw_transaction(
    &self,
    tx_bytes: zksync_basic_types::Bytes,
) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<zksync_basic_types::H256>> {
    let chain_id = match self.inner.read() {
        Ok(reader) => reader.fork_storage.chain_id,
        Err(_) => {
            return futures::future::err(into_jsrpc_error(Web3Error::InternalError)).boxed()
        }
    };
    let (tx_req, hash) = match TransactionRequest::from_bytes(&tx_bytes.0, chain_id.0) {
        Ok(result) => result,
        Err(e) => {
            return futures::future::err(into_jsrpc_error(Web3Error::SerializationError(e)))
                .boxed()
        }
    };
    let mut l2_tx: L2Tx = match L2Tx::from_request(tx_req, MAX_TX_SIZE) {
        Ok(tx) => tx,
        Err(e) => {
            return futures::future::err(into_jsrpc_error(Web3Error::SerializationError(e)))
                .boxed()
        }
    };
    // Attach the original raw input, then require the recomputed tx hash to
    // match the one derived from the wire bytes.
    l2_tx.set_input(tx_bytes.0, hash);
    if hash != l2_tx.hash() {
        return futures::future::err(into_jsrpc_error(Web3Error::InvalidTransactionData(
            zksync_types::ethabi::Error::InvalidData,
        )))
        .boxed();
    };
    match self.run_l2_tx(l2_tx.clone(), TxExecutionMode::VerifyExecute) {
        Ok(_) => Ok(hash).into_boxed_future(),
        Err(e) => {
            let error_message = format!("Execution error: {}", e);
            futures::future::err(into_jsrpc_error(Web3Error::SubmitTransactionError(
                error_message,
                l2_tx.hash().as_bytes().to_vec(),
            )))
            .boxed()
        }
    }
}
/// `eth_getBlockByHash`: resolves the block locally first, falling back to
/// the fork source (when forking) for unknown hashes.
fn get_block_by_hash(
    &self,
    hash: zksync_basic_types::H256,
    full_transactions: bool,
) -> jsonrpc_core::BoxFuture<
    jsonrpc_core::Result<
        Option<zksync_types::api::Block<zksync_types::api::TransactionVariant>>,
    >,
> {
    let inner = Arc::clone(&self.inner);
    Box::pin(async move {
        let maybe_block = {
            let reader = inner
                .read()
                .map_err(|_| into_jsrpc_error(Web3Error::InternalError))?;
            reader.blocks.get(&hash).cloned().or_else(|| {
                // Fork fallback always requests full transactions.
                reader
                    .fork_storage
                    .inner
                    .read()
                    .expect("failed reading fork storage")
                    .fork
                    .as_ref()
                    .and_then(|fork| {
                        fork.fork_source
                            .get_block_by_hash(hash, true)
                            .ok()
                            .flatten()
                    })
            })
        };
        match maybe_block {
            Some(mut block) => {
                let block_hash = block.hash;
                // Strip full transactions down to hashes unless full objects
                // were requested; a Hash variant alongside `full_transactions`
                // is unexpected since both sources provide full txs.
                block.transactions = block
                    .transactions
                    .into_iter()
                    .map(|transaction| match &transaction {
                        TransactionVariant::Full(inner) => {
                            if full_transactions {
                                transaction
                            } else {
                                TransactionVariant::Hash(inner.hash)
                            }
                        }
                        TransactionVariant::Hash(_) => {
                            if full_transactions {
                                panic!(
                                    "unexpected non full transaction for block {}",
                                    block_hash
                                )
                            } else {
                                transaction
                            }
                        }
                    })
                    .collect();
                Ok(Some(block))
            }
            None => Err(into_jsrpc_error(Web3Error::NoBlock)),
        }
    })
}
/// `eth_getTransactionByHash`: looks the transaction up in the locally stored
/// results first, then falls back to the fork source (when forking).
fn get_transaction_by_hash(
    &self,
    hash: zksync_basic_types::H256,
) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<Option<zksync_types::api::Transaction>>> {
    let inner = Arc::clone(&self.inner);
    Box::pin(async move {
        let reader = inner
            .read()
            .map_err(|_| into_jsrpc_error(Web3Error::InternalError))?;
        let maybe_result = {
            reader
                .tx_results
                .get(&hash)
                .and_then(|TransactionResult { info, .. }| {
                    // Transactions without raw input or an extractable chain
                    // id cannot be rendered; fall through to the fork lookup.
                    let input_data = info.tx.common_data.input.clone()?;
                    let chain_id = info.tx.common_data.extract_chain_id()?;
                    Some(zksync_types::api::Transaction {
                        hash,
                        nonce: U256::from(info.tx.common_data.nonce.0),
                        // Bug fix: this previously reported the *transaction*
                        // hash; report the containing block's hash instead.
                        block_hash: reader
                            .block_hashes
                            .get(&info.miniblock_number)
                            .copied(),
                        block_number: Some(U64::from(info.miniblock_number)),
                        // NOTE(review): receipts report index 0 for the same
                        // tx — confirm whether this should match.
                        transaction_index: Some(U64::from(1)),
                        from: Some(info.tx.initiator_account()),
                        to: Some(info.tx.recipient_account()),
                        value: info.tx.execute.value,
                        gas_price: Default::default(),
                        gas: Default::default(),
                        input: input_data.data.into(),
                        v: Some(chain_id.into()),
                        r: Some(U256::zero()),
                        s: Some(U256::zero()),
                        raw: None,
                        transaction_type: {
                            // Map the internal tx type to the EIP tx-type id.
                            let tx_type = match info.tx.common_data.transaction_type {
                                zksync_types::l2::TransactionType::LegacyTransaction => 0,
                                zksync_types::l2::TransactionType::EIP2930Transaction => 1,
                                zksync_types::l2::TransactionType::EIP1559Transaction => 2,
                                zksync_types::l2::TransactionType::EIP712Transaction => 113,
                                zksync_types::l2::TransactionType::PriorityOpTransaction => 255,
                                zksync_types::l2::TransactionType::ProtocolUpgradeTransaction => 254,
                            };
                            Some(tx_type.into())
                        },
                        access_list: None,
                        max_fee_per_gas: Some(info.tx.common_data.fee.max_fee_per_gas),
                        max_priority_fee_per_gas: Some(
                            info.tx.common_data.fee.max_priority_fee_per_gas,
                        ),
                        chain_id: chain_id.into(),
                        l1_batch_number: Some(U64::from(info.batch_number as u64)),
                        l1_batch_tx_index: None,
                    })
                })
                .or_else(|| {
                    reader
                        .fork_storage
                        .inner
                        .read()
                        .expect("failed reading fork storage")
                        .fork
                        .as_ref()
                        .and_then(|fork| {
                            fork.fork_source
                                .get_transaction_by_hash(hash)
                                .ok()
                                .flatten()
                        })
                })
        };
        Ok(maybe_result)
    })
}
/// `eth_blockNumber`: returns the current miniblock number.
fn get_block_number(
    &self,
) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<zksync_basic_types::U64>> {
    let inner = Arc::clone(&self.inner);
    Box::pin(async move {
        inner
            .read()
            .map(|reader| U64::from(reader.current_miniblock))
            .map_err(|_| into_jsrpc_error(Web3Error::InternalError))
    })
}
/// `eth_estimateGas`: delegates to the node's fee estimation and returns the
/// estimated gas limit. The block argument is ignored.
fn estimate_gas(
    &self,
    req: zksync_types::transaction_request::CallRequest,
    _block: Option<zksync_types::api::BlockNumber>,
) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<U256>> {
    let reader = match self.inner.read() {
        Ok(reader) => reader,
        Err(_) => {
            return futures::future::err(into_jsrpc_error(Web3Error::InternalError)).boxed()
        }
    };
    match reader.estimate_gas_impl(req) {
        Ok(fee) => Ok(fee.gas_limit).into_boxed_future(),
        Err(err) => futures::future::err(err).boxed(),
    }
}
/// `eth_gasPrice`: the node charges a constant fair L2 gas price.
fn gas_price(&self) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<U256>> {
    Ok(U256::from(L2_GAS_PRICE)).into_boxed_future()
}
/// `eth_newFilter`: installs a log filter and returns its id. Missing block
/// bounds default to `Latest`; only the first four topic slots are used.
fn new_filter(&self, filter: Filter) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<U256>> {
    let mut writer = match self.inner.write() {
        Ok(writer) => writer,
        Err(_) => {
            return futures::future::err(into_jsrpc_error(Web3Error::InternalError)).boxed()
        }
    };
    let from_block = filter
        .from_block
        .unwrap_or(zksync_types::api::BlockNumber::Latest);
    let to_block = filter
        .to_block
        .unwrap_or(zksync_types::api::BlockNumber::Latest);
    let addresses = filter.address.unwrap_or_default().0;
    let mut topics: [Option<HashSet<H256>>; 4] = Default::default();
    for (i, maybe_topic_set) in filter.topics.into_iter().flatten().take(4).enumerate() {
        if let Some(topic_set) = maybe_topic_set {
            topics[i] = Some(topic_set.0.into_iter().collect());
        }
    }
    writer
        .filters
        .add_log_filter(from_block, to_block, addresses, topics)
        .map_err(|_| into_jsrpc_error(Web3Error::InternalError))
        .into_boxed_future()
}
/// `eth_newBlockFilter`: installs a new-block filter and returns its id.
fn new_block_filter(&self) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<U256>> {
    match self.inner.write() {
        Ok(mut writer) => writer
            .filters
            .add_block_filter()
            .map_err(|_| into_jsrpc_error(Web3Error::InternalError))
            .into_boxed_future(),
        Err(_) => futures::future::err(into_jsrpc_error(Web3Error::InternalError)).boxed(),
    }
}
/// `eth_newPendingTransactionFilter`: installs a pending-transaction filter
/// and returns its id.
fn new_pending_transaction_filter(
    &self,
) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<U256>> {
    match self.inner.write() {
        Ok(mut writer) => writer
            .filters
            .add_pending_transaction_filter()
            .map_err(|_| into_jsrpc_error(Web3Error::InternalError))
            .into_boxed_future(),
        Err(_) => futures::future::err(into_jsrpc_error(Web3Error::InternalError)).boxed(),
    }
}
/// `eth_uninstallFilter`: removes the filter; returns whether it existed.
fn uninstall_filter(&self, id: U256) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<bool>> {
    match self.inner.write() {
        Ok(mut writer) => Ok(writer.filters.remove_filter(id)).into_boxed_future(),
        Err(_) => futures::future::err(into_jsrpc_error(Web3Error::InternalError)).boxed(),
    }
}
/// `eth_getLogs`: scans all locally stored receipts and returns the logs
/// matching the filter. Missing bounds default to Earliest..Latest; only the
/// first four topic slots are used.
fn get_logs(
    &self,
    filter: Filter,
) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<Vec<zksync_types::api::Log>>> {
    let reader = match self.inner.read() {
        Ok(r) => r,
        Err(_) => {
            return futures::future::err(into_jsrpc_error(Web3Error::InternalError)).boxed()
        }
    };
    let from_block = filter
        .from_block
        .unwrap_or(zksync_types::api::BlockNumber::Earliest);
    let to_block = filter
        .to_block
        .unwrap_or(zksync_types::api::BlockNumber::Latest);
    let addresses = filter.address.unwrap_or_default().0;
    let mut topics: [Option<HashSet<H256>>; 4] = Default::default();
    if let Some(filter_topics) = filter.topics {
        filter_topics
            .into_iter()
            .take(4)
            .enumerate()
            .for_each(|(i, maybe_topic_set)| {
                if let Some(topic_set) = maybe_topic_set {
                    topics[i] = Some(topic_set.0.into_iter().collect());
                }
            })
    }
    let log_filter = LogFilter::new(from_block, to_block, addresses, topics);
    let latest_block_number = U64::from(reader.current_miniblock);
    // Linear scan over every stored receipt's logs.
    let logs = reader
        .tx_results
        .values()
        .flat_map(|tx_result| {
            tx_result
                .receipt
                .logs
                .iter()
                .filter(|log| log_filter.matches(log, latest_block_number))
                .cloned()
        })
        .collect_vec();
    Ok(logs).into_boxed_future()
}
/// `eth_getFilterLogs`: returns ALL logs matching the given log filter (not
/// just new ones since the last poll).
// NOTE(review): an unknown id or a non-log filter yields InternalError;
// arguably this should be an invalid-filter error — confirm intended.
fn get_filter_logs(
    &self,
    id: U256,
) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<FilterChanges>> {
    let reader = match self.inner.read() {
        Ok(r) => r,
        Err(_) => {
            return futures::future::err(into_jsrpc_error(Web3Error::InternalError)).boxed()
        }
    };
    let latest_block_number = U64::from(reader.current_miniblock);
    let logs = match reader.filters.get_filter(id) {
        Some(FilterType::Log(f)) => reader
            .tx_results
            .values()
            .flat_map(|tx_result| {
                tx_result
                    .receipt
                    .logs
                    .iter()
                    .filter(|log| f.matches(log, latest_block_number))
                    .cloned()
            })
            .collect_vec(),
        _ => return futures::future::err(into_jsrpc_error(Web3Error::InternalError)).boxed(),
    };
    Ok(FilterChanges::Logs(logs)).into_boxed_future()
}
/// `eth_getFilterChanges`: returns the changes accumulated for the filter
/// since the previous poll (draining them in the process).
fn get_filter_changes(
    &self,
    id: U256,
) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<FilterChanges>> {
    match self.inner.write() {
        Ok(mut writer) => writer
            .filters
            .get_new_changes(id)
            .map_err(|_| into_jsrpc_error(Web3Error::InternalError))
            .into_boxed_future(),
        Err(_) => futures::future::err(into_jsrpc_error(Web3Error::InternalError)).boxed(),
    }
}
/// `eth_getBlockTransactionCountByNumber`: counts transactions in the block,
/// falling back to the fork source for blocks this node doesn't have.
fn get_block_transaction_count_by_number(
    &self,
    block_number: zksync_types::api::BlockNumber,
) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<Option<U256>>> {
    let inner = Arc::clone(&self.inner);
    Box::pin(async move {
        let maybe_result = {
            let reader = match inner.read() {
                Ok(r) => r,
                Err(_) => return Err(into_jsrpc_error(Web3Error::InternalError)),
            };
            // Map symbolic tags (Latest/Pending/...) to a concrete number.
            let number =
                utils::to_real_block_number(block_number, U64::from(reader.current_miniblock))
                    .as_u64();
            reader
                .block_hashes
                .get(&number)
                .and_then(|hash| reader.blocks.get(hash))
                .map(|block| U256::from(block.transactions.len()))
                .or_else(|| {
                    reader
                        .fork_storage
                        .inner
                        .read()
                        .expect("failed reading fork storage")
                        .fork
                        .as_ref()
                        .and_then(|fork| {
                            fork.fork_source
                                .get_block_transaction_count_by_number(block_number)
                                .ok()
                                .flatten()
                        })
                })
        };
        match maybe_result {
            Some(value) => Ok(Some(value)),
            None => Err(into_jsrpc_error(Web3Error::NoBlock)),
        }
    })
}
/// `eth_getBlockTransactionCountByHash`: counts transactions in the block,
/// falling back to the fork source for blocks this node doesn't have.
fn get_block_transaction_count_by_hash(
    &self,
    block_hash: zksync_basic_types::H256,
) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<Option<U256>>> {
    let inner = Arc::clone(&self.inner);
    Box::pin(async move {
        let reader = inner
            .read()
            .map_err(|_| into_jsrpc_error(Web3Error::InternalError))?;
        let maybe_result = reader
            .blocks
            .get(&block_hash)
            .map(|block| U256::from(block.transactions.len()))
            .or_else(|| {
                reader
                    .fork_storage
                    .inner
                    .read()
                    .expect("failed reading fork storage")
                    .fork
                    .as_ref()
                    .and_then(|fork| {
                        fork.fork_source
                            .get_block_transaction_count_by_hash(block_hash)
                            .ok()
                            .flatten()
                    })
            });
        match maybe_result {
            Some(value) => Ok(Some(value)),
            None => Err(into_jsrpc_error(Web3Error::NoBlock)),
        }
    })
}
/// `eth_getStorageAt`: reads a storage slot, honoring historical block ids:
/// - current block: read live fork storage;
/// - known historical block: read the archived snapshot, falling back to
///   live storage when the snapshot holds no (non-zero) value;
/// - unknown block: delegate to the fork source.
fn get_storage(
    &self,
    address: zksync_basic_types::Address,
    idx: U256,
    block: Option<zksync_types::api::BlockIdVariant>,
) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<zksync_basic_types::H256>> {
    let inner = Arc::clone(&self.inner);
    Box::pin(async move {
        let mut writer = match inner.write() {
            Ok(r) => r,
            Err(_) => {
                return Err(into_jsrpc_error(Web3Error::InternalError));
            }
        };
        let storage_key = StorageKey::new(AccountTreeId::new(address), u256_to_h256(idx));
        // Resolve the requested block id to a concrete block number; a hash
        // variant must refer to a locally known block. No id means "latest".
        let block_number = block
            .map(|block| match block {
                zksync_types::api::BlockIdVariant::BlockNumber(block_number) => {
                    Ok(utils::to_real_block_number(
                        block_number,
                        U64::from(writer.current_miniblock),
                    ))
                }
                zksync_types::api::BlockIdVariant::BlockNumberObject(o) => {
                    Ok(utils::to_real_block_number(
                        o.block_number,
                        U64::from(writer.current_miniblock),
                    ))
                }
                zksync_types::api::BlockIdVariant::BlockHashObject(o) => writer
                    .blocks
                    .get(&o.block_hash)
                    .map(|block| block.number)
                    .ok_or_else(|| {
                        log::error!("unable to map block number to hash #{:#x}", o.block_hash);
                        into_jsrpc_error(Web3Error::InternalError)
                    }),
            })
            .unwrap_or_else(|| Ok(U64::from(writer.current_miniblock)))?;
        if block_number.as_u64() == writer.current_miniblock {
            Ok(H256(writer.fork_storage.read_value(&storage_key).0))
        } else if writer.block_hashes.contains_key(&block_number.as_u64()) {
            // Historical read from the archived per-block state snapshot.
            let value = writer
                .block_hashes
                .get(&block_number.as_u64())
                .and_then(|block_hash| writer.previous_states.get(block_hash))
                .and_then(|state| state.get(&storage_key))
                .cloned()
                .unwrap_or_default();
            // NOTE(review): a genuinely-zero archived value is treated the
            // same as "absent" and falls back to the live value — confirm.
            if value.is_zero() {
                Ok(H256(writer.fork_storage.read_value(&storage_key).0))
            } else {
                Ok(value)
            }
        } else {
            writer
                .fork_storage
                .inner
                .read()
                .expect("failed reading fork storage")
                .fork
                .as_ref()
                .and_then(|fork| fork.fork_source.get_storage_at(address, idx, block).ok())
                .ok_or_else(|| {
                    log::error!(
                        "unable to get storage at address {:?}, index {:?} for block {:?}",
                        address,
                        idx,
                        block
                    );
                    into_jsrpc_error(Web3Error::InternalError)
                })
        }
    })
}
/// `eth_getTransactionByBlockHashAndIndex`: resolves locally first; a
/// hash-only variant or an unknown block falls back to the fork source.
fn get_transaction_by_block_hash_and_index(
    &self,
    block_hash: zksync_basic_types::H256,
    index: zksync_basic_types::web3::types::Index,
) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<Option<zksync_types::api::Transaction>>> {
    let inner = Arc::clone(&self.inner);
    Box::pin(async move {
        let reader = match inner.read() {
            Ok(r) => r,
            Err(_) => {
                return Err(into_jsrpc_error(Web3Error::InternalError));
            }
        };
        let maybe_tx = reader
            .blocks
            .get(&block_hash)
            .and_then(|block| block.transactions.get(index.as_usize()))
            .and_then(|tx| match tx {
                TransactionVariant::Full(tx) => Some(tx.clone()),
                // Only the hash is stored locally — fetch the full tx from
                // the fork source.
                TransactionVariant::Hash(tx_hash) => reader
                    .fork_storage
                    .inner
                    .read()
                    .expect("failed reading fork storage")
                    .fork
                    .as_ref()
                    .and_then(|fork| {
                        fork.fork_source
                            .get_transaction_by_hash(*tx_hash)
                            .ok()
                            .flatten()
                    }),
            })
            .or_else(|| {
                // Block unknown locally: delegate the whole lookup.
                reader
                    .fork_storage
                    .inner
                    .read()
                    .expect("failed reading fork storage")
                    .fork
                    .as_ref()
                    .and_then(|fork| {
                        fork.fork_source
                            .get_transaction_by_block_hash_and_index(block_hash, index)
                            .ok()
                    })
                    .flatten()
            });
        Ok(maybe_tx)
    })
}
/// `eth_getTransactionByBlockNumberAndIndex`: resolves locally first; a
/// hash-only variant or an unknown block falls back to the fork source.
fn get_transaction_by_block_number_and_index(
    &self,
    block_number: zksync_types::api::BlockNumber,
    index: zksync_basic_types::web3::types::Index,
) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<Option<zksync_types::api::Transaction>>> {
    let inner = Arc::clone(&self.inner);
    Box::pin(async move {
        let reader = match inner.read() {
            Ok(r) => r,
            Err(_) => {
                return Err(into_jsrpc_error(Web3Error::InternalError));
            }
        };
        // Map symbolic tags (Latest/Pending/...) to a concrete number.
        let real_block_number =
            utils::to_real_block_number(block_number, U64::from(reader.current_miniblock));
        let maybe_tx = reader
            .block_hashes
            .get(&real_block_number.as_u64())
            .and_then(|block_hash| reader.blocks.get(block_hash))
            .and_then(|block| block.transactions.get(index.as_usize()))
            .and_then(|tx| match tx {
                TransactionVariant::Full(tx) => Some(tx.clone()),
                // Only the hash is stored locally — fetch the full tx from
                // the fork source.
                TransactionVariant::Hash(tx_hash) => reader
                    .fork_storage
                    .inner
                    .read()
                    .expect("failed reading fork storage")
                    .fork
                    .as_ref()
                    .and_then(|fork| {
                        fork.fork_source
                            .get_transaction_by_hash(*tx_hash)
                            .ok()
                            .flatten()
                    }),
            })
            .or_else(|| {
                // Block unknown locally: delegate the whole lookup.
                reader
                    .fork_storage
                    .inner
                    .read()
                    .expect("failed reading fork storage")
                    .fork
                    .as_ref()
                    .and_then(|fork| {
                        fork.fork_source
                            .get_transaction_by_block_number_and_index(block_number, index)
                            .ok()
                    })
                    .flatten()
            });
        Ok(maybe_tx)
    })
}
/// `eth_protocolVersion`: returns the node's fixed protocol version string.
fn protocol_version(&self) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<String>> {
    Ok(String::from(PROTOCOL_VERSION)).into_boxed_future()
}
/// `eth_syncing`: the in-memory node is always fully "synced".
fn syncing(
    &self,
) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<zksync_basic_types::web3::types::SyncState>>
{
    Ok(zksync_basic_types::web3::types::SyncState::NotSyncing).into_boxed_future()
}
/// `eth_accounts`: returns the addresses of the node's rich accounts.
fn accounts(&self) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<Vec<H160>>> {
    match self.inner.read() {
        Ok(reader) => {
            let accounts: Vec<H160> = reader.rich_accounts.iter().cloned().collect();
            futures::future::ok(accounts).boxed()
        }
        Err(_) => futures::future::err(into_jsrpc_error(Web3Error::InternalError)).boxed(),
    }
}
/// `eth_coinbase`: not supported by this node.
fn coinbase(
    &self,
) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<zksync_basic_types::Address>> {
    not_implemented("eth_coinbase")
}
/// `eth_getCompilers`: not supported by this node.
fn compilers(&self) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<Vec<String>>> {
    not_implemented("eth_getCompilers")
}
/// `eth_hashrate`: not supported by this node (no mining).
fn hashrate(&self) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<U256>> {
    not_implemented("eth_hashrate")
}
/// `eth_getUncleCountByBlockHash`: not supported (zkSync has no uncles).
fn get_uncle_count_by_block_hash(
    &self,
    _hash: zksync_basic_types::H256,
) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<Option<U256>>> {
    not_implemented("eth_getUncleCountByBlockHash")
}
/// `eth_getUncleCountByBlockNumber`: not supported (zkSync has no uncles).
fn get_uncle_count_by_block_number(
    &self,
    _number: zksync_types::api::BlockNumber,
) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<Option<U256>>> {
    not_implemented("eth_getUncleCountByBlockNumber")
}
/// `eth_mining`: not supported by this node (no mining).
fn mining(&self) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<bool>> {
    not_implemented("eth_mining")
}
/// `eth_feeHistory`: this node has a constant gas price, so base fees are
/// constant, gas-used ratios are zero, and all percentile rewards are zero.
/// The `_newest_block` argument is ignored — history always ends at the
/// current miniblock.
fn fee_history(
    &self,
    block_count: U64,
    _newest_block: zksync_types::api::BlockNumber,
    reward_percentiles: Vec<f32>,
) -> jsonrpc_core::BoxFuture<jsonrpc_core::Result<FeeHistory>> {
    let inner = Arc::clone(&self.inner);
    Box::pin(async move {
        let reader = inner
            .read()
            .map_err(|_| into_jsrpc_error(Web3Error::InternalError))?;
        // Cap at 1024 and at the number of blocks that actually exist
        // (miniblocks are 0-based, hence the +1); always return >= 1.
        let block_count = block_count
            .as_u64()
            .min(1024)
            .clamp(1, reader.current_miniblock + 1);
        let mut base_fee_per_gas = vec![U256::from(L2_GAS_PRICE); block_count as usize];
        let oldest_block = reader.current_miniblock + 1 - base_fee_per_gas.len() as u64;
        let gas_used_ratio = vec![0.0; base_fee_per_gas.len()];
        let reward = Some(vec![
            vec![U256::zero(); reward_percentiles.len()];
            base_fee_per_gas.len()
        ]);
        // Per the eth_feeHistory spec, `base_fee_per_gas` carries one extra
        // entry: the base fee of the block after the newest one returned.
        base_fee_per_gas.push(*base_fee_per_gas.last().unwrap());
        Ok(FeeHistory {
            oldest_block: web3::types::BlockNumber::Number(oldest_block.into()),
            base_fee_per_gas,
            gas_used_ratio,
            reward,
        })
    })
}
}
#[cfg(test)]
mod tests {
use crate::{
cache::CacheConfig,
http_fork_source::HttpForkSource,
node::InMemoryNode,
testing::{
self, default_tx_debug_info, ForkBlockConfig, LogBuilder, MockServer,
TransactionResponseBuilder,
},
};
use maplit::hashmap;
use zksync_basic_types::Nonce;
use zksync_types::{
api::{BlockHashObject, BlockNumber, BlockNumberObject},
utils::deployed_address_create,
};
use zksync_web3_decl::types::{SyncState, ValueOrArray};
use super::*;
    /// `eth_syncing` always reports `NotSyncing`: the in-memory node has no peers.
    #[tokio::test]
    async fn test_eth_syncing() {
        let node = InMemoryNode::<HttpForkSource>::default();
        let syncing = node.syncing().await.expect("failed syncing");
        assert!(matches!(syncing, SyncState::NotSyncing));
    }
    /// Fee history for one block: constant base fee (plus the extra next-block
    /// entry), zero usage ratio, and one zero reward per requested percentile.
    #[tokio::test]
    async fn test_get_fee_history_with_1_block() {
        let node = InMemoryNode::<HttpForkSource>::default();
        let fee_history = node
            .fee_history(U64::from(1), BlockNumber::Latest, vec![25.0, 50.0, 75.0])
            .await
            .expect("fee_history failed");
        assert_eq!(
            fee_history.oldest_block,
            web3::types::BlockNumber::Number(U64::from(0))
        );
        // 2 entries = 1 requested block + the predicted next-block base fee.
        assert_eq!(
            fee_history.base_fee_per_gas,
            vec![U256::from(L2_GAS_PRICE); 2]
        );
        assert_eq!(fee_history.gas_used_ratio, vec![0.0]);
        assert_eq!(fee_history.reward, Some(vec![vec![U256::from(0); 3]]));
    }
    /// With no reward percentiles requested, the reward rows are present but empty.
    #[tokio::test]
    async fn test_get_fee_history_with_no_reward_percentiles() {
        let node = InMemoryNode::<HttpForkSource>::default();
        let fee_history = node
            .fee_history(U64::from(1), BlockNumber::Latest, vec![])
            .await
            .expect("fee_history failed");
        assert_eq!(
            fee_history.oldest_block,
            web3::types::BlockNumber::Number(U64::from(0))
        );
        assert_eq!(
            fee_history.base_fee_per_gas,
            vec![U256::from(L2_GAS_PRICE); 2]
        );
        assert_eq!(fee_history.gas_used_ratio, vec![0.0]);
        assert_eq!(fee_history.reward, Some(vec![vec![]]));
    }
    /// After producing a transaction the node is at block 2; a 2-block window
    /// starts at block 1 and yields per-block entries for both blocks.
    #[tokio::test]
    async fn test_get_fee_history_with_multiple_blocks() {
        let node = InMemoryNode::<HttpForkSource>::default();
        testing::apply_tx(&node, H256::repeat_byte(0x01));
        let latest_block = node
            .get_block_number()
            .await
            .expect("Block number fetch failed");
        let fee_history = node
            .fee_history(U64::from(2), BlockNumber::Latest, vec![25.0, 50.0, 75.0])
            .await
            .expect("fee_history failed");
        assert_eq!(latest_block, U64::from(2));
        assert_eq!(
            fee_history.oldest_block,
            web3::types::BlockNumber::Number(U64::from(1))
        );
        assert_eq!(
            fee_history.base_fee_per_gas,
            vec![U256::from(L2_GAS_PRICE); 3]
        );
        assert_eq!(fee_history.gas_used_ratio, vec![0.0, 0.0]);
        assert_eq!(fee_history.reward, Some(vec![vec![U256::from(0); 3]; 2]));
    }
    /// Looking up an unknown block hash yields `Web3Error::NoBlock`.
    #[tokio::test]
    async fn test_get_block_by_hash_produces_no_block_error_for_non_existing_block() {
        let node = InMemoryNode::<HttpForkSource>::default();
        let expected_err = into_jsrpc_error(Web3Error::NoBlock);
        let result = node.get_block_by_hash(H256::repeat_byte(0x01), false).await;
        assert_eq!(expected_err, result.unwrap_err());
    }
    /// A freshly started node has a genesis block (number 0) with a deterministic hash.
    #[tokio::test]
    async fn test_node_run_has_genesis_block() {
        let node = InMemoryNode::<HttpForkSource>::default();
        let block = node
            .get_block_by_number(BlockNumber::Latest, false)
            .await
            .expect("failed fetching block by hash")
            .expect("no block");
        assert_eq!(0, block.number.as_u64());
        assert_eq!(compute_hash(0, H256::zero()), block.hash);
    }
    /// A block produced by a local transaction is retrievable by its hash.
    #[tokio::test]
    async fn test_get_block_by_hash_for_produced_block() {
        let node = InMemoryNode::<HttpForkSource>::default();
        let (expected_block_hash, _) = testing::apply_tx(&node, H256::repeat_byte(0x01));
        let actual_block = node
            .get_block_by_hash(expected_block_hash, false)
            .await
            .expect("failed fetching block by hash")
            .expect("no block");
        assert_eq!(expected_block_hash, actual_block.hash);
        assert_eq!(U64::from(1), actual_block.number);
        assert_eq!(Some(U64::from(1)), actual_block.l1_batch_number);
    }
    /// When created from a fork, the node caches the fork's current block by
    /// hash and its number→hash mapping.
    #[tokio::test]
    async fn test_node_block_mapping_is_correctly_populated_when_using_fork_source() {
        let input_block_number = 8;
        let input_block_hash = H256::repeat_byte(0x01);
        let mock_server = MockServer::run_with_config(ForkBlockConfig {
            number: input_block_number,
            hash: input_block_hash,
            transaction_count: 0,
        });
        let node = InMemoryNode::<HttpForkSource>::new(
            Some(ForkDetails::from_network(&mock_server.url(), None, CacheConfig::None).await),
            crate::node::ShowCalls::None,
            ShowStorageLogs::None,
            ShowVMDetails::None,
            ShowGasDetails::None,
            false,
            &system_contracts::Options::BuiltIn,
        );
        let inner = node.inner.read().unwrap();
        assert!(
            inner.blocks.contains_key(&input_block_hash),
            "block wasn't cached"
        );
        assert!(
            inner.block_hashes.contains_key(&input_block_number),
            "block number wasn't cached"
        );
    }
    /// A hash unknown locally is fetched from the fork source via
    /// `eth_getBlockByHash`; the fork block's number and a derived L1 batch
    /// number are surfaced to the caller.
    #[tokio::test]
    async fn test_get_block_by_hash_uses_fork_source() {
        let input_block_hash = H256::repeat_byte(0x01);
        let mock_server = MockServer::run_with_config(ForkBlockConfig {
            number: 10,
            hash: H256::repeat_byte(0xab),
            transaction_count: 0,
        });
        let mock_block_number = 8;
        let block_response = testing::BlockResponseBuilder::new()
            .set_hash(input_block_hash)
            .set_number(mock_block_number)
            .build();
        // Expect exactly this JSON-RPC request to be forwarded to the fork.
        mock_server.expect(
            serde_json::json!({
                "jsonrpc": "2.0",
                "id": 0,
                "method": "eth_getBlockByHash",
                "params": [
                    format!("{input_block_hash:#x}"),
                    true
                ],
            }),
            block_response,
        );
        let node = InMemoryNode::<HttpForkSource>::new(
            Some(ForkDetails::from_network(&mock_server.url(), None, CacheConfig::None).await),
            crate::node::ShowCalls::None,
            ShowStorageLogs::None,
            ShowVMDetails::None,
            ShowGasDetails::None,
            false,
            &system_contracts::Options::BuiltIn,
        );
        let actual_block = node
            .get_block_by_hash(input_block_hash, false)
            .await
            .expect("failed fetching block by hash")
            .expect("no block");
        assert_eq!(input_block_hash, actual_block.hash);
        assert_eq!(U64::from(mock_block_number), actual_block.number);
        assert_eq!(Some(U64::from(6)), actual_block.l1_batch_number);
    }
    /// Looking up a block number the node has never produced yields `Web3Error::NoBlock`.
    #[tokio::test]
    async fn test_get_block_by_number_produces_no_block_error_for_non_existing_block() {
        let node = InMemoryNode::<HttpForkSource>::default();
        let expected_err = into_jsrpc_error(Web3Error::NoBlock);
        let result = node
            .get_block_by_number(BlockNumber::Number(U64::from(42)), false)
            .await;
        assert_eq!(expected_err, result.unwrap_err());
    }
    /// A locally produced block is retrievable by number and carries its transaction.
    #[tokio::test]
    async fn test_get_block_by_number_for_produced_block() {
        let node = InMemoryNode::<HttpForkSource>::default();
        testing::apply_tx(&node, H256::repeat_byte(0x01));
        let expected_block_number = 1;
        let actual_block = node
            .get_block_by_number(BlockNumber::Number(U64::from(expected_block_number)), false)
            .await
            .expect("failed fetching block by hash")
            .expect("no block");
        assert_eq!(U64::from(expected_block_number), actual_block.number);
        assert_eq!(1, actual_block.transactions.len());
    }
    /// A block number missing locally falls through to the fork source's
    /// `eth_getBlockByNumber`.
    #[tokio::test]
    async fn test_get_block_by_number_uses_fork_source_if_missing_number() {
        let mock_server = MockServer::run_with_config(ForkBlockConfig {
            number: 10,
            hash: H256::repeat_byte(0xab),
            transaction_count: 0,
        });
        let mock_block_number = 8;
        let block_response = testing::BlockResponseBuilder::new()
            .set_number(mock_block_number)
            .build();
        mock_server.expect(
            serde_json::json!({
                "jsonrpc": "2.0",
                "id": 0,
                "method": "eth_getBlockByNumber",
                "params": [
                    "0x8",
                    true
                ],
            }),
            block_response,
        );
        let node = InMemoryNode::<HttpForkSource>::new(
            Some(ForkDetails::from_network(&mock_server.url(), None, CacheConfig::None).await),
            crate::node::ShowCalls::None,
            ShowStorageLogs::None,
            ShowVMDetails::None,
            ShowGasDetails::None,
            false,
            &system_contracts::Options::BuiltIn,
        );
        let actual_block = node
            .get_block_by_number(BlockNumber::Number(U64::from(8)), false)
            .await
            .expect("failed fetching block by hash")
            .expect("no block");
        assert_eq!(U64::from(mock_block_number), actual_block.number);
    }
    /// `Latest` resolves to the empty virtual block (number 2) produced after a
    /// transaction; the transaction itself lives in block 1.
    #[tokio::test]
    async fn test_get_block_by_number_for_latest_block_produced_locally() {
        let node = InMemoryNode::<HttpForkSource>::default();
        testing::apply_tx(&node, H256::repeat_byte(0x01));
        let virtual_block = node
            .get_block_by_number(BlockNumber::Latest, true)
            .await
            .expect("failed fetching block by hash")
            .expect("no block");
        assert_eq!(U64::from(2), virtual_block.number);
        assert_eq!(0, virtual_block.transactions.len());
        let actual_block = node
            .get_block_by_number(BlockNumber::Number(U64::from(1)), true)
            .await
            .expect("failed fetching block by hash")
            .expect("no block");
        assert_eq!(U64::from(1), actual_block.number);
        assert_eq!(1, actual_block.transactions.len());
    }
    /// `Latest` is answered from the locally cached fork block — no extra RPC
    /// call is issued to the fork source.
    #[tokio::test]
    async fn test_get_block_by_number_uses_locally_available_block_for_latest_block() {
        let input_block_number = 10;
        let mock_server = MockServer::run_with_config(ForkBlockConfig {
            number: input_block_number,
            hash: H256::repeat_byte(0x01),
            transaction_count: 0,
        });
        let node = InMemoryNode::<HttpForkSource>::new(
            Some(ForkDetails::from_network(&mock_server.url(), None, CacheConfig::None).await),
            crate::node::ShowCalls::None,
            ShowStorageLogs::None,
            ShowVMDetails::None,
            ShowGasDetails::None,
            false,
            &system_contracts::Options::BuiltIn,
        );
        let actual_block = node
            .get_block_by_number(BlockNumber::Latest, false)
            .await
            .expect("failed fetching block by hash")
            .expect("no block");
        assert_eq!(U64::from(input_block_number), actual_block.number);
    }
    /// `Earliest` is delegated to the fork source as a literal "earliest" request.
    #[tokio::test]
    async fn test_get_block_by_number_uses_fork_source_for_earliest_block() {
        let mock_server = MockServer::run_with_config(ForkBlockConfig {
            number: 10,
            hash: H256::repeat_byte(0xab),
            transaction_count: 0,
        });
        let input_block_number = 1;
        mock_server.expect(
            serde_json::json!({
                "jsonrpc": "2.0",
                "id": 0,
                "method": "eth_getBlockByNumber",
                "params": [
                    "earliest",
                    true
                ],
            }),
            testing::BlockResponseBuilder::new()
                .set_number(input_block_number)
                .build(),
        );
        let node = InMemoryNode::<HttpForkSource>::new(
            Some(ForkDetails::from_network(&mock_server.url(), None, CacheConfig::None).await),
            crate::node::ShowCalls::None,
            ShowStorageLogs::None,
            ShowVMDetails::None,
            ShowGasDetails::None,
            false,
            &system_contracts::Options::BuiltIn,
        );
        let actual_block = node
            .get_block_by_number(BlockNumber::Earliest, false)
            .await
            .expect("failed fetching block by hash")
            .expect("no block");
        assert_eq!(U64::from(input_block_number), actual_block.number);
    }
    /// `Pending`, `Committed`, and `Finalized` all behave like `Latest`: answered
    /// from the locally cached fork block.
    #[tokio::test]
    async fn test_get_block_by_number_uses_locally_available_for_latest_alike_blocks() {
        for block_number in [
            BlockNumber::Pending,
            BlockNumber::Committed,
            BlockNumber::Finalized,
        ] {
            let input_block_number = 10;
            let mock_server = MockServer::run_with_config(ForkBlockConfig {
                number: input_block_number,
                hash: H256::repeat_byte(0xab),
                transaction_count: 0,
            });
            let node = InMemoryNode::<HttpForkSource>::new(
                Some(ForkDetails::from_network(&mock_server.url(), None, CacheConfig::None).await),
                crate::node::ShowCalls::None,
                ShowStorageLogs::None,
                ShowVMDetails::None,
                ShowGasDetails::None,
                false,
                &system_contracts::Options::BuiltIn,
            );
            let actual_block = node
                .get_block_by_number(block_number, false)
                .await
                .expect("failed fetching block by hash")
                .expect("no block");
            assert_eq!(
                U64::from(input_block_number),
                actual_block.number,
                "case {}",
                block_number,
            );
        }
    }
    /// The transaction count of a locally produced block is readable by its hash.
    #[tokio::test]
    async fn test_get_block_transaction_count_by_hash_for_produced_block() {
        let node = InMemoryNode::<HttpForkSource>::default();
        let (expected_block_hash, _) = testing::apply_tx(&node, H256::repeat_byte(0x01));
        let actual_transaction_count = node
            .get_block_transaction_count_by_hash(expected_block_hash)
            .await
            .expect("failed fetching block by hash")
            .expect("no result");
        assert_eq!(U256::from(1), actual_transaction_count);
    }
    /// An unknown block hash delegates the count lookup to the fork source's
    /// `eth_getBlockTransactionCountByHash`.
    #[tokio::test]
    async fn test_get_block_transaction_count_by_hash_uses_fork_source() {
        let mock_server = MockServer::run_with_config(ForkBlockConfig {
            number: 10,
            hash: H256::repeat_byte(0xab),
            transaction_count: 0,
        });
        let input_block_hash = H256::repeat_byte(0x01);
        let input_transaction_count = 1;
        mock_server.expect(
            serde_json::json!({
                "jsonrpc": "2.0",
                "id": 0,
                "method": "eth_getBlockTransactionCountByHash",
                "params": [
                    format!("{:#x}", input_block_hash),
                ],
            }),
            serde_json::json!({
                "jsonrpc": "2.0",
                "id": 0,
                "result": format!("{:#x}", input_transaction_count),
            }),
        );
        let node = InMemoryNode::<HttpForkSource>::new(
            Some(ForkDetails::from_network(&mock_server.url(), None, CacheConfig::None).await),
            crate::node::ShowCalls::None,
            ShowStorageLogs::None,
            ShowVMDetails::None,
            ShowGasDetails::None,
            false,
            &system_contracts::Options::BuiltIn,
        );
        let actual_transaction_count = node
            .get_block_transaction_count_by_hash(input_block_hash)
            .await
            .expect("failed fetching block by hash")
            .expect("no result");
        assert_eq!(
            U256::from(input_transaction_count),
            actual_transaction_count
        );
    }
    /// The transaction count of a locally produced block is readable by its number.
    #[tokio::test]
    async fn test_get_block_transaction_count_by_number_for_produced_block() {
        let node = InMemoryNode::<HttpForkSource>::default();
        testing::apply_tx(&node, H256::repeat_byte(0x01));
        let actual_transaction_count = node
            .get_block_transaction_count_by_number(BlockNumber::Number(U64::from(1)))
            .await
            .expect("failed fetching block by hash")
            .expect("no result");
        assert_eq!(U256::from(1), actual_transaction_count);
    }
    /// An unknown block number delegates the count lookup to the fork source's
    /// `eth_getBlockTransactionCountByNumber`.
    #[tokio::test]
    async fn test_get_block_transaction_count_by_number_uses_fork_source() {
        let mock_server = MockServer::run_with_config(ForkBlockConfig {
            number: 10,
            hash: H256::repeat_byte(0xab),
            transaction_count: 0,
        });
        let input_block_number = 1;
        let input_transaction_count = 1;
        mock_server.expect(
            serde_json::json!({
                "jsonrpc": "2.0",
                "id": 0,
                "method": "eth_getBlockTransactionCountByNumber",
                "params": [
                    format!("{:#x}", input_block_number),
                ],
            }),
            serde_json::json!({
                "jsonrpc": "2.0",
                "id": 0,
                "result": format!("{:#x}", input_transaction_count),
            }),
        );
        let node = InMemoryNode::<HttpForkSource>::new(
            Some(ForkDetails::from_network(&mock_server.url(), None, CacheConfig::None).await),
            crate::node::ShowCalls::None,
            ShowStorageLogs::None,
            ShowVMDetails::None,
            ShowGasDetails::None,
            false,
            &system_contracts::Options::BuiltIn,
        );
        let actual_transaction_count = node
            .get_block_transaction_count_by_number(BlockNumber::Number(U64::from(1)))
            .await
            .expect("failed fetching block by hash")
            .expect("no result");
        assert_eq!(
            U256::from(input_transaction_count),
            actual_transaction_count
        );
    }
    /// `Earliest` transaction-count lookups are delegated to the fork source
    /// with a literal "earliest" parameter.
    #[tokio::test]
    async fn test_get_block_transaction_count_by_number_earliest_uses_fork_source() {
        let mock_server = MockServer::run_with_config(ForkBlockConfig {
            number: 10,
            hash: H256::repeat_byte(0xab),
            transaction_count: 0,
        });
        let input_transaction_count = 1;
        mock_server.expect(
            serde_json::json!({
                "jsonrpc": "2.0",
                "id": 0,
                "method": "eth_getBlockTransactionCountByNumber",
                "params": [
                    "earliest",
                ],
            }),
            serde_json::json!({
                "jsonrpc": "2.0",
                "id": 0,
                "result": format!("{:#x}", input_transaction_count),
            }),
        );
        let node = InMemoryNode::<HttpForkSource>::new(
            Some(ForkDetails::from_network(&mock_server.url(), None, CacheConfig::None).await),
            crate::node::ShowCalls::None,
            ShowStorageLogs::None,
            ShowVMDetails::None,
            ShowGasDetails::None,
            false,
            &system_contracts::Options::BuiltIn,
        );
        let actual_transaction_count = node
            .get_block_transaction_count_by_number(BlockNumber::Earliest)
            .await
            .expect("failed fetching block by hash")
            .expect("no result");
        assert_eq!(
            U256::from(input_transaction_count),
            actual_transaction_count
        );
    }
    /// `Latest`-alike variants resolve the count from the fork's current block
    /// (cached at node creation), without issuing an extra RPC call.
    #[tokio::test]
    async fn test_get_block_transaction_count_by_number_latest_alike_uses_fork_source() {
        for block_number in [
            BlockNumber::Latest,
            BlockNumber::Pending,
            BlockNumber::Committed,
            BlockNumber::Finalized,
        ] {
            let input_transaction_count = 1;
            let mock_server = MockServer::run_with_config(ForkBlockConfig {
                number: 10,
                transaction_count: input_transaction_count,
                hash: H256::repeat_byte(0xab),
            });
            let node = InMemoryNode::<HttpForkSource>::new(
                Some(ForkDetails::from_network(&mock_server.url(), None, CacheConfig::None).await),
                crate::node::ShowCalls::None,
                ShowStorageLogs::None,
                ShowVMDetails::None,
                ShowGasDetails::None,
                false,
                &system_contracts::Options::BuiltIn,
            );
            let actual_transaction_count = node
                .get_block_transaction_count_by_number(block_number)
                .await
                .expect("failed fetching block by hash")
                .expect("no result");
            assert_eq!(
                U256::from(input_transaction_count),
                actual_transaction_count,
                "case {}",
                block_number,
            );
        }
    }
    /// A transaction receipt references the hash of the block that included it.
    #[tokio::test]
    async fn test_get_transaction_receipt_uses_produced_block_hash() {
        let node = InMemoryNode::<HttpForkSource>::default();
        let tx_hash = H256::repeat_byte(0x01);
        let (expected_block_hash, _) = testing::apply_tx(&node, tx_hash);
        let actual_tx_receipt = node
            .get_transaction_receipt(tx_hash)
            .await
            .expect("failed fetching transaction receipt by hash")
            .expect("no transaction receipt");
        assert_eq!(Some(expected_block_hash), actual_tx_receipt.block_hash);
    }
    /// The first block filter installed gets filter id 1.
    #[tokio::test]
    async fn test_new_block_filter_returns_filter_id() {
        let node = InMemoryNode::<HttpForkSource>::default();
        let actual_filter_id = node
            .new_block_filter()
            .await
            .expect("failed creating filter");
        assert_eq!(U256::from(1), actual_filter_id);
    }
    /// The first log filter installed gets filter id 1.
    #[tokio::test]
    async fn test_new_filter_returns_filter_id() {
        let node = InMemoryNode::<HttpForkSource>::default();
        let actual_filter_id = node
            .new_filter(Filter::default())
            .await
            .expect("failed creating filter");
        assert_eq!(U256::from(1), actual_filter_id);
    }
    /// The first pending-transaction filter installed gets filter id 1.
    #[tokio::test]
    async fn test_new_pending_transaction_filter_returns_filter_id() {
        let node = InMemoryNode::<HttpForkSource>::default();
        let actual_filter_id = node
            .new_pending_transaction_filter()
            .await
            .expect("failed creating filter");
        assert_eq!(U256::from(1), actual_filter_id);
    }
    /// Uninstalling an existing filter returns true.
    #[tokio::test]
    async fn test_uninstall_filter_returns_true_if_filter_exists() {
        let node = InMemoryNode::<HttpForkSource>::default();
        let filter_id = node
            .new_block_filter()
            .await
            .expect("failed creating filter");
        let actual_result = node
            .uninstall_filter(filter_id)
            .await
            .expect("failed creating filter");
        assert!(actual_result);
    }
    /// Uninstalling a non-existent filter returns false rather than erroring.
    #[tokio::test]
    async fn test_uninstall_filter_returns_false_if_filter_does_not_exist() {
        let node = InMemoryNode::<HttpForkSource>::default();
        let actual_result = node
            .uninstall_filter(U256::from(100))
            .await
            .expect("failed creating filter");
        assert!(!actual_result);
    }
    /// Block filters deliver new block hashes exactly once: a second poll with
    /// no intervening blocks returns an empty change set.
    #[tokio::test]
    async fn test_get_filter_changes_returns_block_hash_updates_only_once() {
        let node = InMemoryNode::<HttpForkSource>::default();
        let filter_id = node
            .new_block_filter()
            .await
            .expect("failed creating filter");
        let (block_hash, _) = testing::apply_tx(&node, H256::repeat_byte(0x1));
        match node
            .get_filter_changes(filter_id)
            .await
            .expect("failed getting filter changes")
        {
            FilterChanges::Hashes(result) => {
                // 2 hashes: the tx block plus the empty virtual block that follows it.
                assert_eq!(2, result.len());
                assert_eq!(block_hash, result[0]);
            }
            changes => panic!("unexpected filter changes: {:?}", changes),
        }
        match node
            .get_filter_changes(filter_id)
            .await
            .expect("failed getting filter changes")
        {
            FilterChanges::Empty(_) => (),
            changes => panic!("expected no changes in the second call, got {:?}", changes),
        }
    }
    /// Log filters deliver emitted logs exactly once across consecutive polls.
    #[tokio::test]
    async fn test_get_filter_changes_returns_log_updates_only_once() {
        let node = InMemoryNode::<HttpForkSource>::default();
        let filter_id = node
            .new_filter(Filter {
                from_block: None,
                to_block: None,
                address: None,
                topics: None,
                block_hash: None,
            })
            .await
            .expect("failed creating filter");
        testing::apply_tx(&node, H256::repeat_byte(0x1));
        match node
            .get_filter_changes(filter_id)
            .await
            .expect("failed getting filter changes")
        {
            FilterChanges::Logs(result) => assert_eq!(3, result.len()),
            changes => panic!("unexpected filter changes: {:?}", changes),
        }
        match node
            .get_filter_changes(filter_id)
            .await
            .expect("failed getting filter changes")
        {
            FilterChanges::Empty(_) => (),
            changes => panic!("expected no changes in the second call, got {:?}", changes),
        }
    }
    /// Pending-transaction filters deliver tx hashes exactly once.
    #[tokio::test]
    async fn test_get_filter_changes_returns_pending_transaction_updates_only_once() {
        let node = InMemoryNode::<HttpForkSource>::default();
        let filter_id = node
            .new_pending_transaction_filter()
            .await
            .expect("failed creating filter");
        testing::apply_tx(&node, H256::repeat_byte(0x1));
        match node
            .get_filter_changes(filter_id)
            .await
            .expect("failed getting filter changes")
        {
            FilterChanges::Hashes(result) => assert_eq!(vec![H256::repeat_byte(0x1)], result),
            changes => panic!("unexpected filter changes: {:?}", changes),
        }
        match node
            .get_filter_changes(filter_id)
            .await
            .expect("failed getting filter changes")
        {
            FilterChanges::Empty(_) => (),
            changes => panic!("expected no changes in the second call, got {:?}", changes),
        }
    }
    /// Producing a block archives the prior state: every superseded block's
    /// state snapshot (keyed by block hash) retains the pre-existing storage value.
    #[tokio::test]
    async fn test_produced_block_archives_previous_blocks() {
        let node = InMemoryNode::<HttpForkSource>::default();
        let input_storage_key = StorageKey::new(
            AccountTreeId::new(H160::repeat_byte(0x1)),
            u256_to_h256(U256::zero()),
        );
        let input_storage_value = H256::repeat_byte(0xcd);
        node.inner
            .write()
            .unwrap()
            .fork_storage
            .set_value(input_storage_key, input_storage_value);
        let initial_miniblock = node.inner.read().unwrap().current_miniblock;
        testing::apply_tx(&node, H256::repeat_byte(0x1));
        let current_miniblock = node.inner.read().unwrap().current_miniblock;
        let reader = node.inner.read().unwrap();
        for miniblock in initial_miniblock..current_miniblock {
            let actual_cached_value = reader
                .block_hashes
                .get(&miniblock)
                .map(|hash| {
                    reader
                        .previous_states
                        .get(hash)
                        .unwrap_or_else(|| panic!("state was not cached for block {}", miniblock))
                })
                .and_then(|state| state.get(&input_storage_key))
                .copied();
            assert_eq!(
                Some(input_storage_value),
                actual_cached_value,
                "unexpected cached state value for block {}",
                miniblock
            );
        }
    }
    /// Reading a slot that was never written returns the zero value.
    #[tokio::test]
    async fn test_get_storage_fetches_zero_value_for_non_existent_key() {
        let node = InMemoryNode::<HttpForkSource>::default();
        let value = node
            .get_storage(H160::repeat_byte(0xf1), U256::from(1024), None)
            .await
            .expect("failed retrieving storage");
        assert_eq!(H256::zero(), value);
    }
    /// A storage read pinned to a historical block number that the node does
    /// not know locally is forwarded to the fork's `eth_getStorageAt`.
    #[tokio::test]
    async fn test_get_storage_uses_fork_to_get_value_for_historical_block() {
        let mock_server = MockServer::run_with_config(ForkBlockConfig {
            number: 10,
            transaction_count: 0,
            hash: H256::repeat_byte(0xab),
        });
        let input_address = H160::repeat_byte(0x1);
        let input_storage_value = H256::repeat_byte(0xcd);
        mock_server.expect(
            serde_json::json!({
                "jsonrpc": "2.0",
                "id": 0,
                "method": "eth_getStorageAt",
                "params": [
                    format!("{:#x}", input_address),
                    "0x0",
                    { "blockNumber": "0x2" },
                ],
            }),
            serde_json::json!({
                "jsonrpc": "2.0",
                "id": 0,
                "result": format!("{:#x}", input_storage_value),
            }),
        );
        let node = InMemoryNode::<HttpForkSource>::new(
            Some(ForkDetails::from_network(&mock_server.url(), None, CacheConfig::None).await),
            crate::node::ShowCalls::None,
            ShowStorageLogs::None,
            ShowVMDetails::None,
            ShowGasDetails::None,
            false,
            &system_contracts::Options::BuiltIn,
        );
        let actual_value = node
            .get_storage(
                input_address,
                U256::zero(),
                Some(zksync_types::api::BlockIdVariant::BlockNumberObject(
                    BlockNumberObject {
                        block_number: BlockNumber::Number(U64::from(2)),
                    },
                )),
            )
            .await
            .expect("failed retrieving storage");
        assert_eq!(input_storage_value, actual_value);
    }
    /// A storage read pinned to a known historical block is served from the
    /// archived per-block state (`previous_states`), not current storage.
    #[tokio::test]
    async fn test_get_storage_uses_archived_storage_to_get_value_for_missing_key() {
        let input_address = H160::repeat_byte(0x1);
        let input_storage_key = StorageKey::new(
            AccountTreeId::new(input_address),
            u256_to_h256(U256::zero()),
        );
        let input_storage_value = H256::repeat_byte(0xcd);
        let node = InMemoryNode::<HttpForkSource>::default();
        // Manually seed an archived block #2 whose snapshot contains the key.
        node.inner
            .write()
            .map(|mut writer| {
                let historical_block = Block::<TransactionVariant> {
                    hash: H256::repeat_byte(0x2),
                    number: U64::from(2),
                    ..Default::default()
                };
                writer.block_hashes.insert(2, historical_block.hash);
                writer.previous_states.insert(
                    historical_block.hash,
                    hashmap! {
                        input_storage_key => input_storage_value,
                    },
                );
                writer
                    .blocks
                    .insert(historical_block.hash, historical_block);
            })
            .expect("failed setting storage for historical block");
        let actual_value = node
            .get_storage(
                input_address,
                U256::zero(),
                Some(zksync_types::api::BlockIdVariant::BlockNumberObject(
                    BlockNumberObject {
                        block_number: BlockNumber::Number(U64::from(2)),
                    },
                )),
            )
            .await
            .expect("failed retrieving storage");
        assert_eq!(input_storage_value, actual_value);
    }
    /// If the archived snapshot for a known historical block lacks the key, the
    /// node falls back to the fork's latest state ("0xa" = fork block 10).
    #[tokio::test]
    async fn test_get_storage_uses_fork_to_get_value_for_latest_block_for_missing_key() {
        let mock_server = MockServer::run_with_config(ForkBlockConfig {
            number: 10,
            transaction_count: 0,
            hash: H256::repeat_byte(0xab),
        });
        let input_address = H160::repeat_byte(0x1);
        let input_storage_value = H256::repeat_byte(0xcd);
        mock_server.expect(
            serde_json::json!({
                "jsonrpc": "2.0",
                "id": 0,
                "method": "eth_getStorageAt",
                "params": [
                    format!("{:#x}", input_address),
                    "0x0",
                    "0xa",
                ],
            }),
            serde_json::json!({
                "jsonrpc": "2.0",
                "id": 0,
                "result": format!("{:#x}", input_storage_value),
            }),
        );
        let node = InMemoryNode::<HttpForkSource>::new(
            Some(ForkDetails::from_network(&mock_server.url(), None, CacheConfig::None).await),
            crate::node::ShowCalls::None,
            ShowStorageLogs::None,
            ShowVMDetails::None,
            ShowGasDetails::None,
            false,
            &system_contracts::Options::BuiltIn,
        );
        // Archived block #2 exists but its snapshot is empty — key is missing.
        node.inner
            .write()
            .map(|mut writer| {
                let historical_block = Block::<TransactionVariant> {
                    hash: H256::repeat_byte(0x2),
                    number: U64::from(2),
                    ..Default::default()
                };
                writer.block_hashes.insert(2, historical_block.hash);
                writer
                    .previous_states
                    .insert(historical_block.hash, Default::default());
                writer
                    .blocks
                    .insert(historical_block.hash, historical_block);
            })
            .expect("failed setting storage for historical block");
        let actual_value = node
            .get_storage(
                input_address,
                U256::zero(),
                Some(zksync_types::api::BlockIdVariant::BlockNumberObject(
                    BlockNumberObject {
                        block_number: BlockNumber::Number(U64::from(2)),
                    },
                )),
            )
            .await
            .expect("failed retrieving storage");
        assert_eq!(input_storage_value, actual_value);
    }
    /// Storage slots written by a freshly deployed contract are readable at the
    /// current block (slot 0 = 1024, slot 1 = U256::MAX per the test contract).
    #[tokio::test]
    async fn test_get_storage_fetches_state_for_deployed_smart_contract_in_current_block() {
        let node = InMemoryNode::<HttpForkSource>::default();
        let private_key = H256::repeat_byte(0xef);
        let from_account = zksync_types::PackedEthSignature::address_from_private_key(&private_key)
            .expect("failed generating address");
        node.set_rich_account(from_account);
        let deployed_address = deployed_address_create(from_account, U256::zero());
        testing::deploy_contract(
            &node,
            H256::repeat_byte(0x1),
            private_key,
            hex::decode(testing::STORAGE_CONTRACT_BYTECODE).unwrap(),
            None,
            Nonce(0),
        );
        let number1 = node
            .get_storage(deployed_address, U256::from(0), None)
            .await
            .expect("failed retrieving storage at slot 0");
        assert_eq!(U256::from(1024), h256_to_u256(number1));
        let number2 = node
            .get_storage(deployed_address, U256::from(1), None)
            .await
            .expect("failed retrieving storage at slot 1");
        assert_eq!(U256::MAX, h256_to_u256(number2));
    }
    /// Pinning a storage read to an old block hash returns the value as of that
    /// block, even after the slot was overwritten in current storage.
    #[tokio::test]
    async fn test_get_storage_fetches_state_for_deployed_smart_contract_in_old_block() {
        let node = InMemoryNode::<HttpForkSource>::default();
        let private_key = H256::repeat_byte(0xef);
        let from_account = zksync_types::PackedEthSignature::address_from_private_key(&private_key)
            .expect("failed generating address");
        node.set_rich_account(from_account);
        let deployed_address = deployed_address_create(from_account, U256::zero());
        let initial_block_hash = testing::deploy_contract(
            &node,
            H256::repeat_byte(0x1),
            private_key,
            hex::decode(testing::STORAGE_CONTRACT_BYTECODE).unwrap(),
            None,
            Nonce(0),
        );
        testing::apply_tx(&node, H256::repeat_byte(0x2));
        let key = StorageKey::new(
            AccountTreeId::new(deployed_address),
            u256_to_h256(U256::from(0)),
        );
        // Overwrite slot 0 directly in raw storage so current != historical.
        node.inner
            .write()
            .unwrap()
            .fork_storage
            .inner
            .write()
            .unwrap()
            .raw_storage
            .state
            .insert(key, u256_to_h256(U256::from(512)));
        let number1_current = node
            .get_storage(deployed_address, U256::from(0), None)
            .await
            .expect("failed retrieving storage at slot 0");
        assert_eq!(U256::from(512), h256_to_u256(number1_current));
        let number1_old = node
            .get_storage(
                deployed_address,
                U256::from(0),
                Some(zksync_types::api::BlockIdVariant::BlockHashObject(
                    BlockHashObject {
                        block_hash: initial_block_hash,
                    },
                )),
            )
            .await
            .expect("failed retrieving storage at slot 0");
        assert_eq!(U256::from(1024), h256_to_u256(number1_old));
    }
    /// `eth_getFilterLogs` returns only the logs whose address matches the
    /// filter — here 2 of the 3 seeded logs were emitted by 0xa1...a1.
    #[tokio::test]
    async fn test_get_filter_logs_returns_matching_logs_for_valid_id() {
        let node = InMemoryNode::<HttpForkSource>::default();
        {
            let mut writer = node.inner.write().unwrap();
            writer.tx_results.insert(
                H256::repeat_byte(0x1),
                TransactionResult {
                    info: testing::default_tx_execution_info(),
                    receipt: TransactionReceipt {
                        logs: vec![LogBuilder::new()
                            .set_address(H160::repeat_byte(0xa1))
                            .build()],
                        ..Default::default()
                    },
                    debug: default_tx_debug_info(),
                },
            );
            writer.tx_results.insert(
                H256::repeat_byte(0x2),
                TransactionResult {
                    info: testing::default_tx_execution_info(),
                    receipt: TransactionReceipt {
                        logs: vec![
                            LogBuilder::new()
                                .set_address(H160::repeat_byte(0xa1))
                                .build(),
                            LogBuilder::new()
                                .set_address(H160::repeat_byte(0xa2))
                                .build(),
                        ],
                        ..Default::default()
                    },
                    debug: default_tx_debug_info(),
                },
            );
        }
        let filter_id = node
            .new_filter(Filter {
                address: Some(ValueOrArray(vec![H160::repeat_byte(0xa1)])),
                ..Default::default()
            })
            .await
            .expect("failed creating filter");
        match node
            .get_filter_logs(filter_id)
            .await
            .expect("failed getting filter changes")
        {
            FilterChanges::Logs(result) => assert_eq!(2, result.len()),
            changes => panic!("unexpected filter changes: {:?}", changes),
        }
    }
    /// `eth_getFilterLogs` errors for a filter id that was never installed.
    #[tokio::test]
    async fn test_get_filter_logs_returns_error_for_invalid_id() {
        let node = InMemoryNode::<HttpForkSource>::default();
        {
            let mut writer = node.inner.write().unwrap();
            writer.tx_results.insert(
                H256::repeat_byte(0x1),
                TransactionResult {
                    info: testing::default_tx_execution_info(),
                    receipt: TransactionReceipt {
                        logs: vec![LogBuilder::new()
                            .set_address(H160::repeat_byte(0xa1))
                            .build()],
                        ..Default::default()
                    },
                    debug: default_tx_debug_info(),
                },
            );
        }
        let invalid_filter_id = U256::from(100);
        let result = node.get_filter_logs(invalid_filter_id).await;
        assert!(result.is_err(), "expected an error for invalid filter id");
    }
    /// `eth_getLogs` filters the seeded logs by address: 1 match for 0xa2...,
    /// 2 matches for 0xa1..., and 0 for an address that emitted nothing.
    #[tokio::test]
    async fn test_get_logs_returns_matching_logs() {
        let node = InMemoryNode::<HttpForkSource>::default();
        {
            let mut writer = node.inner.write().unwrap();
            writer.tx_results.insert(
                H256::repeat_byte(0x1),
                TransactionResult {
                    info: testing::default_tx_execution_info(),
                    receipt: TransactionReceipt {
                        logs: vec![LogBuilder::new()
                            .set_address(H160::repeat_byte(0xa1))
                            .build()],
                        ..Default::default()
                    },
                    debug: testing::default_tx_debug_info(),
                },
            );
            writer.tx_results.insert(
                H256::repeat_byte(0x2),
                TransactionResult {
                    info: testing::default_tx_execution_info(),
                    receipt: TransactionReceipt {
                        logs: vec![
                            LogBuilder::new()
                                .set_address(H160::repeat_byte(0xa1))
                                .build(),
                            LogBuilder::new()
                                .set_address(H160::repeat_byte(0xa2))
                                .build(),
                        ],
                        ..Default::default()
                    },
                    debug: testing::default_tx_debug_info(),
                },
            );
        }
        let result = node
            .get_logs(Filter {
                address: Some(ValueOrArray(vec![H160::repeat_byte(0xa2)])),
                ..Default::default()
            })
            .await
            .expect("failed getting filter changes");
        assert_eq!(1, result.len());
        let result = node
            .get_logs(Filter {
                address: Some(ValueOrArray(vec![H160::repeat_byte(0xa1)])),
                ..Default::default()
            })
            .await
            .expect("failed getting filter changes");
        assert_eq!(2, result.len());
        let result = node
            .get_logs(Filter {
                address: Some(ValueOrArray(vec![H160::repeat_byte(0x11)])),
                ..Default::default()
            })
            .await
            .expect("failed getting filter changes");
        assert_eq!(0, result.len());
    }
    /// `eth_accounts` lists accounts registered via `set_rich_account`.
    #[tokio::test]
    async fn test_accounts() {
        let node = InMemoryNode::<HttpForkSource>::default();
        let private_key = H256::repeat_byte(0x01);
        let from_account = PackedEthSignature::address_from_private_key(&private_key).unwrap();
        node.set_rich_account(from_account);
        let account_result = node.accounts().await;
        let expected_accounts: Vec<H160> = vec![from_account];
        match account_result {
            Ok(accounts) => {
                assert_eq!(expected_accounts, accounts);
            }
            Err(e) => {
                panic!("Failed to fetch accounts: {:?}", e);
            }
        }
    }
#[tokio::test]
async fn test_snapshot() {
    // Populate every piece of node state that `snapshot()` is expected to
    // capture, then verify the returned snapshot mirrors it field by field.
    let node = InMemoryNode::<HttpForkSource>::default();
    let mut inner = node.inner.write().unwrap();

    inner
        .blocks
        .insert(H256::repeat_byte(0x1), Default::default());
    inner.block_hashes.insert(1, H256::repeat_byte(0x1));
    inner.tx_results.insert(
        H256::repeat_byte(0x1),
        TransactionResult {
            info: testing::default_tx_execution_info(),
            receipt: Default::default(),
            debug: testing::default_tx_debug_info(),
        },
    );
    inner.current_batch = 1;
    inner.current_miniblock = 1;
    inner.current_miniblock_hash = H256::repeat_byte(0x1);
    inner.current_timestamp = 1;
    inner
        .filters
        .add_block_filter()
        .expect("failed adding block filter");
    inner.impersonated_accounts.insert(H160::repeat_byte(0x1));
    inner.rich_accounts.insert(H160::repeat_byte(0x1));
    inner
        .previous_states
        .insert(H256::repeat_byte(0x1), Default::default());
    inner.fork_storage.set_value(
        StorageKey::new(AccountTreeId::new(H160::repeat_byte(0x1)), H256::zero()),
        H256::repeat_byte(0x1),
    );

    let storage = inner.fork_storage.inner.read().unwrap();
    let expected_snapshot = Snapshot {
        // `Copy` scalars are read directly; the previous `.clone()` calls on
        // them were clippy::clone_on_copy noise.
        current_timestamp: inner.current_timestamp,
        current_batch: inner.current_batch,
        current_miniblock: inner.current_miniblock,
        current_miniblock_hash: inner.current_miniblock_hash,
        l1_gas_price: inner.l1_gas_price,
        tx_results: inner.tx_results.clone(),
        blocks: inner.blocks.clone(),
        block_hashes: inner.block_hashes.clone(),
        filters: inner.filters.clone(),
        impersonated_accounts: inner.impersonated_accounts.clone(),
        rich_accounts: inner.rich_accounts.clone(),
        previous_states: inner.previous_states.clone(),
        raw_storage: storage.raw_storage.clone(),
        value_read_cache: storage.value_read_cache.clone(),
        factory_dep_cache: storage.factory_dep_cache.clone(),
    };
    let actual_snapshot = inner.snapshot().expect("failed taking snapshot");

    assert_eq!(
        expected_snapshot.current_timestamp,
        actual_snapshot.current_timestamp
    );
    assert_eq!(
        expected_snapshot.current_batch,
        actual_snapshot.current_batch
    );
    assert_eq!(
        expected_snapshot.current_miniblock,
        actual_snapshot.current_miniblock
    );
    assert_eq!(
        expected_snapshot.current_miniblock_hash,
        actual_snapshot.current_miniblock_hash
    );
    assert_eq!(expected_snapshot.l1_gas_price, actual_snapshot.l1_gas_price);
    // Compare only keys: the result values carry debug/receipt payloads that
    // are not compared for equality here.
    assert_eq!(
        expected_snapshot.tx_results.keys().collect_vec(),
        actual_snapshot.tx_results.keys().collect_vec()
    );
    assert_eq!(expected_snapshot.blocks, actual_snapshot.blocks);
    assert_eq!(expected_snapshot.block_hashes, actual_snapshot.block_hashes);
    assert_eq!(expected_snapshot.filters, actual_snapshot.filters);
    assert_eq!(
        expected_snapshot.impersonated_accounts,
        actual_snapshot.impersonated_accounts
    );
    assert_eq!(
        expected_snapshot.rich_accounts,
        actual_snapshot.rich_accounts
    );
    assert_eq!(
        expected_snapshot.previous_states,
        actual_snapshot.previous_states
    );
    assert_eq!(expected_snapshot.raw_storage, actual_snapshot.raw_storage);
    assert_eq!(
        expected_snapshot.value_read_cache,
        actual_snapshot.value_read_cache
    );
    assert_eq!(
        expected_snapshot.factory_dep_cache,
        actual_snapshot.factory_dep_cache
    );
}
#[tokio::test]
async fn test_snapshot_restore() {
    // Seed state "1", snapshot it, mutate everything to state "2", then
    // restore and verify every field rolled back to the snapshot.
    let node = InMemoryNode::<HttpForkSource>::default();
    let mut inner = node.inner.write().unwrap();

    inner
        .blocks
        .insert(H256::repeat_byte(0x1), Default::default());
    inner.block_hashes.insert(1, H256::repeat_byte(0x1));
    inner.tx_results.insert(
        H256::repeat_byte(0x1),
        TransactionResult {
            info: testing::default_tx_execution_info(),
            receipt: Default::default(),
            debug: testing::default_tx_debug_info(),
        },
    );
    inner.current_batch = 1;
    inner.current_miniblock = 1;
    inner.current_miniblock_hash = H256::repeat_byte(0x1);
    inner.current_timestamp = 1;
    inner
        .filters
        .add_block_filter()
        .expect("failed adding block filter");
    inner.impersonated_accounts.insert(H160::repeat_byte(0x1));
    inner.rich_accounts.insert(H160::repeat_byte(0x1));
    inner
        .previous_states
        .insert(H256::repeat_byte(0x1), Default::default());
    inner.fork_storage.set_value(
        StorageKey::new(AccountTreeId::new(H160::repeat_byte(0x1)), H256::zero()),
        H256::repeat_byte(0x1),
    );

    // Capture the expected post-restore state manually for comparison.
    let expected_snapshot = {
        let storage = inner.fork_storage.inner.read().unwrap();
        Snapshot {
            // `Copy` scalars are read directly; `.clone()` on them was
            // clippy::clone_on_copy noise.
            current_timestamp: inner.current_timestamp,
            current_batch: inner.current_batch,
            current_miniblock: inner.current_miniblock,
            current_miniblock_hash: inner.current_miniblock_hash,
            l1_gas_price: inner.l1_gas_price,
            tx_results: inner.tx_results.clone(),
            blocks: inner.blocks.clone(),
            block_hashes: inner.block_hashes.clone(),
            filters: inner.filters.clone(),
            impersonated_accounts: inner.impersonated_accounts.clone(),
            rich_accounts: inner.rich_accounts.clone(),
            previous_states: inner.previous_states.clone(),
            raw_storage: storage.raw_storage.clone(),
            value_read_cache: storage.value_read_cache.clone(),
            factory_dep_cache: storage.factory_dep_cache.clone(),
        }
    };
    let snapshot = inner.snapshot().expect("failed taking snapshot");

    // Mutate every snapshotted field to a distinct "2" value.
    inner
        .blocks
        .insert(H256::repeat_byte(0x2), Default::default());
    inner.block_hashes.insert(2, H256::repeat_byte(0x2));
    inner.tx_results.insert(
        H256::repeat_byte(0x2),
        TransactionResult {
            info: testing::default_tx_execution_info(),
            // Qualified for consistency with every other call site in this module.
            debug: testing::default_tx_debug_info(),
            receipt: Default::default(),
        },
    );
    inner.current_batch = 2;
    inner.current_miniblock = 2;
    inner.current_miniblock_hash = H256::repeat_byte(0x2);
    inner.current_timestamp = 2;
    inner
        .filters
        .add_pending_transaction_filter()
        .expect("failed adding pending transaction filter");
    inner.impersonated_accounts.insert(H160::repeat_byte(0x2));
    inner.rich_accounts.insert(H160::repeat_byte(0x2));
    inner
        .previous_states
        .insert(H256::repeat_byte(0x2), Default::default());
    inner.fork_storage.set_value(
        StorageKey::new(AccountTreeId::new(H160::repeat_byte(0x2)), H256::zero()),
        H256::repeat_byte(0x2),
    );

    inner
        .restore_snapshot(snapshot)
        .expect("failed restoring snapshot");

    let storage = inner.fork_storage.inner.read().unwrap();
    assert_eq!(expected_snapshot.current_timestamp, inner.current_timestamp);
    assert_eq!(expected_snapshot.current_batch, inner.current_batch);
    assert_eq!(expected_snapshot.current_miniblock, inner.current_miniblock);
    assert_eq!(
        expected_snapshot.current_miniblock_hash,
        inner.current_miniblock_hash
    );
    assert_eq!(expected_snapshot.l1_gas_price, inner.l1_gas_price);
    // Compare only keys: the result values are not compared for equality here.
    assert_eq!(
        expected_snapshot.tx_results.keys().collect_vec(),
        inner.tx_results.keys().collect_vec()
    );
    assert_eq!(expected_snapshot.blocks, inner.blocks);
    assert_eq!(expected_snapshot.block_hashes, inner.block_hashes);
    assert_eq!(expected_snapshot.filters, inner.filters);
    assert_eq!(
        expected_snapshot.impersonated_accounts,
        inner.impersonated_accounts
    );
    assert_eq!(expected_snapshot.rich_accounts, inner.rich_accounts);
    assert_eq!(expected_snapshot.previous_states, inner.previous_states);
    assert_eq!(expected_snapshot.raw_storage, storage.raw_storage);
    assert_eq!(expected_snapshot.value_read_cache, storage.value_read_cache);
    assert_eq!(
        expected_snapshot.factory_dep_cache,
        storage.factory_dep_cache
    );
}
#[tokio::test]
async fn test_get_transaction_by_block_hash_and_index_returns_none_for_invalid_block_hash() {
    // Looking up a transaction under a block hash the node has never seen
    // must resolve to `None`, not an error.
    let node = InMemoryNode::<HttpForkSource>::default();
    let tx_hash = H256::repeat_byte(0x01);
    let (minted_block_hash, _) = testing::apply_tx(&node, tx_hash);

    let unknown_block_hash = H256::repeat_byte(0xab);
    assert_ne!(minted_block_hash, unknown_block_hash);

    let fetched = node
        .get_transaction_by_block_hash_and_index(unknown_block_hash, U64::from(0))
        .await
        .expect("failed fetching transaction");
    assert!(fetched.is_none());
}
#[tokio::test]
async fn test_get_transaction_by_block_hash_and_index_returns_none_for_invalid_index() {
    // A valid block hash combined with an out-of-range transaction index
    // must yield `None`.
    let node = InMemoryNode::<HttpForkSource>::default();
    let tx_hash = H256::repeat_byte(0x01);
    let (block_hash, _) = testing::apply_tx(&node, tx_hash);

    let fetched = node
        .get_transaction_by_block_hash_and_index(block_hash, U64::from(10))
        .await
        .expect("failed fetching transaction");
    assert!(fetched.is_none());
}
#[tokio::test]
async fn test_get_transaction_by_block_hash_and_index_returns_transaction_for_valid_input() {
    // Happy path: the transaction minted at index 0 of its block is returned.
    let node = InMemoryNode::<HttpForkSource>::default();
    let tx_hash = H256::repeat_byte(0x01);
    let (block_hash, _) = testing::apply_tx(&node, tx_hash);

    let fetched = node
        .get_transaction_by_block_hash_and_index(block_hash, U64::from(0))
        .await
        .expect("failed fetching transaction")
        .expect("no transaction");
    assert_eq!(tx_hash, fetched.hash);
}
#[tokio::test]
async fn test_get_transaction_by_block_hash_and_index_fetches_full_transaction_for_hash_from_fork(
) {
    // Scenario: the node knows the block locally, but the block stores the
    // transaction only as a bare hash (`TransactionVariant::Hash`). Resolving
    // index 0 should therefore fetch the full transaction from the fork via
    // `eth_getTransactionByHash`.
    let mock_server = MockServer::run_with_config(ForkBlockConfig {
        number: 10,
        transaction_count: 0,
        hash: H256::repeat_byte(0xab),
    });
    let input_block_hash = H256::repeat_byte(0x01);
    let input_tx_hash = H256::repeat_byte(0x02);
    // Expectation must be registered before the node is created, since
    // `ForkDetails::from_network` already talks to the mock server.
    mock_server.expect(
        serde_json::json!({
            "jsonrpc": "2.0",
            "id": 0,
            "method": "eth_getTransactionByHash",
            "params": [
                format!("{:#x}", input_tx_hash),
            ],
        }),
        TransactionResponseBuilder::new()
            .set_hash(input_tx_hash)
            .set_block_hash(input_block_hash)
            // Marker value used below to prove the response came from the fork.
            .set_block_number(U64::from(1))
            .build(),
    );
    let node = InMemoryNode::<HttpForkSource>::new(
        Some(ForkDetails::from_network(&mock_server.url(), None, CacheConfig::None).await),
        crate::node::ShowCalls::None,
        ShowStorageLogs::None,
        ShowVMDetails::None,
        ShowGasDetails::None,
        false,
        &system_contracts::Options::BuiltIn,
    );
    // Seed the local block containing only the transaction's hash.
    {
        let mut writer = node.inner.write().unwrap();
        writer.blocks.insert(
            input_block_hash,
            Block {
                transactions: vec![TransactionVariant::Hash(input_tx_hash)],
                ..Default::default()
            },
        );
    }
    let actual_tx = node
        .get_transaction_by_block_hash_and_index(input_block_hash, U64::from(0))
        .await
        .expect("failed fetching transaction")
        .expect("no transaction");
    assert_eq!(input_tx_hash, actual_tx.hash);
    // Block number 1 can only have come from the mocked fork response.
    assert_eq!(Some(U64::from(1)), actual_tx.block_number);
}
#[tokio::test]
async fn test_get_transaction_by_block_hash_and_index_fetches_from_fork_if_block_missing() {
    // Scenario: the node does not know the requested block at all, so the
    // entire `eth_getTransactionByBlockHashAndIndex` call is delegated to
    // the fork.
    let mock_server = MockServer::run_with_config(ForkBlockConfig {
        number: 10,
        transaction_count: 0,
        hash: H256::repeat_byte(0xab),
    });
    let input_block_hash = H256::repeat_byte(0x01);
    let input_tx_hash = H256::repeat_byte(0x02);
    // Expectation must be registered before the node is created, since
    // `ForkDetails::from_network` already talks to the mock server.
    mock_server.expect(
        serde_json::json!({
            "jsonrpc": "2.0",
            "id": 0,
            "method": "eth_getTransactionByBlockHashAndIndex",
            "params": [
                format!("{:#x}", input_block_hash),
                "0x1"
            ],
        }),
        TransactionResponseBuilder::new()
            .set_hash(input_tx_hash)
            .set_block_hash(input_block_hash)
            // Marker value used below to prove the response came from the fork.
            .set_block_number(U64::from(100))
            .build(),
    );
    let node = InMemoryNode::<HttpForkSource>::new(
        Some(ForkDetails::from_network(&mock_server.url(), None, CacheConfig::None).await),
        crate::node::ShowCalls::None,
        ShowStorageLogs::None,
        ShowVMDetails::None,
        ShowGasDetails::None,
        false,
        &system_contracts::Options::BuiltIn,
    );
    let actual_tx = node
        .get_transaction_by_block_hash_and_index(input_block_hash, U64::from(1))
        .await
        .expect("failed fetching transaction")
        .expect("no transaction");
    assert_eq!(input_tx_hash, actual_tx.hash);
    // Block number 100 can only have come from the mocked fork response.
    assert_eq!(Some(U64::from(100)), actual_tx.block_number);
}
#[tokio::test]
async fn test_get_transaction_by_block_number_and_index_returns_none_for_invalid_block_number()
{
    // Querying a block number that was never produced must resolve to `None`.
    let node = InMemoryNode::<HttpForkSource>::default();
    let input_tx_hash = H256::repeat_byte(0x01);
    // Mine one transaction and remember which block it landed in.
    let (_, input_block_number) = testing::apply_tx(&node, input_tx_hash);
    // The queried number must differ from the block that actually holds the
    // transaction. (The block-*hash* comparison previously here was
    // copy-pasted from the hash-based test and proved nothing about numbers.)
    assert_ne!(input_block_number, U64::from(100));

    let result = node
        .get_transaction_by_block_number_and_index(
            BlockNumber::Number(U64::from(100)),
            U64::from(0),
        )
        .await
        .expect("failed fetching transaction");
    assert!(result.is_none());
}
#[tokio::test]
async fn test_get_transaction_by_block_number_and_index_returns_none_for_invalid_index() {
    // Even for the latest block, an out-of-range index must resolve to `None`.
    let node = InMemoryNode::<HttpForkSource>::default();
    let tx_hash = H256::repeat_byte(0x01);
    testing::apply_tx(&node, tx_hash);

    let fetched = node
        .get_transaction_by_block_number_and_index(BlockNumber::Latest, U64::from(10))
        .await
        .expect("failed fetching transaction");
    assert!(fetched.is_none());
}
#[tokio::test]
async fn test_get_transaction_by_block_number_and_index_returns_transaction_for_valid_input() {
    // Happy path: query by the block number the transaction landed in, index 0.
    let node = InMemoryNode::<HttpForkSource>::default();
    let tx_hash = H256::repeat_byte(0x01);
    let (_, block_number) = testing::apply_tx(&node, tx_hash);

    let fetched = node
        .get_transaction_by_block_number_and_index(
            BlockNumber::Number(block_number),
            U64::from(0),
        )
        .await
        .expect("failed fetching transaction")
        .expect("no transaction");
    assert_eq!(tx_hash, fetched.hash);
}
#[tokio::test]
async fn test_get_transaction_by_block_number_and_index_fetches_full_transaction_for_hash_from_fork(
) {
    // Scenario: the node knows the number->hash mapping and the block itself,
    // but the block stores only the transaction's hash; resolving index 0
    // must fetch the full transaction from the fork via
    // `eth_getTransactionByHash`.
    let mock_server = MockServer::run_with_config(ForkBlockConfig {
        number: 10,
        transaction_count: 0,
        hash: H256::repeat_byte(0xab),
    });
    let input_block_hash = H256::repeat_byte(0x01);
    let input_block_number = U64::from(100);
    let input_tx_hash = H256::repeat_byte(0x02);
    // Expectation must be registered before the node is created, since
    // `ForkDetails::from_network` already talks to the mock server.
    mock_server.expect(
        serde_json::json!({
            "jsonrpc": "2.0",
            "id": 0,
            "method": "eth_getTransactionByHash",
            "params": [
                format!("{:#x}", input_tx_hash),
            ],
        }),
        TransactionResponseBuilder::new()
            .set_hash(input_tx_hash)
            .set_block_hash(input_block_hash)
            .set_block_number(input_block_number)
            .build(),
    );
    let node = InMemoryNode::<HttpForkSource>::new(
        Some(ForkDetails::from_network(&mock_server.url(), None, CacheConfig::None).await),
        crate::node::ShowCalls::None,
        ShowStorageLogs::None,
        ShowVMDetails::None,
        ShowGasDetails::None,
        false,
        &system_contracts::Options::BuiltIn,
    );
    // Seed the local number->hash mapping and a block containing only the
    // transaction's hash.
    {
        let mut writer = node.inner.write().unwrap();
        writer
            .block_hashes
            .insert(input_block_number.as_u64(), input_block_hash);
        writer.blocks.insert(
            input_block_hash,
            Block {
                transactions: vec![TransactionVariant::Hash(input_tx_hash)],
                ..Default::default()
            },
        );
    }
    let actual_tx = node
        .get_transaction_by_block_number_and_index(
            BlockNumber::Number(input_block_number),
            U64::from(0),
        )
        .await
        .expect("failed fetching transaction")
        .expect("no transaction");
    assert_eq!(input_tx_hash, actual_tx.hash);
    // `input_block_number` is already a `U64`; the previous
    // `U64::from(input_block_number)` wrapper was a useless conversion.
    assert_eq!(Some(input_block_number), actual_tx.block_number);
}
#[tokio::test]
async fn test_get_transaction_by_block_number_and_index_fetches_from_fork_if_block_missing() {
    // Scenario: the node does not know the requested block number at all, so
    // the entire `eth_getTransactionByBlockNumberAndIndex` call is delegated
    // to the fork.
    let mock_server = MockServer::run_with_config(ForkBlockConfig {
        number: 10,
        transaction_count: 0,
        hash: H256::repeat_byte(0xab),
    });
    let input_block_hash = H256::repeat_byte(0x01);
    let input_block_number = U64::from(100);
    let input_tx_hash = H256::repeat_byte(0x02);
    // Expectation must be registered before the node is created, since
    // `ForkDetails::from_network` already talks to the mock server.
    mock_server.expect(
        serde_json::json!({
            "jsonrpc": "2.0",
            "id": 0,
            "method": "eth_getTransactionByBlockNumberAndIndex",
            "params": [
                format!("{:#x}", input_block_number),
                "0x1"
            ],
        }),
        TransactionResponseBuilder::new()
            .set_hash(input_tx_hash)
            .set_block_hash(input_block_hash)
            // `input_block_number` is already a `U64`; the previous
            // `U64::from(...)` wrapper was a useless conversion.
            .set_block_number(input_block_number)
            .build(),
    );
    let node = InMemoryNode::<HttpForkSource>::new(
        Some(ForkDetails::from_network(&mock_server.url(), None, CacheConfig::None).await),
        crate::node::ShowCalls::None,
        ShowStorageLogs::None,
        ShowVMDetails::None,
        ShowGasDetails::None,
        false,
        &system_contracts::Options::BuiltIn,
    );
    let actual_tx = node
        .get_transaction_by_block_number_and_index(
            BlockNumber::Number(input_block_number),
            U64::from(1),
        )
        .await
        .expect("failed fetching transaction")
        .expect("no transaction");
    assert_eq!(input_tx_hash, actual_tx.hash);
    // The returned block number can only have come from the mocked fork response.
    assert_eq!(Some(input_block_number), actual_tx.block_number);
}
#[tokio::test]
async fn test_protocol_version_returns_currently_supported_version() {
    // `eth_protocolVersion` should report the module's `PROTOCOL_VERSION` constant.
    let node = InMemoryNode::<HttpForkSource>::default();
    let expected_version = String::from(PROTOCOL_VERSION);

    let actual_version = node
        .protocol_version()
        .await
        // Message fixed: this fetches the protocol version; the old text was
        // copy-pasted from a filter-creation test.
        .expect("failed fetching protocol version");
    assert_eq!(expected_version, actual_version);
}
}