use crate::instructions::{self, Instruction};
use bit_set::BitSet;
use cfx_types::H256;
use keccak_hash::KECCAK_EMPTY;
use malloc_size_of::{MallocSizeOf, MallocSizeOfOps};
use memory_cache::MemoryLruCache;
use parking_lot::Mutex;
use std::sync::Arc;
#[cfg(test)]
use rustc_hex::FromHex;
/// Default memory budget (4 MiB) for the shared jump-destination LRU cache,
/// as accounted by the `MallocSizeOf` impls below.
const DEFAULT_CACHE_SIZE: usize = 4 * 1024 * 1024;
/// Shared, immutable bitmap over code offsets (one bit per byte of code),
/// cheaply clonable via `Arc`.
#[derive(Clone)]
struct Bits(Arc<BitSet>);
impl MallocSizeOf for Bits {
    // Memory charged against the LRU cache budget for one bitmap.
    fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize {
        // NOTE(review): `BitSet::capacity()` is measured in *bits*, so this
        // charges 8 bytes per bit — roughly 64x the actual heap usage
        // (~capacity/8 bytes). Possibly a deliberate safety margin; confirm
        // before tightening, since it controls how many entries fit in the
        // cache's byte budget.
        self.0.capacity() * 8
    }
}
/// Cached analysis result for one piece of contract code.
#[derive(Clone)]
struct CacheItem {
    // Bit set at every offset holding a valid JUMPDEST opcode.
    jump_destination: Bits,
    // Bit set at every offset holding a BEGINSUB opcode; only populated
    // pre-Cancun (the 0x5c slot is TLOAD under Cancun — see the `!CANCUN`
    // guard in `find_jump_and_sub_destinations`).
    sub_entrypoint: Bits,
}
impl MallocSizeOf for CacheItem {
    /// Total charged size: the sum of both destination bitmaps.
    fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
        let jumps = self.jump_destination.size_of(ops);
        let subs = self.sub_entrypoint.size_of(ops);
        jumps + subs
    }
}
/// Process-wide LRU cache mapping code hash -> precomputed jump/sub
/// destination bitmaps, so each contract's bytecode is analyzed only once.
/// The `CANCUN` flag selects the interpretation of opcode 0x5c
/// (`BEGINSUB_TLOAD`): subroutine entry pre-Cancun, TLOAD under Cancun.
pub struct SharedCache<const CANCUN: bool> {
    // Guarded by a mutex: the cache is shared across executing threads.
    jump_destinations: Mutex<MemoryLruCache<H256, CacheItem>>,
}
impl<const CANCUN: bool> SharedCache<CANCUN> {
    /// Creates a cache bounded to `max_size` bytes, as accounted by the
    /// `MallocSizeOf` impls of the cached items.
    pub fn new(max_size: usize) -> Self {
        SharedCache {
            jump_destinations: Mutex::new(MemoryLruCache::new(max_size)),
        }
    }

    /// Returns the `JUMPDEST` and `BEGINSUB` bitmaps for `code`, computing
    /// and caching them under `code_hash` on a miss.
    ///
    /// Code whose hash is `KECCAK_EMPTY` is analyzed on the fly and never
    /// inserted, so the trivial empty-code entry does not occupy the LRU.
    pub fn jump_and_sub_destinations(
        &self, code_hash: &H256, code: &[u8],
    ) -> (Arc<BitSet>, Arc<BitSet>) {
        if code_hash == &KECCAK_EMPTY {
            let cache_item = Self::find_jump_and_sub_destinations(code);
            return (
                cache_item.jump_destination.0,
                cache_item.sub_entrypoint.0,
            );
        }
        if let Some(d) = self.jump_destinations.lock().get_mut(code_hash) {
            return (d.jump_destination.0.clone(), d.sub_entrypoint.0.clone());
        }
        // Miss: analyze outside the lock, then insert. Two racing threads may
        // both compute the same item; the later insert harmlessly overwrites
        // the earlier one with an identical value.
        let d = Self::find_jump_and_sub_destinations(code);
        self.jump_destinations.lock().insert(*code_hash, d.clone());
        (d.jump_destination.0, d.sub_entrypoint.0)
    }

    /// Single pass over `code` marking every `JUMPDEST` offset and, pre-Cancun,
    /// every `BEGINSUB` offset. PUSH immediates are skipped so opcode bytes
    /// inside push data are not treated as destinations; unknown opcodes are
    /// stepped over one byte at a time.
    fn find_jump_and_sub_destinations(code: &[u8]) -> CacheItem {
        let mut jump_dests = BitSet::with_capacity(code.len());
        let mut sub_entrypoints = BitSet::with_capacity(code.len());
        let mut position = 0;
        while position < code.len() {
            let instruction = Instruction::from_u8(code[position]);
            if let Some(instruction) = instruction {
                match instruction {
                    instructions::JUMPDEST => {
                        jump_dests.insert(position);
                    }
                    // 0x5c means TLOAD under Cancun, so it only marks a
                    // subroutine entry point on pre-Cancun forks.
                    instructions::BEGINSUB_TLOAD if !CANCUN => {
                        sub_entrypoints.insert(position);
                    }
                    _ => {
                        if let Some(push_bytes) = instruction.push_bytes() {
                            // Skip the immediate data of PUSH1..PUSH32.
                            position += push_bytes;
                        }
                    }
                }
            }
            position += 1;
        }
        // Both sets were sized for the worst case (one bit per code byte) and
        // are typically sparse, so shrink them before freezing behind an Arc.
        // Fix: previously only `jump_dests` was shrunk, leaving
        // `sub_entrypoints` holding its full `code.len()`-bit allocation for
        // the lifetime of the cache entry (it is always empty under Cancun).
        jump_dests.shrink_to_fit();
        sub_entrypoints.shrink_to_fit();
        CacheItem {
            jump_destination: Bits(Arc::new(jump_dests)),
            sub_entrypoint: Bits(Arc::new(sub_entrypoints)),
        }
    }
}
impl<const CANCUN: bool> Default for SharedCache<CANCUN> {
    /// A cache with the standard `DEFAULT_CACHE_SIZE` (4 MiB) budget.
    fn default() -> Self {
        Self::new(DEFAULT_CACHE_SIZE)
    }
}
#[test]
fn test_find_jump_destinations() {
    // Two PUSH32 instructions (1 + 32 bytes each) put the JUMPDEST at
    // offset 66; the 0x5b bytes inside the push payloads must be ignored.
    let code: Vec<u8> = "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff5b01600055".from_hex().unwrap();
    let item = SharedCache::<false>::find_jump_and_sub_destinations(&code);
    let jump_offsets: Vec<usize> = item.jump_destination.0.iter().collect();
    assert_eq!(jump_offsets, vec![66]);
    assert!(item.sub_entrypoint.0.is_empty());
}
#[test]
fn test_find_jump_destinations_not_in_data_segments() {
    // The 0x5B inside the PUSH1 immediate at offset 4 is data, not a
    // JUMPDEST; only the real JUMPDEST at offset 6 should be marked.
    let code: Vec<u8> = "600656605B565B6004".from_hex().unwrap();
    let item = SharedCache::<false>::find_jump_and_sub_destinations(&code);
    let jump_offsets: Vec<usize> = item.jump_destination.0.iter().collect();
    assert_eq!(jump_offsets, vec![6]);
    assert!(item.sub_entrypoint.0.is_empty());
}
#[test]
fn test_find_sub_entrypoints() {
    // Pre-Cancun (CANCUN = false): 0x5c is BEGINSUB; the two real
    // entry points sit at offsets 12 and 17 (others are push data).
    let code: Vec<u8> =
        "6800000000000000000c5e005c60115e5d5c5d".from_hex().unwrap();
    let item = SharedCache::<false>::find_jump_and_sub_destinations(&code);
    assert!(item.jump_destination.0.is_empty());
    let sub_offsets: Vec<usize> = item.sub_entrypoint.0.iter().collect();
    assert_eq!(sub_offsets, vec![12, 17]);
}
#[test]
fn test_find_jump_and_sub_allowing_unknown_opcodes() {
    // 0xCC is not a known instruction; the scan must step over it and
    // still find the JUMPDEST at 0 and the BEGINSUB at 2.
    assert!(Instruction::from_u8(0xcc).is_none());
    let code: Vec<u8> = "5BCC5C".from_hex().unwrap();
    let item = SharedCache::<false>::find_jump_and_sub_destinations(&code);
    let jump_offsets: Vec<usize> = item.jump_destination.0.iter().collect();
    assert_eq!(jump_offsets, vec![0]);
    let sub_offsets: Vec<usize> = item.sub_entrypoint.0.iter().collect();
    assert_eq!(sub_offsets, vec![2]);
}