cfxcore/consensus/consensus_inner/
mod.rs

1// Copyright 2019 Conflux Foundation. All rights reserved.
2// Conflux is free software and distributed under GNU General Public License.
3// See http://www.gnu.org/licenses/
4
5mod blame_verifier;
6pub mod confirmation_meter;
7pub mod consensus_executor;
8pub mod consensus_new_block_handler;
9use cfxcore_errors::ProviderBlockError;
10use cfxcore_pow as pow;
11
12use pow::{PowComputer, ProofOfWorkConfig};
13
14use crate::{
15    block_data_manager::{
16        BlockDataManager, BlockExecutionResultWithEpoch, DataVersionTuple,
17        EpochExecutionContext,
18    },
19    consensus::{
20        anticone_cache::AnticoneCache,
21        consensus_inner::consensus_executor::ConsensusExecutor,
22        debug_recompute::log_invalid_state_root, pastset_cache::PastSetCache,
23        pos_handler::PosVerifier,
24    },
25    pos::pow_handler::POS_TERM_EPOCHS,
26    state_exposer::{ConsensusGraphBlockExecutionState, STATE_EXPOSER},
27    verification::VerificationConfig,
28};
29use cfx_internal_common::{
30    consensus_api::StateMaintenanceTrait, EpochExecutionCommitment,
31};
32use cfx_parameters::{consensus::*, consensus_internal::*};
33use cfx_types::{H256, U256, U512};
34use dag::{
35    get_future, topological_sort, Graph, RichDAG, RichTreeGraph, TreeGraph, DAG,
36};
37use hashbrown::HashMap as FastHashMap;
38use hibitset::{BitSet, BitSetLike, DrainableBitSet};
39use link_cut_tree::{CaterpillarMinLinkCutTree, SizeMinLinkCutTree};
40use malloc_size_of::{MallocSizeOf, MallocSizeOfOps};
41use malloc_size_of_derive::MallocSizeOf as DeriveMallocSizeOf;
42use metrics::{Counter, CounterUsize};
43use primitives::{
44    pos::PosBlockId, Block, BlockHeader, BlockHeaderBuilder, EpochId,
45};
46use slab::Slab;
47use std::{
48    cmp::{max, min},
49    collections::{BinaryHeap, HashMap, HashSet, VecDeque},
50    convert::TryFrom,
51    mem,
52    sync::Arc,
53};
lazy_static! {
    // Metrics counter incremented whenever a block's blame field or its
    // deferred state root fails validation; registered in the
    // "system_metrics" group so it can be scraped alongside other counters.
    static ref INVALID_BLAME_OR_STATE_ROOT_COUNTER: Arc<dyn Counter<usize>> =
        CounterUsize::register_with_group(
            "system_metrics",
            "invalid_blame_or_state_root_count"
        );
}
61
/// Tunable parameters of the inner consensus algorithms (GHAST, timer chain,
/// era/checkpoint formation) plus a few debugging switches.
#[derive(Clone)]
pub struct ConsensusInnerConfig {
    /// Beta is the threshold in GHAST algorithm
    pub adaptive_weight_beta: u64,
    /// The heavy block ratio (h) in GHAST algorithm
    pub heavy_block_difficulty_ratio: u64,
    /// The timer block ratio in timer chain algorithm
    pub timer_chain_block_difficulty_ratio: u64,
    /// The timer chain beta ratio
    pub timer_chain_beta: u64,
    /// The number of epochs per era. Each era is a potential checkpoint
    /// position. The parent_edge checking and adaptive checking are defined
    /// relative to the era start blocks.
    pub era_epoch_count: u64,
    /// Optimistic execution is the feature to execute ahead of the deferred
    /// execution boundary. The goal is to pipeline the transaction
    /// execution and the block packaging and verification.
    /// optimistic_executed_height is the number of steps to go ahead
    pub enable_optimistic_execution: bool,
    /// Control whether we enable the state exposer for testing purposes.
    pub enable_state_expose: bool,
    /// The deferred epoch count before a confirmed epoch.
    pub pos_pivot_decision_defer_epoch_count: u64,

    /// Defer epoch count that replaces
    /// `pos_pivot_decision_defer_epoch_count` once the confirmed height
    /// reaches `cip113_transition_height`.
    pub cip113_pivot_decision_defer_epoch_count: u64,
    /// The confirmed height at which the CIP-113 defer count above takes
    /// effect.
    pub cip113_transition_height: u64,

    /// If we hit an invalid state root, we will dump the information into a
    /// directory specified here. This is useful for testing.
    pub debug_dump_dir_invalid_state_root: Option<String>,
    // NOTE(review): presumably restricts invalid-state-root debugging to one
    // epoch hash — confirm against `debug_recompute`.
    pub debug_invalid_state_root_epoch: Option<H256>,
    // NOTE(review): name suggests pivot-chain construction re-executes from
    // this height when set — confirm at the use site.
    pub force_recompute_height_during_construct_pivot: Option<u64>,
    // NOTE(review): recovery/storage switches for the MPT snapshot layout;
    // semantics live in the storage crate — confirm there.
    pub recovery_latest_mpt_snapshot: bool,
    pub use_isolated_db_for_mpt_table: bool,
}
97
98impl ConsensusInnerConfig {
99    pub fn pos_pivot_decision_defer_epoch_count(
100        &self, confirmed_height: u64,
101    ) -> u64 {
102        if confirmed_height >= self.cip113_transition_height {
103            self.cip113_pivot_decision_defer_epoch_count
104        } else {
105            self.pos_pivot_decision_defer_epoch_count
106        }
107    }
108}
109
/// The blame information computed for a block: the blame count together with
/// the accumulated roots that belong in the block header.
#[derive(Copy, Clone, DeriveMallocSizeOf)]
pub struct StateBlameInfo {
    /// The blame count, i.e., the distance (in blocks along parent edges) to
    /// the last block considered correct on the pivot chain.
    pub blame: u32,
    /// Accumulated hash over the deferred state roots covered by the blame.
    pub state_vec_root: H256,
    /// Accumulated hash over the corresponding deferred receipts roots.
    pub receipts_vec_root: H256,
    /// Accumulated hash over the corresponding deferred logs bloom hashes.
    pub logs_bloom_vec_root: H256,
}
117
/// ConsensusGraphNodeData contains all extra information of a block that will
/// change as the consensus graph state evolves (e.g., pivot chain changes).
/// Unlike the ConsensusGraphNode fields, fields in ConsensusGraphNodeData will
/// only be available after the block is *preactivated* (after calling
/// preactivate_block()).
#[derive(DeriveMallocSizeOf)]
pub struct ConsensusGraphNodeData {
    /// It indicates the epoch number of the block, i.e., the height of the
    /// corresponding pivot chain block of this one
    pub epoch_number: u64,
    /// It indicates whether the block is partial invalid or not. A block
    /// is partial invalid if it selects an incorrect parent or fills in an
    /// incorrect adaptive field.
    partial_invalid: bool,
    /// It indicates whether the block is pending or not. A block is pending if
    /// the consensus engine determines that it is not necessary to determine
    /// its partial invalid status.
    pending: bool,
    /// This is a special counter marking whether the block is active or not.
    /// A block is active only if the counter is zero.
    /// A partial invalid block will get a NULL counter.
    /// A normal block which is referenced directly or indirectly will have a
    /// positive counter.
    inactive_dependency_cnt: usize,
    /// This is an implementation flag indicating whether the node is active or
    /// not. Because multiple blocks may have their `inactive_dependency_cnt`
    /// turning zero at the same time, we need this flag to process them
    /// correctly one by one.
    activated: bool,
    /// This records the force confirm point in the past view of this block.
    force_confirm: usize,
    /// The indices set of the blocks in the epoch when the current
    /// block is as pivot chain block. This set does not contain
    /// the block itself.
    blockset_in_own_view_of_epoch: Vec<usize>,
    /// Ordered executable blocks in this epoch. This filters out blocks that
    /// are not in the same era of the epoch pivot block.
    ///
    /// For cur_era_genesis, this field should NOT be used because they contain
    /// out-of-era blocks not maintained in the memory.
    ordered_executable_epoch_blocks: Vec<usize>,
    /// If an epoch has more than ``EPOCH_EXECUTED_BLOCK_BOUND''. We will only
    /// execute the last ``EPOCH_EXECUTED_BLOCK_BOUND'' and skip the
    /// remaining. The `skipped_epoch_blocks` also contain those blocks that
    /// are not in the same era of the pivot block.
    /// We use the block hashes instead of block arena indices here to ensure
    /// the consistency with the database after making a checkpoint.
    skipped_epoch_blocks: Vec<H256>,
    /// It indicates whether `blockset_in_own_view_of_epoch` and
    /// `skipped_epoch_blocks` are cleared due to its size.
    blockset_cleared: bool,
    /// The sequence number is used to identify the order of each block
    /// entering the consensus. The sequence number of the genesis is used
    /// by the synchronization layer to determine whether a block exists in
    /// the consensus or not.
    sequence_number: u64,
    /// The longest chain of all timer blocks.
    past_view_timer_longest_difficulty: i128,
    /// The last timer block index in the chain.
    past_view_last_timer_block_arena_index: usize,
    /// The height of the closest timer block in the longest timer chain.
    /// Note that this only considers the current longest timer chain and
    /// ignores the remaining timer blocks.
    ledger_view_timer_chain_height: u64,
    /// vote_valid_lca_height indicates the fork_at height that the vote_valid
    /// field corresponds to.
    vote_valid_lca_height: u64,
    /// It indicates whether the blame voting information of this block is
    /// correct or not.
    vote_valid: bool,
    /// It denotes the height of the last pivot chain in the past set of this
    /// block.
    last_pivot_in_past: u64,
    /// It indicates whether the states stored in header is correct or not.
    /// It's evaluated when needed, i.e., when we need the blame information to
    /// generate a new block or to compute rewards.
    pub state_valid: Option<bool>,
    /// It stores the correct blame info for the block if its state is invalid.
    /// It's evaluated when needed and acts as a cache.
    blame_info: Option<StateBlameInfo>,
}
199
200impl ConsensusGraphNodeData {
201    fn new(
202        epoch_number: u64, sequence_number: u64, inactive_dependency_cnt: usize,
203    ) -> Self {
204        ConsensusGraphNodeData {
205            epoch_number,
206            partial_invalid: false,
207            pending: false,
208            inactive_dependency_cnt,
209            activated: false,
210            force_confirm: NULL,
211            blockset_in_own_view_of_epoch: Default::default(),
212            ordered_executable_epoch_blocks: Default::default(),
213            skipped_epoch_blocks: Default::default(),
214            blockset_cleared: true,
215            sequence_number,
216            past_view_timer_longest_difficulty: 0,
217            past_view_last_timer_block_arena_index: NULL,
218            ledger_view_timer_chain_height: 0,
219            vote_valid_lca_height: NULLU64,
220            vote_valid: true,
221            last_pivot_in_past: 0,
222            state_valid: None,
223            blame_info: None,
224        }
225    }
226}
227
/// Per-position metadata maintained for each block on the current pivot
/// chain (see `ConsensusGraphInner::pivot_chain_metadata`).
#[derive(DeriveMallocSizeOf)]
struct ConsensusGraphPivotData {
    /// The set of blocks whose last_pivot_in_past point to this pivot chain
    /// location
    last_pivot_in_past_blocks: HashSet<usize>,
    /// The total weight of the past set of the pivot block. This value
    /// is used by the confirmation meter.
    past_weight: i128,
}
237
238impl Default for ConsensusGraphPivotData {
239    fn default() -> Self {
240        ConsensusGraphPivotData {
241            last_pivot_in_past_blocks: HashSet::new(),
242            past_weight: 0,
243        }
244    }
245}
246
247/// # Implementation details of Eras, Timer chain and Checkpoints
248///
/// Era in Conflux is defined based on the height of a block. Every
/// era_epoch_count heights correspond to one era. For example, if
/// era_epoch_count is 50000, then blocks at height 0 (the original genesis)
252/// is the era genesis of the first era. The blocks at height 50000 are era
253/// genesis blocks of the following era. Note that it is possible to have
254/// multiple era genesis blocks for one era period. Eventually, only
255/// one era genesis block and its subtree will become dominant and all other
256/// genesis blocks together with their subtrees will be discarded. The
257/// definition of Era enables Conflux to form checkpoints at the stabilized
258/// era genesis blocks.
259///
260/// # Implementation details of the Timer chain
261///
262/// Timer chain contains special blocks whose PoW qualities are significantly
263/// higher than normal blocks. The goal of timer chain is to enable a slowly
264/// growing longest chain to indicate the time elapsed between two blocks.
265/// Timer chain also provides a force confirmation rule which will enable us
266/// to safely form the checkpoint.
267///
268/// Any block whose PoW quality is timer_chain_block_difficulty_ratio times
269/// higher than its supposed difficulty is *timer block*. The longest chain of
270/// timer blocks (counting both parent edges and reference edges) is the timer
271/// chain. When timer_chain_beta is large enough, malicious attackers can
272/// neither control the timer chain nor stop its growth. We use Timer(G) to
/// denote the number of timer chain blocks in G. We use TimerDis(b_1, b_2) to
/// denote Timer(Past(b_1)) - Timer(Past(b_2)). In case that b_2 \in
/// Future(b_1), TimerDis(b_1, b_2) is a good indicator of how much time has
/// passed between the generation of the two blocks.
277///
278/// A block b in G is considered force-confirm if 1) there are *consecutively*
279/// timer_chain_beta timer chain blocks under the subtree of b and 2) there are
280/// at least timer_chain_beta blocks after these blocks (not necessarily in the
281/// subtree of b). Force-confirm rule overrides any GHAST weight rule, i.e.,
282/// new blocks will always be generated under b.
283///
284///
285/// # Implementation details of the GHAST algorithm
286///
287/// Conflux uses the Greedy Heaviest Adaptive SubTree (GHAST) algorithm to
288/// select a chain from the genesis block to one of the leaf blocks as the pivot
/// chain. For each block b, the GHAST algorithm computes whether it is
/// adaptive:
290///
291/// ```python
292/// B = Past(b)
293/// f is the force confirm point of b in the view of Past(b)
294/// a = b.parent
295/// adaptive = False
296/// Let f(x) = 2 * SubTW(B, x) - SubTW(B, x.parent) + x.parent.weight
297/// Let g(x) = adaptive_weight_beta * b.diff
298/// while a != force_confirm do
299///     if TimerDis(a, b) >= timer_chain_beta and f(a) < g(a) then
300///         adaptive = True
301///     a = a.parent
302/// ```
303///
304/// To efficiently compute adaptive, we maintain a link-cut tree called
305/// adaptive_weight_tree. The value for x in the link-cut-tree is
306/// 2 * SubTW(B, x) + x.parent.weight - SubTW(B, x.parent). Note that we need to
307/// do special caterpillar update in the Link-Cut-Tree, i.e., given a node X, we
308/// need to update the values of all of those nodes A such that A is the child
309/// of one of the node in the path from Genesis to X.
310///
311/// For an adaptive block, its weights will be calculated in a special way. If
312/// its PoW quality is adaptive_heavy_weight_ratio times higher than the normal
313/// difficulty, its weight will be adaptive_heavy_weight_ratio instead of one.
314/// Otherwise, the weight will be zero. The goal of adaptive weight is to deal
315/// with potential liveness attacks that balance two subtrees. Note that when
316/// computing adaptive we only consider the nodes after force_confirm.
317///
318/// # Implementation details of partial invalid blocks
319///
320/// One block may become partial invalid because 1) it chooses incorrect parent
321/// or 2) it generates an adaptive block when it should not. In normal
322/// situations, we should verify every block we receive and determine whether it
323/// is partial invalid or not. For a partial invalid block b, it will not
324/// receive any reward. Normal nodes will also refrain from *directly or
325/// indirectly* referencing b until TimerDis(*b*, new_block) is greater than or
/// equal to timer_dis_delta. Normal nodes essentially ignore partial invalid
327/// blocks for a while. We implement this via our inactive_dependency_cnt field.
328/// Last but not least, we exclude *partial invalid* blocks from the timer chain
329/// consideration. They are not timer blocks!
330///
331/// # Implementation details of checkpoints
332///
333/// Our consensus engine will form a checkpoint pair (a, b) given a DAG state G
334/// if:
335///
336/// 1) b is force confirmed in G
337/// 2) a is force confirmed in Past(b)
338///
339/// Now we are safe to remove all blocks that are not in Future(a). For those
340/// blocks that are in the Future(a) but not in Subtree(a), we can also redirect
341/// a as their parents. We call *a* the cur_era_genesis_block and *b* the
342/// cur_era_stable_block.
343///
344/// We no longer need to check the partial invalid block which does not
345/// referencing b (directly and indirectly), because such block would never go
346/// into the timer chain. Our assumption is that the timer chain will not reorg
347/// on a length greater than timer_chain_beta. For those blocks which
348/// referencing *b* but also not under the subtree of a, they are by default
349/// partial invalid. We can ignore them as well. Therefore *a* can be treated as
350/// a new genesis block. We are going to check the possibility of making
351/// checkpoints only at the era boundary.
352///
353/// Note that we have the assumption that the force confirmation point will
354/// always move along parental edges, i.e., it is not possible for the point
355/// to move to a sibling tree. This assumption is true if the timer_chain_beta
356/// and the timer_chain_difficulty_ratio are set to large enough values.
357///
358/// # Introduction of blaming mechanism
359///
360/// Blaming is used to provide proof for state root of a specific pivot block.
361/// The rationale behind is as follows. Verifying state roots of blocks off
362/// pivot chain is very costly and sometimes impractical, e.g., when the block
363/// refers to another block that is not in the current era. It is preferred to
364/// avoid this verification if possible. Normally, Conflux only needs to store
365/// correct state root in header of pivot block to provide proof for light node.
366/// However, the pivot chain may oscillate at the place close to ledger tail,
367/// which means that a block that is off pivot at some point may become pivot
368/// block in the future. If we do not verify the state root in the header of
369/// that block, when it becomes a pivot block later, we cannot guarantee the
370/// correctness of the state root in its header. Therefore, if we do not verify
371/// the state root in off-pivot block, we cannot guarantee the correctness of
372/// state root in pivot block. Of course, one may argue that you can switch
373/// pivot chain when incorrect state root in pivot block is observed. However,
374/// this makes the check for the correct parent selection rely on state root
375/// checking. Then, since Conflux is an inclusive protocol which adopts
376/// off-pivot blocks in its final ledger, it needs to verify the correctness of
377/// parent selection of off-pivot blocks and this relies on the state
378/// verification on all the parent candidates of the off-pivot blocks.
379/// Therefore, this eventually will lead to state root verification on any
380/// blocks including off-pivot ones. This violates the original goal of saving
381/// cost of the state root verification in off-pivot blocks.
382///
383/// We therefore allow incorrect state root in pivot block header, and use the
384/// blaming mechanism to enable the proof generation of the correct state root.
385/// A full/archive node verifies the deferred state root and the blaming
386/// information stored in the header of each pivot block. It blames the blocks
387/// with incorrect information and stores the blaming result in the header of
388/// the newly mined block. The blaming result is simply a count which represents
389/// the distance (in the number of blocks) between the last correct block on the
390/// pivot chain and the newly mined block. For example, consider the blocks
391/// Bi-1, Bi, Bi+1, Bi+2, Bi+3. Assume the blaming count in Bi+3 is 2.
392/// This means when Bi+3 was mined, the node thinks Bi's information is correct,
393/// while the information in Bi+1 and Bi+2 are wrong. Therefore, the node
394/// recovers the true deferred state roots (DSR) of Bi+1, Bi+2, and Bi+3 by
395/// computing locally, and then computes keccak(DSRi+3, keccak(DSRi+2, DSRi+1))
396/// and stores the hash into the header of Bi+3 as its final deferred
397/// state root. A special case is if the blaming count is 0, the final deferred
398/// state root of the block is simply the original deferred state root, i.e.,
399/// DSRi+3 for block Bi+3 in the above case.
400///
401/// Computing the reward for a block relies on correct blaming behavior of
402/// the block. If the block is a pivot block when computing its reward,
403/// it is required that:
404///
405/// 1. the block correctly chooses its parent;
406/// 2. the block contains the correct deferred state root;
407/// 3. the block correctly blames all its previous blocks following parent
408///    edges.
409///
410/// If the block is an off-pivot block when computing its reward,
411/// it is required that:
412/// 1. the block correctly chooses its parent;
413/// 2. the block correctly blames the blocks in the intersection of pivot chain
414///    blocks and all its previous blocks following parent edges. (This is to
415///    encourage the node generating the off-pivot block to keep verifying pivot
416///    chain blocks.)
417///
418/// To provide proof of state root to light node (or a full node when it tries
419/// to recover from a checkpoint), the protocol goes through the following
420/// steps. Let's assume the verifier has a subtree of block headers which
421/// includes the block whose state root is to be verified.
422///
423/// 1. The verifier node gets a merkle path whose merkle root corresponds
424/// to the state root after executing block Bi. Let's call it the path root
425/// which is to be verified.
426///
427/// 2. Assume deferred count is 2, the verifier node gets block header Bi+2
428/// whose deferred state root should be the state root of Bi.
429///
430/// 3. The verifier node locally searches for the first block whose information
431/// in header is correct, starting from block Bi+2 along with the pivot
432/// chain. The correctness of header information of a block is decided based
433/// on the ratio of the number of blamers in the subtree of the block. If the
434/// ratio is small enough, the information is correct. Assume the first such
435/// block is block Bj.
436///
437/// 4. The verifier then searches backward along the pivot chain from Bj for
438/// the block whose blaming count is larger than or equal to the distance
439/// between block Bi+2 and it. Let's call this block as Bk.
440///
441/// 5. The verifier asks the prover which is a full or archive node to get the
442/// deferred state root of block Bk and its DSR vector, i.e., [..., DSRi+2,
443/// ...].
444///
445/// 6. The verifier verifies the accumulated keccak hash of [..., DSRi+2, ...]
446/// equals to deferred state root of Bk, and then verifies that DSRi+2 equals
447/// to the path root of Bi.
448///
449/// In ConsensusGraphInner, every block corresponds to a ConsensusGraphNode and
450/// each node has an internal index. This enables fast internal implementation
451/// to use integer index instead of H256 block hashes.
pub struct ConsensusGraphInner {
    /// data_man is the handle to access raw block data
    pub data_man: Arc<BlockDataManager>,
    /// Handle used to verify PoS references carried by blocks.
    pub pos_verifier: Arc<PosVerifier>,
    pub inner_conf: ConsensusInnerConfig,
    pub pow_config: ProofOfWorkConfig,
    pub pow: Arc<PowComputer>,
    //executor: Arc<ConsensusExecutor>,
    /// This slab holds consensus graph node data and the array index is the
    /// internal index.
    pub arena: Slab<ConsensusGraphNode>,
    /// indices maps block hash to internal index.
    pub hash_to_arena_indices: FastHashMap<H256, usize>,
    /// The current pivot chain indexes.
    pivot_chain: Vec<usize>,
    /// The metadata associated with each pivot chain block
    pivot_chain_metadata: Vec<ConsensusGraphPivotData>,
    /// The longest timer chain block indexes
    timer_chain: Vec<usize>,
    /// The accumulative LCA of consecutive timer chain blocks.
    // NOTE(review): the original comment was truncated ("for consecutive");
    // confirm the exact window of blocks the LCA is taken over.
    timer_chain_accumulative_lca: Vec<usize>,
    /// The set of *graph* tips in the TreeGraph for mining.
    /// Note that this set does not include non-active partial invalid blocks
    terminal_hashes: HashSet<H256>,
    /// The ``current'' era_genesis block index. It will start being the
    /// original genesis. As time goes, it will move to future era genesis
    /// checkpoint.
    cur_era_genesis_block_arena_index: usize,
    /// The height of the ``current'' era_genesis block
    cur_era_genesis_height: u64,
    /// The height of the ``stable'' era block, unless from the start, it is
    /// always era_epoch_count higher than era_genesis_height
    cur_era_stable_height: u64,
    /// The expected hash of the initial stable block.
    // NOTE(review): the previous comment said "If this value is not none",
    // but the field is a plain H256, not an Option — the comment looks stale;
    // confirm intended semantics.
    cur_era_stable_block_hash: H256,
    /// If this value is not none, then we manually maintain the future set
    /// of the expected stable block. We have to do this because during the
    /// initial stage it may not always be on the pivot chain.
    initial_stable_future: Option<BitSet>,
    /// The timer chain height of the ``current'' era_genesis block
    cur_era_genesis_timer_chain_height: u64,
    /// The best timer chain difficulty and hash in the current graph
    best_timer_chain_difficulty: i128,
    best_timer_chain_hash: H256,

    // TODO(lpl): This is initialized as cur_era_genesis for now.
    // TODO(lpl): It's always used after being updated, so this should be okay.
    /// The pivot decision of the best (the round is the largest) pos
    /// reference.
    best_pos_pivot_decision: (H256, u64),

    /// weight_tree maintains the subtree weight of each node in the TreeGraph
    weight_tree: SizeMinLinkCutTree,
    /// adaptive_tree maintains 2 * SubStableTW(B, x) - SubTW(B, P(x)) +
    /// Weight(P(x))
    adaptive_tree: CaterpillarMinLinkCutTree,
    /// A priority queue holding, for every non-active partial invalid block,
    /// the timer chain stamp at which it will become valid
    invalid_block_queue: BinaryHeap<(i128, usize)>,
    /// It maintains the expected difficulty of the next local mined block.
    pub current_difficulty: U256,
    /// The cache to store Anticone information of each node. This could be
    /// very large so we periodically remove old ones in the cache.
    anticone_cache: AnticoneCache,
    pastset_cache: PastSetCache,
    /// Monotonic counter handed out to blocks as they enter consensus.
    sequence_number_of_block_entrance: u64,

    /// This is a cache map to speed up the lca computation of terminals in the
    /// best terminals call. The basic idea is that if no major
    /// reorganization happens, then it could use the last results
    /// instead of calling it again.
    best_terminals_lca_height_cache: FastHashMap<usize, u64>,
    /// This is to record the pivot chain reorganization height since the last
    /// invocation of best_terminals()
    best_terminals_reorg_height: u64,
    /// This is a cache to record history of checking whether a block has timer
    /// block in its anticone.
    has_timer_block_in_anticone_cache: HashSet<usize>,

    /// `true` before we enter `CacheUpSyncBlock`. We need to execute
    /// transactions and process state if it's `false`.
    header_only: bool,
}
537
538impl MallocSizeOf for ConsensusGraphInner {
539    fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
540        self.arena.size_of(ops)
541            + self.hash_to_arena_indices.size_of(ops)
542            + self.pivot_chain.size_of(ops)
543            + self.pivot_chain_metadata.size_of(ops)
544            + self.timer_chain.size_of(ops)
545            + self.timer_chain_accumulative_lca.size_of(ops)
546            + self.terminal_hashes.size_of(ops)
547            + self.initial_stable_future.size_of(ops)
548            + self.weight_tree.size_of(ops)
549            + self.adaptive_tree.size_of(ops)
550            + self.invalid_block_queue.size_of(ops)
551            + self.pow_config.size_of(ops)
552            + self.data_man.size_of(ops)
553            + self.anticone_cache.size_of(ops)
554            + self.pastset_cache.size_of(ops)
555            + self.best_terminals_lca_height_cache.size_of(ops)
556            + self.best_terminals_reorg_height.size_of(ops)
557    }
558}
559
/// Immutable per-block information in the consensus graph. Unlike
/// `ConsensusGraphNodeData`, these fields are fixed once the block is
/// inserted.
#[derive(DeriveMallocSizeOf)]
pub struct ConsensusGraphNode {
    /// The block hash.
    pub hash: H256,
    /// The height of the block in the tree graph.
    pub height: u64,
    /// The arena index of the parent block.
    pub parent: usize,
    /// The block difficulty.
    difficulty: U256,
    /// Whether the block qualifies as a heavy block (see the GHAST notes in
    /// this module's documentation).
    is_heavy: bool,
    /// Whether the block qualifies as a timer block (see the timer chain
    /// notes in this module's documentation).
    is_timer: bool,
    /// The total number of *executed* blocks in its past (not including self)
    past_num_blocks: u64,
    /// Whether the block was marked adaptive by the GHAST rule.
    adaptive: bool,

    /// The genesis arena index of the era that `self` is in.
    ///
    /// It is `NULL` if `self` is not in the subtree of `cur_era_genesis`.
    era_block: usize,
    /// Arena indices of blocks whose parent edge points at this block.
    children: Vec<usize>,
    /// Arena indices of blocks whose reference edges point at this block.
    referrers: Vec<usize>,
    /// Arena indices of blocks this block references.
    referees: Vec<usize>,
    /// data contains all extra information of a block that will change as the
    /// consensus graph state evolves (e.g., pivot chain changes). Unlike the
    /// above fields, this information will only be available after the
    /// block is *preactivated* (after calling preactivate_block()).
    pub data: ConsensusGraphNodeData,
}
585
586impl ConsensusGraphNode {
587    pub fn past_num_blocks(&self) -> u64 { self.past_num_blocks }
588
589    pub fn adaptive(&self) -> bool { self.adaptive }
590
591    pub fn pending(&self) -> bool { self.data.pending }
592
593    pub fn partial_invalid(&self) -> bool { self.data.partial_invalid }
594
595    pub fn era_block(&self) -> usize { self.era_block }
596}
597
598impl ConsensusGraphInner {
599    pub fn with_era_genesis(
600        pow_config: ProofOfWorkConfig, pow: Arc<PowComputer>,
601        pos_verifier: Arc<PosVerifier>, data_man: Arc<BlockDataManager>,
602        inner_conf: ConsensusInnerConfig, cur_era_genesis_block_hash: &H256,
603        cur_era_stable_block_hash: &H256,
604    ) -> Self {
605        let genesis_block_header = data_man
606            .block_header_by_hash(cur_era_genesis_block_hash)
607            .expect("genesis block header should exist here");
608        let cur_era_genesis_height = genesis_block_header.height();
609        let stable_block_header = data_man
610            .block_header_by_hash(cur_era_stable_block_hash)
611            .expect("stable genesis block header should exist here");
612        let cur_era_stable_height = stable_block_header.height();
613        let initial_difficulty = pow_config.initial_difficulty;
614        let mut inner = ConsensusGraphInner {
615            arena: Slab::new(),
616            hash_to_arena_indices: FastHashMap::new(),
617            pivot_chain: Vec::new(),
618            pivot_chain_metadata: Vec::new(),
619            timer_chain: Vec::new(),
620            timer_chain_accumulative_lca: Vec::new(),
621            terminal_hashes: Default::default(),
622            cur_era_genesis_block_arena_index: NULL,
623            cur_era_genesis_height,
624            cur_era_stable_height,
625            // Timer chain height is an internal number. We always start from
626            // zero.
627            cur_era_stable_block_hash: cur_era_stable_block_hash.clone(),
628            initial_stable_future: Some(BitSet::new()),
629            cur_era_genesis_timer_chain_height: 0,
630            best_timer_chain_difficulty: 0,
631            best_timer_chain_hash: Default::default(),
632            best_pos_pivot_decision: (
633                *cur_era_genesis_block_hash,
634                cur_era_genesis_height,
635            ),
636            weight_tree: SizeMinLinkCutTree::new(),
637            adaptive_tree: CaterpillarMinLinkCutTree::new(),
638            invalid_block_queue: BinaryHeap::new(),
639            pow_config,
640            pow,
641            current_difficulty: initial_difficulty.into(),
642            data_man: data_man.clone(),
643            pos_verifier,
644            inner_conf,
645            anticone_cache: AnticoneCache::new(),
646            pastset_cache: Default::default(),
647            sequence_number_of_block_entrance: 0,
648            best_terminals_lca_height_cache: Default::default(),
649            best_terminals_reorg_height: NULLU64,
650            has_timer_block_in_anticone_cache: Default::default(),
651            header_only: true,
652        };
653
654        // NOTE: Only genesis block will be first inserted into consensus graph
655        // and then into synchronization graph. All the other blocks will be
656        // inserted first into synchronization graph then consensus graph.
657        // For genesis block, its past weight is simply zero (default value).
658        let (genesis_arena_index, _) = inner.insert(&genesis_block_header);
659        if cur_era_genesis_block_hash == cur_era_stable_block_hash {
660            inner
661                .initial_stable_future
662                .as_mut()
663                .unwrap()
664                .add(genesis_arena_index as u32);
665        }
666        inner.arena[genesis_arena_index].data.blockset_cleared = false;
667        if genesis_block_header.height() == 0 {
668            inner.arena[genesis_arena_index].data.state_valid = Some(true);
669        }
670        inner.cur_era_genesis_block_arena_index = genesis_arena_index;
671        inner.arena[genesis_arena_index].data.activated = true;
672        let genesis_block_weight = genesis_block_header.difficulty().low_u128();
673        inner
674            .weight_tree
675            .make_tree(inner.cur_era_genesis_block_arena_index);
676        inner.weight_tree.path_apply(
677            inner.cur_era_genesis_block_arena_index,
678            genesis_block_weight as i128,
679        );
680        inner
681            .adaptive_tree
682            .make_tree(inner.cur_era_genesis_block_arena_index);
683        // The genesis node can be zero in adaptive_tree because it is never
684        // used!
685        inner
686            .adaptive_tree
687            .set(inner.cur_era_genesis_block_arena_index, 0);
688        inner.arena[inner.cur_era_genesis_block_arena_index]
689            .data
690            .epoch_number = cur_era_genesis_height;
691        let genesis_epoch_size = inner
692            .data_man
693            .executed_epoch_set_hashes_from_db(cur_era_genesis_height)
694            .expect("Genesis epoch set should be in data manager.")
695            .len();
696        inner.arena[inner.cur_era_genesis_block_arena_index].past_num_blocks =
697            inner
698                .data_man
699                .get_epoch_execution_context(cur_era_genesis_block_hash)
700                .expect("ExecutionContext for cur_era_genesis exists")
701                .start_block_number
702                + genesis_epoch_size as u64
703                - 1;
704        inner.arena[inner.cur_era_genesis_block_arena_index]
705            .data
706            .last_pivot_in_past = cur_era_genesis_height;
707        inner
708            .pivot_chain
709            .push(inner.cur_era_genesis_block_arena_index);
710        let mut last_pivot_in_past_blocks = HashSet::new();
711        last_pivot_in_past_blocks
712            .insert(inner.cur_era_genesis_block_arena_index);
713        inner.pivot_chain_metadata.push(ConsensusGraphPivotData {
714            last_pivot_in_past_blocks,
715            past_weight: genesis_block_weight as i128,
716        });
717        if inner.arena[inner.cur_era_genesis_block_arena_index].is_timer {
718            inner
719                .timer_chain
720                .push(inner.cur_era_genesis_block_arena_index);
721        }
722        inner.arena[inner.cur_era_genesis_block_arena_index]
723            .data
724            .ledger_view_timer_chain_height = 0;
725        inner.best_timer_chain_difficulty =
726            inner.get_timer_difficulty(inner.cur_era_genesis_block_arena_index);
727
728        inner
729            .anticone_cache
730            .update(inner.cur_era_genesis_block_arena_index, &BitSet::new());
731
732        inner
733    }
734
735    fn persist_epoch_set_hashes(&mut self, pivot_index: usize) {
736        let height = self.pivot_index_to_height(pivot_index);
737        let arena_index = self.pivot_chain[pivot_index];
738        let epoch_set_hashes = self
739            .get_ordered_executable_epoch_blocks(arena_index)
740            .iter()
741            .map(|arena_index| self.arena[*arena_index].hash)
742            .collect();
743        let skipped_set_hashes = self
744            .get_or_compute_skipped_epoch_blocks(arena_index)
745            .clone();
746        self.data_man
747            .insert_executed_epoch_set_hashes_to_db(height, &epoch_set_hashes);
748        self.data_man
749            .insert_skipped_epoch_set_hashes_to_db(height, &skipped_set_hashes);
750    }
751
752    #[inline]
753    pub fn current_era_genesis_seq_num(&self) -> u64 {
754        self.arena[self.cur_era_genesis_block_arena_index]
755            .data
756            .sequence_number
757    }
758
759    #[inline]
760    /// The caller should ensure that `height` is within the current
761    /// `self.pivot_chain` range. Otherwise the function may panic.
762    pub fn get_pivot_block_arena_index(&self, height: u64) -> usize {
763        let pivot_index = (height - self.cur_era_genesis_height) as usize;
764        assert!(pivot_index < self.pivot_chain.len());
765        self.pivot_chain[pivot_index]
766    }
767
768    #[inline]
769    pub fn get_pivot_height(&self) -> u64 {
770        self.cur_era_genesis_height + self.pivot_chain.len() as u64
771    }
772
773    #[inline]
774    pub fn height_to_pivot_index(&self, height: u64) -> usize {
775        (height - self.cur_era_genesis_height) as usize
776    }
777
778    #[inline]
779    pub fn pivot_index_to_height(&self, pivot_index: usize) -> u64 {
780        self.cur_era_genesis_height + pivot_index as u64
781    }
782
783    #[inline]
784    fn get_next_sequence_number(&mut self) -> u64 {
785        let sn = self.sequence_number_of_block_entrance;
786        self.sequence_number_of_block_entrance += 1;
787        sn
788    }
789
790    #[inline]
791    pub fn set_initial_sequence_number(&mut self, initial_sn: u64) {
792        self.arena[self.cur_era_genesis_block_arena_index]
793            .data
794            .sequence_number = initial_sn;
795        self.sequence_number_of_block_entrance = initial_sn + 1;
796    }
797
798    #[inline]
799    fn is_heavier(a: (i128, &H256), b: (i128, &H256)) -> bool {
800        (a.0 > b.0) || ((a.0 == b.0) && (*a.1 > *b.1))
801    }
802
803    #[inline]
804    fn ancestor_at(&self, me: usize, height: u64) -> usize {
805        let height_index = self.height_to_pivot_index(height);
806        self.weight_tree.ancestor_at(me, height_index)
807    }
808
809    #[inline]
810    /// for outside era block, consider the lca is NULL
811    fn lca(&self, me: usize, v: usize) -> usize {
812        if self.arena[v].era_block == NULL || self.arena[me].era_block == NULL {
813            return NULL;
814        }
815        self.weight_tree.lca(me, v)
816    }
817
818    #[inline]
819    fn get_era_genesis_height(&self, parent_height: u64) -> u64 {
820        parent_height / self.inner_conf.era_epoch_count
821            * self.inner_conf.era_epoch_count
822    }
823
824    #[inline]
825    pub fn get_cur_era_genesis_height(&self) -> u64 {
826        self.cur_era_genesis_height
827    }
828
829    #[inline]
830    fn get_era_genesis_block_with_parent(&self, parent: usize) -> usize {
831        if parent == NULL {
832            return 0;
833        }
834        let height = self.arena[parent].height;
835        let era_genesis_height = self.get_era_genesis_height(height);
836        trace!(
837            "height={} era_height={} era_genesis_height={}",
838            height,
839            era_genesis_height,
840            self.cur_era_genesis_height
841        );
842        self.ancestor_at(parent, era_genesis_height)
843    }
844
845    #[inline]
846    pub fn get_epoch_block_hashes(
847        &self, epoch_arena_index: usize,
848    ) -> Vec<H256> {
849        self.get_ordered_executable_epoch_blocks(epoch_arena_index)
850            .iter()
851            .map(|idx| self.arena[*idx].hash)
852            .collect()
853    }
854
855    #[inline]
856    fn get_epoch_start_block_number(&self, epoch_arena_index: usize) -> u64 {
857        let parent = self.arena[epoch_arena_index].parent;
858
859        return self.arena[parent].past_num_blocks + 1;
860    }
861
862    #[inline]
863    fn is_legacy_block(&self, index: usize) -> bool {
864        self.arena[index].era_block == NULL
865    }
866
    /// This function computes the epoch set of block *pivot* based on
    /// the past set of block *lca* which is the parental ancestor of
    /// *pivot*. The algorithm processes the blocks along the parental
    /// path from *lca* to *pivot* to produce the *visited* block set,
    /// and then compute the epoch block set of *pivot* based on the
    /// *pastset* and the *visited*. The following figure illustrates
    /// this process. B[lca] refers to the block *lca*, B[piv] refers
    /// to the block *pivot*, and B[par] refers to the parent of *pivot*
    ///
    /// I    ------------\-------------------\-------------------\
    /// I        lca      \                   \       epoch       \
    /// I    -- pastset -B[lca]-- visited --B[par]-- blockset --B[piv]
    /// I                 /                   /                   /
    /// I    ------------/-------------------/-------------------/
    fn compute_blockset_in_own_view_of_epoch_impl(
        &mut self, lca: usize, pivot: usize,
    ) {
        // The caller (`compute_blockset_in_own_view_of_epoch`) guarantees
        // that the past set of `lca` is already in the cache.
        let pastset = self.pastset_cache.get(lca).unwrap();
        // Collect the parental chain from `pivot` down to (excluding)
        // `lca`, then reverse so we process oldest ancestors first.
        let mut path_to_lca = Vec::new();
        let mut cur = pivot;
        while cur != lca {
            path_to_lca.push(cur);
            cur = self.arena[cur].parent;
        }
        path_to_lca.reverse();
        let mut visited = BitSet::new();
        for ancestor_arena_index in path_to_lca {
            visited.add(ancestor_arena_index as u32);
            if ancestor_arena_index == pivot
                || self.arena[ancestor_arena_index].data.blockset_cleared
            {
                // No cached blockset for this ancestor (or it is `pivot`
                // itself): BFS through referees/parents that are neither
                // in the past of `lca` nor already visited. Blocks found
                // while processing `pivot` itself form its epoch blockset.
                let mut queue = VecDeque::new();
                for referee in &self.arena[ancestor_arena_index].referees {
                    if !pastset.contains(*referee as u32)
                        && !visited.contains(*referee as u32)
                    {
                        visited.add(*referee as u32);
                        queue.push_back(*referee);
                    }
                }
                while let Some(index) = queue.pop_front() {
                    if ancestor_arena_index == pivot {
                        self.arena[pivot]
                            .data
                            .blockset_in_own_view_of_epoch
                            .push(index);
                    }
                    let parent = self.arena[index].parent;
                    if parent != NULL
                        && !pastset.contains(parent as u32)
                        && !visited.contains(parent as u32)
                    {
                        visited.add(parent as u32);
                        queue.push_back(parent);
                    }
                    for referee in &self.arena[index].referees {
                        if !pastset.contains(*referee as u32)
                            && !visited.contains(*referee as u32)
                        {
                            visited.add(*referee as u32);
                            queue.push_back(*referee);
                        }
                    }
                }
            } else {
                // Cached blockset is valid: reuse it instead of a BFS.
                for index in &self.arena[ancestor_arena_index]
                    .data
                    .blockset_in_own_view_of_epoch
                {
                    visited.add(*index as u32);
                }
            }
        }
    }
941
    /// This function computes the epoch block set under the view of
    /// block *pivot*. It also computes the ordered set of executable
    /// blocks in the epoch. This set has a bound specified by
    /// EPOCH_EXECUTED_BLOCK_BOUND. To compute this set, it first
    /// filters out the blocks in the raw epoch set but not in the
    /// same era with *pivot*. It then topologically sorts the retained
    /// blocks and preserves at most the last EPOCH_EXECUTED_BLOCK_BOUND
    /// blocks. All the filtered-out blocks are added into
    /// *skipped_epoch_blocks*.
    fn compute_blockset_in_own_view_of_epoch(&mut self, pivot: usize) {
        // Already computed and not cleared: nothing to do.
        if !self.arena[pivot].data.blockset_cleared {
            return;
        }
        // TODO: consider the speed for recovery from db
        let parent = self.arena[pivot].parent;
        if parent != NULL {
            let last = *self.pivot_chain.last().unwrap();
            let lca = self.lca(last, parent);
            assert!(lca != NULL);
            // Ensure the past set of `lca` is cached before the impl
            // function consumes it (it unwraps the cache entry).
            if self.pastset_cache.get_and_update_cache(lca).is_none() {
                let pastset = self.compute_pastset_brutal(lca);
                self.pastset_cache.update(lca, pastset);
            }
            self.compute_blockset_in_own_view_of_epoch_impl(lca, pivot);
        }

        // Split the raw epoch blockset into same-era blocks (candidates
        // for execution) and different-era blocks (always skipped).
        let mut filtered_blockset = HashSet::new();
        let mut different_era_blocks = Vec::new();
        for idx in &self.arena[pivot].data.blockset_in_own_view_of_epoch {
            if self.is_same_era(*idx, pivot) {
                filtered_blockset.insert(*idx);
            } else {
                different_era_blocks.push(*idx);
            }
        }

        // Topologically order the executable candidates (hash as the
        // order indicator); the pivot block itself always executes last.
        let mut ordered_executable_epoch_blocks = self
            .topological_sort_with_order_indicator(filtered_blockset, |i| {
                self.arena[i].hash
            });
        ordered_executable_epoch_blocks.push(pivot);
        // Enforce the execution bound: keep only the last
        // EPOCH_EXECUTED_BLOCK_BOUND blocks; everything cut off joins the
        // different-era blocks in the skipped set.
        let skipped_epoch_block_indices = if ordered_executable_epoch_blocks
            .len()
            > EPOCH_EXECUTED_BLOCK_BOUND
        {
            let cut_off = ordered_executable_epoch_blocks.len()
                - EPOCH_EXECUTED_BLOCK_BOUND;
            let mut skipped_epoch_block_indices =
                ordered_executable_epoch_blocks;
            ordered_executable_epoch_blocks =
                skipped_epoch_block_indices.split_off(cut_off);
            skipped_epoch_block_indices.append(&mut different_era_blocks);
            skipped_epoch_block_indices
        } else {
            different_era_blocks
        };

        self.arena[pivot].data.skipped_epoch_blocks =
            skipped_epoch_block_indices
                .into_iter()
                .map(|i| self.arena[i].hash)
                .collect();
        self.arena[pivot].data.ordered_executable_epoch_blocks =
            ordered_executable_epoch_blocks;
        // Mark the cached blockset data as valid.
        self.arena[pivot].data.blockset_cleared = false;
    }
1008
1009    #[inline]
1010    fn exchange_or_compute_blockset_in_own_view_of_epoch(
1011        &mut self, index: usize, blockset_opt: Option<Vec<usize>>,
1012    ) -> Vec<usize> {
1013        if let Some(blockset) = blockset_opt {
1014            mem::replace(
1015                &mut self.arena[index].data.blockset_in_own_view_of_epoch,
1016                blockset,
1017            )
1018        } else {
1019            if self.arena[index].data.blockset_cleared {
1020                self.compute_blockset_in_own_view_of_epoch(index);
1021            }
1022            mem::replace(
1023                &mut self.arena[index].data.blockset_in_own_view_of_epoch,
1024                Default::default(),
1025            )
1026        }
1027    }
1028
1029    #[inline]
1030    pub fn get_ordered_executable_epoch_blocks(
1031        &self, index: usize,
1032    ) -> &Vec<usize> {
1033        &self.arena[index].data.ordered_executable_epoch_blocks
1034    }
1035
1036    #[inline]
1037    pub fn get_or_compute_skipped_epoch_blocks(
1038        &mut self, index: usize,
1039    ) -> &Vec<H256> {
1040        if self.arena[index].data.blockset_cleared {
1041            self.compute_blockset_in_own_view_of_epoch(index);
1042        }
1043        &self.arena[index].data.skipped_epoch_blocks
1044    }
1045
1046    #[inline]
1047    pub fn get_skipped_epoch_blocks(&self, index: usize) -> Option<&Vec<H256>> {
1048        if self.arena[index].data.blockset_cleared {
1049            None
1050        } else {
1051            Some(&self.arena[index].data.skipped_epoch_blocks)
1052        }
1053    }
1054
1055    fn get_blame(&self, arena_index: usize) -> u32 {
1056        let block_header = self
1057            .data_man
1058            .block_header_by_hash(&self.arena[arena_index].hash)
1059            .unwrap();
1060        block_header.blame()
1061    }
1062
1063    fn get_blame_with_pivot_index(&self, pivot_index: usize) -> u32 {
1064        let arena_index = self.pivot_chain[pivot_index];
1065        self.get_blame(arena_index)
1066    }
1067
1068    pub fn find_first_index_with_correct_state_of(
1069        &self, pivot_index: usize, blame_bound: Option<u32>,
1070        min_vote_count: usize,
1071    ) -> Option<usize> {
1072        // this is the earliest block we need to consider; blocks before `from`
1073        // cannot have any information about the state root of `pivot_index`
1074        let from = pivot_index + DEFERRED_STATE_EPOCH_COUNT as usize;
1075
1076        self.find_first_trusted_starting_from(from, blame_bound, min_vote_count)
1077    }
1078
1079    pub fn find_first_trusted_starting_from(
1080        &self, from: usize, blame_bound: Option<u32>, min_vote_count: usize,
1081    ) -> Option<usize> {
1082        let mut trusted_index = match self
1083            .find_first_with_trusted_blame_starting_from(
1084                from,
1085                blame_bound,
1086                min_vote_count,
1087            ) {
1088            None => return None,
1089            Some(index) => index,
1090        };
1091
1092        // iteratively search for the smallest trusted index greater than
1093        // or equal to `from`
1094        while trusted_index != from {
1095            let blame = self.get_blame_with_pivot_index(trusted_index);
1096            let prev_trusted = trusted_index - blame as usize - 1;
1097
1098            if prev_trusted < from {
1099                break;
1100            }
1101
1102            trusted_index = prev_trusted;
1103        }
1104
1105        Some(trusted_index)
1106    }
1107
1108    fn find_first_with_trusted_blame_starting_from(
1109        &self, pivot_index: usize, blame_bound: Option<u32>,
1110        min_vote_count: usize,
1111    ) -> Option<usize> {
1112        trace!(
1113            "find_first_with_trusted_blame_starting_from pivot_index={:?}",
1114            pivot_index
1115        );
1116        let mut cur_pivot_index = pivot_index;
1117        while cur_pivot_index < self.pivot_chain.len() {
1118            let arena_index = self.pivot_chain[cur_pivot_index];
1119            let blame_ratio = self.compute_blame_ratio(
1120                arena_index,
1121                blame_bound,
1122                min_vote_count,
1123            );
1124            trace!(
1125                "blame_ratio for {:?} is {}",
1126                self.arena[arena_index].hash,
1127                blame_ratio
1128            );
1129            if blame_ratio < MAX_BLAME_RATIO_FOR_TRUST {
1130                return Some(cur_pivot_index);
1131            }
1132            cur_pivot_index += 1;
1133        }
1134
1135        None
1136    }
1137
1138    // Compute the ratio of blames that the block gets
1139    fn compute_blame_ratio(
1140        &self, arena_index: usize, blame_bound: Option<u32>,
1141        min_vote_count: usize,
1142    ) -> f64 {
1143        let blame_bound = if let Some(bound) = blame_bound {
1144            bound
1145        } else {
1146            u32::max_value()
1147        };
1148        let mut total_blame_count = 0 as u64;
1149        let mut queue = VecDeque::new();
1150        let mut votes = HashMap::new();
1151        queue.push_back((arena_index, 0 as u32));
1152        while let Some((index, step)) = queue.pop_front() {
1153            if index == arena_index {
1154                votes.insert(index, true);
1155            } else {
1156                let mut my_blame = self.get_blame(index);
1157                let mut parent = index;
1158                loop {
1159                    parent = self.arena[parent].parent;
1160                    if my_blame == 0 {
1161                        let parent_vote = *votes.get(&parent).unwrap();
1162                        votes.insert(index, parent_vote);
1163                        if !parent_vote {
1164                            total_blame_count += 1;
1165                        }
1166                        break;
1167                    } else if parent == arena_index {
1168                        votes.insert(index, false);
1169                        total_blame_count += 1;
1170                        break;
1171                    }
1172                    my_blame -= 1;
1173                }
1174            }
1175
1176            if step == blame_bound {
1177                continue;
1178            }
1179
1180            let next_step = step + 1;
1181            for child in &self.arena[index].children {
1182                queue.push_back((*child, next_step));
1183            }
1184        }
1185
1186        let total_vote_count = votes.len();
1187
1188        if total_vote_count < min_vote_count {
1189            // if we do not have enough votes, we will signal 100% blame
1190            return 1.0;
1191        }
1192
1193        // TODO(thegaram): compute `total_vote_count` on non-past set,
1194        // not on future
1195        total_blame_count as f64 / total_vote_count as f64
1196    }
1197
    /// Decide whether a block mined on top of `parent_arena_index` with
    /// the given referees, difficulty, and PoS reference would be
    /// adaptive. Builds the would-be block's anticone and anticone
    /// barrier, then delegates the final decision to
    /// `adaptive_weight_impl`.
    pub fn check_mining_adaptive_block(
        &mut self, parent_arena_index: usize, referee_indices: Vec<usize>,
        difficulty: U256, pos_reference: Option<PosBlockId>,
    ) -> bool {
        // We first compute anticone barrier for newly mined block
        let parent_anticone_opt = self.anticone_cache.get(parent_arena_index);
        let mut anticone;
        if parent_anticone_opt.is_none() {
            // Cache miss: recompute the parent's anticone from scratch.
            anticone = consensus_new_block_handler::ConsensusNewBlockHandler::compute_anticone_bruteforce(
                self, parent_arena_index,
            );
            for i in self.compute_future_bitset(parent_arena_index) {
                anticone.add(i);
            }
        } else {
            // Cache hit: the parent's future plus its cached anticone.
            anticone = self.compute_future_bitset(parent_arena_index);
            for index in parent_anticone_opt.unwrap() {
                anticone.add(*index as u32);
            }
        }
        // BFS backwards from the referees to find the part of the
        // parent's anticone (and legacy blocks) that the new block would
        // actually have in its past.
        let mut my_past = BitSet::new();
        let mut queue: VecDeque<usize> = VecDeque::new();
        for index in &referee_indices {
            queue.push_back(*index);
        }
        while let Some(index) = queue.pop_front() {
            if my_past.contains(index as u32) {
                continue;
            }
            my_past.add(index as u32);
            let idx_parent = self.arena[index].parent;
            if idx_parent != NULL {
                if anticone.contains(idx_parent as u32)
                    || self.arena[idx_parent].era_block == NULL
                {
                    queue.push_back(idx_parent);
                }
            }
            for referee in &self.arena[index].referees {
                if anticone.contains(*referee as u32)
                    || self.arena[*referee].era_block == NULL
                {
                    queue.push_back(*referee);
                }
            }
        }
        // Whatever is in the new block's past is not in its anticone.
        for index in my_past.drain() {
            anticone.remove(index);
        }

        // The anticone barrier: non-legacy anticone blocks whose parent
        // is outside the anticone (i.e. the anticone's "roots").
        let mut anticone_barrier = BitSet::new();
        for index in (&anticone).iter() {
            let parent = self.arena[index as usize].parent as u32;
            if self.arena[index as usize].era_block != NULL
                && !anticone.contains(parent)
            {
                anticone_barrier.add(index);
            }
        }

        // Timer chain view the new block would have.
        let timer_chain_tuple = self.compute_timer_chain_tuple(
            parent_arena_index,
            &referee_indices,
            Some(&anticone),
        );

        self.adaptive_weight_impl(
            parent_arena_index,
            &anticone_barrier,
            None,
            &timer_chain_tuple,
            i128::try_from(difficulty.low_u128()).unwrap(),
            pos_reference,
        )
    }
1273
    /// This function computes the subtree weight for each node
    /// in the subtree of the past view of *me* by conducting
    /// DFS from the cur_era_genesis. The subtree to process
    /// is illustrated as follows:
    ///                     cur_era_genesis
    /// I                        / \
    /// I                      /     \
    /// I                    /  the    \
    /// I                  /   subtree   \
    /// I                /   to process    \
    /// I                \__             __/
    /// I                 a \__       __/ a
    /// I                    a \_____/ a
    /// I                      a me a
    ///
    /// *a* refers to the elements in anticone barrier who are
    /// anticone of *me* while whose parents are in the past set
    /// of *me*. The subtree to process does not include *a* and
    /// *me*.
    fn compute_subtree_weights(
        &self, me: usize, anticone_barrier: &BitSet,
    ) -> Vec<i128> {
        let mut subtree_weight = Vec::new();
        // Size the result by the slab capacity so every live arena index
        // is a valid position.
        let n = self.arena.capacity();
        subtree_weight.resize_with(n, Default::default);
        // Explicit two-phase DFS: stage 0 (pre-order) re-pushes the node
        // with stage 1 and expands its children; stage 1 (post-order)
        // adds the node's own weight and folds its subtree sum into the
        // parent.
        let mut stack = Vec::new();
        stack.push((0, self.cur_era_genesis_block_arena_index));
        while let Some((stage, index)) = stack.pop() {
            if stage == 0 {
                stack.push((1, index));
                subtree_weight[index] = 0;
                for child in &self.arena[index].children {
                    // Skip `me` and the anticone barrier blocks: they are
                    // excluded from the subtree to process (see figure).
                    if !anticone_barrier.contains(*child as u32) && *child != me
                    {
                        stack.push((0, *child));
                    }
                }
            } else {
                let weight = self.block_weight(index);
                subtree_weight[index] += weight;
                let parent = self.arena[index].parent;
                if parent != NULL {
                    subtree_weight[parent] += subtree_weight[index];
                }
            }
        }
        subtree_weight
    }
1322
1323    fn get_best_timer_tick(
1324        &self,
1325        timer_chain_tuple: &(u64, HashMap<usize, u64>, Vec<usize>, Vec<usize>),
1326    ) -> u64 {
1327        let (fork_at, _, _, c) = timer_chain_tuple;
1328        *fork_at + c.len() as u64
1329    }
1330
1331    fn get_timer_tick(
1332        &self, me: usize,
1333        timer_chain_tuple: &(u64, HashMap<usize, u64>, Vec<usize>, Vec<usize>),
1334    ) -> u64 {
1335        let (fork_at, m, _, _) = timer_chain_tuple;
1336        if let Some(t) = m.get(&me) {
1337            return *t;
1338        } else {
1339            assert!(
1340                self.arena[me].data.ledger_view_timer_chain_height <= *fork_at
1341            );
1342        }
1343        return self.arena[me].data.ledger_view_timer_chain_height;
1344    }
1345
    /// Brute-force variant of the adaptive weight check using the
    /// precomputed `subtree_weight` vector: walk the parental chain from
    /// `parent_0` down to the force-confirm block and report whether any
    /// ancestor triggers the adaptivity condition.
    fn adaptive_weight_impl_brutal(
        &self, parent_0: usize, subtree_weight: &Vec<i128>,
        timer_chain_tuple: &(u64, HashMap<usize, u64>, Vec<usize>, Vec<usize>),
        force_confirm: usize, difficulty: i128,
    ) -> bool {
        let mut parent = parent_0;

        let force_confirm_height = self.arena[force_confirm].height;
        // Timer tick the new block itself would observe.
        let timer_me = self.get_best_timer_tick(timer_chain_tuple);

        // Adaptivity threshold scaled by the block difficulty.
        let adjusted_beta =
            (self.inner_conf.adaptive_weight_beta as i128) * difficulty;

        let mut adaptive = false;
        while self.arena[parent].height != force_confirm_height {
            let grandparent = self.arena[parent].parent;
            let timer_parent = self.get_timer_tick(parent, timer_chain_tuple);
            assert!(timer_me >= timer_parent);
            // Only ancestors at least `timer_chain_beta` timer ticks
            // older than the new block are subject to the test.
            if timer_me - timer_parent >= self.inner_conf.timer_chain_beta {
                // Balance term: 2*subtree(parent) - subtree(grandparent)
                // + weight(grandparent). Falling below the threshold
                // means the parent's subtree is not dominant enough, so
                // the new block must be adaptive.
                let w = 2 * subtree_weight[parent]
                    - subtree_weight[grandparent]
                    + self.block_weight(grandparent);
                if w < adjusted_beta {
                    adaptive = true;
                    break;
                }
            }
            parent = grandparent;
        }

        adaptive
    }
1378
    /// Decide whether a block with the given parent, anticone barrier and
    /// timer chain view is adaptive. Uses link-cut tree path queries, or
    /// delegates to `adaptive_weight_impl_brutal` when precomputed
    /// subtree weights (`weight_tuple`) are supplied.
    fn adaptive_weight_impl(
        &mut self, parent_0: usize, anticone_barrier: &BitSet,
        weight_tuple: Option<&Vec<i128>>,
        timer_chain_tuple: &(u64, HashMap<usize, u64>, Vec<usize>, Vec<usize>),
        difficulty: i128, pos_reference: Option<PosBlockId>,
    ) -> bool {
        let mut parent = parent_0;
        let force_confirm =
            self.compute_block_force_confirm(timer_chain_tuple, pos_reference);
        let force_confirm_height = self.arena[force_confirm].height;
        // This may happen if we are forced to generate at a position choosing
        // incorrect parent. We should return false here.
        if self.arena[parent].height < force_confirm_height
            || self.ancestor_at(parent, force_confirm_height) != force_confirm
        {
            return false;
        }
        if let Some(subtree_weight) = weight_tuple {
            return self.adaptive_weight_impl_brutal(
                parent_0,
                subtree_weight,
                timer_chain_tuple,
                force_confirm,
                difficulty,
            );
        }

        // Temporarily subtract the anticone-barrier block weights from
        // both trees so the path queries below reflect the past view of
        // the new block; the deltas are re-applied at the end.
        let mut weight_delta = HashMap::new();

        for index in anticone_barrier.iter() {
            assert!(!self.is_legacy_block(index as usize));
            weight_delta
                .insert(index as usize, self.weight_tree.get(index as usize));
        }

        for (index, delta) in &weight_delta {
            self.weight_tree.path_apply(*index, -*delta);
            let parent = self.arena[*index].parent;
            assert!(parent != NULL);
            self.adaptive_tree.caterpillar_apply(parent, *delta);
            self.adaptive_tree.path_apply(*index, -*delta * 2);
        }

        let timer_me = self.get_best_timer_tick(timer_chain_tuple);
        // NOTE(review): despite the name, this threshold is the
        // *timer-chain* beta used as a tick-age bound (matching the
        // brutal variant), not the difficulty-scaled adaptive-weight
        // beta used further below.
        let adjusted_beta = self.inner_conf.timer_chain_beta;

        // Binary search for the largest height `best` whose ancestor's
        // timer tick is at least `adjusted_beta` ticks behind ours
        // (ticks are monotone along the parental chain).
        let mut high = self.arena[parent].height;
        let mut low = force_confirm_height + 1;
        // [low, high]
        let mut best = force_confirm_height;

        while low <= high {
            let mid = (low + high) / 2;
            let p = self.ancestor_at(parent, mid);
            let timer_mid = self.get_timer_tick(p, timer_chain_tuple);
            assert!(timer_me >= timer_mid);
            if timer_me - timer_mid >= adjusted_beta {
                best = mid;
                low = mid + 1;
            } else {
                high = mid - 1;
            }
        }

        let adaptive = if best != force_confirm_height {
            parent = self.ancestor_at(parent, best);

            // Minimum balance value along the path to the force-confirm
            // block; the block is adaptive when it falls below
            // beta * difficulty.
            let a = self
                .adaptive_tree
                .path_aggregate_chop(parent, force_confirm);
            let b = self.inner_conf.adaptive_weight_beta as i128 * difficulty;

            if a < b {
                debug!("block is adaptive: {:?} < {:?}!", a, b);
            } else {
                debug!("block is not adaptive: {:?} >= {:?}!", a, b);
            }
            a < b
        } else {
            debug!(
                "block is not adaptive: too close to genesis, timer tick {:?}",
                timer_me
            );
            false
        };

        // Undo the temporary weight removal so the trees are restored.
        for (index, delta) in &weight_delta {
            self.weight_tree.path_apply(*index, *delta);
            let parent = self.arena[*index].parent;
            self.adaptive_tree.caterpillar_apply(parent, -*delta);
            self.adaptive_tree.path_apply(*index, *delta * 2)
        }

        adaptive
    }
1474
1475    /// Determine whether we should generate adaptive blocks or not. It is used
1476    /// both for block generations and for block validations.
1477    fn adaptive_weight(
1478        &mut self, me: usize, anticone_barrier: &BitSet,
1479        weight_tuple: Option<&Vec<i128>>,
1480        timer_chain_tuple: &(u64, HashMap<usize, u64>, Vec<usize>, Vec<usize>),
1481    ) -> bool {
1482        let parent = self.arena[me].parent;
1483        assert!(parent != NULL);
1484
1485        let difficulty =
1486            i128::try_from(self.arena[me].difficulty.low_u128()).unwrap();
1487
1488        self.adaptive_weight_impl(
1489            parent,
1490            anticone_barrier,
1491            weight_tuple,
1492            timer_chain_tuple,
1493            difficulty,
1494            self.data_man
1495                .pos_reference_by_hash(&self.arena[me].hash)
1496                .expect("header exist"),
1497        )
1498    }
1499
1500    #[inline]
1501    fn is_same_era(&self, me: usize, pivot: usize) -> bool {
1502        self.arena[me].era_block == self.arena[pivot].era_block
1503    }
1504
    /// Compute the past set (all blocks reachable via parent/referee edges)
    /// of `me` from scratch, reusing any cached past set of an ancestor on
    /// the parent chain as a starting point.
    fn compute_pastset_brutal(&mut self, me: usize) -> BitSet {
        // Walk up the parent chain until we reach a block with a cached
        // past set (or fall off the chain), recording the uncached blocks.
        let mut path = Vec::new();
        let mut cur = me;
        while cur != NULL && self.pastset_cache.get(cur).is_none() {
            path.push(cur);
            cur = self.arena[cur].parent;
        }
        path.reverse();
        // Seed the result with the cached ancestor past set, or empty.
        let mut result = self
            .pastset_cache
            .get(cur)
            .unwrap_or(&BitSet::new())
            .clone();
        // Extend the set one parent-chain step at a time, oldest first.
        for ancestor_arena_index in path {
            result.add(ancestor_arena_index as u32);
            if self.arena[ancestor_arena_index].data.blockset_cleared {
                // The epoch block set was cleared; rediscover the newly
                // covered blocks with a BFS over parent/referee edges,
                // bounded by what is already in `result`.
                let mut queue = VecDeque::new();
                queue.push_back(ancestor_arena_index);
                while let Some(index) = queue.pop_front() {
                    let parent = self.arena[index].parent;
                    if parent != NULL && !result.contains(parent as u32) {
                        result.add(parent as u32);
                        queue.push_back(parent);
                    }
                    for referee in &self.arena[index].referees {
                        if !result.contains(*referee as u32) {
                            result.add(*referee as u32);
                            queue.push_back(*referee);
                        }
                    }
                }
            } else {
                // The epoch block set is available; temporarily take it
                // (exchange with None), merge it in, then hand it back so
                // the cached state is unchanged.
                let blockset = self
                    .exchange_or_compute_blockset_in_own_view_of_epoch(
                        ancestor_arena_index,
                        None,
                    );
                for index in &blockset {
                    result.add(*index as u32);
                }
                self.exchange_or_compute_blockset_in_own_view_of_epoch(
                    ancestor_arena_index,
                    Some(blockset),
                );
            }
        }
        result
    }
1553
1554    /// All referees should be in the anticone of each other.
1555    /// If there is a path from a referee `A` to another referee `B` by
1556    /// following edges towards parent/referees, we will treat `A` as a
1557    /// valid referee and ignore `B` because `B` is not the graph terminal.
1558    /// This should only happen if the miner generating this
1559    /// block is malicious. TODO: Explain why not `partial_invalid`?
1560    fn insert_referee_if_not_duplicate(
1561        &self, referees: &mut Vec<usize>, me: usize,
1562    ) {
1563        for i in 0..referees.len() {
1564            let x = referees[i];
1565            let lca = self.lca(x, me);
1566            if lca == me {
1567                // `me` is an ancestor of `x`
1568                return;
1569            } else if lca == x {
1570                // `x` is an ancestor of `me`
1571                referees[i] = me;
1572                return;
1573            }
1574        }
1575        referees.push(me)
1576    }
1577
1578    /// Try to insert an outside era block, return it's sequence number. If both
1579    /// it's parent and referees are empty, we will not insert it into
1580    /// `arena`.
1581    pub fn insert_out_era_block(
1582        &mut self, block_header: &BlockHeader, partial_invalid: bool,
1583    ) -> (u64, usize) {
1584        let sn = self.get_next_sequence_number();
1585        let hash = block_header.hash();
1586        // we make cur_era_genesis be it's parent if it doesn‘t has one.
1587        let parent = self
1588            .hash_to_arena_indices
1589            .get(block_header.parent_hash())
1590            .cloned()
1591            .unwrap_or(NULL);
1592
1593        let mut referees: Vec<usize> = Vec::new();
1594        for hash in block_header.referee_hashes().iter() {
1595            if let Some(x) = self.hash_to_arena_indices.get(hash) {
1596                self.insert_referee_if_not_duplicate(&mut referees, *x);
1597            }
1598        }
1599
1600        if parent == NULL && referees.is_empty() {
1601            return (sn, NULL);
1602        }
1603
1604        let mut inactive_dependency_cnt = 0;
1605        for referee in &referees {
1606            if !self.arena[*referee].data.activated {
1607                inactive_dependency_cnt += 1;
1608            }
1609        }
1610
1611        // actually, we only need these fields: `parent`, `referees`,
1612        // `children`, `referrers`, `era_block`
1613        let index = self.arena.insert(ConsensusGraphNode {
1614            hash,
1615            height: block_header.height(),
1616            is_heavy: true,
1617            difficulty: *block_header.difficulty(),
1618            past_num_blocks: 0,
1619            is_timer: false,
1620            // Block header contains an adaptive field, we will verify with our
1621            // own computation
1622            adaptive: block_header.adaptive(),
1623            parent,
1624            era_block: NULL,
1625            children: Vec::new(),
1626            referees,
1627            referrers: Vec::new(),
1628            data: ConsensusGraphNodeData::new(
1629                NULLU64,
1630                sn,
1631                inactive_dependency_cnt,
1632            ),
1633        });
1634        self.arena[index].data.pending = true;
1635        self.arena[index].data.activated = false;
1636        self.arena[index].data.partial_invalid = partial_invalid;
1637        self.hash_to_arena_indices.insert(hash, index);
1638
1639        let referees = self.arena[index].referees.clone();
1640        for referee in referees {
1641            self.arena[referee].referrers.push(index);
1642        }
1643        if parent != NULL {
1644            self.arena[parent].children.push(index);
1645        }
1646
1647        self.weight_tree.make_tree(index);
1648        self.adaptive_tree.make_tree(index);
1649
1650        (sn, index)
1651    }
1652
1653    fn get_timer_difficulty(&self, me: usize) -> i128 {
1654        if self.arena[me].is_timer && !self.arena[me].data.partial_invalid {
1655            i128::try_from(self.arena[me].difficulty.low_u128()).unwrap()
1656        } else {
1657            0
1658        }
1659    }
1660
1661    fn compute_global_force_confirm(&self) -> usize {
1662        let timer_chain_choice =
1663            if let Some(x) = self.timer_chain_accumulative_lca.last() {
1664                *x
1665            } else {
1666                self.cur_era_genesis_block_arena_index
1667            };
1668        self.compute_force_confirm(
1669            timer_chain_choice,
1670            &self.best_pos_pivot_decision,
1671        )
1672    }
1673
    /// Compute the force-confirmed block as seen from a new block's own
    /// view of the timer chain (`timer_chain_tuple`), optionally combined
    /// with the pivot decision of its PoS reference.
    fn compute_block_force_confirm(
        &self,
        timer_chain_tuple: &(u64, HashMap<usize, u64>, Vec<usize>, Vec<usize>),
        pos_reference: Option<PosBlockId>,
    ) -> usize {
        let (fork_at, _, extra_lca, tmp_chain) = timer_chain_tuple;
        // Timer chain length in this block's view: the shared part up to
        // the fork point plus the block-specific suffix in `tmp_chain`.
        let fork_end_index =
            (*fork_at - self.cur_era_genesis_timer_chain_height) as usize
                + tmp_chain.len();
        let acc_lca_ref = extra_lca;
        // Prefer the view-specific accumulative LCA; otherwise reuse the
        // global one at a depth of `timer_chain_beta` ticks; otherwise the
        // timer chain is too short and we fall back to the era genesis.
        let timer_chain_choice = if let Some(x) = acc_lca_ref.last() {
            *x
        } else if fork_end_index > self.inner_conf.timer_chain_beta as usize {
            self.timer_chain_accumulative_lca
                [fork_end_index - self.inner_conf.timer_chain_beta as usize - 1]
        } else {
            self.cur_era_genesis_block_arena_index
        };
        match pos_reference {
            // Without a pos reference the timer chain decides alone.
            None => timer_chain_choice,
            Some(pos_reference) => {
                // The caller guarantees the pos reference (and its pivot
                // decision) has been validated, hence the `expect`s.
                let pos_pivot_decision = self
                    .pos_verifier
                    .get_pivot_decision(&pos_reference)
                    .expect("pos_reference checked");
                self.compute_force_confirm(
                    timer_chain_choice,
                    &(
                        pos_pivot_decision,
                        self.data_man
                            .block_height_by_hash(&pos_pivot_decision)
                            .expect("pos pivot decision checked"),
                    ),
                )
            }
        }
    }
1711
1712    fn compute_force_confirm(
1713        &self, timer_chain_choice: usize, pos_pivot_decision: &(H256, u64),
1714    ) -> usize {
1715        if let Some(arena_index) =
1716            self.hash_to_arena_indices.get(&pos_pivot_decision.0)
1717        {
1718            if self.arena[timer_chain_choice].height > pos_pivot_decision.1
1719                && self.lca(timer_chain_choice, *arena_index) == *arena_index
1720            {
1721                // timer chain force confirm is newer and following pos
1722                // reference.
1723                timer_chain_choice
1724            } else {
1725                // timer chain force confirm should be overwritten by a conflict
1726                // pos reference.
1727                *arena_index
1728            }
1729        } else {
1730            // If pos_pivot_decision is before checkpoint, we just think it's on
1731            // the pivot chain.
1732            timer_chain_choice
1733        }
1734    }
1735
    /// Insert `block_header` into the consensus arena and return the new
    /// arena index together with the total number of indexed blocks.
    ///
    /// The parent must already be in the arena unless this block is the
    /// current consensus era genesis.
    fn insert(&mut self, block_header: &BlockHeader) -> (usize, usize) {
        let hash = block_header.hash();

        // Classify the block by pow quality: heavy and timer blocks need a
        // quality a configured ratio above the target difficulty.
        let pow_quality =
            U512::from(VerificationConfig::get_or_compute_header_pow_quality(
                &self.pow,
                block_header,
            ));
        let is_heavy = pow_quality
            >= U512::from(self.inner_conf.heavy_block_difficulty_ratio)
                * U512::from(block_header.difficulty());
        let is_timer = pow_quality
            >= U512::from(self.inner_conf.timer_chain_block_difficulty_ratio)
                * U512::from(block_header.difficulty());

        // Only the era genesis is allowed to have no in-graph parent.
        let parent =
            if hash != self.data_man.get_cur_consensus_era_genesis_hash() {
                self.hash_to_arena_indices
                    .get(block_header.parent_hash())
                    .cloned()
                    .unwrap()
            } else {
                NULL
            };

        // Collect in-graph referees, dropping duplicates and referees that
        // are ancestors of other referees.
        let mut referees: Vec<usize> = Vec::new();
        for hash in block_header.referee_hashes().iter() {
            if let Some(x) = self.hash_to_arena_indices.get(hash) {
                self.insert_referee_if_not_duplicate(&mut referees, *x);
            }
        }

        // Count direct dependencies (parent and referees) that are not yet
        // activated; the block stays inactive until this reaches zero.
        let mut inactive_dependency_cnt =
            if parent != NULL && !self.arena[parent].data.activated {
                1
            } else {
                0
            };
        for referee in &referees {
            if !self.arena[*referee].data.activated {
                inactive_dependency_cnt += 1;
            }
        }

        let my_height = block_header.height();
        let sn = self.get_next_sequence_number();
        let index = self.arena.insert(ConsensusGraphNode {
            hash,
            height: my_height,
            is_heavy,
            difficulty: *block_header.difficulty(),
            past_num_blocks: 0,
            is_timer,
            // Block header contains an adaptive field, we will verify with our
            // own computation
            adaptive: block_header.adaptive(),
            parent,
            era_block: self.get_era_genesis_block_with_parent(parent),
            children: Vec::new(),
            referees,
            referrers: Vec::new(),
            data: ConsensusGraphNodeData::new(
                NULLU64,
                sn,
                inactive_dependency_cnt,
            ),
        });
        self.hash_to_arena_indices.insert(hash, index);

        // Wire up the reverse edges (children / referrers).
        if parent != NULL {
            self.arena[parent].children.push(index);
        }
        let referees = self.arena[index].referees.clone();
        for referee in referees {
            self.arena[referee].referrers.push(index);
        }

        self.compute_blockset_in_own_view_of_epoch(index);
        let executed_epoch_len =
            self.get_ordered_executable_epoch_blocks(index).len();

        // `past_num_blocks` and the execution context are only derivable
        // when a parent exists (i.e., for non-genesis blocks).
        if parent != NULL {
            let past_num_blocks =
                self.arena[parent].past_num_blocks + executed_epoch_len as u64;

            self.data_man.insert_epoch_execution_context(
                hash.clone(),
                EpochExecutionContext {
                    start_block_number: self
                        .get_epoch_start_block_number(index),
                },
                true, /* persistent to db */
            );

            self.arena[index].past_num_blocks = past_num_blocks;
        }

        debug!(
            "Block {} inserted into Consensus with index={}",
            hash, index
        );

        (index, self.hash_to_arena_indices.len())
    }
1840
1841    fn compute_future_bitset(&self, me: usize) -> BitSet {
1842        // Compute future set of parent
1843        let mut queue: VecDeque<usize> = VecDeque::new();
1844        let mut visited = BitSet::with_capacity(self.arena.len() as u32);
1845        queue.push_back(me);
1846        visited.add(me as u32);
1847        while let Some(index) = queue.pop_front() {
1848            for child in &self.arena[index].children {
1849                if !visited.contains(*child as u32)
1850                    && (self.arena[*child].data.activated
1851                        || self.arena[*child].data.inactive_dependency_cnt
1852                            == NULL)
1853                /* We include all preactivated blocks */
1854                {
1855                    visited.add(*child as u32);
1856                    queue.push_back(*child);
1857                }
1858            }
1859            for referrer in &self.arena[index].referrers {
1860                if !visited.contains(*referrer as u32)
1861                    && (self.arena[*referrer].data.activated
1862                        || self.arena[*referrer].data.inactive_dependency_cnt
1863                            == NULL)
1864                /* We include all preactivated blocks */
1865                {
1866                    visited.add(*referrer as u32);
1867                    queue.push_back(*referrer);
1868                }
1869            }
1870        }
1871        visited.remove(me as u32);
1872        visited
1873    }
1874
1875    /// Return the consensus graph indexes of the pivot block where the rewards
1876    /// of its epoch should be computed.
1877    ///
1878    ///   epoch to                         Block holding
1879    ///   compute reward                   the reward state
1880    ///                         Block epoch                  Block with
1881    ///   | \[Bi1\]   |           for cared                    \[Bj\]'s state
1882    ///   |     \   |           anticone                     as deferred root
1883    /// --|----\[Bi\]-|--------------\[Ba\]---------\[Bj\]----------\[Bt\]
1884    ///   |    /    |
1885    ///   | \[Bi2\]   |
1886    ///
1887    /// Let i(\[Bi\]) is the arena index of \[Bi\].
1888    /// Let h(\[Bi\]) is the height of \[Bi\].
1889    ///
1890    /// Params:
1891    ///   epoch_arena_index: the arena index of \[Bj\]
1892    /// Return:
1893    ///   Option<(i(\[Bi\]), i(\[Ba\]))>
1894    ///
1895    /// The gap between \[Bj\] and \[Bi\], i.e., h(\[Bj\])-h(\[Bi\]),
1896    /// is REWARD_EPOCH_COUNT.
1897    /// Let D is the gap between the parent of the genesis of next era and
1898    /// \[Bi\]. The gap between \[Ba\] and \[Bi\] is
1899    ///     min(ANTICONE_PENALTY_UPPER_EPOCH_COUNT, D).
1900    pub fn get_pivot_reward_index(
1901        &self, epoch_arena_index: usize,
1902    ) -> Option<(usize, usize)> {
1903        // We are going to exclude the original genesis block here!
1904        if self.arena[epoch_arena_index].height <= REWARD_EPOCH_COUNT {
1905            return None;
1906        }
1907        let parent_index = self.arena[epoch_arena_index].parent;
1908        // Recompute epoch.
1909        let anticone_cut_height =
1910            REWARD_EPOCH_COUNT - ANTICONE_PENALTY_UPPER_EPOCH_COUNT;
1911        let mut anticone_penalty_cutoff_epoch_block = parent_index;
1912        for _i in 1..anticone_cut_height {
1913            if anticone_penalty_cutoff_epoch_block == NULL {
1914                break;
1915            }
1916            anticone_penalty_cutoff_epoch_block =
1917                self.arena[anticone_penalty_cutoff_epoch_block].parent;
1918        }
1919        let mut reward_epoch_block = anticone_penalty_cutoff_epoch_block;
1920        for _i in 0..ANTICONE_PENALTY_UPPER_EPOCH_COUNT {
1921            if reward_epoch_block == NULL {
1922                break;
1923            }
1924            reward_epoch_block = self.arena[reward_epoch_block].parent;
1925        }
1926        if reward_epoch_block != NULL {
1927            // The anticone_penalty_cutoff respect the era bound!
1928            while !self.is_same_era(
1929                reward_epoch_block,
1930                anticone_penalty_cutoff_epoch_block,
1931            ) {
1932                anticone_penalty_cutoff_epoch_block =
1933                    self.arena[anticone_penalty_cutoff_epoch_block].parent;
1934            }
1935        }
1936        let reward_index = if reward_epoch_block == NULL {
1937            None
1938        } else {
1939            Some((reward_epoch_block, anticone_penalty_cutoff_epoch_block))
1940        };
1941        reward_index
1942    }
1943
1944    fn get_executable_epoch_blocks(
1945        &self, epoch_arena_index: usize,
1946    ) -> Vec<Arc<Block>> {
1947        let mut epoch_blocks = Vec::new();
1948        for idx in self.get_ordered_executable_epoch_blocks(epoch_arena_index) {
1949            let block = self
1950                .data_man
1951                .block_by_hash(
1952                    &self.arena[*idx].hash,
1953                    true, /* update_cache */
1954                )
1955                .expect("Exist");
1956            epoch_blocks.push(block);
1957        }
1958        epoch_blocks
1959    }
1960
1961    /// Compute the expected difficulty of a new block given its parent.
1962    /// Assume the difficulty adjustment period being p.
1963    /// The period boundary is [i*p+1, (i+1)*p].
1964    /// Genesis block does not belong to any period, and the first
1965    /// period is [1, p]. Then, if parent height is less than p, the
1966    /// current block belongs to the first period, and its difficulty
1967    /// should be the initial difficulty. Otherwise, we need to consider
1968    /// 2 cases:
1969    ///
1970    /// 1. The parent height is at the period boundary, i.e., the height
1971    /// is exactly divisible by p. In this case, the new block and its
1972    /// parent do not belong to the same period. The expected difficulty
1973    /// of the new block should be computed based on the situation of
1974    /// parent's period.
1975    ///
1976    /// 2. The parent height is not at the period boundary. In this case,
1977    /// the new block and its parent belong to the same period, and hence,
1978    /// its difficulty should be same as its parent's.
1979    pub fn expected_difficulty(&self, parent_hash: &H256) -> U256 {
1980        let parent_arena_index =
1981            *self.hash_to_arena_indices.get(parent_hash).unwrap();
1982        let parent_epoch = self.arena[parent_arena_index].height;
1983        if parent_epoch
1984            < self
1985                .pow_config
1986                .difficulty_adjustment_epoch_period(parent_epoch)
1987        {
1988            // Use initial difficulty for early epochs
1989            self.pow_config.initial_difficulty.into()
1990        } else {
1991            let last_period_upper = (parent_epoch
1992                / self
1993                    .pow_config
1994                    .difficulty_adjustment_epoch_period(parent_epoch))
1995                * self
1996                    .pow_config
1997                    .difficulty_adjustment_epoch_period(parent_epoch);
1998            if last_period_upper != parent_epoch {
1999                self.arena[parent_arena_index].difficulty
2000            } else {
2001                self.data_man.target_difficulty_manager.target_difficulty(
2002                    self,
2003                    // &self.data_man.target_difficulty_manager,
2004                    &self.pow_config,
2005                    &self.arena[parent_arena_index].hash,
2006                )
2007            }
2008        }
2009    }
2010
    /// Update `self.current_difficulty` to the expected difficulty of a
    /// block built on top of the new pivot tip `new_best_arena_index`.
    fn adjust_difficulty(&mut self, new_best_arena_index: usize) {
        let new_best_hash = self.arena[new_best_arena_index].hash.clone();
        let new_best_difficulty = self.arena[new_best_arena_index].difficulty;
        let old_best_arena_index = *self.pivot_chain.last().expect("not empty");
        if old_best_arena_index == self.arena[new_best_arena_index].parent {
            // Pivot chain prolonged
            assert!(self.current_difficulty == new_best_difficulty);
        }

        let epoch = self.arena[new_best_arena_index].height;
        if epoch == 0 {
            // This may happen since the block at height 1 may have wrong
            // state root and do not update the pivot chain.
            self.current_difficulty = self.pow_config.initial_difficulty.into();
        } else if epoch
            == (epoch
                / self.pow_config.difficulty_adjustment_epoch_period(epoch))
                * self.pow_config.difficulty_adjustment_epoch_period(epoch)
        {
            // The new tip sits exactly on a difficulty adjustment period
            // boundary; recompute the target difficulty.
            // Explicit immutable reborrow so `target_difficulty` can take
            // `&ConsensusGraphInner` while the result is assigned below.
            let this: &ConsensusGraphInner = self;
            self.current_difficulty = self
                .data_man
                .target_difficulty_manager
                .target_difficulty(this, &self.pow_config, &new_best_hash);
        } else {
            // Within a period the difficulty stays the same as the tip's.
            self.current_difficulty = new_best_difficulty;
        }
    }
2039
2040    pub fn best_block_hash(&self) -> H256 {
2041        self.arena[*self.pivot_chain.last().unwrap()].hash
2042    }
2043
2044    pub fn best_block_number(&self) -> u64 {
2045        self.arena[*self.pivot_chain.last().unwrap()].past_num_blocks
2046    }
2047
2048    /// Return the latest epoch number whose state has been enqueued.
2049    ///
2050    /// The state may not exist, so the caller should wait for the result if its
2051    /// state will be used.
2052    pub fn best_state_epoch_number(&self) -> u64 {
2053        let pivot_height = self.pivot_index_to_height(self.pivot_chain.len());
2054        if pivot_height < DEFERRED_STATE_EPOCH_COUNT {
2055            0
2056        } else {
2057            pivot_height - DEFERRED_STATE_EPOCH_COUNT
2058        }
2059    }
2060
2061    fn best_state_arena_index(&self) -> usize {
2062        self.get_pivot_block_arena_index(self.best_state_epoch_number())
2063    }
2064
2065    pub fn best_state_block_hash(&self) -> H256 {
2066        self.arena[self.best_state_arena_index()].hash
2067    }
2068
2069    pub fn get_state_block_with_delay(
2070        &self, block_hash: &H256, delay: usize,
2071    ) -> Result<&H256, String> {
2072        let idx_opt = self.hash_to_arena_indices.get(block_hash);
2073        if idx_opt == None {
2074            return Err(
2075                "Parent hash is too old for computing the deferred state"
2076                    .to_owned(),
2077            );
2078        }
2079        let mut idx = *idx_opt.unwrap();
2080        for _i in 0..delay {
2081            trace!(
2082                "get_state_block_with_delay: idx={}, height={}",
2083                idx,
2084                self.arena[idx].height
2085            );
2086            if idx == self.cur_era_genesis_block_arena_index {
2087                // If it is the original genesis, we just break
2088                if self.arena[self.cur_era_genesis_block_arena_index].height
2089                    == 0
2090                {
2091                    break;
2092                } else {
2093                    return Err(
2094                        "Parent is too old for computing the deferred state"
2095                            .to_owned(),
2096                    );
2097                }
2098            }
2099            idx = self.arena[idx].parent;
2100        }
2101        Ok(&self.arena[idx].hash)
2102    }
2103
2104    pub fn best_epoch_number(&self) -> u64 {
2105        self.cur_era_genesis_height + self.pivot_chain.len() as u64 - 1
2106    }
2107
2108    pub fn best_timer_chain_height(&self) -> u64 {
2109        self.cur_era_genesis_timer_chain_height + self.timer_chain.len() as u64
2110            - 1
2111    }
2112
2113    fn get_arena_index_from_epoch_number(
2114        &self, epoch_number: u64,
2115    ) -> Result<usize, String> {
2116        if epoch_number >= self.cur_era_genesis_height {
2117            let pivot_index =
2118                (epoch_number - self.cur_era_genesis_height) as usize;
2119            if pivot_index >= self.pivot_chain.len() {
2120                Err("Epoch number larger than the current pivot chain tip"
2121                    .into())
2122            } else {
2123                Ok(self.get_pivot_block_arena_index(epoch_number))
2124            }
2125        } else {
2126            Err("Invalid params: epoch number is too old and not maintained by consensus graph".to_owned())
2127        }
2128    }
2129
2130    /// Get the pivot hash from an epoch number. This function will try to query
2131    /// the data manager if it is not available in the ConsensusGraph due to
2132    /// out of the current era.
2133    pub fn get_pivot_hash_from_epoch_number(
2134        &self, epoch_number: u64,
2135    ) -> Result<EpochId, ProviderBlockError> {
2136        let height = epoch_number;
2137        if height >= self.cur_era_genesis_height {
2138            let pivot_index = (height - self.cur_era_genesis_height) as usize;
2139            if pivot_index >= self.pivot_chain.len() {
2140                Err("Epoch number larger than the current pivot chain tip"
2141                    .into())
2142            } else {
2143                Ok(self.arena[self.get_pivot_block_arena_index(height)].hash)
2144            }
2145        } else {
2146            self.data_man.executed_epoch_set_hashes_from_db(epoch_number).ok_or(
2147                format!("get_hash_from_epoch_number: Epoch hash set not in db, epoch_number={}", epoch_number).into()
2148            ).and_then(|epoch_hashes|
2149                epoch_hashes.last().map(Clone::clone).ok_or("Epoch set is empty".into())
2150            )
2151        }
2152    }
2153
2154    /// This function differs from `get_pivot_hash_from_epoch_number` in that it
2155    /// only returns the hash if it is in the current consensus graph.
2156    pub fn epoch_hash(&self, epoch_number: u64) -> Option<H256> {
2157        let pivot_index = self.height_to_pivot_index(epoch_number);
2158        self.pivot_chain
2159            .get(pivot_index)
2160            .map(|idx| self.arena[*idx].hash)
2161    }
2162
2163    pub fn block_hashes_by_epoch(
2164        &self, epoch_number: u64,
2165    ) -> Result<Vec<H256>, ProviderBlockError> {
2166        debug!(
2167            "block_hashes_by_epoch epoch_number={:?} pivot_chain.len={:?}",
2168            epoch_number,
2169            self.pivot_chain.len()
2170        );
2171
2172        let e;
2173        // We first try to get it from the consensus. Note that we cannot use
2174        // the info for the genesis because it may contain out-of-era
2175        // blocks that is not maintained anymore.
2176        match self.get_arena_index_from_epoch_number(epoch_number) {
2177            Ok(pivot_arena_index) => {
2178                if pivot_arena_index != self.cur_era_genesis_block_arena_index {
2179                    return Ok(self
2180                        .get_ordered_executable_epoch_blocks(pivot_arena_index)
2181                        .iter()
2182                        .map(|index| self.arena[*index].hash)
2183                        .collect());
2184                }
2185                e = "Epoch set of the current genesis is not maintained".into();
2186            }
2187            Err(err) => e = err,
2188        }
2189
2190        self.data_man
2191            .executed_epoch_set_hashes_from_db(epoch_number)
2192            .ok_or(
2193                format!(
2194                    "Epoch set not in db epoch_number={}, in mem err={:?}",
2195                    epoch_number, e
2196                )
2197                .into(),
2198            )
2199    }
2200
2201    pub fn skipped_block_hashes_by_epoch(
2202        &self, epoch_number: u64,
2203    ) -> Result<Vec<H256>, ProviderBlockError> {
2204        debug!(
2205            "skipped_block_hashes_by_epoch epoch_number={:?} pivot_chain.len={:?}",
2206            epoch_number,
2207            self.pivot_chain.len()
2208        );
2209
2210        let e;
2211        // We first try to get it from the consensus. Note that we cannot use
2212        // the info for the genesis because it may contain out-of-era
2213        // blocks that is not maintained anymore.
2214        match self.get_arena_index_from_epoch_number(epoch_number) {
2215            Ok(pivot_arena_index) => {
2216                if pivot_arena_index != self.cur_era_genesis_block_arena_index {
2217                    if let Some(skipped_block_set) =
2218                        self.get_skipped_epoch_blocks(pivot_arena_index)
2219                    {
2220                        return Ok(skipped_block_set.clone());
2221                    }
2222                }
2223                e = "Skipped epoch set of the current genesis is not maintained".into();
2224            }
2225            Err(err) => e = err,
2226        }
2227
2228        self.data_man
2229            .skipped_epoch_set_hashes_from_db(epoch_number)
2230            .ok_or(
2231                format!(
2232                    "Skipped epoch set not in db epoch_number={}, in mem err={:?}",
2233                    epoch_number, e
2234                )
2235                    .into(),
2236            )
2237    }
2238
2239    fn get_epoch_hash_for_block(&self, hash: &H256) -> Option<H256> {
2240        self.get_block_epoch_number(&hash)
2241            .and_then(|epoch_number| self.epoch_hash(epoch_number))
2242    }
2243
2244    pub fn bounded_terminal_block_hashes(
2245        &mut self, referee_bound: usize,
2246    ) -> Vec<H256> {
2247        let best_block_arena_index = *self.pivot_chain.last().unwrap();
2248        if self.terminal_hashes.len() > referee_bound {
2249            self.best_terminals(best_block_arena_index, referee_bound)
2250        } else {
2251            self.terminal_hashes
2252                .iter()
2253                .map(|hash| hash.clone())
2254                .collect()
2255        }
2256    }
2257
2258    pub fn get_block_epoch_number(&self, hash: &H256) -> Option<u64> {
2259        self.hash_to_arena_indices.get(hash).and_then(|index| {
2260            match self.arena[*index].data.epoch_number {
2261                NULLU64 => None,
2262                epoch => Some(epoch),
2263            }
2264        })
2265    }
2266
2267    pub fn all_blocks_with_topo_order(&self) -> Vec<H256> {
2268        let epoch_number = self.best_epoch_number();
2269        let mut current_number = 0;
2270        let mut hashes = Vec::new();
2271        while current_number <= epoch_number {
2272            let epoch_hashes =
2273                self.block_hashes_by_epoch(current_number.into()).unwrap();
2274            for hash in epoch_hashes {
2275                hashes.push(hash);
2276            }
2277            current_number += 1;
2278        }
2279        hashes
2280    }
2281
2282    /// Return the block receipts in the current pivot view and the epoch block
2283    /// hash. If `hash` is not executed in the current view, return None.
2284    pub fn block_execution_results_by_hash(
2285        &self, hash: &H256, update_cache: bool,
2286    ) -> Option<BlockExecutionResultWithEpoch> {
2287        match self.get_epoch_hash_for_block(hash) {
2288            Some(epoch) => {
2289                trace!("Block {} is in epoch {}", hash, epoch);
2290                let execution_result =
2291                    self.data_man.block_execution_result_by_hash_with_epoch(
2292                        hash,
2293                        &epoch,
2294                        false, /* update_pivot_assumption */
2295                        update_cache,
2296                    )?;
2297                Some(DataVersionTuple(epoch, execution_result))
2298            }
2299            None => {
2300                debug!("Block {:?} not in mem, try to read from db", hash);
2301
2302                // result in db might be outdated
2303                // (after chain reorg but before re-execution)
2304                let res = match self
2305                    .data_man
2306                    .block_execution_result_by_hash_from_db(hash)
2307                {
2308                    None => return None,
2309                    Some(res) => res,
2310                };
2311
2312                let execution_pivot_hash = res.0;
2313                let epoch_number = self
2314                    .data_man
2315                    .block_header_by_hash(&execution_pivot_hash)?
2316                    .height();
2317
2318                match self.get_pivot_hash_from_epoch_number(epoch_number) {
2319                    // pivot chain has not changed, result should be correct
2320                    Ok(h) if h == execution_pivot_hash => Some(res),
2321
2322                    // pivot chain has changed, block is not re-executed yet
2323                    _ => None,
2324                }
2325            }
2326        }
2327    }
2328
2329    pub fn is_timer_block(&self, block_hash: &H256) -> Option<bool> {
2330        self.hash_to_arena_indices
2331            .get(block_hash)
2332            .and_then(|index| Some(self.arena[*index].is_timer))
2333    }
2334
2335    pub fn is_adaptive(&self, block_hash: &H256) -> Option<bool> {
2336        self.hash_to_arena_indices
2337            .get(block_hash)
2338            .and_then(|index| Some(self.arena[*index].adaptive))
2339    }
2340
2341    pub fn is_partial_invalid(&self, block_hash: &H256) -> Option<bool> {
2342        self.hash_to_arena_indices
2343            .get(block_hash)
2344            .and_then(|index| Some(self.arena[*index].data.partial_invalid))
2345    }
2346
2347    pub fn is_pending(&self, block_hash: &H256) -> Option<bool> {
2348        self.hash_to_arena_indices
2349            .get(block_hash)
2350            .and_then(|index| Some(self.arena[*index].data.pending))
2351    }
2352
2353    pub fn check_block_pivot_assumption(
2354        &self, pivot_hash: &H256, epoch: u64,
2355    ) -> Result<(), ProviderBlockError> {
2356        let last_number = self.best_epoch_number();
2357        let hash = self.get_pivot_hash_from_epoch_number(epoch)?;
2358        if epoch > last_number || hash != *pivot_hash {
2359            return Err("Error: pivot chain assumption failed".into());
2360        }
2361        Ok(())
2362    }
2363
2364    /// Compute the block weight following the GHAST algorithm:
2365    /// If a block is not adaptive, the weight is its difficulty
2366    /// If a block is adaptive, then for the heavy blocks, it equals to
2367    /// the heavy block ratio times the difficulty. Otherwise, it is zero.
2368    fn block_weight(&self, me: usize) -> i128 {
2369        if !self.arena[me].data.activated || self.arena[me].era_block == NULL {
2370            return 0 as i128;
2371        }
2372        let is_heavy = self.arena[me].is_heavy;
2373        let is_adaptive = self.arena[me].adaptive;
2374        if is_adaptive {
2375            if is_heavy {
2376                self.inner_conf.heavy_block_difficulty_ratio as i128
2377                    * i128::try_from(self.arena[me].difficulty.low_u128())
2378                        .unwrap()
2379            } else {
2380                0 as i128
2381            }
2382        } else {
2383            i128::try_from(self.arena[me].difficulty.low_u128()).unwrap()
2384        }
2385    }
2386
    /// ```text
    ///                   _________ 5 __________
    ///                   |                    |
    ///  state_valid:           t    f    f    f
    /// <----------------[Bl]-[Bk]-[Bj]-[Bi]-[Bp]-[Bm]----
    ///   [Dj]-[Di]-[Dp]-[Dm]
    ///
    /// [Bp] is the parent of [Bm]
    /// [Dm] is the deferred state root of [Bm]. This is a rough definition
    /// representing deferred state/receipt/blame root
    /// i([Bm]) is the arena index of [Bm]
    /// e([Bm]) is the execution commitment of [Bm]
    /// [Dm] can be generated from e([Bl])
    ///
    /// Param:
    ///   i([Bp]),
    ///   e([Bl]),
    /// Return:
    ///   The blame and the deferred blame roots information that should be
    ///   contained in header of [Bm].
    ///   (blame,
    ///    deferred_blame_state_root,
    ///    deferred_blame_receipt_root,
    ///    deferred_blame_bloom_root)
    ///
    /// Assumption:
    ///   * [Bm] is a pivot block on current pivot chain.
    ///   This assumption is derived from the following cases:
    ///   1. This function may be triggered when evaluating the reward for the
    ///      blocks in epoch of [Bm]. This relies on the state_valid value of
    ///      [Bm]. In other words, in this case, this function is triggered
    ///      when computing the state_valid value of [Bm].
    ///   2. This function may be triggered when mining [Bm].
    ///
    ///   * The execution commitments of blocks needed do exist.
    ///
    ///   * [Bm] is in stable era.
    ///   This assumption is derived from the following cases:
    ///   1. In normal run, before updating stable era genesis, we always
    ///      make state_valid of all the pivot blocks before the new
    ///      stable era genesis computed.
    ///   2. The recover phase (including both archive and full node)
    ///      prepares the graph state to the normal run state before
    ///      calling this function.
    ///
    /// This function searches backward for all the blocks whose
    /// state_valid are false, starting from [Bp]. The number of found
    /// blocks is the 'blame' of [Bm]. if 'blame' == 0, the deferred blame
    /// root information of [Bm] is simply [Dm], otherwise, it is computed
    /// from the vector of deferred state roots of these found blocks
    /// together with [Bm], e.g., in the above example, 'blame'==3, and
    /// the vector of deferred roots of these blocks is
    /// ([Dm], [Dp], [Di], [Dj]), therefore, the deferred blame root of
    /// [Bm] is keccak([Dm], keccak([Dp], keccak([Di], [Dj]))).
    /// The reason that we use this recursive way to compute deferred
    /// blame root is to be able to leverage the computed value of previous
    /// block. For example, the deferred blame root of [Bm] is exactly the
    /// keccak of [Dm] and the deferred blame root of [Bp].
    /// ```
    fn compute_blame_and_state_with_execution_result(
        &mut self, parent: usize, state_root_hash: H256,
        receipts_root_hash: H256, logs_bloom_hash: H256,
    ) -> Result<StateBlameInfo, String> {
        // Walk backward from `parent`, collecting the deferred roots of
        // every block whose `state_valid` is false.
        let mut cur = parent;
        let mut blame_cnt: u32 = 0;
        // Index 0 of each vector holds the deferred roots of [Bm] itself;
        // subsequent entries are pushed for each visited invalid ancestor.
        let mut state_blame_vec = Vec::new();
        let mut receipt_blame_vec = Vec::new();
        let mut bloom_blame_vec = Vec::new();
        // Arena indices of visited blocks whose `blame_info` is still unset;
        // they are filled in below once the accumulated roots are known.
        let mut blame_info_to_fill = Vec::new();
        state_blame_vec.push(state_root_hash);
        receipt_blame_vec.push(receipts_root_hash);
        bloom_blame_vec.push(logs_bloom_hash);
        loop {
            if self.arena[cur]
                .data
                .state_valid
                .expect("computed by the caller")
            {
                // The state_valid for this block and blocks before have been
                // computed. In this case, we need to fill the last one with
                // blame 0.
                if let Some(last_blame_info) = blame_info_to_fill.pop() {
                    self.arena[last_blame_info].data.blame_info =
                        Some(StateBlameInfo {
                            blame: 0,
                            state_vec_root: state_blame_vec
                                .last()
                                .unwrap()
                                .clone(),
                            receipts_vec_root: receipt_blame_vec
                                .last()
                                .unwrap()
                                .clone(),
                            logs_bloom_vec_root: bloom_blame_vec
                                .last()
                                .unwrap()
                                .clone(),
                        });
                    blame_cnt = 1;
                }
                break;
            }

            debug!("compute_blame_and_state_with_execution_result: cur={} height={}", cur, self.arena[cur].height);
            // Note that this function should never return errors for pivot
            // chain blocks, because our assumption is that stable
            // blocks will always already have `state_valid` and
            // `blame_info` computed.
            let deferred_arena_index =
                self.get_deferred_state_arena_index(cur)?;
            let deferred_block_commitment = self
                .data_man
                .get_epoch_execution_commitment(
                    &self.arena[deferred_arena_index].hash,
                )
                .ok_or("State block commitment missing")?;
            // We can retrieve the already filled info and maybe stop here
            if let Some(blame_info) = self.arena[cur].data.blame_info {
                blame_cnt = blame_info.blame + 1;
                state_blame_vec.push(blame_info.state_vec_root);
                receipt_blame_vec.push(blame_info.receipts_vec_root);
                bloom_blame_vec.push(blame_info.logs_bloom_vec_root);
                break;
            }
            blame_info_to_fill.push(cur);
            if self.arena[cur].height == self.cur_era_genesis_height {
                // Note that this should never happen for pivot chain blocks,
                // because we guarantee that the blame vector at
                // the stable genesis will not stretch beyond the checkpoint
                // genesis. So the blame vector should stop at
                // some point unless the stable genesis is reverted.
                return Err(
                    "Failed to compute blame and state due to out of era. The blockchain data is probably corrupted."
                        .to_owned(),
                );
            }
            state_blame_vec.push(
                deferred_block_commitment
                    .state_root_with_aux_info
                    .aux_info
                    .state_root_hash,
            );
            receipt_blame_vec
                .push(deferred_block_commitment.receipts_root.clone());
            bloom_blame_vec
                .push(deferred_block_commitment.logs_bloom_hash.clone());
            cur = self.arena[cur].parent;
        }
        // Total blame: the blocks visited in this call plus the blame carried
        // over from an already-filled `blame_info` (if the loop hit one).
        let blame = blame_cnt + blame_info_to_fill.len() as u32;

        // To this point:
        //                   _________ 5 __________
        //                   |                    |
        //  state_valid:           t    f    f    f
        // <----------------[Bl]-[Bk]-[Bj]-[Bi]-[Bp]-[Bm]----
        //   [Dj]-[Di]-[Dp]-[Dm]
        //                                   1    0
        //                                   |----|   blame_info_to_fill
        //    3    2    1    0
        //    |--------------|  state_blame_vec
        if blame > 0 {
            // Fold the collected root vectors from the oldest entry toward
            // [Bm], filling `blame_info` for each visited block on the way.
            let mut accumulated_state_root =
                state_blame_vec.last().unwrap().clone();
            let mut accumulated_receipts_root =
                receipt_blame_vec.last().unwrap().clone();
            let mut accumulated_logs_bloom_root =
                bloom_blame_vec.last().unwrap().clone();
            for i in (0..blame_info_to_fill.len()).rev() {
                accumulated_state_root =
                    BlockHeaderBuilder::compute_blame_state_root_incremental(
                        state_blame_vec[i + 1],
                        accumulated_state_root,
                    );
                accumulated_receipts_root =
                    BlockHeaderBuilder::compute_blame_state_root_incremental(
                        receipt_blame_vec[i + 1],
                        accumulated_receipts_root,
                    );
                accumulated_logs_bloom_root =
                    BlockHeaderBuilder::compute_blame_state_root_incremental(
                        bloom_blame_vec[i + 1],
                        accumulated_logs_bloom_root,
                    );
                self.arena[blame_info_to_fill[i]].data.blame_info =
                    Some(StateBlameInfo {
                        blame: blame_cnt,
                        state_vec_root: accumulated_state_root,
                        receipts_vec_root: accumulated_receipts_root,
                        logs_bloom_vec_root: accumulated_logs_bloom_root,
                    });
                blame_cnt += 1;
            }
            // One final fold with index 0 (the deferred roots of [Bm] itself)
            // yields the blame roots for the header of [Bm].
            let state_vec_root =
                BlockHeaderBuilder::compute_blame_state_root_incremental(
                    state_blame_vec[0],
                    accumulated_state_root,
                );
            let receipts_vec_root =
                BlockHeaderBuilder::compute_blame_state_root_incremental(
                    receipt_blame_vec[0],
                    accumulated_receipts_root,
                );
            let logs_bloom_vec_root =
                BlockHeaderBuilder::compute_blame_state_root_incremental(
                    bloom_blame_vec[0],
                    accumulated_logs_bloom_root,
                );
            Ok(StateBlameInfo {
                blame,
                state_vec_root,
                receipts_vec_root,
                logs_bloom_vec_root,
            })
        } else {
            // No blame: the deferred roots of [Bm] are used directly.
            Ok(StateBlameInfo {
                blame: 0,
                state_vec_root: state_blame_vec.pop().unwrap(),
                receipts_vec_root: receipt_blame_vec.pop().unwrap(),
                logs_bloom_vec_root: bloom_blame_vec.pop().unwrap(),
            })
        }
    }
2609
    /// Compute `state_valid` and `blame_info` for `me`.
    /// Assumption:
    ///   1. The precedents of `me` have computed state_valid
    ///   2. The execution_commitment for deferred state block of `me` exist.
    ///   3. `me` is in stable era.
    ///
    /// `state_valid` is true iff the blame and the three deferred blame roots
    /// declared in the header of `me` match the values computed locally from
    /// our own execution results. On mismatch a debug recompute may be
    /// triggered depending on configuration.
    fn compute_state_valid_and_blame_info_for_block(
        &mut self, me: usize, executor: &ConsensusExecutor,
    ) -> Result<(), String> {
        let block_height = self.arena[me].height;
        let block_hash = self.arena[me].hash;
        debug!("compute_state_valid: me={} height={}", me, block_height);
        let deferred_state_arena_index =
            self.get_deferred_state_arena_index(me)?;
        // The commitment of the deferred state block provides the locally
        // computed deferred state/receipts/bloom roots for `me`.
        let exec_commitment = self
            .data_man
            .get_epoch_execution_commitment(
                &self.arena[deferred_state_arena_index].hash,
            )
            .expect("Commitment exist");
        let parent = self.arena[me].parent;
        let original_deferred_state_root =
            exec_commitment.state_root_with_aux_info.clone();
        let original_deferred_receipt_root =
            exec_commitment.receipts_root.clone();
        let original_deferred_logs_bloom_hash =
            exec_commitment.logs_bloom_hash.clone();

        // Compute the blame and blame roots the header of `me` should carry
        // according to our own execution results.
        let state_blame_info = self
            .compute_blame_and_state_with_execution_result(
                parent,
                original_deferred_state_root
                    .aux_info
                    .state_root_hash
                    .clone(),
                original_deferred_receipt_root.clone(),
                original_deferred_logs_bloom_hash.clone(),
            )?;
        let block_header = self
            .data_man
            .block_header_by_hash(&self.arena[me].hash)
            .unwrap();
        // All four header fields must agree with the local computation.
        let state_valid = block_header.blame() == state_blame_info.blame
            && *block_header.deferred_state_root()
                == state_blame_info.state_vec_root
            && *block_header.deferred_receipts_root()
                == state_blame_info.receipts_vec_root
            && *block_header.deferred_logs_bloom_hash()
                == state_blame_info.logs_bloom_vec_root;

        let mut debug_recompute = false;
        if state_valid {
            debug!(
                "compute_state_valid_for_block(): Block {} state/blame is valid.",
                block_hash,
            );
        } else {
            warn!(
                "compute_state_valid_for_block(): Block {:?} state/blame is invalid! \
                header blame {:?}, our blame {:?}, header state_root {:?}, \
                our state root {:?}, header receipt_root {:?}, our receipt root {:?}, \
                header logs_bloom_hash {:?}, our logs_bloom_hash {:?}.",
                block_hash, block_header.blame(), state_blame_info.blame,
                block_header.deferred_state_root(), state_blame_info.state_vec_root,
                block_header.deferred_receipts_root(), state_blame_info.receipts_vec_root,
                block_header.deferred_logs_bloom_hash(), state_blame_info.logs_bloom_vec_root,
            );
            INVALID_BLAME_OR_STATE_ROOT_COUNTER.inc(1);

            // A mismatch triggers a debug recompute when a dump directory is
            // configured.
            if self.inner_conf.debug_dump_dir_invalid_state_root.is_some() {
                debug_recompute = true;
            }
        }
        // A recompute can also be forced for one specific block hash via
        // configuration, regardless of the validity outcome.
        if let Some(debug_epoch) =
            &self.inner_conf.debug_invalid_state_root_epoch
        {
            if block_hash.eq(debug_epoch) {
                debug_recompute = true;
            }
        }
        if debug_recompute {
            if let Ok(epoch_arena_index) =
                self.get_deferred_state_arena_index(me)
            {
                let state_availability_lower_bound = self
                    .data_man
                    .state_availability_boundary
                    .read()
                    .lower_bound;
                // Can not run debug_recompute when the parent state is not
                // available.
                if state_availability_lower_bound < block_height {
                    // Maybe this block isn't valid, when our node is correct.
                    log_invalid_state_root(
                        epoch_arena_index,
                        self,
                        executor,
                        block_hash,
                        block_height,
                        &original_deferred_state_root,
                    )
                    .ok();

                    // Maybe this block is valid, but we are not. So we find the
                    // first state we think is correct however this block things
                    // wrong.
                    let header_blame = block_header.blame() as u64;
                    let mut block_arena_index = self.arena[me].parent;
                    let mut earliest_mismatch_arena_index = me;
                    // Scan up to `header_blame` ancestors (bounded by the
                    // state availability lower bound) for the earliest block
                    // the header blames that we did NOT mark state-invalid.
                    for i in 1..header_blame {
                        if block_height - i <= state_availability_lower_bound {
                            break;
                        }
                        if self.arena[block_arena_index].data.state_valid
                            != Some(false)
                        {
                            earliest_mismatch_arena_index = block_arena_index;
                        }
                        block_arena_index =
                            self.arena[block_arena_index].parent;
                    }
                    if earliest_mismatch_arena_index != me {
                        if let Ok(state_epoch_arena_index) = self
                            .get_deferred_state_arena_index(
                                earliest_mismatch_arena_index,
                            )
                        {
                            let block_hash =
                                self.arena[earliest_mismatch_arena_index].hash;
                            let block_height = self.arena
                                [earliest_mismatch_arena_index]
                                .height;
                            let state_root = self
                                .data_man
                                .get_epoch_execution_commitment(
                                    &self.arena[state_epoch_arena_index].hash,
                                )
                                .expect("Commitment exist")
                                .state_root_with_aux_info
                                .clone();
                            log_invalid_state_root(
                                state_epoch_arena_index,
                                self,
                                executor,
                                block_hash,
                                block_height,
                                &state_root,
                            )
                            .ok();
                        }
                    }
                }
            }
        }

        self.arena[me].data.state_valid = Some(state_valid);
        // `blame_info` is only stored when the validity check fails.
        if !state_valid {
            self.arena[me].data.blame_info = Some(state_blame_info);
        }

        // Optionally expose the execution state for external inspection.
        if self.inner_conf.enable_state_expose {
            STATE_EXPOSER
                .consensus_graph
                .lock()
                .block_execution_state_vec
                .push(ConsensusGraphBlockExecutionState {
                    block_hash,
                    deferred_state_root: original_deferred_state_root
                        .aux_info
                        .state_root_hash,
                    deferred_receipt_root: original_deferred_receipt_root,
                    deferred_logs_bloom_hash: original_deferred_logs_bloom_hash,
                    state_valid: self.arena[me]
                        .data
                        .state_valid
                        .unwrap_or(true),
                })
        }

        Ok(())
    }
2790
    /// Decide whether the vote of `me` is valid with respect to the pivot
    /// block at `pivot_arena_index`, caching each intermediate result in
    /// `vote_valid` together with the LCA height it was computed against
    /// (`vote_valid_lca_height`), so repeated queries at the same LCA height
    /// are answered from the cache.
    fn compute_vote_valid_for_pivot_block(
        &mut self, me: usize, pivot_arena_index: usize,
    ) -> bool {
        let lca = self.lca(me, pivot_arena_index);
        let lca_height = self.arena[lca].height;
        debug!(
            "compute_vote_valid_for_pivot_block: lca={}, lca_height={}",
            lca, lca_height
        );
        // Explicit stack of (stage, index, ancestor) entries instead of
        // recursion: stage 0 computes the result for `index` (possibly
        // deferring to an ancestor first); stage 1 copies the ancestor's
        // result back to `index`.
        let mut stack = Vec::new();
        stack.push((0, me, 0));
        while !stack.is_empty() {
            let (stage, index, a) = stack.pop().unwrap();
            if stage == 0 {
                // Cached result is reused only if it was computed against
                // the same LCA height.
                if self.arena[index].data.vote_valid_lca_height != lca_height {
                    let header = self
                        .data_man
                        .block_header_by_hash(&self.arena[index].hash)
                        .unwrap();
                    let blame = header.blame();
                    if self.arena[index].height > lca_height + 1 + blame as u64
                    {
                        // The blamed range ends above the LCA: the result for
                        // `index` equals the ancestor's, so compute that
                        // first (stage 0) and copy it back after (stage 1).
                        let ancestor = self.ancestor_at(
                            index,
                            self.arena[index].height - blame as u64 - 1,
                        );
                        stack.push((1, index, ancestor));
                        stack.push((0, ancestor, 0));
                    } else {
                        // We need to make sure the ancestor at height
                        // self.arena[index].height - blame - 1 is state valid,
                        // and the remainings are not
                        let start_height =
                            self.arena[index].height - blame as u64 - 1;
                        let mut cur_height = lca_height;
                        let mut cur = lca;
                        let mut vote_valid = true;
                        // Every block strictly above `start_height` (down
                        // from the LCA) must be state-invalid for the vote
                        // to be valid.
                        while cur_height > start_height {
                            if self.arena[cur].data.state_valid
                                .expect("state_valid for me has been computed in \
                                wait_and_compute_state_valid_locked by the caller, \
                                so the precedents should have state_valid") {
                                vote_valid = false;
                                break;
                            }
                            cur_height -= 1;
                            cur = self.arena[cur].parent;
                        }
                        // ... and the block at `start_height` itself must be
                        // state-valid.
                        if vote_valid
                            && !self.arena[cur].data.state_valid.expect(
                                "state_valid for me has been computed in \
                            wait_and_compute_state_valid_locked by the caller, \
                            so the precedents should have state_valid",
                            )
                        {
                            vote_valid = false;
                        }
                        self.arena[index].data.vote_valid_lca_height =
                            lca_height;
                        self.arena[index].data.vote_valid = vote_valid;
                    }
                }
            } else {
                // Stage 1: propagate the ancestor's result to `index`.
                self.arena[index].data.vote_valid_lca_height = lca_height;
                self.arena[index].data.vote_valid =
                    self.arena[a].data.vote_valid;
            }
        }
        self.arena[me].data.vote_valid
    }
2861
2862    /// Compute the total weight in the epoch represented by the block of
2863    /// my_hash.
2864    fn total_weight_in_own_epoch(
2865        &self, blockset_in_own_epoch: &Vec<usize>, genesis: usize,
2866    ) -> i128 {
2867        let gen_arena_index = if genesis != NULL {
2868            genesis
2869        } else {
2870            self.cur_era_genesis_block_arena_index
2871        };
2872        let gen_height = self.arena[gen_arena_index].height;
2873        let mut total_weight = 0 as i128;
2874        for index in blockset_in_own_epoch.iter() {
2875            if gen_arena_index != self.cur_era_genesis_block_arena_index {
2876                let height = self.arena[*index].height;
2877                if height < gen_height {
2878                    continue;
2879                }
2880                let era_arena_index = self.ancestor_at(*index, gen_height);
2881                if gen_arena_index != era_arena_index {
2882                    continue;
2883                }
2884            }
2885            total_weight += self.block_weight(*index);
2886        }
2887        total_weight
2888    }
2889
    /// Recompute metadata associated information on pivot chain changes
    ///
    /// `start_at` is the pivot height from which the per-height pivot
    /// metadata (`past_weight` and `last_pivot_in_past_blocks`) is rebuilt.
    /// `to_update` contains arena indices of blocks whose
    /// `last_pivot_in_past` must be recomputed.
    fn recompute_metadata(
        &mut self, start_at: u64, mut to_update: HashSet<usize>,
    ) {
        self.pivot_chain_metadata
            .resize_with(self.pivot_chain.len(), Default::default);
        let pivot_height = self.get_pivot_height();
        // Pass 1: refresh the metadata of the pivot blocks themselves from
        // `start_at` up to the current pivot tip.
        for i in start_at..pivot_height {
            let me = self.get_pivot_block_arena_index(i);
            self.arena[me].data.last_pivot_in_past = i;
            let i_pivot_index = self.height_to_pivot_index(i);
            self.pivot_chain_metadata[i_pivot_index]
                .last_pivot_in_past_blocks
                .clear();
            self.pivot_chain_metadata[i_pivot_index]
                .last_pivot_in_past_blocks
                .insert(me);
            // past_weight accumulates: the previous pivot slot's past_weight,
            // plus the weight of this pivot block's own epoch blockset, plus
            // the pivot block's own weight.
            self.pivot_chain_metadata[i_pivot_index].past_weight =
                if i_pivot_index > 0 {
                    let blockset = self
                        .exchange_or_compute_blockset_in_own_view_of_epoch(
                            me, None,
                        );
                    let blockset_weight = self.total_weight_in_own_epoch(
                        &blockset,
                        self.cur_era_genesis_block_arena_index,
                    );
                    // Hand the blockset back after use.
                    self.exchange_or_compute_blockset_in_own_view_of_epoch(
                        me,
                        Some(blockset),
                    );
                    self.pivot_chain_metadata[i_pivot_index - 1].past_weight
                        + blockset_weight
                        + self.block_weight(me)
                } else {
                    self.block_weight(me)
                };
            to_update.remove(&me);
        }
        // Pass 2: recompute `last_pivot_in_past` for the remaining blocks
        // with an explicit-stack DFS. A (0, x) entry discovers x's
        // dependencies (parent and referees); the matching (1, x) entry is
        // popped after they were handled and combines their values.
        let mut stack = Vec::new();
        let to_visit = to_update.clone();
        for i in &to_update {
            stack.push((0, *i));
        }
        while !stack.is_empty() {
            let (stage, me) = stack.pop().unwrap();
            if !to_visit.contains(&me) {
                continue;
            }
            let parent = self.arena[me].parent;
            if stage == 0 {
                if to_update.contains(&me) {
                    to_update.remove(&me);
                    stack.push((1, me));
                    stack.push((0, parent));
                    for referee in &self.arena[me].referees {
                        stack.push((0, *referee));
                    }
                }
            } else if stage == 1 && me != self.cur_era_genesis_block_arena_index
            {
                // The last pivot block in the past of `me` is the maximum of
                // its parent's and all referees' values.
                let mut last_pivot = if parent == NULL {
                    0
                } else {
                    self.arena[parent].data.last_pivot_in_past
                };
                for referee in &self.arena[me].referees {
                    let x = self.arena[*referee].data.last_pivot_in_past;
                    last_pivot = max(last_pivot, x);
                }
                self.arena[me].data.last_pivot_in_past = last_pivot;
                let last_pivot_index = self.height_to_pivot_index(last_pivot);
                self.pivot_chain_metadata[last_pivot_index]
                    .last_pivot_in_past_blocks
                    .insert(me);
            }
        }
    }
2968
2969    fn get_timer_chain_index(&self, me: usize) -> usize {
2970        if !self.arena[me].is_timer || self.arena[me].data.partial_invalid {
2971            return NULL;
2972        }
2973        if self.arena[me].data.ledger_view_timer_chain_height
2974            < self.cur_era_genesis_timer_chain_height
2975        {
2976            // This is only possible if `me` is in the anticone of
2977            // `cur_era_genesis`.
2978            return NULL;
2979        }
2980        let timer_chain_index =
2981            (self.arena[me].data.ledger_view_timer_chain_height
2982                - self.cur_era_genesis_timer_chain_height) as usize;
2983        if self.timer_chain.len() > timer_chain_index
2984            && self.timer_chain[timer_chain_index] == me
2985        {
2986            timer_chain_index
2987        } else {
2988            NULL
2989        }
2990    }
2991
    /// Compute, in the past view of a block with the given `parent` and
    /// `referees`, the pair of (accumulated difficulty of the longest timer
    /// chain, arena index of the last timer chain block in that view).
    /// The second component may be `NULL` when no such predecessor exists.
    fn compute_timer_chain_past_view_info(
        &self, parent: usize, referees: &Vec<usize>,
    ) -> (i128, usize) {
        let mut timer_longest_difficulty = 0;
        let mut longest_referee = parent;
        // Seed the search with the parent's accumulated timer difficulty.
        if parent != NULL {
            timer_longest_difficulty =
                self.arena[parent].data.past_view_timer_longest_difficulty
                    + self.get_timer_difficulty(parent);
        }
        // Pick, among parent and referees, the predecessor carrying the
        // heaviest timer chain; `is_heavier` breaks difficulty ties using the
        // block hashes passed alongside.
        for referee in referees {
            let timer_difficulty =
                self.arena[*referee].data.past_view_timer_longest_difficulty
                    + self.get_timer_difficulty(*referee);
            if longest_referee == NULL
                || ConsensusGraphInner::is_heavier(
                    (timer_difficulty, &self.arena[*referee].hash),
                    (
                        timer_longest_difficulty,
                        &self.arena[longest_referee].hash,
                    ),
                )
            {
                timer_longest_difficulty = timer_difficulty;
                longest_referee = *referee;
            }
        }
        // If the chosen predecessor is itself a valid timer block it is the
        // last timer block of this view; otherwise fall back to the last
        // timer block recorded in its own past view.
        let last_timer_block_arena_index = if longest_referee == NULL
            || self.arena[longest_referee].is_timer
                && !self.arena[longest_referee].data.partial_invalid
        {
            longest_referee
        } else {
            self.arena[longest_referee]
                .data
                .past_view_last_timer_block_arena_index
        };
        (timer_longest_difficulty, last_timer_block_arena_index)
    }
3031
    /// Compute the timer chain information in the past view of a block with
    /// the given `parent` and `referees` (optionally excluding blocks in
    /// `anticone_opt`). Returns a tuple of:
    /// - `fork_at`: the timer chain height at which this view diverges from
    ///   the current timer chain;
    /// - `res`: a map from arena index to recomputed
    ///   `ledger_view_timer_chain_height` for affected blocks;
    /// - `tmp_lca`: the extra accumulative-lca entries to append;
    /// - `tmp_chain`: the new timer chain suffix starting at `fork_at`.
    fn compute_timer_chain_tuple(
        &self, parent: usize, referees: &Vec<usize>,
        anticone_opt: Option<&BitSet>,
    ) -> (u64, HashMap<usize, u64>, Vec<usize>, Vec<usize>) {
        let empty_set = BitSet::new();
        let anticone = if let Some(a) = anticone_opt {
            a
        } else {
            &empty_set
        };
        // Walk back through the `past_view_last_timer_block_arena_index`
        // links, collecting timer blocks that are NOT on the current timer
        // chain, until reaching one that is (or NULL).
        let mut tmp_chain = Vec::new();
        let mut tmp_chain_set = HashSet::new();
        let (_, past_view_last_timer_block_arena_index) =
            self.compute_timer_chain_past_view_info(parent, referees);
        let mut i = past_view_last_timer_block_arena_index;
        while i != NULL && self.get_timer_chain_index(i) == NULL {
            tmp_chain.push(i);
            tmp_chain_set.insert(i);
            i = self.arena[i].data.past_view_last_timer_block_arena_index;
        }
        tmp_chain.reverse();
        // Determine the fork point: one past the on-chain anchor block `i`,
        // or the era genesis timer chain height when no anchor was found.
        let fork_at;
        let fork_at_index;
        if i != NULL {
            fork_at = self.arena[i].data.ledger_view_timer_chain_height + 1;
            assert!(fork_at >= self.cur_era_genesis_timer_chain_height);
            fork_at_index =
                (fork_at - self.cur_era_genesis_timer_chain_height) as usize;
        } else {
            fork_at = self.cur_era_genesis_timer_chain_height;
            fork_at_index = 0;
        }

        let mut res = HashMap::new();
        if fork_at_index < self.timer_chain.len() {
            debug!("New block parent = {} referees = {:?} not extending timer chain (len = {}), fork at timer chain height {}, timer chain index {}", parent, referees, self.timer_chain.len(), fork_at, fork_at_index);
            // Now we need to update the timer_chain_height field of the
            // remaining blocks with topological sort
            let start_point = if i == NULL {
                self.cur_era_genesis_block_arena_index
            } else {
                self.timer_chain[fork_at_index - 1]
            };
            let mut start_set = HashSet::new();
            start_set.insert(start_point);
            let visited: BitSet = get_future(
                start_set,
                // TODO: This conversion overhead can be avoided
                |i| self.successor_edges(i as usize),
                |i| anticone.contains(i as u32),
            );
            let visited_in_order: Vec<usize> = topological_sort(
                visited,
                |i| {
                    self.predecessor_edges(i as usize)
                        .into_iter()
                        .map(|i| i as u32)
                        .collect()
                },
                |_| true,
            );
            // Each block's timer chain height is the maximum over its
            // predecessors, incremented when the predecessor is itself a
            // timer chain member in this view.
            for x in visited_in_order {
                let x = x as usize;
                let mut timer_chain_height = 0;
                for pred in &self.predecessor_edges(x) {
                    let mut height = if let Some(v) = res.get(pred) {
                        *v
                    } else {
                        self.arena[*pred].data.ledger_view_timer_chain_height
                    };
                    if tmp_chain_set.contains(pred)
                        || self.get_timer_chain_index(*pred) < fork_at_index
                    {
                        height += 1;
                    }
                    if height > timer_chain_height {
                        timer_chain_height = height;
                    }
                }
                res.insert(x, timer_chain_height);
            }
        }

        // We compute the accumulative lca list after this
        if tmp_chain.len() > self.timer_chain.len() - fork_at_index
            && self.timer_chain.len() - fork_at_index
                < self.inner_conf.timer_chain_beta as usize
        {
            let mut last_lca = match self.timer_chain_accumulative_lca.last() {
                Some(last_lca) => *last_lca,
                None => self.cur_era_genesis_block_arena_index,
            };
            let s = max(
                self.timer_chain.len(),
                self.inner_conf.timer_chain_beta as usize,
            );
            let e =
                min(tmp_chain.len(), self.inner_conf.timer_chain_beta as usize);
            for i in s..(fork_at_index + e) {
                // `end` is the timer chain index of the end of
                // `timer_chain_beta` consecutive blocks which
                // we will compute accumulative lca.
                let end = i - self.inner_conf.timer_chain_beta as usize;
                if end < self.inner_conf.timer_chain_beta as usize {
                    tmp_lca.push(self.cur_era_genesis_block_arena_index);
                    continue;
                }
                let mut lca = self.timer_chain[end];
                for j in
                    (end - self.inner_conf.timer_chain_beta as usize + 1)..end
                {
                    // Note that we may have timer_chain blocks that are
                    // outside the genesis tree temporarily.
                    // Therefore we have to deal with the case that lca
                    // becomes NULL
                    if lca == NULL {
                        break;
                    }
                    lca = self.lca(lca, self.timer_chain[j]);
                }
                // Note that we have the assumption that the force
                // confirmation point will always move
                // along parental edges, i.e., it is not possible for the
                // point to move to a sibling tree. This
                // assumption is true if the timer_chain_beta
                // and the timer_chain_difficulty_ratio are set to large
                // enough values.
                //
                // It is therefore safe here to use the height to compare.
                if lca != NULL
                    && self.arena[last_lca].height < self.arena[lca].height
                {
                    last_lca = lca;
                }
                tmp_lca.push(last_lca);
            }
        }
        // If the new suffix is long enough, extend the accumulative lca list
        // over windows drawn from `tmp_chain` (and, near the fork point, the
        // retained prefix of the old timer chain).
        if tmp_chain.len() > self.inner_conf.timer_chain_beta as usize {
            let mut last_lca = if let Some(lca) = tmp_lca.last() {
                *lca
            } else if fork_at_index == 0 {
                self.cur_era_genesis_block_arena_index
            } else {
                // This is guaranteed to be inside the bound because it
                // means that the lca computation on old nodes do not need to
                // be extended. The gap between the fork_at_index and the
                // self.timer_chain.len() is greater than or equal to
                // timer_chain_beta.
                self.timer_chain_accumulative_lca[fork_at_index - 1]
            };
            for i in 0..(tmp_chain.len()
                - (self.inner_conf.timer_chain_beta as usize))
            {
                if fork_at_index + i + 1
                    < self.inner_conf.timer_chain_beta as usize
                {
                    tmp_lca.push(self.cur_era_genesis_block_arena_index)
                } else {
                    let mut lca = tmp_chain[i];
                    // We only go over timer_chain_beta elements to compute lca
                    let s = if i < self.inner_conf.timer_chain_beta as usize - 1
                    {
                        0
                    } else {
                        i + 1 - self.inner_conf.timer_chain_beta as usize
                    };
                    for j in s..i {
                        // Note that we may have timer_chain blocks that are
                        // outside the genesis tree temporarily.
                        // Therefore we have to deal with the case that lca
                        // becomes NULL
                        if lca == NULL {
                            break;
                        }
                        lca = self.lca(lca, tmp_chain[j]);
                    }
                    for j in (fork_at_index + i + 1
                        - self.inner_conf.timer_chain_beta as usize)
                        ..fork_at_index
                    {
                        // Note that we may have timer_chain blocks that are
                        // outside the genesis tree temporarily.
                        // Therefore we have to deal with the case that lca
                        // becomes NULL
                        if lca == NULL {
                            break;
                        }
                        lca = self.lca(lca, self.timer_chain[j]);
                    }
                    // Note that we have the assumption that the force
                    // confirmation point will always move
                    // along parental edges, i.e., it is not possible for the
                    // point to move to a sibling tree. This
                    // assumption is true if the timer_chain_beta
                    // and the timer_chain_difficulty_ratio are set to large
                    // enough values.
                    //
                    // It is therefore safe here to use the height to compare.
                    if lca != NULL
                        && self.arena[last_lca].height < self.arena[lca].height
                    {
                        last_lca = lca;
                    }
                    tmp_lca.push(last_lca);
                }
            }
        }

        (fork_at, res, tmp_lca, tmp_chain)
    }
3243
    /// Update the timer chain and its accumulative lca list after block `me`
    /// has been inserted, based on `me`'s past view.
    fn update_timer_chain(&mut self, me: usize) {
        let (fork_at, res, extra_lca, tmp_chain) = self
            .compute_timer_chain_tuple(
                self.arena[me].parent,
                &self.arena[me].referees,
                None,
            );

        // Truncate the timer chain at the fork point and splice in the
        // recomputed suffix; shrink/extend the accumulative lca list to
        // match (it always trails the timer chain by `timer_chain_beta`).
        let fork_at_index =
            (fork_at - self.cur_era_genesis_timer_chain_height) as usize;
        self.timer_chain.resize(fork_at_index + tmp_chain.len(), 0);
        let new_chain_lca_size = if self.timer_chain.len()
            > self.inner_conf.timer_chain_beta as usize
        {
            self.timer_chain.len() - self.inner_conf.timer_chain_beta as usize
        } else {
            0
        };
        self.timer_chain_accumulative_lca
            .resize(new_chain_lca_size, 0);
        for i in 0..tmp_chain.len() {
            self.timer_chain[fork_at_index + i] = tmp_chain[i];
        }
        for i in 0..extra_lca.len() {
            self.timer_chain_accumulative_lca
                [new_chain_lca_size - extra_lca.len() + i] = extra_lca[i];
        }
        // In case of extending the key chain, me may not be inside the result
        // map and we will set it to the end of the timer chain.
        if !res.contains_key(&me) {
            assert!(
                self.cur_era_genesis_timer_chain_height
                    + self.timer_chain.len() as u64
                    == fork_at
            );
            self.arena[me].data.ledger_view_timer_chain_height = fork_at;
        }
        // Apply the recomputed timer chain heights.
        for (k, v) in res {
            self.arena[k].data.ledger_view_timer_chain_height = v;
        }
        // If `me` is itself a valid timer block, append it to the timer chain
        // and maintain the accumulative lca list for the new tail window.
        if self.arena[me].is_timer && !self.arena[me].data.partial_invalid {
            self.timer_chain.push(me);
            if self.timer_chain.len()
                >= 2 * self.inner_conf.timer_chain_beta as usize
            {
                let s = self.timer_chain.len()
                    - 2 * self.inner_conf.timer_chain_beta as usize;
                let e = self.timer_chain.len()
                    - self.inner_conf.timer_chain_beta as usize;
                let mut lca = self.timer_chain[e - 1];
                for i in s..(e - 1) {
                    // Note that we may have timer_chain blocks that are outside
                    // the genesis tree temporarily.
                    // Therefore we have to deal with the case that lca becomes
                    // NULL
                    if lca == NULL {
                        break;
                    }
                    lca = self.lca(lca, self.timer_chain[i]);
                }
                let last_lca =
                    if let Some(x) = self.timer_chain_accumulative_lca.last() {
                        *x
                    } else {
                        self.cur_era_genesis_block_arena_index
                    };
                // Note that we have the assumption that the force confirmation
                // point will always move along parental edges,
                // i.e., it is not possible for the point
                // to move to a sibling tree. This assumption is true if the
                // timer_chain_beta
                // and the timer_chain_difficulty_ratio are set to large enough
                // values.
                //
                // It is therefore safe here to use the height to compare.
                if lca != NULL
                    && self.arena[last_lca].height < self.arena[lca].height
                {
                    self.timer_chain_accumulative_lca.push(lca);
                } else {
                    self.timer_chain_accumulative_lca.push(last_lca);
                }
                assert_eq!(
                    self.timer_chain_accumulative_lca.len(),
                    self.timer_chain.len()
                        - self.inner_conf.timer_chain_beta as usize
                );
            } else if self.timer_chain.len()
                > self.inner_conf.timer_chain_beta as usize
            {
                self.timer_chain_accumulative_lca
                    .push(self.cur_era_genesis_block_arena_index);
            }
        }
        debug!(
            "Timer chain updated to {:?} accumulated lca {:?}",
            self.timer_chain, self.timer_chain_accumulative_lca
        );
    }
3343
    /// Total number of blocks that have entered the consensus graph so far,
    /// backed by the block-entrance sequence number.
    pub fn total_processed_block_count(&self) -> u64 {
        self.sequence_number_of_block_entrance
    }
3347
3348    pub fn get_trusted_blame_block(
3349        &self, checkpoint_hash: &H256, plus_depth: usize,
3350    ) -> Option<H256> {
3351        let arena_index_opt = self.hash_to_arena_indices.get(checkpoint_hash);
3352        // checkpoint has changed, wait for next checkpoint
3353        if arena_index_opt.is_none() {
3354            debug!(
3355                "get_trusted_blame_block: block {:?} not in consensus",
3356                checkpoint_hash
3357            );
3358            return None;
3359        }
3360        let arena_index = *arena_index_opt.unwrap();
3361        let pivot_index =
3362            self.height_to_pivot_index(self.arena[arena_index].height);
3363        // the given checkpoint hash is invalid
3364        if pivot_index >= self.pivot_chain.len()
3365            || self.pivot_chain[pivot_index] != arena_index
3366        {
3367            debug!(
3368                "get_trusted_blame_block: block {:?} not on pivot chain",
3369                checkpoint_hash
3370            );
3371            return None;
3372        }
3373        self.find_first_index_with_correct_state_of(
3374            pivot_index + plus_depth,
3375            None, /* blame_bound */
3376            0,    /* min_vote_count */
3377        )
3378        .and_then(|index| Some(self.arena[self.pivot_chain[index]].hash))
3379    }
3380
3381    /// Find a trusted blame block for snapshot full sync
3382    pub fn get_trusted_blame_block_for_snapshot(
3383        &self, snapshot_epoch_id: &EpochId,
3384    ) -> Option<H256> {
3385        self.get_trusted_blame_block(
3386            snapshot_epoch_id,
3387            self.data_man.get_snapshot_blame_plus_depth(),
3388        )
3389    }
3390
3391    /// Return the epoch that we are going to sync the state
3392    pub fn get_to_sync_epoch_id(&self) -> EpochId {
3393        let height_to_sync = self.latest_snapshot_height();
3394        // The height_to_sync is within the range of `self.pivit_chain`.
3395        let epoch_to_sync = self.arena
3396            [self.pivot_chain[self.height_to_pivot_index(height_to_sync)]]
3397        .hash;
3398        epoch_to_sync
3399    }
3400
    /// FIXME Use snapshot-related information when we can sync snapshot states.
    /// Return the latest height that a snapshot should be available.
    /// Currently this is simply the current era stable height.
    fn latest_snapshot_height(&self) -> u64 { self.cur_era_stable_height }
3404
3405    fn collect_defer_blocks_missing_execution_commitments(
3406        &self, me: usize,
3407    ) -> Result<Vec<H256>, String> {
3408        let mut cur = self.get_deferred_state_arena_index(me)?;
3409        let mut waiting_blocks = Vec::new();
3410        debug!(
3411            "collect_blocks_missing_execution_commitments: me={}, height={}",
3412            me, self.arena[me].height
3413        );
3414        // FIXME: Same here. Be explicit about whether a checkpoint or a synced
3415        // FIXME: snapshot is requested, and distinguish two cases.
3416        let state_boundary_height =
3417            self.data_man.state_availability_boundary.read().lower_bound;
3418        loop {
3419            let deferred_block_hash = self.arena[cur].hash;
3420
3421            if self
3422                .data_man
3423                .get_epoch_execution_commitment(&deferred_block_hash)
3424                .is_some()
3425                || self.arena[cur].height <= state_boundary_height
3426            {
3427                // This block and the blocks before have been executed or will
3428                // not be executed
3429                break;
3430            }
3431            waiting_blocks.push(deferred_block_hash);
3432            cur = self.arena[cur].parent;
3433        }
3434        waiting_blocks.reverse();
3435        Ok(waiting_blocks)
3436    }
3437
3438    /// Compute missing `state_valid` for `me` and all the precedents.
3439    fn compute_state_valid_and_blame_info(
3440        &mut self, me: usize, executor: &ConsensusExecutor,
3441    ) -> Result<(), String> {
3442        // Collect all precedents whose state_valid is empty, and evaluate them
3443        // in order
3444        let mut blocks_to_compute = Vec::new();
3445        let mut cur = me;
3446        // FIXME: Same here. Be explicit about whether a checkpoint or a synced
3447        // FIXME: snapshot is requested, and distinguish two cases.
3448        //let state_boundary_height =
3449        //    self.data_man.state_availability_boundary.read().lower_bound;
3450        loop {
3451            if self.arena[cur].data.state_valid.is_some() {
3452                break;
3453            }
3454            // FIXME: the assersion isn't on state boundary,
3455            // FIXME: correct the assersion.
3456            // See comments on compute_blame_and_state_with_execution_result()
3457            // for explanation of this assumption.
3458            //assert!(self.arena[cur].height >= state_boundary_height);
3459            blocks_to_compute.push(cur);
3460            cur = self.arena[cur].parent;
3461        }
3462        blocks_to_compute.reverse();
3463
3464        for index in blocks_to_compute {
3465            self.compute_state_valid_and_blame_info_for_block(index, executor)?;
3466        }
3467        Ok(())
3468    }
3469
3470    fn split_root(&mut self, me: usize) {
3471        let parent = self.arena[me].parent;
3472        assert!(parent != NULL);
3473        self.weight_tree.split_root(parent, me);
3474        self.adaptive_tree.split_root(parent, me);
3475        self.arena[me].parent = NULL;
3476    }
3477
    /// Reset the epoch number of the pivot block `pivot_arena_index` and of
    /// every block in its epoch back to the unset sentinel `NULLU64`.
    pub fn reset_epoch_number_in_epoch(&mut self, pivot_arena_index: usize) {
        self.set_epoch_number_in_epoch(pivot_arena_index, NULLU64);
    }
3481
3482    fn set_epoch_number_in_epoch(
3483        &mut self, pivot_arena_index: usize, epoch_number: u64,
3484    ) {
3485        assert!(!self.arena[pivot_arena_index].data.blockset_cleared);
3486        let block_set = self.exchange_or_compute_blockset_in_own_view_of_epoch(
3487            pivot_arena_index,
3488            None,
3489        );
3490        for idx in &block_set {
3491            self.arena[*idx].data.epoch_number = epoch_number
3492        }
3493        self.exchange_or_compute_blockset_in_own_view_of_epoch(
3494            pivot_arena_index,
3495            Some(block_set),
3496        );
3497        self.arena[pivot_arena_index].data.epoch_number = epoch_number;
3498    }
3499
3500    fn get_deferred_state_arena_index(
3501        &self, me: usize,
3502    ) -> Result<usize, String> {
3503        let height = self.arena[me].height;
3504        // We are in the very early of the blockchain, here we can just
3505        // return cur_era_genesis_block_arena_index and it will be the true
3506        // genesis.
3507        if height <= DEFERRED_STATE_EPOCH_COUNT {
3508            return Ok(self.cur_era_genesis_block_arena_index);
3509        }
3510        // This is the case we cannot handle, the block is no longer maintained.
3511        if self.cur_era_genesis_height + DEFERRED_STATE_EPOCH_COUNT > height {
3512            return Err(
3513                "Parent is too old for computing the deferred state".to_owned()
3514            );
3515        }
3516        let target_height = height - DEFERRED_STATE_EPOCH_COUNT;
3517        let pivot_idx = self.height_to_pivot_index(height);
3518        // If it is on the pivot chain already, we can avoid O(log n) lca query
3519        if pivot_idx < self.pivot_chain.len()
3520            && self.pivot_chain[pivot_idx] == me
3521        {
3522            return Ok(
3523                self.pivot_chain[self.height_to_pivot_index(target_height)]
3524            );
3525        } else {
3526            return Ok(self.ancestor_at(me, target_height));
3527        }
3528    }
3529
    /// Find the first state valid block on the pivot chain after
    /// `state_boundary_height` and set `state_valid` of it and its blamed
    /// blocks. This block is found according to blame_ratio.
    pub fn recover_state_valid(&mut self) {
        // FIXME: Same here. Be explicit about whether a checkpoint or a synced
        // FIXME: snapshot is requested, and distinguish two cases.
        let start_pivot_index =
            (self.data_man.state_availability_boundary.read().lower_bound
                - self.cur_era_genesis_height) as usize;
        if start_pivot_index >= self.pivot_chain.len() {
            // TODO: Handle this after refactoring
            // `state_availability_boundary`.
            return;
        }
        let start_epoch_hash =
            self.arena[self.pivot_chain[start_pivot_index]].hash;
        // We will get the first
        // pivot block whose `state_valid` is `true` after `start_epoch_hash`
        // (include `start_epoch_hash` itself).
        let maybe_trusted_blame_block =
            self.get_trusted_blame_block(&start_epoch_hash, 0);
        debug!("recover_state_valid: checkpoint={:?}, maybe_trusted_blame_block={:?}", start_epoch_hash, maybe_trusted_blame_block);

        // Set `state_valid` of `trusted_blame_block` to true,
        // and set that of the blocks blamed by it to false
        if let Some(trusted_blame_block) = maybe_trusted_blame_block {
            let mut cur = *self
                .hash_to_arena_indices
                .get(&trusted_blame_block)
                .unwrap();
            // Walk up the parental chain: each header's `blame` field covers
            // itself (i == 0, state_valid = true) plus `blame` ancestors
            // (state_valid = false); then continue from the next ancestor.
            while cur != NULL {
                let blame = self
                    .data_man
                    .block_header_by_hash(&self.arena[cur].hash)
                    .unwrap()
                    .blame();
                for i in 0..blame + 1 {
                    self.arena[cur].data.state_valid = Some(i == 0);
                    trace!(
                        "recover_state_valid: index={} hash={} state_valid={}",
                        cur,
                        self.arena[cur].hash,
                        i == 0
                    );
                    cur = self.arena[cur].parent;
                    if cur == NULL {
                        break;
                    }
                }
            }
        } else {
            // Recovery is only expected to fail at the true genesis; any
            // other start hash indicates an error.
            if start_epoch_hash != self.data_man.true_genesis.hash() {
                error!(
                    "Fail to recover state_valid: start_epoch_hash={:?}",
                    start_epoch_hash
                );
            }
        }
    }
3589
3590    pub fn block_node(&self, block_hash: &H256) -> Option<&ConsensusGraphNode> {
3591        self.hash_to_arena_indices
3592            .get(block_hash)
3593            .and_then(|arena_index| self.arena.get(*arena_index))
3594    }
3595
    /// Return the list of best terminals when respecting a bound (for
    /// referencing edges). We sort the terminals based on its lca so that
    /// it will not change the parent selection results if we exclude last
    /// few terminals in the sorted order.
    pub fn best_terminals(
        &mut self, best_index: usize, ref_bound: usize,
    ) -> Vec<H256> {
        // Past set of `best_index`, used to skip candidates already in the
        // past of the chosen tip. `pastset_tmp` only exists to keep a
        // freshly computed set alive for the `pastset` borrow.
        let pastset_tmp;
        let pastset = if let Some(s) = self.pastset_cache.get(best_index) {
            s
        } else {
            pastset_tmp = self.compute_pastset_brutal(best_index);
            &pastset_tmp
        };

        // Take the previous lca-height cache out of `self`; a fresh cache is
        // rebuilt below. Old entries are only trusted when strictly below
        // `best_terminals_reorg_height` (heights at or above it may be stale
        // after a reorg).
        let lca_height_cache = mem::replace(
            &mut self.best_terminals_lca_height_cache,
            Default::default(),
        );

        // We prepare a counter_map to denote the number of erased incoming
        // edges for each block.
        let mut counter_map = FastHashMap::new();
        // Max-heap keyed on the negated lca height, so the terminal whose
        // lca with `best_index` is lowest is popped first.
        let mut queue = BinaryHeap::new();
        for hash in self.terminal_hashes.iter() {
            let a_idx = self.hash_to_arena_indices.get(hash).unwrap();
            // NULLU64 marks "not yet computed" here.
            let mut a_lca_height = NULLU64;
            if let Some(h) = lca_height_cache.get(a_idx) {
                if *h < self.best_terminals_reorg_height {
                    a_lca_height = *h;
                }
            }
            if a_lca_height == NULLU64 {
                let a_lca = self.lca(*a_idx, best_index);
                a_lca_height = self.arena[a_lca].height;
            }
            self.best_terminals_lca_height_cache
                .insert(*a_idx, a_lca_height);
            queue.push((-(a_lca_height as i128), *a_idx));
        }

        // The basic idea is to have a loop go over the refs in the priority
        // queue. We remove tips that have the smallest lca height. When
        // we remove a tip, we add those blocks the tip references back
        // to the queue. Eventually, we will get a set of referees
        // that is 1) within the ref_bound and 2) still holding best_index as
        // their parent.
        //
        // Note that we ignore the case where the force confirm mechanism will
        // influence the result here. The idea is that in normal
        // scenarios with good parameter setting. Force confirmation will
        // happen only when a block is already very stable.
        //
        // The second disjunct keeps shrinking while the top of the heap is
        // the out-of-era sentinel key `-(NULLU64 as i128)`, so no out-of-era
        // entry survives into the result.
        while queue.len() > ref_bound
            || queue
                .peek()
                .map_or(false, |(v, _)| *v == -(NULLU64 as i128))
        {
            let (_, idx) = queue.pop().unwrap();
            let parent = self.arena[idx].parent;
            if parent != NULL {
                // Count the erased edge into `parent`; blocks already in the
                // past of `best_index` are never tracked or re-queued.
                if let Some(p) = counter_map.get_mut(&parent) {
                    *p = *p + 1;
                } else if !pastset.contains(parent as u32) {
                    counter_map.insert(parent, 1);
                }
                // `parent` becomes a candidate tip only once every incoming
                // edge (children + referrers) has been erased.
                if let Some(p) = counter_map.get(&parent) {
                    if *p
                        == self.arena[parent].children.len()
                            + self.arena[parent].referrers.len()
                    {
                        // Note that although original terminal_hashes do not
                        // have out-of-era blocks,
                        // we can now get out-of-era blocks. We need to handle
                        // them.
                        if self.arena[parent].era_block == NULL {
                            queue.push((-(NULLU64 as i128), parent));
                        } else {
                            let mut a_lca_height = NULLU64;
                            if let Some(h) = lca_height_cache.get(&parent) {
                                if *h < self.best_terminals_reorg_height {
                                    a_lca_height = *h;
                                }
                            }
                            if a_lca_height == NULLU64 {
                                let a_lca = self.lca(parent, best_index);
                                a_lca_height = self.arena[a_lca].height;
                            }
                            self.best_terminals_lca_height_cache
                                .insert(parent, a_lca_height);
                            queue.push((-(a_lca_height as i128), parent));
                        }
                    }
                }
            }
            // Same bookkeeping for the popped tip's referees.
            for referee in &self.arena[idx].referees {
                if let Some(p) = counter_map.get_mut(referee) {
                    *p = *p + 1;
                } else if !pastset.contains(*referee as u32) {
                    counter_map.insert(*referee, 1);
                }
                if let Some(p) = counter_map.get(referee) {
                    if *p
                        == self.arena[*referee].children.len()
                            + self.arena[*referee].referrers.len()
                    {
                        // Note that although original terminal_hashes do not
                        // have out-of-era blocks,
                        // we can now get out-of-era blocks. We need to handle
                        // them.
                        if self.arena[*referee].era_block == NULL {
                            queue.push((-(NULLU64 as i128), *referee));
                        } else {
                            let mut a_lca_height = NULLU64;
                            if let Some(h) = lca_height_cache.get(referee) {
                                if *h < self.best_terminals_reorg_height {
                                    a_lca_height = *h;
                                }
                            }
                            if a_lca_height == NULLU64 {
                                let a_lca = self.lca(*referee, best_index);
                                a_lca_height = self.arena[a_lca].height;
                            }
                            self.best_terminals_lca_height_cache
                                .insert(*referee, a_lca_height);
                            queue.push((-(a_lca_height as i128), *referee));
                        }
                    }
                }
            }
        }
        // Every cached lca height was refreshed above, so reset the reorg
        // watermark.
        self.best_terminals_reorg_height = NULLU64;
        let bounded_hashes =
            queue.iter().map(|(_, b)| self.arena[*b].hash).collect();
        bounded_hashes
    }
3731
3732    pub fn finish_block_recovery(&mut self) { self.header_only = false; }
3733
3734    pub fn get_pivot_chain_and_weight(
3735        &self, height_range: Option<(u64, u64)>,
3736    ) -> Result<Vec<(H256, U256)>, String> {
3737        let min_height = self.get_cur_era_genesis_height();
3738        let max_height = self.arena[*self.pivot_chain.last().unwrap()].height;
3739        let (start, end) = height_range.unwrap_or((min_height, max_height));
3740        if start < min_height || end > max_height {
3741            bail!(
3742                "height_range out of bound: requested={:?} min={} max={}",
3743                height_range,
3744                min_height,
3745                max_height
3746            );
3747        }
3748
3749        let mut chain = Vec::new();
3750        for i in start..=end {
3751            let pivot_arena_index = self.get_pivot_block_arena_index(i);
3752            chain.push((
3753                self.arena[pivot_arena_index].hash.into(),
3754                (self.weight_tree.get(pivot_arena_index) as u64).into(),
3755            ));
3756        }
3757        Ok(chain)
3758    }
3759
3760    /// Return `None` if `root_block` is not in consensus.
3761    pub fn get_subtree(&self, root_block: &H256) -> Option<Vec<H256>> {
3762        let root_arena_index = *self.hash_to_arena_indices.get(root_block)?;
3763        let mut queue = VecDeque::new();
3764        let mut subtree = Vec::new();
3765        queue.push_back(root_arena_index);
3766        while let Some(i) = queue.pop_front() {
3767            subtree.push(self.arena[i].hash);
3768            for child in &self.arena[i].children {
3769                queue.push_back(*child);
3770            }
3771        }
3772        Some(subtree)
3773    }
3774
3775    pub fn get_next_pivot_decision(
3776        &self, parent_decision_hash: &H256, confirmed_height: u64,
3777    ) -> Option<(u64, H256)> {
3778        let r = match self.hash_to_arena_indices.get(parent_decision_hash) {
3779            None => {
3780                // parent_decision is before the current checkpoint, so we just
3781                // choose the latest block as a new pivot
3782                // decision.
3783                let new_decision_height = (confirmed_height.saturating_sub(
3784                    self.inner_conf
3785                        .pos_pivot_decision_defer_epoch_count(confirmed_height),
3786                )) / POS_TERM_EPOCHS
3787                    * POS_TERM_EPOCHS;
3788                if new_decision_height <= self.cur_era_genesis_height {
3789                    None
3790                } else {
3791                    let new_decision_arena_index =
3792                        self.get_pivot_block_arena_index(new_decision_height);
3793                    Some((
3794                        self.arena[new_decision_arena_index].height,
3795                        self.arena[new_decision_arena_index].hash,
3796                    ))
3797                }
3798            }
3799            Some(parent_decision) => {
3800                let parent_decision_height =
3801                    self.arena[*parent_decision].height;
3802                assert_eq!(parent_decision_height % POS_TERM_EPOCHS, 0);
3803                // `parent` should be on the pivot chain.
3804                if self.get_pivot_block_arena_index(parent_decision_height)
3805                    == *parent_decision
3806                {
3807                    // TODO(lpl): Use confirmed epoch with a delay in
3808                    // pos-finality spec.
3809                    let new_decision_height =
3810                        (confirmed_height.saturating_sub(
3811                            self.inner_conf
3812                                .pos_pivot_decision_defer_epoch_count(
3813                                    confirmed_height,
3814                                ),
3815                        )) / POS_TERM_EPOCHS
3816                            * POS_TERM_EPOCHS;
3817                    if new_decision_height <= parent_decision_height {
3818                        None
3819                    } else {
3820                        let new_decision_arena_index = self
3821                            .get_pivot_block_arena_index(new_decision_height);
3822                        Some((
3823                            self.arena[new_decision_arena_index].height,
3824                            self.arena[new_decision_arena_index].hash,
3825                        ))
3826                    }
3827                } else {
3828                    None
3829                }
3830            }
3831        };
3832        debug!(
3833            "next_pivot_decision: parent={:?} return={:?}",
3834            parent_decision_hash, r
3835        );
3836        r
3837    }
3838
3839    pub fn validate_pivot_decision(
3840        &self, ancestor_hash: &H256, me_hash: &H256,
3841    ) -> bool {
3842        if ancestor_hash == me_hash {
3843            return true;
3844        }
3845        debug!(
3846            "validate_pivot_decision: ancestor={:?}, me={:?}",
3847            ancestor_hash, me_hash
3848        );
3849        match (
3850            self.hash_to_arena_indices.get(ancestor_hash),
3851            self.hash_to_arena_indices.get(me_hash),
3852        ) {
3853            (Some(ancestor), Some(me)) => {
3854                if self.arena[*me].height % POS_TERM_EPOCHS != 0 {
3855                    return false;
3856                }
3857                // Both in memory. Just use Link-Cut-Tree.
3858                self.ancestor_at(*me, self.arena[*ancestor].height) == *ancestor
3859            }
3860            // TODO(lpl): Check if it's possible to go beyond checkpoint.
3861            // TODO(lpl): If we want to check ancestor and me are both on pivot
3862            // chain, we might need to always persist
3863            // block_execution_result for full nodes. Or include
3864            // height in the validation?
3865            (_, Some(_me)) => {
3866                // This should not happen after catching up for a normal node.
3867                if !self.header_only {
3868                    warn!(
3869                        "ancestor not in consensus graph: processed={}",
3870                        self.pivot_block_processed(ancestor_hash)
3871                    );
3872                }
3873                // ancestor is before checkpoint and is on pivot chain, so me
3874                // must be in the subtree.
3875                true
3876            }
3877            (_, _) => {
3878                // This should not happen after catching up for a normal node.
3879                if !self.header_only {
3880                    warn!("ancestor and me are both not in consensus graph, processed={} {}", self.pivot_block_processed(ancestor_hash), self.pivot_block_processed(me_hash));
3881                }
3882                true
3883            }
3884        }
3885    }
3886
3887    /// Return error if the header does not exist or the header does not have
3888    /// pos_reference or the pos_reference does not exist.
3889    fn get_pos_reference_pivot_decision(
3890        &self, block_hash: &H256,
3891    ) -> Result<H256, String> {
3892        let pos_reference = self
3893            .data_man
3894            .pos_reference_by_hash(block_hash)
3895            .ok_or("header exist".to_string())?
3896            .ok_or("pos reference checked in sync graph".to_string())?;
3897        self.pos_verifier
3898            .get_pivot_decision(&pos_reference)
3899            .ok_or("pos validity checked in sync graph".to_string())
3900    }
3901
3902    fn update_pos_pivot_decision(&mut self, me: usize) {
3903        let h = self.arena[me].hash;
3904        if let Ok(pivot_decision) = self.get_pos_reference_pivot_decision(&h) {
3905            let pivot_decision_height = self
3906                .data_man
3907                .block_height_by_hash(&pivot_decision)
3908                .expect("pos_reference checked");
3909            if pivot_decision_height > self.best_pos_pivot_decision.1 {
3910                self.best_pos_pivot_decision =
3911                    (pivot_decision, pivot_decision_height);
3912            }
3913        }
3914    }
3915
    // TODO(lpl): Copied from `check_mining_adaptive_block`.
    /// Return possibly new parent.
    ///
    /// Builds the anticone of a hypothetical new block on top of
    /// `parent_arena_index` with the given referees, derives the anticone
    /// barrier and timer-chain tuple from it, and delegates to
    /// `choose_correct_parent_impl` for the final selection.
    pub fn choose_correct_parent(
        &mut self, parent_arena_index: usize, referee_indices: Vec<usize>,
        pos_reference: Option<PosBlockId>,
    ) -> usize {
        // We first compute anticone barrier for newly mined block
        let parent_anticone_opt = self.anticone_cache.get(parent_arena_index);
        let mut anticone;
        if parent_anticone_opt.is_none() {
            // Cache miss: brute-force the parent's anticone, then add the
            // parent's future to obtain the new block's anticone.
            anticone = consensus_new_block_handler::ConsensusNewBlockHandler::compute_anticone_bruteforce(
                self, parent_arena_index,
            );
            for i in self.compute_future_bitset(parent_arena_index) {
                anticone.add(i);
            }
        } else {
            // Cache hit: future(parent) plus the cached anticone(parent).
            anticone = self.compute_future_bitset(parent_arena_index);
            for index in parent_anticone_opt.unwrap() {
                anticone.add(*index as u32);
            }
        }
        // Collect the past of the referees (walking parent/referee edges,
        // but only continuing through blocks in the anticone or out-of-era
        // blocks) and remove it from the anticone: referenced blocks are in
        // the new block's past, not its anticone.
        let mut my_past = BitSet::new();
        let mut queue: VecDeque<usize> = VecDeque::new();
        for index in &referee_indices {
            queue.push_back(*index);
        }
        while let Some(index) = queue.pop_front() {
            if my_past.contains(index as u32) {
                continue;
            }
            my_past.add(index as u32);
            let idx_parent = self.arena[index].parent;
            if idx_parent != NULL {
                if anticone.contains(idx_parent as u32)
                    || self.arena[idx_parent].era_block == NULL
                {
                    queue.push_back(idx_parent);
                }
            }
            for referee in &self.arena[index].referees {
                if anticone.contains(*referee as u32)
                    || self.arena[*referee].era_block == NULL
                {
                    queue.push_back(*referee);
                }
            }
        }
        for index in my_past.drain() {
            anticone.remove(index);
        }

        // The barrier is the set of in-era anticone members whose parent is
        // outside the anticone (i.e. the anticone's subtree roots).
        let mut anticone_barrier = BitSet::new();
        for index in (&anticone).iter() {
            let parent = self.arena[index as usize].parent as u32;
            if self.arena[index as usize].era_block != NULL
                && !anticone.contains(parent)
            {
                anticone_barrier.add(index);
            }
        }

        let timer_chain_tuple = self.compute_timer_chain_tuple(
            parent_arena_index,
            &referee_indices,
            Some(&anticone),
        );

        self.choose_correct_parent_impl(
            parent_arena_index,
            &anticone_barrier,
            &timer_chain_tuple,
            pos_reference,
        )
    }
3991
    /// Pick the parent consistent with the force-confirmed block.
    ///
    /// Returns `parent` unchanged when it is a descendant of the
    /// force-confirmed block; otherwise re-selects the pivot chain from the
    /// force-confirmed block downward by heaviest subtree weight, with the
    /// anticone-barrier subtree weights temporarily subtracted.
    fn choose_correct_parent_impl(
        &mut self, parent: usize, anticone_barrier: &BitSet,
        timer_chain_tuple: &(u64, HashMap<usize, u64>, Vec<usize>, Vec<usize>),
        pos_reference: Option<PosBlockId>,
    ) -> usize {
        let force_confirm =
            self.compute_block_force_confirm(timer_chain_tuple, pos_reference);
        let force_confirm_height = self.arena[force_confirm].height;

        if self.ancestor_at(parent, force_confirm_height) == force_confirm {
            // `parent` is the correct parent.
            return parent;
        }

        // Temporarily subtract each barrier block's subtree weight so the
        // anticone does not influence pivot selection; restored below.
        let mut weight_delta = HashMap::new();

        for index in anticone_barrier.iter() {
            assert!(!self.is_legacy_block(index as usize));
            weight_delta
                .insert(index as usize, self.weight_tree.get(index as usize));
        }

        for (index, delta) in &weight_delta {
            self.weight_tree.path_apply(*index, -*delta);
        }

        let mut new_parent = force_confirm;
        // Recursively find the correct pivot chain with the heaviest subtree
        // weight.
        while !self.arena[new_parent].children.is_empty() {
            let mut children = self.arena[new_parent].children.clone();
            let mut pivot = children.pop().expect("non-empty");
            for child in children {
                // `is_heavier` breaks weight ties by block hash.
                if ConsensusGraphInner::is_heavier(
                    (self.weight_tree.get(child), &self.arena[child].hash),
                    (self.weight_tree.get(pivot), &self.arena[pivot].hash),
                ) {
                    pivot = child;
                }
            }
            new_parent = pivot;
        }

        // Restore the weights removed above.
        for (index, delta) in &weight_delta {
            self.weight_tree.path_apply(*index, *delta);
        }

        new_parent
    }
4041
4042    pub fn pivot_block_processed(&self, pivot_hash: &H256) -> bool {
4043        if let Some(height) = self.data_man.block_height_by_hash(pivot_hash) {
4044            if let Ok(epoch_hash) =
4045                self.get_pivot_hash_from_epoch_number(height)
4046            {
4047                if epoch_hash == *pivot_hash {
4048                    return true;
4049                } else {
4050                    debug!(
4051                        "pivot_block_processed: {:?} is not on pivot chain",
4052                        pivot_hash
4053                    );
4054                }
4055            } else {
4056                debug!("pivot_block_processed: epoch not processed height={:?} pivot={:?}", height, pivot_hash);
4057            }
4058        } else {
4059            debug!("pivot_block_processed: {:?} is not processed", pivot_hash);
4060        }
4061        false
4062    }
4063
4064    /// Return if a block has been confirmed by the pivot decision by the latest
4065    /// committed PoS block.
4066    ///
4067    /// This function needs persisted `BlockExecutionResult` to respond
4068    /// correctly for blocks before the checkpoint. If the data are not
4069    /// persisted, it will return `false` for blocks before the checkpoint even
4070    /// though they have been confirmed.
4071    pub fn is_confirmed_by_pos(&self, block_hash: &H256) -> bool {
4072        let epoch_number = match self.get_block_epoch_number(block_hash) {
4073            Some(epoch_number) => epoch_number,
4074            None => match self
4075                .data_man
4076                .block_execution_result_by_hash_from_db(block_hash)
4077            {
4078                Some(r) => {
4079                    let epoch_hash = r.0;
4080                    self.data_man
4081                        .block_height_by_hash(&epoch_hash)
4082                        .expect("executed header exists")
4083                }
4084                // We cannot find the BlockExecutionResult.
4085                None => return false,
4086            },
4087        };
4088        // All epochs before the confirmed epoch are regarded PoS-confirmed.
4089        epoch_number <= self.best_pos_pivot_decision.1
4090    }
4091
    /// Return the latest PoS pivot decision processed in ConsensusGraph,
    /// as a `(block_hash, block_height)` pair.
    pub fn latest_epoch_confirmed_by_pos(&self) -> &(H256, u64) {
        &self.best_pos_pivot_decision
    }
4096}
4097
4098impl pow::ConsensusProvider for &ConsensusGraphInner {
4099    fn num_blocks_in_epoch(&self, h: &H256) -> u64 {
4100        let index = self.hash_to_arena_indices.get(h).unwrap(); // TODO handle None
4101        let parent = self.arena[*index].parent;
4102        (self.arena[*index].past_num_blocks
4103            - self.arena[parent].past_num_blocks) as u64
4104    }
4105
4106    fn block_header_by_hash(&self, hash: &H256) -> Option<Arc<BlockHeader>> {
4107        self.data_man.block_header_by_hash(hash)
4108    }
4109}
4110
// Blocks live in a slab arena, so a graph node is identified by its arena
// slot index.
impl Graph for ConsensusGraphInner {
    type NodeIndex = usize;
}
4114
4115impl TreeGraph for ConsensusGraphInner {
4116    fn parent(&self, node_index: Self::NodeIndex) -> Option<Self::NodeIndex> {
4117        if self.arena[node_index].parent != NULL {
4118            Some(self.arena[node_index].parent)
4119        } else {
4120            None
4121        }
4122    }
4123
4124    fn referees(&self, node_index: Self::NodeIndex) -> Vec<Self::NodeIndex> {
4125        self.arena[node_index].referees.clone()
4126    }
4127}
4128
4129impl RichTreeGraph for ConsensusGraphInner {
4130    fn children(&self, node_index: Self::NodeIndex) -> Vec<Self::NodeIndex> {
4131        self.arena[node_index].children.clone()
4132    }
4133
4134    fn referrers(&self, node_index: Self::NodeIndex) -> Vec<Self::NodeIndex> {
4135        self.arena[node_index].referrers.clone()
4136    }
4137}
4138
4139impl StateMaintenanceTrait for ConsensusGraphInner {
4140    fn get_pivot_hash_from_epoch_number(
4141        &self, epoch_number: u64,
4142    ) -> Result<EpochId, String> {
4143        ConsensusGraphInner::get_pivot_hash_from_epoch_number(
4144            self,
4145            epoch_number,
4146        )
4147        .map_err(|e| e.to_string())
4148    }
4149
4150    fn get_epoch_execution_commitment_with_db(
4151        &self, block_hash: &EpochId,
4152    ) -> Option<EpochExecutionCommitment> {
4153        self.data_man
4154            .get_epoch_execution_commitment_with_db(block_hash)
4155    }
4156}